Merge "Sync oslo incubator"

This commit is contained in:
Jenkins 2014-04-15 15:04:44 +00:00 committed by Gerrit Code Review
commit fc5470ea21
64 changed files with 1030 additions and 492 deletions

View File

@ -0,0 +1,17 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
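For context on how this shim gets used: once any test module imports this file, a single six.moves import resolves to the right mox on both Python majors. A minimal sketch (the consuming code below is illustrative, not part of this change):

    import six
    six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))

    # A single import now works on both majors:
    from six.moves import mox  # the mox package on py2, mox3.mox on py3

    stubs = mox.Mox()  # same Mox API either way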

View File

@ -1,5 +1,5 @@
#
# Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -19,6 +19,7 @@
from __future__ import print_function
import argparse
import imp
import os
import re
@ -28,6 +29,7 @@ import textwrap
from oslo.config import cfg
import six
import stevedore.named
from heat.openstack.common import gettextutils
from heat.openstack.common import importutils
@ -39,6 +41,7 @@ BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
@ -47,11 +50,12 @@ OPT_TYPES = {
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
DICTOPT: 'dict value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT,
FLOATOPT, LISTOPT, DICTOPT,
MULTISTROPT]))
PY_EXT = ".py"
@ -60,34 +64,60 @@ BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
WORDWRAP_WIDTH = 60
def generate(srcfiles):
def raise_extension_exception(extmanager, ep, err):
raise
def generate(argv):
parser = argparse.ArgumentParser(
description='generate sample configuration file',
)
parser.add_argument('-m', dest='modules', action='append')
parser.add_argument('-l', dest='libraries', action='append')
parser.add_argument('srcfiles', nargs='*')
parsed_args = parser.parse_args(argv)
mods_by_pkg = dict()
for filepath in srcfiles:
for filepath in parsed_args.srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
pkg_names.sort()
ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
ext_names.sort()
pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
extra_modules = os.getenv("HEAT_CONFIG_GENERATOR_EXTRA_MODULES", "")
if extra_modules:
for module_name in extra_modules.split(','):
module_name = module_name.strip()
if parsed_args.modules:
for module_name in parsed_args.modules:
module = _import_module(module_name)
if module:
for group, opts in _list_opts(module):
opts_by_group.setdefault(group, []).append((module_name,
opts))
# Look for entry points defined in libraries (or applications) for
# option discovery, and include their return values in the output.
#
# Each entry point should be a function returning an iterable
# of pairs with the group name (or None for the default group)
# and the list of Opt instances for that group.
if parsed_args.libraries:
loader = stevedore.named.NamedExtensionManager(
'oslo.config.opts',
names=list(set(parsed_args.libraries)),
invoke_on_load=False,
on_load_failure_callback=raise_extension_exception
)
for ext in loader:
for group, opts in ext.plugin():
opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
opt_list.append((ext.name, opts))
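The two blocks above give the generator its inputs: explicit modules via -m, and libraries via -l, the latter resolved through the oslo.config.opts entry point namespace with the contract spelled out in the comment. A hedged sketch of such an entry point in a library (package, option, and entry point names are hypothetical):

    # mylib/opts.py
    from oslo.config import cfg

    _default_opts = [
        cfg.StrOpt('endpoint', default='http://localhost:8004',
                   help='Where the service listens.'),
    ]

    def list_opts():
        # One (group, opts) pair per group; None maps to [DEFAULT].
        return [(None, _default_opts)]

The library would expose this in its setup.cfg under oslo.config.opts (e.g. mylib = mylib.opts:list_opts), and the generator would pick it up via '-l mylib'.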
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
@ -121,8 +151,10 @@ def _import_module(mod_str):
def _is_in_group(opt, group):
"Check if opt is in group."
for key, value in group._opts.items():
if value['opt'] == opt:
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
if not(value['opt'] != opt):
return True
return False
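The double negation is deliberate: as the NOTE says, pre-fix oslo.config Opt objects implemented __ne__ but not __eq__, so == silently fell back to identity comparison. A minimal illustration (the class below only mimics that behavior; it is not oslo.config code):

    class LegacyOpt(object):
        # Custom __ne__, no __eq__ -- the pre-bug-1262148 situation.
        def __init__(self, name):
            self.name = name

        def __ne__(self, other):
            return self.name != other.name

    a, b = LegacyOpt('debug'), LegacyOpt('debug')
    print(a == b)        # False: distinct instances, identity fallback
    print(not (a != b))  # True: routed through the custom __ne__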
@ -133,7 +165,7 @@ def _guess_groups(opt, mod_obj):
return 'DEFAULT'
# what other groups is it in?
for key, value in cfg.CONF.items():
for value in cfg.CONF.values():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
@ -202,7 +234,7 @@ def _sanitize_default(name, value):
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value == socket.gethostname() and 'host' in name:
elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
return 'heat'
elif value.strip() != value:
return '"%s"' % value
@ -220,7 +252,8 @@ def _print_opt(opt):
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help += ' (' + OPT_TYPES[opt_type] + ')'
opt_help = u'%s (%s)' % (opt_help,
OPT_TYPES[opt_type])
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
if opt.deprecated_opts:
for deprecated_opt in opt.deprecated_opts:
@ -250,6 +283,11 @@ def _print_opt(opt):
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
elif opt_type == DICTOPT:
assert(isinstance(opt_default, dict))
opt_default_strlist = [str(key) + ':' + str(value)
for (key, value) in opt_default.items()]
print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
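The DICTOPT branch added above flattens a dict default into comma-separated key:value pairs. For a hypothetical option the emitted sample line would look like this (dict iteration order is not guaranteed):

    # cfg.DictOpt('driver_map', default={'cpu': 'kvm', 'net': 'ovs'})
    # renders in the generated sample as:
    #driver_map=cpu:kvm,net:ovs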

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@ -26,7 +25,7 @@ import uuid
def generate_request_id():
return 'req-%s' % str(uuid.uuid4())
return b'req-' + str(uuid.uuid4()).encode('ascii')
class RequestContext(object):
@ -99,3 +98,14 @@ def get_context_from_function_and_args(function, args, kwargs):
return arg
return None
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True

View File

@ -1,4 +1,3 @@
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may

View File

@ -1,4 +1,3 @@
#
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#

View File

@ -1,4 +1,3 @@
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.

View File

@ -1,4 +1,3 @@
#
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
@ -71,7 +70,7 @@ class ModelBase(six.Iterator):
return []
def __iter__(self):
columns = dict(object_mapper(self).columns).keys()
columns = list(dict(object_mapper(self).columns).keys())
# NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name'
# property for an Instance.

View File

@ -1,4 +1,3 @@
#
# Copyright 2013 Mirantis.inc
# All Rights Reserved.
#

View File

@ -1,4 +1,3 @@
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@ -368,7 +367,7 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
return [columns]
return columns[len(uniqbase):].split("0")[1:]
if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
if engine_name not in ("ibm_db_sa", "mysql", "sqlite", "postgresql"):
return
# FIXME(johannes): The usage of the .message attribute has been
@ -490,7 +489,7 @@ def _thread_yield(dbapi_con, con_record):
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL and DB2 connections are alive.
"""Ensures that MySQL, PostgreSQL or DB2 connections are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
@ -646,7 +645,7 @@ def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
if engine.name in ['mysql', 'ibm_db_sa']:
if engine.name in ('ibm_db_sa', 'mysql', 'postgresql'):
ping_callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', ping_callback)
if engine.name == 'mysql':

View File

@ -1,4 +1,3 @@
#
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.

View File

@ -1,4 +1,3 @@
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
@ -255,6 +254,14 @@ def get_table(engine, name):
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
.. warning::
Do not use this method when creating ForeignKeys in database migrations
because sqlalchemy needs the same MetaData object to hold information
about the parent table and the reference table in the ForeignKey. This
method uses a unique MetaData object per table object so it won't work
with ForeignKey creation.
"""
metadata = MetaData()
metadata.bind = engine
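To make the new warning concrete: for ForeignKey work in a migration, both tables should be reflected against one shared MetaData rather than fetched through get_table() twice. A hedged sketch using the sqlalchemy-migrate constraint helper (table and column names are illustrative):

    from migrate import ForeignKeyConstraint
    from sqlalchemy import MetaData, Table

    def upgrade(migrate_engine):
        meta = MetaData(bind=migrate_engine)  # one MetaData for both sides
        stack = Table('stack', meta, autoload=True)
        resource = Table('resource', meta, autoload=True)

        ForeignKeyConstraint(columns=[resource.c.stack_id],
                             refcolumns=[stack.c.id]).create()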

View File

@ -1,4 +1,3 @@
#
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@ -30,7 +29,7 @@ import eventlet.backdoor
import greenlet
from oslo.config import cfg
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _LI
from heat.openstack.common import log as logging
help_for_backdoor_port = (
@ -138,8 +137,10 @@ def initialize_if_enabled():
# In the case of backdoor port being zero, a port number is assigned by
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()})
LOG.info(
_LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()}
)
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# Copyright 2012, Red Hat, Inc.
#
@ -25,7 +24,7 @@ import traceback
import six
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _LE
class save_and_reraise_exception(object):
@ -50,9 +49,22 @@ class save_and_reraise_exception(object):
decide_if_need_reraise()
if not should_be_reraised:
ctxt.reraise = False
If another exception occurs and reraise flag is False,
the saved exception will not be logged.
If the caller wants to raise new exception during exception handling
he/she sets reraise to False initially with an ability to set it back to
True if needed::
except Exception:
with save_and_reraise_exception(reraise=False) as ctxt:
[if statements to determine whether to raise a new exception]
# Not raising a new exception, so reraise
ctxt.reraise = True
"""
def __init__(self):
self.reraise = True
def __init__(self, reraise=True):
self.reraise = reraise
def __enter__(self):
self.type_, self.value, self.tb, = sys.exc_info()
@ -60,10 +72,11 @@ class save_and_reraise_exception(object):
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
logging.error(_('Original exception being dropped: %s'),
traceback.format_exception(self.type_,
self.value,
self.tb))
if self.reraise:
logging.error(_LE('Original exception being dropped: %s'),
traceback.format_exception(self.type_,
self.value,
self.tb))
return False
if self.reraise:
six.reraise(self.type_, self.value, self.tb)
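A compact usage sketch of the reraise=False flow the updated docstring describes (the helpers are hypothetical):

    from heat.openstack.common import excutils

    def handle(item):
        try:
            do_work(item)  # hypothetical
        except Exception:
            with excutils.save_and_reraise_exception() as ctxt:
                if can_recover(item):  # hypothetical
                    # Swallow the original exception instead of re-raising.
                    ctxt.reraise = False
                    schedule_retry(item)  # hypothetical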
@ -89,8 +102,8 @@ def forever_retry_uncaught_exceptions(infunc):
if (cur_time - last_log_time > 60 or
this_exc_message != last_exc_message):
logging.exception(
_('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
_LE('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
last_log_time = cur_time
last_exc_message = this_exc_message
exc_count = 0

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@ -20,7 +19,6 @@ import os
import tempfile
from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@ -60,7 +58,7 @@ def read_cached_file(filename, force_reload=False):
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug(_("Reloading cached file %s") % filename)
LOG.debug("Reloading cached file %s" % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime

View File

@ -21,16 +21,10 @@ import six
class Config(fixtures.Fixture):
"""Override some configuration values.
"""Allows overriding configuration settings for the test.
The keyword arguments are the names of configuration options to
override and their values.
`conf` will be reset on cleanup.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the reset() method, which is registered by addCleanup().
"""
def __init__(self, conf=cfg.CONF):
@ -38,9 +32,54 @@ class Config(fixtures.Fixture):
def setUp(self):
super(Config, self).setUp()
# NOTE(morganfainberg): unregister must be added to cleanup before
# reset is because cleanup works in reverse order of registered items,
# and a reset must occur before unregistering options can occur.
self.addCleanup(self._unregister_config_opts)
self.addCleanup(self.conf.reset)
self._registered_config_opts = {}
def config(self, **kw):
"""Override configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a `group` argument is supplied, the overrides are applied to
the specified configuration option group, otherwise the overrides
are applied to the ``default`` group.
"""
group = kw.pop('group', None)
for k, v in six.iteritems(kw):
self.conf.set_override(k, v, group)
def _unregister_config_opts(self):
for group in self._registered_config_opts:
self.conf.unregister_opts(self._registered_config_opts[group],
group=group)
def register_opt(self, opt, group=None):
"""Register a single option for the test run.
Options registered in this manner will automatically be unregistered
during cleanup.
If a `group` argument is supplied, it will register the new option
to that group, otherwise the option is registered to the ``default``
group.
"""
self.conf.register_opt(opt, group=group)
self._registered_config_opts.setdefault(group, set()).add(opt)
def register_opts(self, opts, group=None):
"""Register multiple options for the test run.
This works in the same manner as register_opt() but takes a list of
options as the first argument. All arguments will be registered to the
same group if the ``group`` argument is supplied, otherwise all options
will be registered to the ``default`` group.
"""
for opt in opts:
self.register_opt(opt, group=group)
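Putting the fixture's three entry points together, a hedged test sketch (the option is illustrative and the import path assumes the oslo-incubator fixture layout under heat/openstack/common/fixture/):

    import testtools
    from oslo.config import cfg

    from heat.openstack.common.fixture import config

    class ConfigFixtureTest(testtools.TestCase):
        def test_override(self):
            fix = self.useFixture(config.Config())
            # register_opt() entries are unregistered again on cleanup.
            fix.register_opt(cfg.StrOpt('scratch_dir', default='/tmp'))
            # config() overrides are reset on cleanup as well.
            fix.config(scratch_dir='/var/tmp')
            self.assertEqual('/var/tmp', cfg.CONF.scratch_dir)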

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@ -49,4 +48,4 @@ class LockFixture(fixtures.Fixture):
def setUp(self):
super(LockFixture, self).setUp()
self.addCleanup(self.mgr.__exit__, None, None, None)
self.mgr.__enter__()
self.lock = self.mgr.__enter__()

View File

@ -0,0 +1,34 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
def get_logging_handle_error_fixture():
"""returns a fixture to make logging raise formatting exceptions.
Usage:
self.useFixture(logging.get_logging_handle_error_fixture())
"""
return fixtures.MonkeyPatch('logging.Handler.handleError',
_handleError)
def _handleError(self, record):
"""Monkey patch for logging.Handler.handleError.
The default handleError just logs the error to stderr but we want
the option of actually raising an exception.
"""
raise

View File

@ -1,4 +1,3 @@
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
@ -16,6 +15,17 @@
# License for the specific language governing permissions and limitations
# under the License.
##############################################################################
##############################################################################
##
## DO NOT MODIFY THIS FILE
##
## This file is being graduated to the heattest library. Please make all
## changes there, and only backport critical fixes here. - dhellmann
##
##############################################################################
##############################################################################
import fixtures
import mock
@ -23,14 +33,15 @@ import mock
class PatchObject(fixtures.Fixture):
"""Deal with code around mock."""
def __init__(self, obj, attr, **kwargs):
def __init__(self, obj, attr, new=mock.DEFAULT, **kwargs):
self.obj = obj
self.attr = attr
self.kwargs = kwargs
self.new = new
def setUp(self):
super(PatchObject, self).setUp()
_p = mock.patch.object(self.obj, self.attr, **self.kwargs)
_p = mock.patch.object(self.obj, self.attr, self.new, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
@ -39,12 +50,13 @@ class Patch(fixtures.Fixture):
"""Deal with code around mock.patch."""
def __init__(self, obj, **kwargs):
def __init__(self, obj, new=mock.DEFAULT, **kwargs):
self.obj = obj
self.kwargs = kwargs
self.new = new
def setUp(self):
super(Patch, self).setUp()
_p = mock.patch(self.obj, **self.kwargs)
_p = mock.patch(self.obj, self.new, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)

View File

@ -1,4 +1,3 @@
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
@ -16,8 +15,19 @@
# License for the specific language governing permissions and limitations
# under the License.
##############################################################################
##############################################################################
##
## DO NOT MODIFY THIS FILE
##
## This file is being graduated to the heattest library. Please make all
## changes there, and only backport critical fixes here. - dhellmann
##
##############################################################################
##############################################################################
import fixtures
import mox
from six.moves import mox
class MoxStubout(fixtures.Fixture):

View File

@ -1,4 +1,3 @@
#
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
@ -29,7 +28,6 @@ import gettext
import locale
from logging import handlers
import os
import re
from babel import localedata
import six
@ -249,47 +247,22 @@ class Message(six.text_type):
if other is None:
params = (other,)
elif isinstance(other, dict):
params = self._trim_dictionary_parameters(other)
# Merge the dictionaries
# Copy each item in case one does not support deep copy.
params = {}
if isinstance(self.params, dict):
for key, val in self.params.items():
params[key] = self._copy_param(val)
for key, val in other.items():
params[key] = self._copy_param(val)
else:
params = self._copy_param(other)
return params
def _trim_dictionary_parameters(self, dict_param):
"""Return a dict that only has matching entries in the msgid."""
# NOTE(luisg): Here we trim down the dictionary passed as parameters
# to avoid carrying a lot of unnecessary weight around in the message
# object, for example if someone passes in Message() % locals() but
# only some params are used, and additionally we prevent errors for
# non-deepcopyable objects by unicoding() them.
# Look for %(param) keys in msgid;
# Skip %% and deal with the case where % is first character on the line
keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
# If we don't find any %(param) keys but have a %s
if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
# Apparently the full dictionary is the parameter
params = self._copy_param(dict_param)
else:
params = {}
# Save our existing parameters as defaults to protect
# ourselves from losing values if we are called through an
# (erroneous) chain that builds a valid Message with
# arguments, and then does something like "msg % kwds"
# where kwds is an empty dictionary.
src = {}
if isinstance(self.params, dict):
src.update(self.params)
src.update(dict_param)
for key in keys:
params[key] = self._copy_param(src[key])
return params
def _copy_param(self, param):
try:
return copy.deepcopy(param)
except TypeError:
except Exception:
# Fallback to casting to unicode this will handle the
# python code-like objects that can't be deep-copied
return six.text_type(param)
@ -301,13 +274,14 @@ class Message(six.text_type):
def __radd__(self, other):
return self.__add__(other)
def __str__(self):
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
# and it expects specifically a UnicodeError in order to proceed.
msg = _('Message objects do not support str() because they may '
'contain non-ascii characters. '
'Please use unicode() or translate() instead.')
raise UnicodeError(msg)
if six.PY2:
def __str__(self):
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
# and it expects specifically a UnicodeError in order to proceed.
msg = _('Message objects do not support str() because they may '
'contain non-ascii characters. '
'Please use unicode() or translate() instead.')
raise UnicodeError(msg)
def get_available_languages(domain):

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@ -59,6 +58,13 @@ def import_module(import_str):
return sys.modules[import_str]
def import_versioned_module(version, submodule=None):
module = 'heat.v%s' % version
if submodule:
module = '.'.join((module, submodule))
return import_module(module)
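A short usage sketch for the new helper; whether a heat.v1 package actually exists is outside this diff:

    from heat.openstack.common import importutils

    v1 = importutils.import_versioned_module(1)              # heat.v1
    shell = importutils.import_versioned_module(1, 'shell')  # heat.v1.shell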
def try_import(import_str, default=None):
"""Try to import a module and if it fails return default."""
try:

View File

@ -1,4 +1,3 @@
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
@ -37,17 +36,9 @@ import functools
import inspect
import itertools
import json
try:
import xmlrpclib
except ImportError:
# NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3
# however the function and object call signatures
# remained the same. This whole try/except block should
# be removed and replaced with a call to six.moves once
# six 1.4.2 is released. See http://bit.ly/1bqrVzu
import xmlrpc.client as xmlrpclib
import six
import six.moves.xmlrpc_client as xmlrpclib
from heat.openstack.common import gettextutils
from heat.openstack.common import importutils

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@ -16,6 +15,7 @@
import contextlib
import errno
import fcntl
import functools
import os
import shutil
@ -29,7 +29,7 @@ import weakref
from oslo.config import cfg
from heat.openstack.common import fileutils
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _, _LE, _LI
from heat.openstack.common import log as logging
@ -41,7 +41,7 @@ util_opts = [
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
default=os.environ.get("HEAT_LOCK_PATH"),
help=('Directory to use for lock files.'))
help='Directory to use for lock files.')
]
@ -53,7 +53,7 @@ def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
class _FileLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
@ -75,12 +75,12 @@ class _InterProcessLock(object):
self.lockfile = None
self.fname = name
def __enter__(self):
def acquire(self):
basedir = os.path.dirname(self.fname)
if not os.path.exists(basedir):
fileutils.ensure_tree(basedir)
LOG.info(_('Created lock path: %s'), basedir)
LOG.info(_LI('Created lock path: %s'), basedir)
self.lockfile = open(self.fname, 'w')
@ -91,24 +91,40 @@ class _InterProcessLock(object):
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
LOG.debug(_('Got file lock "%s"'), self.fname)
return self
LOG.debug('Got file lock "%s"', self.fname)
return True
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
raise threading.ThreadError(_("Unable to acquire lock on"
" `%(filename)s` due to"
" %(exception)s") %
{
'filename': self.fname,
'exception': e,
})
def __exit__(self, exc_type, exc_val, exc_tb):
def __enter__(self):
self.acquire()
return self
def release(self):
try:
self.unlock()
self.lockfile.close()
LOG.debug('Released file lock "%s"', self.fname)
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
LOG.exception(_LE("Could not release the acquired lock `%s`"),
self.fname)
LOG.debug(_('Released file lock "%s"'), self.fname)
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def exists(self):
return os.path.exists(self.fname)
def trylock(self):
raise NotImplementedError()
@ -117,7 +133,7 @@ class _InterProcessLock(object):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
class _WindowsLock(_FileLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
@ -125,7 +141,7 @@ class _WindowsLock(_InterProcessLock):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
class _FcntlLock(_FileLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
@ -133,35 +149,106 @@ class _PosixLock(_InterProcessLock):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
class _PosixLock(object):
def __init__(self, name):
# Hash the name because it's not valid to have POSIX semaphore
# names with things like / in them. Then use base64 to encode
# the digest() instead of taking the hexdigest() because the
# result is shorter and most systems can't have shm semaphore
# names longer than 31 characters.
h = hashlib.sha1()
h.update(name.encode('ascii'))
self.name = str((b'/' + base64.urlsafe_b64encode(
h.digest())).decode('ascii'))
def acquire(self, timeout=None):
self.semaphore = posix_ipc.Semaphore(self.name,
flags=posix_ipc.O_CREAT,
initial_value=1)
self.semaphore.acquire(timeout)
return self
def __enter__(self):
self.acquire()
return self
def release(self):
self.semaphore.release()
self.semaphore.close()
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def exists(self):
try:
semaphore = posix_ipc.Semaphore(self.name)
except posix_ipc.ExistentialError:
return False
else:
semaphore.close()
return True
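The name mangling in _PosixLock.__init__ is worth tracing once: a 20-byte SHA1 digest encodes to 28 base64 characters, so with the leading '/' every lock name maps to a 29-character semaphore name, under the ~31-character limit the comment mentions. A standalone sketch mirroring that logic:

    import base64
    import hashlib

    def sem_name(name):
        # Mirrors _PosixLock.__init__ above.
        h = hashlib.sha1()
        h.update(name.encode('ascii'))
        return (b'/' + base64.urlsafe_b64encode(h.digest())).decode('ascii')

    print(len(sem_name('heat-engine-lock')))  # 29, whatever the input length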
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
FileLock = _WindowsLock
else:
import fcntl
import base64
import hashlib
import posix_ipc
InterProcessLock = _PosixLock
FileLock = _FcntlLock
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
def external_lock(name, lock_file_prefix=None):
with internal_lock(name):
LOG.debug(_('Attempting to grab external lock "%(lock)s"'),
{'lock': name})
def _get_lock_path(name, lock_file_prefix, lock_path=None):
# NOTE(mikal): the lock name cannot contain directory
# separators
name = name.replace(os.sep, '_')
if lock_file_prefix:
sep = '' if lock_file_prefix.endswith('-') else '-'
name = '%s%s%s' % (lock_file_prefix, sep, name)
# NOTE(mikal): the lock name cannot contain directory
# separators
name = name.replace(os.sep, '_')
if lock_file_prefix:
sep = '' if lock_file_prefix.endswith('-') else '-'
name = '%s%s%s' % (lock_file_prefix, sep, name)
local_lock_path = lock_path or CONF.lock_path
if not CONF.lock_path:
if not local_lock_path:
# NOTE(bnemec): Create a fake lock path for posix locks so we don't
# unnecessarily raise the RequiredOptError below.
if InterProcessLock is not _PosixLock:
raise cfg.RequiredOptError('lock_path')
local_lock_path = 'posixlock:/'
lock_file_path = os.path.join(CONF.lock_path, name)
return os.path.join(local_lock_path, name)
return InterProcessLock(lock_file_path)
def external_lock(name, lock_file_prefix=None, lock_path=None):
LOG.debug('Attempting to grab external lock "%(lock)s"',
{'lock': name})
lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
# NOTE(bnemec): If an explicit lock_path was passed to us then it
# means the caller is relying on file-based locking behavior, so
# we can't use posix locks for those calls.
if lock_path:
return FileLock(lock_file_path)
return InterProcessLock(lock_file_path)
def remove_external_lock_file(name, lock_file_prefix=None):
"""Remove a external lock file when it's not used anymore
This will be helpful when we have a lot of lock files
"""
with internal_lock(name):
lock_file_path = _get_lock_path(name, lock_file_prefix)
try:
os.remove(lock_file_path)
except OSError:
LOG.info(_LI('Failed to remove file %(file)s'),
{'file': lock_file_path})
def internal_lock(name):
@ -172,12 +259,12 @@ def internal_lock(name):
sem = threading.Semaphore()
_semaphores[name] = sem
LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
LOG.debug('Got semaphore "%(lock)s"', {'lock': name})
return sem
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False):
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
This function yields a `threading.Semaphore` instance (if we don't use
@ -192,15 +279,17 @@ def lock(name, lock_file_prefix=None, external=False):
workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
"""
if external and not CONF.disable_process_locking:
lock = external_lock(name, lock_file_prefix)
else:
lock = internal_lock(name)
with lock:
yield lock
int_lock = internal_lock(name)
with int_lock:
if external and not CONF.disable_process_locking:
ext_lock = external_lock(name, lock_file_prefix, lock_path)
with ext_lock:
yield ext_lock
else:
yield int_lock
def synchronized(name, lock_file_prefix=None, external=False):
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@ -228,12 +317,12 @@ def synchronized(name, lock_file_prefix=None, external=False):
@functools.wraps(f)
def inner(*args, **kwargs):
try:
with lock(name, lock_file_prefix, external):
LOG.debug(_('Got semaphore / lock "%(function)s"'),
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug('Got semaphore / lock "%(function)s"',
{'function': f.__name__})
return f(*args, **kwargs)
finally:
LOG.debug(_('Semaphore / lock released "%(function)s"'),
LOG.debug('Semaphore / lock released "%(function)s"',
{'function': f.__name__})
return inner
return wrap
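A usage sketch tying lock() and synchronized() together; the lockutils module name follows the oslo-incubator layout and the helper is hypothetical:

    from heat.openstack.common import lockutils

    @lockutils.synchronized('stack-update', lock_file_prefix='heat-',
                            external=True)
    def update_stack(stack_id):
        # Serialized across processes: the internal semaphore is taken
        # first, then the external file/posix lock, per lock() above.
        do_update(stack_id)  # hypothetical

    def update_stack_inline(stack_id):
        with lockutils.lock('stack-update', external=True):
            do_update(stack_id)  # hypothetical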

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
@ -16,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
"""OpenStack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
@ -116,10 +115,21 @@ logging_cli_opts = [
'--log-file paths'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging.'),
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and then will be changed in J to honor RFC5424'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Use syslog rfc5424 format for logging. '
'If enabled, will add APP-NAME (RFC5424) before the '
'MSG part of the syslog message. The old format '
'without APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='syslog facility to receive log lines')
help='Syslog facility to receive log lines')
]
generic_log_opts = [
@ -133,18 +143,18 @@ log_opts = [
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='format string to use for log messages with context'),
help='Format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
help='Format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
help='Data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='prefix each line of exception output with this format'),
help='Prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqp=WARN',
@ -153,15 +163,17 @@ log_opts = [
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'oslo.messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN'
],
help='list of logger=LEVEL pairs'),
help='List of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
help='Publish error events'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='make deprecations fatal'),
help='Make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
@ -293,18 +305,39 @@ class ContextAdapter(BaseLoggerAdapter):
self.logger = logger
self.project = project_name
self.version = version_string
self._deprecated_messages_sent = dict()
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
"""Call this method when a deprecated feature is used.
If the system is configured for fatal deprecations then the message
is logged at the 'critical' level and :class:`DeprecatedConfig` will
be raised.
Otherwise, the message will be logged (once) at the 'warn' level.
:raises: :class:`DeprecatedConfig` if the system is configured for
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs)
# Using a list because a tuple with dict can't be stored in a set.
sent_args = self._deprecated_messages_sent.setdefault(msg, list())
if args in sent_args:
# Already logged this message, so don't log it again.
return
sent_args.append(args)
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
@ -325,7 +358,7 @@ class ContextAdapter(BaseLoggerAdapter):
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid', None) or
instance_uuid = (extra.get('instance_uuid') or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
@ -421,12 +454,12 @@ def _load_log_config(log_config_append):
raise LogConfigError(log_config_append, str(exc))
def setup(product_name):
def setup(product_name, version='unknown'):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf()
_setup_logging_from_conf(product_name, version)
sys.excepthook = _create_logging_excepthook(product_name)
@ -460,15 +493,32 @@ def _find_facility_from_conf():
return facility
def _setup_logging_from_conf():
class RFCSysLogHandler(logging.handlers.SysLogHandler):
def __init__(self, *args, **kwargs):
self.binary_name = _get_binary_name()
super(RFCSysLogHandler, self).__init__(*args, **kwargs)
def format(self, record):
msg = super(RFCSysLogHandler, self).format(record)
msg = self.binary_name + ' ' + msg
return msg
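The handler backs the new use-syslog-rfc-format option by prepending the binary name as the RFC5424 APP-NAME field. Roughly, and assuming a binary named heat-engine:

    # use_syslog_rfc_format=False (current, deprecated format):
    #   2014-04-15 ... INFO heat.engine.service [-] Starting engine
    # use_syslog_rfc_format=True:
    #   heat-engine 2014-04-15 ... INFO heat.engine.service [-] Starting engine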
def _setup_logging_from_conf(project, version):
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
# TODO(bogdando) use the format provided by RFCSysLogHandler
# after existing syslog format deprecation in J
if CONF.use_syslog_rfc_format:
syslog = RFCSysLogHandler(address='/dev/log',
facility=facility)
else:
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
@ -502,7 +552,9 @@ def _setup_logging_from_conf():
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(datefmt=datefmt))
handler.setFormatter(ContextFormatter(project=project,
version=version,
datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
@ -560,18 +612,50 @@ class ContextFormatter(logging.Formatter):
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
"""
def __init__(self, *args, **kwargs):
"""Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# store project info
record.project = self.project
record.version = self.version
# store request info
context = getattr(local.store, 'context', None)
if context:
d = _dictify_context(context)
for k, v in d.items():
setattr(record, k, v)
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
for key in ('instance', 'color', 'user_identity'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
if record.__dict__.get('request_id'):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
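A minimal manual wiring of the formatter with its new keyword arguments; in normal operation _setup_logging_from_conf() does this:

    import logging

    handler = logging.StreamHandler()
    handler.setFormatter(ContextFormatter(project='heat',
                                          version='2014.1',
                                          datefmt='%Y-%m-%d %H:%M:%S'))
    logging.getLogger().addHandler(handler)
    # Format strings may now reference %(project)s and %(version)s.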

View File

@ -1,4 +1,3 @@
#
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -28,4 +27,4 @@ class PublishErrorsHandler(logging.Handler):
notifier.api.notify(None, 'error.publisher',
'error_notification',
notifier.api.ERROR,
dict(error=record.msg))
dict(error=record.getMessage()))

View File

@ -1,4 +1,3 @@
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
@ -21,7 +20,7 @@ import sys
from eventlet import event
from eventlet import greenthread
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _LE, _LW
from heat.openstack.common import log as logging
from heat.openstack.common import timeutils
@ -80,14 +79,14 @@ class FixedIntervalLoopingCall(LoopingCallBase):
break
delay = interval - timeutils.delta_seconds(start, end)
if delay <= 0:
LOG.warn(_('task run outlasted interval by %s sec') %
LOG.warn(_LW('task run outlasted interval by %s sec') %
-delay)
greenthread.sleep(delay if delay > 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in fixed duration looping call'))
LOG.exception(_LE('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
@ -127,14 +126,14 @@ class DynamicLoopingCall(LoopingCallBase):
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug(_('Dynamic looping call sleeping for %.02f '
'seconds'), idle)
LOG.debug('Dynamic looping call sleeping for %.02f '
'seconds', idle)
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in dynamic looping call'))
LOG.exception(_LE('in dynamic looping call'))
done.send_exception(*sys.exc_info())
return
else:

View File

@ -1,4 +1,3 @@
#
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
@ -18,7 +17,17 @@
Network-related utilities and helper functions.
"""
from heat.openstack.common.py3kcompat import urlutils
# TODO(jd) Use six.moves once
# https://bitbucket.org/gutworth/six/pull-request/28
# is merged
try:
import urllib.parse
SplitResult = urllib.parse.SplitResult
except ImportError:
import urlparse
SplitResult = urlparse.SplitResult
from six.moves.urllib import parse
def parse_host_port(address, default_port=None):
@ -65,16 +74,35 @@ def parse_host_port(address, default_port=None):
return (host, None if port is None else int(port))
class ModifiedSplitResult(SplitResult):
"""Split results class for urlsplit."""
# NOTE(dims): The functions below are needed for Python 2.6.x.
# We can remove these when we drop support for 2.6.x.
@property
def hostname(self):
netloc = self.netloc.split('@', 1)[-1]
host, port = parse_host_port(netloc)
return host
@property
def port(self):
netloc = self.netloc.split('@', 1)[-1]
host, port = parse_host_port(netloc)
return port
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL using urlparse.urlsplit(), splitting query and fragments.
This function papers over Python issue9374 when needed.
The parameters are the same as urlparse.urlsplit.
"""
scheme, netloc, path, query, fragment = urlutils.urlsplit(
scheme, netloc, path, query, fragment = parse.urlsplit(
url, scheme, allow_fragments)
if allow_fragments and '#' in path:
path, fragment = path.split('#', 1)
if '?' in path:
path, query = path.split('?', 1)
return urlutils.SplitResult(scheme, netloc, path, query, fragment)
return ModifiedSplitResult(scheme, netloc,
path, query, fragment)
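A hedged sketch of the Python 2.6-safe behavior the two properties restore; the network_utils module name follows the oslo-incubator layout:

    from heat.openstack.common import network_utils

    r = network_utils.urlsplit('tcp://user:secret@[::1]:5672/vhost?ssl=1#f')
    print(r.hostname)  # ::1   (brackets stripped via parse_host_port)
    print(r.port)      # 5672
    print(r.query)     # ssl=1 (split even where 2.6 stdlib would not)
    print(r.fragment)  # f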

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@ -20,7 +19,7 @@ import uuid
from oslo.config import cfg
from heat.openstack.common import context
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _, _LE
from heat.openstack.common import importutils
from heat.openstack.common import jsonutils
from heat.openstack.common import log as logging
@ -143,9 +142,9 @@ def notify(context, publisher_id, event_type, priority, payload):
try:
driver.notify(context, msg)
except Exception as e:
LOG.exception(_("Problem '%(e)s' attempting to "
"send to notification system. "
"Payload=%(payload)s")
LOG.exception(_LE("Problem '%(e)s' attempting to "
"send to notification system. "
"Payload=%(payload)s")
% dict(e=e, payload=payload))
@ -162,8 +161,8 @@ def _get_drivers():
driver = importutils.import_module(notification_driver)
_drivers[notification_driver] = driver
except ImportError:
LOG.exception(_("Failed to load notifier %s. "
"These notifications will not be sent.") %
LOG.exception(_LE("Failed to load notifier %s. "
"These notifications will not be sent.") %
notification_driver)
return _drivers.values()

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#

View File

@ -1,4 +1,3 @@
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -14,10 +13,10 @@
# under the License.
"""
A temporary helper which emulates heat.messaging.Notifier.
A temporary helper which emulates oslo.messaging.Notifier.
This helper method allows us to do the tedious porting to the new Notifier API
as a standalone commit so that the commit which switches us to heat.messaging
as a standalone commit so that the commit which switches us to oslo.messaging
is smaller and easier to review. This file will be removed as part of that
commit.
"""

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@ -17,7 +16,7 @@
from oslo.config import cfg
from heat.openstack.common import context as req_context
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _LE
from heat.openstack.common import log as logging
from heat.openstack.common import rpc
@ -43,6 +42,6 @@ def notify(context, message):
try:
rpc.notify(context, topic, message)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"),
LOG.exception(_LE("Could not send notification to %(topic)s. "
"Payload=%(message)s"),
{"topic": topic, "message": message})

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@ -19,7 +18,7 @@
from oslo.config import cfg
from heat.openstack.common import context as req_context
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _LE
from heat.openstack.common import log as logging
from heat.openstack.common import rpc
@ -49,6 +48,6 @@ def notify(context, message):
try:
rpc.notify(context, topic, message, envelope=True)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"),
LOG.exception(_LE("Could not send notification to %(topic)s. "
"Payload=%(message)s"),
{"topic": topic, "message": message})

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#

View File

@ -1,4 +1,3 @@
#
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
@ -47,6 +46,27 @@ policy rule::
project_id:%(project_id)s and not role:dunce
It is possible to perform policy checks on the following user
attributes (obtained through the token): user_id, domain_id or
project_id::
domain_id:<some_value>
Attributes sent along with API calls can be used by the policy engine
(on the right side of the expression), by using the following syntax::
<some_value>:user.id
Contextual attributes of objects identified by their IDs are loaded
from the database. They are also available to the policy engine and
can be checked through the `target` keyword::
<some_value>:target.role.name
All these attributes (related to users, API calls, and context) can be
checked against each other or against constants, be it literals (True,
<a_number>) or strings.
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
@ -56,16 +76,18 @@ as it allows particular rules to be explicitly disabled.
"""
import abc
import ast
import re
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest
from heat.openstack.common import fileutils
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _, _LE
from heat.openstack.common import jsonutils
from heat.openstack.common import log as logging
from heat.openstack.common.py3kcompat import urlutils
policy_opts = [
@ -119,11 +141,16 @@ class Rules(dict):
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule or self.default_rule not in self:
if not self.default_rule:
raise KeyError(key)
if isinstance(self.default_rule, BaseCheck):
return self.default_rule
# We need to check this or we can get infinite recursion
if self.default_rule not in self:
raise KeyError(key)
elif isinstance(self.default_rule, six.string_types):
return self[self.default_rule]
@ -155,27 +182,31 @@ class Enforcer(object):
is called this will be overwritten.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
:param use_conf: Whether to load rules from cache or config file.
"""
def __init__(self, policy_file=None, rules=None, default_rule=None):
def __init__(self, policy_file=None, rules=None,
default_rule=None, use_conf=True):
self.rules = Rules(rules, default_rule)
self.default_rule = default_rule or CONF.policy_default_rule
self.policy_path = None
self.policy_file = policy_file or CONF.policy_file
self.use_conf = use_conf
def set_rules(self, rules, overwrite=True):
def set_rules(self, rules, overwrite=True, use_conf=False):
"""Create a new Rules object based on the provided dict of rules.
:param rules: New rules to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
:param use_conf: Whether to reload rules from cache or config file.
"""
if not isinstance(rules, dict):
raise TypeError(_("Rules must be an instance of dict or Rules, "
"got %s instead") % type(rules))
self.use_conf = use_conf
if overwrite:
self.rules = Rules(rules, self.default_rule)
else:
@ -195,15 +226,19 @@ class Enforcer(object):
:param force_reload: Whether to overwrite current rules.
"""
if not self.policy_path:
self.policy_path = self._get_policy_path()
if force_reload:
self.use_conf = force_reload
reloaded, data = fileutils.read_cached_file(self.policy_path,
force_reload=force_reload)
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule)
self.set_rules(rules)
LOG.debug(_("Rules successfully reloaded"))
if self.use_conf:
if not self.policy_path:
self.policy_path = self._get_policy_path()
reloaded, data = fileutils.read_cached_file(
self.policy_path, force_reload=force_reload)
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule)
self.set_rules(rules)
LOG.debug("Rules successfully reloaded")
def _get_policy_path(self):
"""Locate the policy json data file.
@ -249,7 +284,7 @@ class Enforcer(object):
# NOTE(flaper87): Not logging target or creds to avoid
# potential security issues.
LOG.debug(_("Rule %s will be now enforced") % rule)
LOG.debug("Rule %s will be now enforced" % rule)
self.load_rules()
@ -264,7 +299,7 @@ class Enforcer(object):
# Evaluate the rule
result = self.rules[rule](target, creds, self)
except KeyError:
LOG.debug(_("Rule [%s] doesn't exist") % rule)
LOG.debug("Rule [%s] doesn't exist" % rule)
# If the rule doesn't exist, fail closed
result = False
@ -472,7 +507,7 @@ def _parse_check(rule):
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_("Failed to understand rule %s") % rule)
LOG.exception(_LE("Failed to understand rule %s") % rule)
# If the rule is invalid, we'll fail closed
return FalseCheck()
@ -482,7 +517,7 @@ def _parse_check(rule):
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_("No handler for matches of kind %s") % kind)
LOG.error(_LE("No handler for matches of kind %s") % kind)
return FalseCheck()
@ -752,7 +787,7 @@ def _parse_text_rule(rule):
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_("Failed to understand rule %r") % rule)
LOG.exception(_LE("Failed to understand rule %r") % rule)
# Fail closed
return FalseCheck()
@ -825,8 +860,8 @@ class HttpCheck(Check):
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urlutils.urlencode(data)
f = urlutils.urlopen(url, post_data)
post_data = urlparse.urlencode(data)
f = urlrequest.urlopen(url, post_data)
return f.read() == "True"
@ -839,6 +874,8 @@ class GenericCheck(Check):
tenant:%(tenant_id)s
role:compute:admin
True:%(user.enabled)s
'Member':%(role.name)s
"""
# TODO(termie): do dict inspection via dot syntax
@ -849,6 +886,12 @@ class GenericCheck(Check):
# present in Target return false
return False
if self.kind in creds:
return match == six.text_type(creds[self.kind])
return False
try:
# Try to interpret self.kind as a literal
leftval = ast.literal_eval(self.kind)
except ValueError:
try:
leftval = creds[self.kind]
except KeyError:
return False
return match == six.text_type(leftval)
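The literal branch means the left side of a generic check no longer has to name a credential key; the docstring's own examples now evaluate like this (a sketch, assuming the match = self.match % target interpolation earlier in __call__):

    # 'Member':%(role.name)s  with target {'role.name': 'Member'}:
    #     ast.literal_eval("'Member'") -> u'Member' == match -> True
    # True:%(user.enabled)s   with target {'user.enabled': True}:
    #     ast.literal_eval('True') -> True, compared as u'True' == match
    # tenant:%(tenant_id)s    is not a parseable literal, so the ValueError
    #     branch falls back to creds['tenant'] as before.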

View File

@ -1,4 +1,3 @@
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.

View File

@ -1,4 +1,3 @@
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@ -38,7 +37,7 @@ import six
from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _, _LE
from heat.openstack.common import local
from heat.openstack.common import log as logging
from heat.openstack.common.rpc import common as rpc_common
@ -73,7 +72,7 @@ class Pool(pools.Pool):
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug(_('Pool creating new connection'))
LOG.debug('Pool creating new connection')
return self.connection_cls(self.conf)
def empty(self):
@ -175,7 +174,7 @@ class ConnectionContext(rpc_common.Connection):
ack_on_error)
def consume_in_thread(self):
self.connection.consume_in_thread()
return self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance."""
@ -288,7 +287,7 @@ def unpack_context(conf, msg):
context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
rpc_common._safe_log(LOG.debug, 'unpacked context: %s', ctx.to_dict())
return ctx
@ -340,7 +339,7 @@ def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id})
LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
LOG.debug('UNIQUE_ID is %s.' % (unique_id))
class _ThreadPoolWithWait(object):
@ -433,7 +432,7 @@ class ProxyCallback(_ThreadPoolWithWait):
# the previous context is stored in local.store.context
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
rpc_common._safe_log(LOG.debug, 'received %s', message_data)
self.msg_id_cache.check_duplicate_message(message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
@ -470,7 +469,7 @@ class ProxyCallback(_ThreadPoolWithWait):
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
except rpc_common.ClientException as e:
LOG.debug(_('Expected exception during message handling (%s)') %
LOG.debug('Expected exception during message handling (%s)' %
e._exc_info[1])
ctxt.reply(None, e._exc_info,
connection_pool=self.connection_pool,
@ -478,7 +477,7 @@ class ProxyCallback(_ThreadPoolWithWait):
except Exception:
# sys.exc_info() is deleted by LOG.exception().
exc_info = sys.exc_info()
LOG.error(_('Exception during message handling'),
LOG.error(_LE('Exception during message handling'),
exc_info=exc_info)
ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
@ -552,10 +551,10 @@ _reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
LOG.debug(_('Making synchronous call on %s ...'), topic)
LOG.debug('Making synchronous call on %s ...', topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
LOG.debug('MSG_ID is %s' % (msg_id))
_add_unique_id(msg)
pack_context(msg, context)
@ -581,7 +580,7 @@ def call(conf, context, topic, msg, timeout, connection_pool):
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
LOG.debug('Making asynchronous cast on %s...', topic)
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
@ -590,7 +589,7 @@ def cast(conf, context, topic, msg, connection_pool):
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
LOG.debug('Making asynchronous fanout cast...')
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
@ -618,7 +617,7 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg,
def notify(conf, context, topic, msg, connection_pool, envelope):
"""Sends a notification event on a topic."""
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
LOG.debug('Sending %(event_type)s on %(topic)s',
dict(event_type=msg.get('event_type'),
topic=topic))
_add_unique_id(msg)

View File

@ -1,4 +1,3 @@
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@ -23,7 +22,7 @@ import traceback
from oslo.config import cfg
import six
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _, _LE
from heat.openstack.common import importutils
from heat.openstack.common import jsonutils
from heat.openstack.common import local
@ -86,7 +85,7 @@ class RPCException(Exception):
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
LOG.exception(_LE('Exception in string format operation'))
for name, value in six.iteritems(kwargs):
LOG.error("%s: %s" % (name, value))
# at least get the core message out if something happened
@ -290,7 +289,7 @@ def serialize_remote_exception(failure_info, log_failure=True):
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_("Returning exception %s to caller"),
LOG.error(_LE("Returning exception %s to caller"),
six.text_type(failure))
LOG.error(tb)

View File

@ -1,4 +1,3 @@
#
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -141,8 +140,8 @@ def multicall(conf, context, topic, msg, timeout=None):
if not method:
return
args = msg.get('args', {})
version = msg.get('version', None)
namespace = msg.get('namespace', None)
version = msg.get('version')
namespace = msg.get('namespace')
try:
consumer = CONSUMERS[topic][0]
@ -186,8 +185,8 @@ def fanout_cast(conf, context, topic, msg):
if not method:
return
args = msg.get('args', {})
version = msg.get('version', None)
namespace = msg.get('namespace', None)
version = msg.get('version')
namespace = msg.get('namespace')
for consumer in CONSUMERS.get(topic, []):
try:

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -30,7 +29,7 @@ from oslo.config import cfg
import six
from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _, _LE, _LI
from heat.openstack.common import network_utils
from heat.openstack.common.rpc import amqp as rpc_amqp
from heat.openstack.common.rpc import common as rpc_common
@ -39,9 +38,9 @@ from heat.openstack.common import sslutils
kombu_opts = [
cfg.StrOpt('kombu_ssl_version',
default='',
help='SSL version to use (valid only if SSL enabled). '
'valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may '
'be available on some distributions'
help='If SSL is enabled, the SSL version to use. Valid '
'values are TLSv1, SSLv23 and SSLv3. SSLv2 might '
'be available on some distributions.'
),
cfg.StrOpt('kombu_ssl_keyfile',
default='',
@ -51,8 +50,8 @@ kombu_opts = [
help='SSL cert file (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_ca_certs',
default='',
help=('SSL certification authority file '
'(valid only if SSL enabled)')),
help='SSL certification authority file '
'(valid only if SSL enabled)'),
cfg.StrOpt('rabbit_host',
default='localhost',
help='The RabbitMQ broker address where a single node is used'),
@ -64,33 +63,33 @@ kombu_opts = [
help='RabbitMQ HA cluster host:port pairs'),
cfg.BoolOpt('rabbit_use_ssl',
default=False,
help='connect over SSL for RabbitMQ'),
help='Connect over SSL for RabbitMQ'),
cfg.StrOpt('rabbit_userid',
default='guest',
help='the RabbitMQ userid'),
help='The RabbitMQ userid'),
cfg.StrOpt('rabbit_password',
default='guest',
help='the RabbitMQ password',
help='The RabbitMQ password',
secret=True),
cfg.StrOpt('rabbit_virtual_host',
default='/',
help='the RabbitMQ virtual host'),
help='The RabbitMQ virtual host'),
cfg.IntOpt('rabbit_retry_interval',
default=1,
help='how frequently to retry connecting with RabbitMQ'),
help='How frequently to retry connecting with RabbitMQ'),
cfg.IntOpt('rabbit_retry_backoff',
default=2,
help='how long to backoff for between retries when connecting '
help='How long to backoff for between retries when connecting '
'to RabbitMQ'),
cfg.IntOpt('rabbit_max_retries',
default=0,
help='maximum retries with trying to connect to RabbitMQ '
'(the default of 0 implies an infinite retry count)'),
help='Maximum number of RabbitMQ connection retries. '
'Default is 0 (infinite retry count)'),
cfg.BoolOpt('rabbit_ha_queues',
default=False,
help='use H/A queues in RabbitMQ (x-ha-policy: all).'
'You need to wipe RabbitMQ database when '
'changing this option.'),
help='Use HA queues in RabbitMQ (x-ha-policy: all). '
'If you change this option, you must wipe the '
'RabbitMQ database.'),
]
@ -154,12 +153,12 @@ class ConsumerBase(object):
callback(msg)
except Exception:
if self.ack_on_error:
LOG.exception(_("Failed to process message"
" ... skipping it."))
LOG.exception(_LE("Failed to process message"
" ... skipping it."))
message.ack()
else:
LOG.exception(_("Failed to process message"
" ... will requeue."))
LOG.exception(_LE("Failed to process message"
" ... will requeue."))
message.requeue()
else:
message.ack()
@ -496,7 +495,7 @@ class Connection(object):
be handled by the caller.
"""
if self.connection:
LOG.info(_("Reconnecting to AMQP server on "
LOG.info(_LI("Reconnecting to AMQP server on "
"%(hostname)s:%(port)d") % params)
try:
self.connection.release()
@ -518,7 +517,7 @@ class Connection(object):
self.channel._new_queue('ae.undeliver')
for consumer in self.consumers:
consumer.reconnect(self.channel)
LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
LOG.info(_LI('Connected to AMQP server on %(hostname)s:%(port)d') %
params)
def reconnect(self):
@ -569,9 +568,9 @@ class Connection(object):
sleep_time = min(sleep_time, self.interval_max)
log_info['sleep_time'] = sleep_time
LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
'unreachable: %(err_str)s. Trying again in '
'%(sleep_time)d seconds.') % log_info)
LOG.error(_LE('AMQP server on %(hostname)s:%(port)d is '
'unreachable: %(err_str)s. Trying again in '
'%(sleep_time)d seconds.') % log_info)
time.sleep(sleep_time)
def ensure(self, error_callback, method, *args, **kwargs):
@ -623,7 +622,7 @@ class Connection(object):
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
@ -641,11 +640,11 @@ class Connection(object):
def _error_callback(exc):
if isinstance(exc, socket.timeout):
LOG.debug(_('Timed out waiting for RPC response: %s') %
LOG.debug('Timed out waiting for RPC response: %s' %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
LOG.exception(_LE('Failed to consume message from queue: %s') %
str(exc))
info['do_consume'] = True
@ -684,7 +683,7 @@ class Connection(object):
def _error_callback(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
LOG.exception(_LE("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publish():

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation
# Copyright 2011 - 2012, Red Hat, Inc.
#
@ -24,7 +23,7 @@ from oslo.config import cfg
import six
from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _, _LE, _LI
from heat.openstack.common import importutils
from heat.openstack.common import jsonutils
from heat.openstack.common import log as logging
@ -189,7 +188,7 @@ class ConsumerBase(object):
msg = rpc_common.deserialize_msg(message.content)
self.callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
LOG.exception(_LE("Failed to process message... skipping it."))
finally:
# TODO(sandy): Need support for optional ack_on_error.
self.session.acknowledge(message)
@ -505,7 +504,7 @@ class Connection(object):
if self.connection.opened():
try:
self.connection.close()
except qpid_exceptions.ConnectionError:
except qpid_exceptions.MessagingError:
pass
broker = self.brokers[next(self.next_broker_indices)]
@ -513,15 +512,15 @@ class Connection(object):
try:
self.connection_create(broker)
self.connection.open()
except qpid_exceptions.ConnectionError as e:
except qpid_exceptions.MessagingError as e:
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
msg = _LE("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg)
time.sleep(delay)
delay = min(2 * delay, 60)
delay = min(delay + 1, 5)
else:
LOG.info(_('Connected to AMQP server on %s'), broker)
LOG.info(_LI('Connected to AMQP server on %s'), broker)
break
self.session = self.connection.session()
@ -534,14 +533,14 @@ class Connection(object):
consumer.reconnect(self.session)
self._register_consumer(consumer)
LOG.debug(_("Re-established AMQP queues"))
LOG.debug("Re-established AMQP queues")
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (qpid_exceptions.Empty,
qpid_exceptions.ConnectionError) as e:
qpid_exceptions.MessagingError) as e:
if error_callback:
error_callback(e)
self.reconnect()
@ -573,7 +572,7 @@ class Connection(object):
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
@ -588,11 +587,11 @@ class Connection(object):
def _error_callback(exc):
if isinstance(exc, qpid_exceptions.Empty):
LOG.debug(_('Timed out waiting for RPC response: %s') %
LOG.debug('Timed out waiting for RPC response: %s' %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
LOG.exception(_LE('Failed to consume message from queue: %s') %
str(exc))
def _consume():
@ -600,7 +599,7 @@ class Connection(object):
try:
self._lookup_consumer(nxt_receiver).consume()
except Exception:
LOG.exception(_("Error processing message. Skipping it."))
LOG.exception(_LE("Error processing message. Skipping it."))
for iteration in itertools.count(0):
if limit and iteration >= limit:
@ -627,7 +626,7 @@ class Connection(object):
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
LOG.exception(_LE("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publisher_send():

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -28,7 +27,7 @@ import six
from six import moves
from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _, _LE, _LI
from heat.openstack.common import importutils
from heat.openstack.common import jsonutils
from heat.openstack.common.rpc import common as rpc_common
@ -94,12 +93,12 @@ def _serialize(data):
return jsonutils.dumps(data, ensure_ascii=True)
except TypeError:
with excutils.save_and_reraise_exception():
LOG.error(_("JSON serialization failed."))
LOG.error(_LE("JSON serialization failed."))
def _deserialize(data):
"""Deserialization wrapper."""
LOG.debug(_("Deserializing: %s"), data)
LOG.debug("Deserializing: %s", data)
return jsonutils.loads(data)
@ -134,9 +133,9 @@ class ZmqSocket(object):
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
LOG.debug(_("-> bind: %(bind)s"), str_data)
LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
LOG.debug("-> Subscribed to %(subscribe)s", str_data)
LOG.debug("-> bind: %(bind)s", str_data)
try:
if bind:
@ -156,7 +155,7 @@ class ZmqSocket(object):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug(_("Subscribing to %s"), msg_filter)
LOG.debug("Subscribing to %s", msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
@ -193,7 +192,7 @@ class ZmqSocket(object):
# it would be much worse if some of the code calling this
# were to fail. For now, lets log, and later evaluate
# if we can safely raise here.
LOG.error(_("ZeroMQ socket could not be closed."))
LOG.error(_LE("ZeroMQ socket could not be closed."))
self.sock = None
def recv(self, **kwargs):
@ -265,7 +264,7 @@ class InternalContext(object):
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
LOG.debug("Running func with context: %s", ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
@ -278,13 +277,13 @@ class InternalContext(object):
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException as e:
LOG.debug(_("Expected exception during message handling (%s)") %
LOG.debug("Expected exception during message handling (%s)" %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_("Exception during message handling"))
LOG.error(_LE("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
@ -303,7 +302,7 @@ class InternalContext(object):
self._get_response(ctx, proxy, topic, payload),
ctx.replies)
LOG.debug(_("Sending reply"))
LOG.debug("Sending reply")
_multi_send(_cast, ctx, topic, {
'method': '-process_reply',
'args': {
@ -337,7 +336,7 @@ class ConsumerBase(object):
# processed internally. (non-valid method name)
method = data.get('method')
if not method:
LOG.error(_("RPC message did not include method."))
LOG.error(_LE("RPC message did not include method."))
return
# Internal method
@ -369,7 +368,7 @@ class ZmqBaseReactor(ConsumerBase):
def register(self, proxy, in_addr, zmq_type_in,
in_bind=True, subscribe=None):
LOG.info(_("Registering reactor"))
LOG.info(_LI("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype")
@ -381,12 +380,12 @@ class ZmqBaseReactor(ConsumerBase):
self.proxies[inq] = proxy
self.sockets.append(inq)
LOG.info(_("In reactor registered"))
LOG.info(_LI("In reactor registered"))
def consume_in_thread(self):
@excutils.forever_retry_uncaught_exceptions
def _consume(sock):
LOG.info(_("Consuming socket"))
LOG.info(_LI("Consuming socket"))
while True:
self.consume(sock)
@ -436,7 +435,7 @@ class ZmqProxy(ZmqBaseReactor):
if topic not in self.topic_proxy:
def publisher(waiter):
LOG.info(_("Creating proxy for topic: %s"), topic)
LOG.info(_LI("Creating proxy for topic: %s"), topic)
try:
# The topic is received over the network,
@ -474,14 +473,14 @@ class ZmqProxy(ZmqBaseReactor):
try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_("Topic socket file creation failed."))
LOG.error(_LE("Topic socket file creation failed."))
return
try:
self.topic_proxy[topic].put_nowait(data)
except eventlet.queue.Full:
LOG.error(_("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
LOG.error(_LE("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service."""
@ -496,8 +495,8 @@ class ZmqProxy(ZmqBaseReactor):
except os.error:
if not os.path.isdir(ipc_dir):
with excutils.save_and_reraise_exception():
LOG.error(_("Required IPC directory does not exist at"
" %s") % (ipc_dir, ))
LOG.error(_LE("Required IPC directory does not exist at"
" %s") % (ipc_dir, ))
try:
self.register(consumption_proxy,
consume_in,
@ -505,11 +504,11 @@ class ZmqProxy(ZmqBaseReactor):
except zmq.ZMQError:
if os.access(ipc_dir, os.X_OK):
with excutils.save_and_reraise_exception():
LOG.error(_("Permission denied to IPC directory at"
" %s") % (ipc_dir, ))
LOG.error(_LE("Permission denied to IPC directory at"
" %s") % (ipc_dir, ))
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
LOG.error(_LE("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread()
@ -542,7 +541,7 @@ class ZmqReactor(ZmqBaseReactor):
def consume(self, sock):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
LOG.debug("CONSUMER RECEIVED DATA: %s", data)
proxy = self.proxies[sock]
@ -561,7 +560,7 @@ class ZmqReactor(ZmqBaseReactor):
# Unmarshal only after verifying the message.
ctx = RpcContext.unmarshal(data[3])
else:
LOG.error(_("ZMQ Envelope version unsupported or unknown."))
LOG.error(_LE("ZMQ Envelope version unsupported or unknown."))
return
self.pool.spawn_n(self.process, proxy, ctx, request)
@ -589,14 +588,14 @@ class Connection(rpc_common.Connection):
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
if topic in self.topics:
LOG.info(_("Skipping topic registration. Already registered."))
LOG.info(_LI("Skipping topic registration. Already registered."))
return
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug(_("Consumer is a zmq.%s"),
LOG.debug("Consumer is a zmq.%s",
['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type,
@ -648,7 +647,7 @@ def _call(addr, context, topic, msg, timeout=None,
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug(_("Creating payload"))
LOG.debug("Creating payload")
# Curry the original request into a reply method.
mcontext = RpcContext.marshal(context)
payload = {
@ -661,7 +660,7 @@ def _call(addr, context, topic, msg, timeout=None,
}
}
LOG.debug(_("Creating queue socket for reply waiter"))
LOG.debug("Creating queue socket for reply waiter")
# Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
@ -674,14 +673,14 @@ def _call(addr, context, topic, msg, timeout=None,
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug(_("Sending cast"))
LOG.debug("Sending cast")
_cast(addr, context, topic, payload, envelope)
LOG.debug(_("Cast sent; Waiting reply"))
LOG.debug("Cast sent; Waiting reply")
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg)
LOG.debug(_("Unpacking response"))
LOG.debug("Received message: %s", msg)
LOG.debug("Unpacking response")
if msg[2] == 'cast': # Legacy version
raw_msg = _deserialize(msg[-1])[-1]
@ -720,10 +719,10 @@ def _multi_send(method, context, topic, msg, timeout=None,
Dispatches to the matchmaker and sends message to all relevant hosts.
"""
conf = CONF
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
LOG.debug("%(msg)s" % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = _get_matchmaker().queues(topic)
LOG.debug(_("Sending message(s) to: %s"), queues)
LOG.debug("Sending message(s) to: %s", queues)
# Don't stack if we have no matchmaker results
if not queues:

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -23,7 +22,7 @@ import contextlib
import eventlet
from oslo.config import cfg
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _, _LI
from heat.openstack.common import log as logging
@ -214,7 +213,7 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
self.hosts.discard(host)
self.backend_unregister(key, '.'.join((key, host)))
LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
LOG.info(_LI("Matchmaker unregistered: %(key)s, %(host)s"),
{'key': key, 'host': host})
def start_heartbeat(self):

View File

@ -1,4 +1,3 @@
#
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may

View File

@ -1,4 +1,3 @@
#
# Copyright 2011-2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -23,7 +22,7 @@ import json
from oslo.config import cfg
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _LW
from heat.openstack.common import log as logging
from heat.openstack.common.rpc import matchmaker as mm
@ -54,9 +53,8 @@ class RingExchange(mm.Exchange):
if ring:
self.ring = ring
else:
fh = open(CONF.matchmaker_ring.ringfile, 'r')
self.ring = json.load(fh)
fh.close()
with open(CONF.matchmaker_ring.ringfile, 'r') as fh:
self.ring = json.load(fh)
self.ring0 = {}
for k in self.ring.keys():
@ -74,8 +72,8 @@ class RoundRobinRingExchange(RingExchange):
def run(self, key):
if not self._ring_has(key):
LOG.warn(
_("No key defining hosts for topic '%s', "
"see ringfile") % (key, )
_LW("No key defining hosts for topic '%s', "
"see ringfile") % (key, )
)
return []
host = next(self.ring0[key])
@ -92,8 +90,8 @@ class FanoutRingExchange(RingExchange):
nkey = key.split('fanout~')[1:][0]
if not self._ring_has(nkey):
LOG.warn(
_("No key defining hosts for topic '%s', "
"see ringfile") % (nkey, )
_LW("No key defining hosts for topic '%s', "
"see ringfile") % (nkey, )
)
return []
return map(lambda x: (key + '.' + x, x), self.ring[nkey])
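Both exchanges above resolve topics through self.ring, the JSON document loaded from matchmaker_ring.ringfile. A hedged sketch of such a file's shape (hostnames invented; the topic-to-host-list mapping follows the lookups in run()):

import json

# Hypothetical ringfile contents: each topic maps to the hosts serving
# it; FanoutRingExchange strips the 'fanout~' prefix before the lookup.
ring = json.loads('{"scheduler": ["host1", "host2"], "engine": ["host1"]}')

# RoundRobinRingExchange.run('scheduler') would then cycle host1, host2.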

View File

@ -1,4 +1,3 @@
#
# Copyright 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may

View File

@ -1,4 +1,3 @@
#
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may

View File

@ -1,4 +1,3 @@
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@ -16,7 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging
from heat.openstack.common import rpc
from heat.openstack.common.rpc import dispatcher as rpc_dispatcher
@ -45,7 +43,7 @@ class Service(service.Service):
super(Service, self).start()
self.conn = rpc.create_connection(new=True)
LOG.debug(_("Creating Consumer connection for Service %s") %
LOG.debug("Creating Consumer connection for Service %s" %
self.topic)
dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may

View File

@ -1,4 +1,3 @@
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
@ -24,7 +23,6 @@ import os
import random
import signal
import sys
import threading
import time
try:
@ -36,12 +34,14 @@ except ImportError:
UnsupportedOperation = None
import eventlet
from eventlet import event
from oslo.config import cfg
from heat.openstack.common import eventlet_backdoor
from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _LE, _LI, _LW
from heat.openstack.common import importutils
from heat.openstack.common import log as logging
from heat.openstack.common import systemd
from heat.openstack.common import threadgroup
@ -164,7 +164,7 @@ class ServiceLauncher(Launcher):
status = None
signo = 0
LOG.debug(_('Full set of CONF:'))
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
@ -173,7 +173,7 @@ class ServiceLauncher(Launcher):
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_('Caught %s, exiting'), signame)
LOG.info(_LI('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
@ -185,7 +185,7 @@ class ServiceLauncher(Launcher):
rpc.cleanup()
except Exception:
# We're shutting down, so it doesn't matter at this point.
LOG.exception(_('Exception during rpc cleanup.'))
LOG.exception(_LE('Exception during rpc cleanup.'))
return status, signo
@ -236,7 +236,7 @@ class ProcessLauncher(object):
# dies unexpectedly
self.readpipe.read()
LOG.info(_('Parent process has died unexpectedly, exiting'))
LOG.info(_LI('Parent process has died unexpectedly, exiting'))
sys.exit(1)
@ -267,13 +267,13 @@ class ProcessLauncher(object):
launcher.wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_('Caught %s, exiting'), signame)
LOG.info(_LI('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_('Unhandled exception'))
LOG.exception(_LE('Unhandled exception'))
status = 2
finally:
launcher.stop()
@ -306,7 +306,7 @@ class ProcessLauncher(object):
# start up quickly but ensure we don't fork off children that
# die instantly too quickly.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_('Forking too fast, sleeping'))
LOG.info(_LI('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
@ -325,7 +325,7 @@ class ProcessLauncher(object):
os._exit(status)
LOG.info(_('Started child %d'), pid)
LOG.info(_LI('Started child %d'), pid)
wrap.children.add(pid)
self.children[pid] = wrap
@ -335,7 +335,7 @@ class ProcessLauncher(object):
def launch_service(self, service, workers=1):
wrap = ServiceWrapper(service, workers)
LOG.info(_('Starting %d workers'), wrap.workers)
LOG.info(_LI('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
@ -352,15 +352,15 @@ class ProcessLauncher(object):
if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status)
LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
dict(pid=pid, sig=sig))
else:
code = os.WEXITSTATUS(status)
LOG.info(_('Child %(pid)s exited with status %(code)d'),
LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
dict(pid=pid, code=code))
if pid not in self.children:
LOG.warning(_('pid %d not in child list'), pid)
LOG.warning(_LW('pid %d not in child list'), pid)
return None
wrap = self.children.pop(pid)
@ -382,22 +382,25 @@ class ProcessLauncher(object):
def wait(self):
"""Loop waiting on children to die and respawning as necessary."""
LOG.debug(_('Full set of CONF:'))
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
while True:
self.handle_signal()
self._respawn_children()
if self.sigcaught:
signame = _signo_to_signame(self.sigcaught)
LOG.info(_('Caught %s, stopping children'), signame)
if not _is_sighup_and_daemon(self.sigcaught):
break
try:
while True:
self.handle_signal()
self._respawn_children()
if self.sigcaught:
signame = _signo_to_signame(self.sigcaught)
LOG.info(_LI('Caught %s, stopping children'), signame)
if not _is_sighup_and_daemon(self.sigcaught):
break
for pid in self.children:
os.kill(pid, signal.SIGHUP)
self.running = True
self.sigcaught = None
for pid in self.children:
os.kill(pid, signal.SIGHUP)
self.running = True
self.sigcaught = None
except eventlet.greenlet.GreenletExit:
LOG.info(_LI("Wait called after thread killed. Cleaning up."))
for pid in self.children:
try:
@ -408,7 +411,7 @@ class ProcessLauncher(object):
# Wait for children to die
if self.children:
LOG.info(_('Waiting on %d children to exit'), len(self.children))
LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()
@ -420,10 +423,11 @@ class Service(object):
self.tg = threadgroup.ThreadGroup(threads)
# signal that the service is done shutting itself down:
self._done = threading.Event()
self._done = event.Event()
def reset(self):
self._done = threading.Event()
# NOTE(Fengqian): docs for Event.reset() recommend against using it
self._done = event.Event()
def start(self):
pass
@ -432,7 +436,8 @@ class Service(object):
self.tg.stop()
self.tg.wait()
# Signal that service cleanup is done:
self._done.set()
if not self._done.ready():
self._done.send()
def wait(self):
self._done.wait()
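The switch from threading.Event to eventlet's event.Event above changes the signalling idiom: an eventlet Event is one-shot (send()/wait()/ready()) and has no clear(), which is why reset() replaces the object instead of clearing it. A small illustration of those semantics:

from eventlet import event

done = event.Event()
assert not done.ready()
done.send()           # one-shot: wakes every current and future wait()
assert done.wait() is None
done = event.Event()  # 'resetting' means building a fresh Event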
@ -443,7 +448,7 @@ class Services(object):
def __init__(self):
self.services = []
self.tg = threadgroup.ThreadGroup()
self.done = threading.Event()
self.done = event.Event()
def add(self, service):
self.services.append(service)
@ -457,7 +462,8 @@ class Services(object):
# Each service has performed cleanup, now signal that the run_service
# wrapper threads can now die:
self.done.set()
if not self.done.ready():
self.done.send()
# reap threads:
self.tg.stop()
@ -467,7 +473,7 @@ class Services(object):
def restart(self):
self.stop()
self.done = threading.Event()
self.done = event.Event()
for restart_service in self.services:
restart_service.reset()
self.tg.add_thread(self.run_service, restart_service, self.done)
@ -482,14 +488,16 @@ class Services(object):
"""
service.start()
systemd.notify_once()
done.wait()
def launch(service, workers=None):
if workers:
launcher = ProcessLauncher()
launcher.launch_service(service, workers=workers)
else:
def launch(service, workers=1):
if workers is None or workers == 1:
launcher = ServiceLauncher()
launcher.launch_service(service)
else:
launcher = ProcessLauncher()
launcher.launch_service(service, workers=workers)
return launcher
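The reworked launch() above defaults to a single in-process ServiceLauncher and only forks a ProcessLauncher when workers > 1. A hedged usage sketch (MyService is hypothetical; real heat services subclass this module's Service):

from heat.openstack.common import service


class MyService(service.Service):
    def start(self):
        super(MyService, self).start()
        # schedule periodic work on self.tg here


launcher = service.launch(MyService())  # workers defaults to 1
launcher.wait()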

View File

@ -1,4 +1,3 @@
#
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -25,15 +24,15 @@ ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
"connecting clients."),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
"the server securely."),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
"the server securely."),
]

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@ -18,6 +17,7 @@
System-level utilities and helper functions.
"""
import math
import re
import sys
import unicodedata
@ -27,16 +27,21 @@ import six
from heat.openstack.common.gettextutils import _
# Used for looking up extensions of text
# to their 'multiplied' byte amount
BYTE_MULTIPLIERS = {
'': 1,
't': 1024 ** 4,
'g': 1024 ** 3,
'm': 1024 ** 2,
'k': 1024,
UNIT_PREFIX_EXPONENT = {
'k': 1,
'K': 1,
'Ki': 1,
'M': 2,
'Mi': 2,
'G': 3,
'Gi': 3,
'T': 4,
'Ti': 4,
}
UNIT_SYSTEM_INFO = {
'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}
BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)')
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
@ -93,7 +98,8 @@ def bool_from_string(subject, strict=False, default=False):
def safe_decode(text, incoming=None, errors='strict'):
"""Decodes incoming str using `incoming` if they're not already unicode.
"""Decodes incoming text/bytes string using `incoming` if they're not
already unicode.
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
@ -102,7 +108,7 @@ def safe_decode(text, incoming=None, errors='strict'):
representation of it.
:raises TypeError: If text is not an instance of str
"""
if not isinstance(text, six.string_types):
if not isinstance(text, (six.string_types, six.binary_type)):
raise TypeError("%s can't be decoded" % type(text))
if isinstance(text, six.text_type):
@ -132,7 +138,7 @@ def safe_decode(text, incoming=None, errors='strict'):
def safe_encode(text, incoming=None,
encoding='utf-8', errors='strict'):
"""Encodes incoming str/unicode using `encoding`.
"""Encodes incoming text/bytes string using `encoding`.
If incoming is not specified, text is expected to be encoded with
current python's default encoding. (`sys.getdefaultencoding`)
@ -145,7 +151,7 @@ def safe_encode(text, incoming=None,
representation of it.
:raises TypeError: If text is not an instance of str
"""
if not isinstance(text, six.string_types):
if not isinstance(text, (six.string_types, six.binary_type)):
raise TypeError("%s can't be encoded" % type(text))
if not incoming:
@ -153,49 +159,59 @@ def safe_encode(text, incoming=None,
sys.getdefaultencoding())
if isinstance(text, six.text_type):
if six.PY3:
return text.encode(encoding, errors).decode(incoming)
else:
return text.encode(encoding, errors)
return text.encode(encoding, errors)
elif text and encoding != incoming:
# Decode text before encoding it with `encoding`
text = safe_decode(text, incoming, errors)
if six.PY3:
return text.encode(encoding, errors).decode(incoming)
else:
return text.encode(encoding, errors)
return text
return text.encode(encoding, errors)
else:
return text
def to_bytes(text, default=0):
"""Converts a string into an integer of bytes.
def string_to_bytes(text, unit_system='IEC', return_int=False):
"""Converts a string into an float representation of bytes.
Looks at the last characters of the text to determine
what conversion is needed to turn the input text into a byte number.
Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)
The units supported for IEC ::
Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
KB, KiB, MB, MiB, GB, GiB, TB, TiB
The units supported for SI ::
kb(it), Mb(it), Gb(it), Tb(it)
kB, MB, GB, TB
Note that the SI unit system does not support capital letter 'K'.
:param text: String input for bytes size conversion.
:param default: Default return value when text is blank.
:param unit_system: Unit system for byte size conversion.
:param return_int: If True, returns integer representation of text
in bytes. (default: decimal)
:returns: Numerical representation of text in bytes.
:raises ValueError: If text has an invalid value.
"""
match = BYTE_REGEX.search(text)
try:
base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
except KeyError:
msg = _('Invalid unit system: "%s"') % unit_system
raise ValueError(msg)
match = reg_ex.match(text)
if match:
magnitude = int(match.group(1))
mult_key_org = match.group(2)
if not mult_key_org:
return magnitude
elif text:
msg = _('Invalid string format: %s') % text
raise TypeError(msg)
magnitude = float(match.group(1))
unit_prefix = match.group(2)
if match.group(3) in ['b', 'bit']:
magnitude /= 8
else:
return default
mult_key = mult_key_org.lower().replace('b', '', 1)
multiplier = BYTE_MULTIPLIERS.get(mult_key)
if multiplier is None:
msg = _('Unknown byte multiplier: %s') % mult_key_org
raise TypeError(msg)
return magnitude * multiplier
msg = _('Invalid string format: %s') % text
raise ValueError(msg)
if not unit_prefix:
res = magnitude
else:
res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
if return_int:
return int(math.ceil(res))
return res
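A few worked examples of the new string_to_bytes(), computed from the regexes and exponent table above (IEC scales by 1024, SI by 1000, and a lowercase 'b'/'bit' suffix divides by 8):

string_to_bytes('1KB')                     # -> 1024.0
string_to_bytes('1KiB')                    # -> 1024.0
string_to_bytes('1Kb')                     # -> 128.0 (bits, not bytes)
string_to_bytes('1kB', unit_system='SI')   # -> 1000.0 ('K' is IEC-only)
string_to_bytes('1.5MB', return_int=True)  # -> 1572864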
def to_slug(value, incoming=None, errors="strict"):

View File

@ -0,0 +1,104 @@
# Copyright 2012-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper module for systemd service readiness notification.
"""
import os
import socket
import sys
from heat.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _abstractify(socket_name):
if socket_name.startswith('@'):
# abstract namespace socket
socket_name = '\0%s' % socket_name[1:]
return socket_name
def _sd_notify(unset_env, msg):
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
sock.connect(_abstractify(notify_socket))
sock.sendall(msg)
if unset_env:
del os.environ['NOTIFY_SOCKET']
except EnvironmentError:
LOG.debug("Systemd notification failed", exc_info=True)
finally:
sock.close()
def notify():
"""Send notification to Systemd that service is ready.
For details see
http://www.freedesktop.org/software/systemd/man/sd_notify.html
"""
_sd_notify(False, 'READY=1')
def notify_once():
"""Send notification once to Systemd that service is ready.
Systemd sets NOTIFY_SOCKET environment variable with the name of the
socket listening for notifications from services.
This method removes the NOTIFY_SOCKET environment variable to ensure
notification is sent only once.
"""
_sd_notify(True, 'READY=1')
def onready(notify_socket, timeout):
"""Wait for systemd style notification on the socket.
:param notify_socket: local socket address
:type notify_socket: string
:param timeout: socket timeout
:type timeout: float
:returns: 0 service ready
1 service not ready
2 timeout occurred
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.settimeout(timeout)
sock.bind(_abstractify(notify_socket))
try:
msg = sock.recv(512)
except socket.timeout:
return 2
finally:
sock.close()
if 'READY=1' in msg:
return 0
else:
return 1
if __name__ == '__main__':
# simple CLI for testing
if len(sys.argv) == 1:
notify()
elif len(sys.argv) >= 2:
timeout = float(sys.argv[1])
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
retval = onready(notify_socket, timeout)
sys.exit(retval)
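A hedged sketch of how a Type=notify service would use the helpers above (the socket value mimics what systemd exports; the exact path is illustrative):

import os

from heat.openstack.common import systemd

# systemd sets this for Type=notify units, e.g.
# NOTIFY_SOCKET=@/org/freedesktop/systemd1/notify (an abstract socket).
if os.getenv('NOTIFY_SOCKET'):
    # Sends READY=1 once, then unsets NOTIFY_SOCKET so the same process
    # cannot notify twice.
    systemd.notify_once()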

View File

@ -1,4 +1,3 @@
#
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
@ -14,10 +13,22 @@
# License for the specific language governing permissions and limitations
# under the License.
##############################################################################
##############################################################################
##
## DO NOT MODIFY THIS FILE
##
## This file is being graduated to the heattest library. Please make all
## changes there, and only backport critical fixes here. - dhellmann
##
##############################################################################
##############################################################################
"""Common utilities used in testing"""
import logging
import os
import tempfile
import fixtures
import testtools
@ -35,6 +46,7 @@ class BaseTestCase(testtools.TestCase):
self._fake_logs()
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.tempdirs = []
def _set_timeout(self):
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
@ -70,3 +82,18 @@ class BaseTestCase(testtools.TestCase):
)
else:
logging.basicConfig(format=_LOG_FORMAT, level=level)
def create_tempfiles(self, files, ext='.conf'):
tempfiles = []
for (basename, contents) in files:
if not os.path.isabs(basename):
(fd, path) = tempfile.mkstemp(prefix=basename, suffix=ext)
else:
path = basename + ext
fd = os.open(path, os.O_CREAT | os.O_WRONLY)
tempfiles.append(path)
try:
os.write(fd, contents)
finally:
os.close(fd)
return tempfiles
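A hedged usage sketch for the new create_tempfiles() helper, assuming this module lives at heat.openstack.common.test (test and file names are invented; note os.write() takes the contents verbatim, so on Python 3 they would need to be bytes):

from heat.openstack.common import test


class ExampleTestCase(test.BaseTestCase):
    def test_reads_config(self):
        # Writes /tmp/myapp<random>.conf with the given contents and
        # returns the generated paths.
        paths = self.create_tempfiles([('myapp', '[DEFAULT]\ndebug=True\n')])
        self.assertTrue(paths[0].endswith('.conf'))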

View File

@ -1,4 +1,3 @@
#
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -12,10 +11,10 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import eventlet
from eventlet import greenpool
from eventlet import greenthread
from heat.openstack.common import log as logging
from heat.openstack.common import loopingcall
@ -52,7 +51,7 @@ class Thread(object):
class ThreadGroup(object):
"""The point of the ThreadGroup classis to:
"""The point of the ThreadGroup class is to:
* keep track of timers and greenthreads (making it easier to stop them
when need be).
@ -87,7 +86,7 @@ class ThreadGroup(object):
self.threads.remove(thread)
def stop(self):
current = greenthread.getcurrent()
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
@ -115,7 +114,7 @@ class ThreadGroup(object):
pass
except Exception as ex:
LOG.exception(ex)
current = greenthread.getcurrent()
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating

View File

@ -1,4 +1,3 @@
#
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#

View File

@ -1,4 +1,3 @@
#
# Copyright (c) 2012 Intel Corporation.
# All Rights Reserved.
#

View File

@ -1,4 +1,3 @@
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#

View File

@ -8,7 +8,6 @@ module=excutils
module=fixture
module=gettextutils
module=importutils
module=install_venv_common
module=jsonutils
module=local
module=log
@ -24,7 +23,6 @@ module=timeutils
module=uuidutils
module=config
module=strutils
module=py3kcompat
module=versionutils
module=test
module=crypto

View File

@ -11,6 +11,7 @@ oslo.config>=1.2.0
paramiko>=1.9.0
PasteDeploy>=1.5.0
pbr>=0.6,!=0.7,<1.0
posix_ipc
pycrypto>=2.6
python-ceilometerclient>=1.0.6
python-cinderclient>=1.0.6

View File

@ -1,15 +1,25 @@
#!/bin/bash
TEMPDIR=`mktemp -d /tmp/tmp.XXXXXXXXXX`
trap "rm -rf $TEMPDIR" EXIT
CFGFILE=heat.conf.sample
GENERATOR=tools/config/generate_sample.sh
mkdir -p $TEMPDIR/{a,b}/etc/heat
cp etc/heat/$CFGFILE $TEMPDIR/a/etc/heat
$GENERATOR -b ./ -p heat -o $TEMPDIR/b/etc/heat &> /dev/null
if [ ! -f "$TEMPDIR/b/etc/heat/$CFGFILE" ]; then
echo "Error: $CFGFILE can not be generated by $GENERATOR." 1>&2
exit 2
elif ! (cd $TEMPDIR; diff -Naur a b); then
echo "Error: $CFGFILE is not up to date, please run $GENERATOR." 1>&2
exit 42
#!/usr/bin/env bash
PROJECT_NAME=${PROJECT_NAME:-heat}
CFGFILE_NAME=${PROJECT_NAME}.conf.sample
if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then
CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
elif [ -e etc/${CFGFILE_NAME} ]; then
CFGFILE=etc/${CFGFILE_NAME}
else
echo "${0##*/}: can not find config file"
exit 1
fi
TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
trap "rm -rf $TEMPDIR" EXIT
tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
then
echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh."
exit 1
fi

View File

@ -4,8 +4,8 @@ print_hint() {
echo "Try \`${0##*/} --help' for more information." >&2
}
PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:o: \
--long help,base-dir:,package-name:,output-dir: -- "$@")
PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o: \
--long help,base-dir:,package-name:,output-dir:,module:,library: -- "$@")
if [ $? != 0 ] ; then print_hint ; exit 1 ; fi
@ -21,6 +21,8 @@ while true; do
echo "-b, --base-dir=DIR project base directory"
echo "-p, --package-name=NAME project package name"
echo "-o, --output-dir=DIR file output directory"
echo "-m, --module=MOD extra python module to interrogate for options"
echo "-l, --library=LIB extra library that registers options for discovery"
exit 0
;;
-b|--base-dir)
@ -38,6 +40,16 @@ while true; do
OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
shift
;;
-m|--module)
shift
MODULES="$MODULES -m $1"
shift
;;
-l|--library)
shift
LIBRARIES="$LIBRARIES -l $1"
shift
;;
--)
break
;;
@ -53,7 +65,7 @@ then
BASEDIR=$(cd "$BASEDIR" && pwd)
fi
PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}}
PACKAGENAME=${PACKAGENAME:-$(python setup.py --name)}
TARGETDIR=$BASEDIR/$PACKAGENAME
if ! [ -d $TARGETDIR ]
then
@ -77,12 +89,20 @@ find $TARGETDIR -type f -name "*.pyc" -delete
FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \
-exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)
EXTRA_MODULES_FILE="`dirname $0`/oslo.config.generator.rc"
if test -r "$EXTRA_MODULES_FILE"
RC_FILE="`dirname $0`/oslo.config.generator.rc"
if test -r "$RC_FILE"
then
source "$EXTRA_MODULES_FILE"
source "$RC_FILE"
fi
for mod in ${HEAT_CONFIG_GENERATOR_EXTRA_MODULES}; do
MODULES="$MODULES -m $mod"
done
for lib in ${HEAT_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do
LIBRARIES="$LIBRARIES -l $lib"
done
export EVENTLET_NO_GREENDNS=yes
OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
@ -90,7 +110,7 @@ OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
DEFAULT_MODULEPATH=heat.openstack.common.config.generator
MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
python -m $MODULEPATH $FILES > $OUTPUTFILE
python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE
# Hook to allow projects to append custom config file snippets
CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)