Sync oslo incubator

Synchronize all oslo modules, removing the ones that were removed upstream and that we no longer use.

Change-Id: Idca29b32ef0561c9c436d5a692a02acbe79027b1
parent 4f7da1b7fc
commit aef33d2d71
@@ -0,0 +1,17 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import six
+
+
+six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
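For context, a minimal sketch of what this shim enables, assuming mox (Python 2) or mox3 (Python 3) is installed in the environment; once the move is registered, test code on either interpreter can share one import:

    import six

    six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))

    # Resolves to the py2 'mox' package or the py3 'mox3.mox' module.
    from six.moves import mox

    m = mox.Mox()   # the same test-double factory API either way
    m.VerifyAll()   # passes trivially here; nothing was recorded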
@@ -1,5 +1,5 @@
-#
 # Copyright 2012 SINA Corporation
+# Copyright 2014 Cisco Systems, Inc.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,6 +19,7 @@
 from __future__ import print_function
 
+import argparse
 import imp
 import os
 import re
@@ -28,6 +29,7 @@ import textwrap
 
 from oslo.config import cfg
 import six
+import stevedore.named
 
 from heat.openstack.common import gettextutils
 from heat.openstack.common import importutils
@@ -39,6 +41,7 @@ BOOLOPT = "BoolOpt"
 INTOPT = "IntOpt"
 FLOATOPT = "FloatOpt"
 LISTOPT = "ListOpt"
+DICTOPT = "DictOpt"
 MULTISTROPT = "MultiStrOpt"
 
 OPT_TYPES = {
@@ -47,11 +50,12 @@ OPT_TYPES = {
     INTOPT: 'integer value',
     FLOATOPT: 'floating point value',
     LISTOPT: 'list value',
+    DICTOPT: 'dict value',
     MULTISTROPT: 'multi valued',
 }
 
 OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
-                                              FLOATOPT, LISTOPT,
+                                              FLOATOPT, LISTOPT, DICTOPT,
                                               MULTISTROPT]))
 
 PY_EXT = ".py"
@@ -60,34 +64,60 @@ BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
 WORDWRAP_WIDTH = 60
 
 
-def generate(srcfiles):
+def raise_extension_exception(extmanager, ep, err):
+    raise
+
+
+def generate(argv):
+    parser = argparse.ArgumentParser(
+        description='generate sample configuration file',
+    )
+    parser.add_argument('-m', dest='modules', action='append')
+    parser.add_argument('-l', dest='libraries', action='append')
+    parser.add_argument('srcfiles', nargs='*')
+    parsed_args = parser.parse_args(argv)
+
     mods_by_pkg = dict()
-    for filepath in srcfiles:
+    for filepath in parsed_args.srcfiles:
         pkg_name = filepath.split(os.sep)[1]
         mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
                             os.path.basename(filepath).split('.')[0]])
         mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
     # NOTE(lzyeval): place top level modules before packages
-    pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
-    pkg_names.sort()
-    ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
-    ext_names.sort()
+    pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
+    ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
     pkg_names.extend(ext_names)
 
     # opts_by_group is a mapping of group name to an options list
     # The options list is a list of (module, options) tuples
     opts_by_group = {'DEFAULT': []}
 
-    extra_modules = os.getenv("HEAT_CONFIG_GENERATOR_EXTRA_MODULES", "")
-    if extra_modules:
-        for module_name in extra_modules.split(','):
-            module_name = module_name.strip()
+    if parsed_args.modules:
+        for module_name in parsed_args.modules:
             module = _import_module(module_name)
             if module:
                 for group, opts in _list_opts(module):
                     opts_by_group.setdefault(group, []).append((module_name,
                                                                 opts))
 
+    # Look for entry points defined in libraries (or applications) for
+    # option discovery, and include their return values in the output.
+    #
+    # Each entry point should be a function returning an iterable
+    # of pairs with the group name (or None for the default group)
+    # and the list of Opt instances for that group.
+    if parsed_args.libraries:
+        loader = stevedore.named.NamedExtensionManager(
+            'oslo.config.opts',
+            names=list(set(parsed_args.libraries)),
+            invoke_on_load=False,
+            on_load_failure_callback=raise_extension_exception
+        )
+        for ext in loader:
+            for group, opts in ext.plugin():
+                opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
+                opt_list.append((ext.name, opts))
+
     for pkg_name in pkg_names:
         mods = mods_by_pkg.get(pkg_name)
         mods.sort()
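The new '-l' path discovers options through the 'oslo.config.opts' entry-point namespace. A hypothetical library could plug into it roughly like this (the module and option names are illustrative, not part of this change):

    # mylib/opts.py -- hypothetical module exposing options for discovery
    from oslo.config import cfg

    _opts = [
        cfg.StrOpt('endpoint', default='http://localhost:8004',
                   help='Hypothetical service endpoint'),
    ]


    def list_opts():
        # One (group, opts) pair per configuration group; None maps to
        # DEFAULT, matching the generator's `group or 'DEFAULT'` handling.
        return [(None, _opts)]

The library would then advertise the function in its setup.cfg entry points, along the lines of `oslo.config.opts = mylib = mylib.opts:list_opts`.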
@@ -121,8 +151,10 @@ def _import_module(mod_str):
 
 def _is_in_group(opt, group):
     "Check if opt is in group."
-    for key, value in group._opts.items():
-        if value['opt'] == opt:
+    for value in group._opts.values():
+        # NOTE(llu): Temporary workaround for bug #1262148, wait until
+        # newly released oslo.config support '==' operator.
+        if not(value['opt'] != opt):
             return True
     return False
 
@@ -133,7 +165,7 @@ def _guess_groups(opt, mod_obj):
         return 'DEFAULT'
 
     # what other groups is it in?
-    for key, value in cfg.CONF.items():
+    for value in cfg.CONF.values():
         if isinstance(value, cfg.CONF.GroupAttr):
             if _is_in_group(opt, value._group):
                 return value._group.name
@@ -202,7 +234,7 @@ def _sanitize_default(name, value):
         return value.replace(BASEDIR, '')
     elif value == _get_my_ip():
         return '10.0.0.1'
-    elif value == socket.gethostname() and 'host' in name:
+    elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
         return 'heat'
     elif value.strip() != value:
         return '"%s"' % value
@@ -220,7 +252,8 @@ def _print_opt(opt):
     except (ValueError, AttributeError) as err:
         sys.stderr.write("%s\n" % str(err))
         sys.exit(1)
-    opt_help += ' (' + OPT_TYPES[opt_type] + ')'
+    opt_help = u'%s (%s)' % (opt_help,
+                             OPT_TYPES[opt_type])
     print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
     if opt.deprecated_opts:
         for deprecated_opt in opt.deprecated_opts:
@@ -250,6 +283,11 @@ def _print_opt(opt):
     elif opt_type == LISTOPT:
         assert(isinstance(opt_default, list))
         print('#%s=%s' % (opt_name, ','.join(opt_default)))
+    elif opt_type == DICTOPT:
+        assert(isinstance(opt_default, dict))
+        opt_default_strlist = [str(key) + ':' + str(value)
+                               for (key, value) in opt_default.items()]
+        print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
     elif opt_type == MULTISTROPT:
         assert(isinstance(opt_default, list))
         if not opt_default:
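To illustrate the new DICTOPT branch with a hypothetical option, the join above renders a dict default as a key:value list:

    opt_default = {'cpu': '1', 'ram': '2'}
    print('#%s=%s' % ('weights',
                      ','.join(str(k) + ':' + str(v)
                               for k, v in opt_default.items())))
    # -> #weights=cpu:1,ram:2  (item order follows dict iteration order)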
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -26,7 +25,7 @@ import uuid
 
 
 def generate_request_id():
-    return 'req-%s' % str(uuid.uuid4())
+    return b'req-' + str(uuid.uuid4()).encode('ascii')
 
 
 class RequestContext(object):
@@ -99,3 +98,14 @@ def get_context_from_function_and_args(function, args, kwargs):
             return arg
 
     return None
+
+
+def is_user_context(context):
+    """Indicates if the request context is a normal user."""
+    if not context:
+        return False
+    if context.is_admin:
+        return False
+    if not context.user_id or not context.project_id:
+        return False
+    return True
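A small sketch exercising the new helper; FakeContext is a stand-in for any context object exposing the attributes is_user_context() inspects:

    from heat.openstack.common import context


    class FakeContext(object):
        is_admin = False
        user_id = 'alice'
        project_id = 'demo'


    print(context.is_user_context(FakeContext()))  # True
    print(context.is_user_context(None))           # False (no context)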
@@ -1,4 +1,3 @@
-#
 # Copyright 2013 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -1,4 +1,3 @@
-#
 # Copyright (c) 2013 Rackspace Hosting
 # All Rights Reserved.
 #
@@ -1,4 +1,3 @@
-#
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -1,4 +1,3 @@
-#
 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
@@ -71,7 +70,7 @@ class ModelBase(six.Iterator):
         return []
 
     def __iter__(self):
-        columns = dict(object_mapper(self).columns).keys()
+        columns = list(dict(object_mapper(self).columns).keys())
         # NOTE(russellb): Allow models to specify other keys that can be looked
         # up, beyond the actual db columns. An example would be the 'name'
         # property for an Instance.
@@ -1,4 +1,3 @@
-#
 # Copyright 2013 Mirantis.inc
 # All Rights Reserved.
 #
@@ -1,4 +1,3 @@
-#
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -368,7 +367,7 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
             return [columns]
         return columns[len(uniqbase):].split("0")[1:]
 
-    if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
+    if engine_name not in ("ibm_db_sa", "mysql", "sqlite", "postgresql"):
         return
 
     # FIXME(johannes): The usage of the .message attribute has been
@@ -490,7 +489,7 @@ def _thread_yield(dbapi_con, con_record):
 
 
 def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
-    """Ensures that MySQL and DB2 connections are alive.
+    """Ensures that MySQL, PostgreSQL or DB2 connections are alive.
 
     Borrowed from:
     http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
@@ -646,7 +645,7 @@ def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
 
     sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
 
-    if engine.name in ['mysql', 'ibm_db_sa']:
+    if engine.name in ('ibm_db_sa', 'mysql', 'postgresql'):
         ping_callback = functools.partial(_ping_listener, engine)
         sqlalchemy.event.listen(engine, 'checkout', ping_callback)
     if engine.name == 'mysql':
@@ -1,4 +1,3 @@
-#
 # Copyright 2010-2011 OpenStack Foundation
 # Copyright 2012-2013 IBM Corp.
 # All Rights Reserved.
@@ -1,4 +1,3 @@
-#
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2010-2011 OpenStack Foundation.
@@ -255,6 +254,14 @@ def get_table(engine, name):
 
     Needed because the models don't work for us in migrations
     as models will be far out of sync with the current data.
+
+    .. warning::
+
+       Do not use this method when creating ForeignKeys in database migrations
+       because sqlalchemy needs the same MetaData object to hold information
+       about the parent table and the reference table in the ForeignKey. This
+       method uses a unique MetaData object per table object so it won't work
+       with ForeignKey creation.
     """
     metadata = MetaData()
     metadata.bind = engine
@@ -1,4 +1,3 @@
-#
 # Copyright (c) 2012 OpenStack Foundation.
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -30,7 +29,7 @@ import eventlet.backdoor
 import greenlet
 from oslo.config import cfg
 
-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _LI
 from heat.openstack.common import log as logging
 
 help_for_backdoor_port = (
@@ -138,8 +137,10 @@ def initialize_if_enabled():
     # In the case of backdoor port being zero, a port number is assigned by
     # listen(). In any case, pull the port number out here.
     port = sock.getsockname()[1]
-    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
-             {'port': port, 'pid': os.getpid()})
+    LOG.info(
+        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
+        {'port': port, 'pid': os.getpid()}
+    )
     eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                      locals=backdoor_locals)
     return port
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # Copyright 2012, Red Hat, Inc.
 #
@@ -25,7 +24,7 @@ import traceback
 
 import six
 
-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _LE
 
 
 class save_and_reraise_exception(object):
@@ -50,9 +49,22 @@ class save_and_reraise_exception(object):
               decide_if_need_reraise()
              if not should_be_reraised:
                   ctxt.reraise = False
+
+    If another exception occurs and reraise flag is False,
+    the saved exception will not be logged.
+
+    If the caller wants to raise new exception during exception handling
+    he/she sets reraise to False initially with an ability to set it back to
+    True if needed::
+
+        except Exception:
+            with save_and_reraise_exception(reraise=False) as ctxt:
+                [if statements to determine whether to raise a new exception]
+                # Not raising a new exception, so reraise
+                ctxt.reraise = True
     """
-    def __init__(self):
-        self.reraise = True
+    def __init__(self, reraise=True):
+        self.reraise = reraise
 
     def __enter__(self):
         self.type_, self.value, self.tb, = sys.exc_info()
@@ -60,10 +72,11 @@ class save_and_reraise_exception(object):
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         if exc_type is not None:
-            logging.error(_('Original exception being dropped: %s'),
-                          traceback.format_exception(self.type_,
-                                                     self.value,
-                                                     self.tb))
+            if self.reraise:
+                logging.error(_LE('Original exception being dropped: %s'),
+                              traceback.format_exception(self.type_,
+                                                         self.value,
+                                                         self.tb))
             return False
         if self.reraise:
             six.reraise(self.type_, self.value, self.tb)
@@ -89,8 +102,8 @@ def forever_retry_uncaught_exceptions(infunc):
             if (cur_time - last_log_time > 60 or
                     this_exc_message != last_exc_message):
                 logging.exception(
-                    _('Unexpected exception occurred %d time(s)... '
-                      'retrying.') % exc_count)
+                    _LE('Unexpected exception occurred %d time(s)... '
+                        'retrying.') % exc_count)
                 last_log_time = cur_time
                 last_exc_message = this_exc_message
                 exc_count = 0
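A compact sketch of the new reraise=False mode (the policy function is illustrative):

    from heat.openstack.common import excutils


    def should_propagate():
        return False  # illustrative policy decision


    def cleanup():
        try:
            raise ValueError('original failure')
        except ValueError:
            with excutils.save_and_reraise_exception(reraise=False) as ctxt:
                # The original error is swallowed unless the handler opts
                # back in by flipping reraise to True.
                if should_propagate():
                    ctxt.reraise = True


    cleanup()  # returns quietly because reraise stayed False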
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -20,7 +19,6 @@ import os
 import tempfile
 
 from heat.openstack.common import excutils
-from heat.openstack.common.gettextutils import _
 from heat.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
@@ -60,7 +58,7 @@ def read_cached_file(filename, force_reload=False):
     cache_info = _FILE_CACHE.setdefault(filename, {})
 
     if not cache_info or mtime > cache_info.get('mtime', 0):
-        LOG.debug(_("Reloading cached file %s") % filename)
+        LOG.debug("Reloading cached file %s" % filename)
         with open(filename) as fap:
             cache_info['data'] = fap.read()
         cache_info['mtime'] = mtime
@@ -21,16 +21,10 @@ import six
 
 
 class Config(fixtures.Fixture):
-    """Override some configuration values.
+    """Allows overriding configuration settings for the test.
 
-    The keyword arguments are the names of configuration options to
-    override and their values.
-
-    If a group argument is supplied, the overrides are applied to
-    the specified configuration option group.
-
-    All overrides are automatically cleared at the end of the current
-    test by the reset() method, which is registered by addCleanup().
+    `conf` will be reset on cleanup.
     """
 
     def __init__(self, conf=cfg.CONF):
@@ -38,9 +32,54 @@ class Config(fixtures.Fixture):
 
     def setUp(self):
         super(Config, self).setUp()
+        # NOTE(morganfainberg): unregister must be added to cleanup before
+        # reset is because cleanup works in reverse order of registered items,
+        # and a reset must occur before unregistering options can occur.
+        self.addCleanup(self._unregister_config_opts)
         self.addCleanup(self.conf.reset)
+        self._registered_config_opts = {}
 
     def config(self, **kw):
+        """Override configuration values.
+
+        The keyword arguments are the names of configuration options to
+        override and their values.
+
+        If a `group` argument is supplied, the overrides are applied to
+        the specified configuration option group, otherwise the overrides
+        are applied to the ``default`` group.
+
+        """
+
         group = kw.pop('group', None)
         for k, v in six.iteritems(kw):
             self.conf.set_override(k, v, group)
+
+    def _unregister_config_opts(self):
+        for group in self._registered_config_opts:
+            self.conf.unregister_opts(self._registered_config_opts[group],
+                                      group=group)
+
+    def register_opt(self, opt, group=None):
+        """Register a single option for the test run.
+
+        Options registered in this manner will automatically be unregistered
+        during cleanup.
+
+        If a `group` argument is supplied, it will register the new option
+        to that group, otherwise the option is registered to the ``default``
+        group.
+        """
+        self.conf.register_opt(opt, group=group)
+        self._registered_config_opts.setdefault(group, set()).add(opt)
+
+    def register_opts(self, opts, group=None):
+        """Register multiple options for the test run.
+
+        This works in the same manner as register_opt() but takes a list of
+        options as the first argument. All arguments will be registered to the
+        same group if the ``group`` argument is supplied, otherwise all options
+        will be registered to the ``default`` group.
+        """
+        for opt in opts:
+            self.register_opt(opt, group=group)
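A sketch of the new register_opt()/config() round trip in a test (the test class and option name are illustrative):

    from oslo.config import cfg
    import testtools

    from heat.openstack.common.fixture import config


    class ConfigExampleTest(testtools.TestCase):
        def test_override(self):
            fix = self.useFixture(config.Config())
            # Registered options are unregistered again on cleanup, after
            # the overrides have been reset.
            fix.register_opt(cfg.IntOpt('workers', default=1))
            fix.config(workers=4)
            self.assertEqual(4, fix.conf.workers)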
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -49,4 +48,4 @@ class LockFixture(fixtures.Fixture):
     def setUp(self):
         super(LockFixture, self).setUp()
         self.addCleanup(self.mgr.__exit__, None, None, None)
-        self.mgr.__enter__()
+        self.lock = self.mgr.__enter__()
heat/openstack/common/fixture/logging.py (new file, 34 lines)
@@ -0,0 +1,34 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+
+
+def get_logging_handle_error_fixture():
+    """returns a fixture to make logging raise formatting exceptions.
+
+    Usage:
+    self.useFixture(logging.get_logging_handle_error_fixture())
+    """
+    return fixtures.MonkeyPatch('logging.Handler.handleError',
+                                _handleError)
+
+
+def _handleError(self, record):
+    """Monkey patch for logging.Handler.handleError.
+
+    The default handleError just logs the error to stderr but we want
+    the option of actually raising an exception.
+    """
+    raise
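A sketch of the fixture in use (the test class is illustrative): with it installed, a mis-formatted log call raises instead of printing a traceback to stderr.

    import logging as pylogging

    import testtools

    from heat.openstack.common.fixture import logging as logging_fixture


    class LogFormatTest(testtools.TestCase):
        def test_bad_format_raises(self):
            pylogging.basicConfig()  # ensure a handler exists to hit emit()
            self.useFixture(
                logging_fixture.get_logging_handle_error_fixture())
            # One placeholder, two arguments: the formatting error is now
            # re-raised by the patched handleError instead of swallowed.
            self.assertRaises(TypeError,
                              pylogging.error, 'oops %s', 'a', 'b')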
@@ -1,4 +1,3 @@
-#
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2013 Hewlett-Packard Development Company, L.P.
@@ -16,6 +15,17 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+##############################################################################
+##############################################################################
+##
+## DO NOT MODIFY THIS FILE
+##
+## This file is being graduated to the heattest library. Please make all
+## changes there, and only backport critical fixes here. - dhellmann
+##
+##############################################################################
+##############################################################################
 
 import fixtures
 import mock
@@ -23,14 +33,15 @@ import mock
 class PatchObject(fixtures.Fixture):
     """Deal with code around mock."""
 
-    def __init__(self, obj, attr, **kwargs):
+    def __init__(self, obj, attr, new=mock.DEFAULT, **kwargs):
         self.obj = obj
         self.attr = attr
         self.kwargs = kwargs
+        self.new = new
 
     def setUp(self):
         super(PatchObject, self).setUp()
-        _p = mock.patch.object(self.obj, self.attr, **self.kwargs)
+        _p = mock.patch.object(self.obj, self.attr, self.new, **self.kwargs)
         self.mock = _p.start()
         self.addCleanup(_p.stop)
 
@@ -39,12 +50,13 @@ class Patch(fixtures.Fixture):
 
     """Deal with code around mock.patch."""
 
-    def __init__(self, obj, **kwargs):
+    def __init__(self, obj, new=mock.DEFAULT, **kwargs):
         self.obj = obj
         self.kwargs = kwargs
+        self.new = new
 
     def setUp(self):
         super(Patch, self).setUp()
-        _p = mock.patch(self.obj, **self.kwargs)
+        _p = mock.patch(self.obj, self.new, **self.kwargs)
         self.mock = _p.start()
         self.addCleanup(_p.stop)
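Illustrative use of the new `new` argument, substituting a concrete replacement rather than the default MagicMock (the Clock class is hypothetical):

    import testtools

    from heat.openstack.common.fixture import mockpatch


    class Clock(object):
        def now(self):
            return 42


    class FrozenClockTest(testtools.TestCase):
        def test_frozen(self):
            # new= installs the given object directly; omitting it keeps
            # the previous behavior of patching in a MagicMock.
            self.useFixture(
                mockpatch.PatchObject(Clock, 'now', new=lambda self: 0))
            self.assertEqual(0, Clock().now())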
@@ -1,4 +1,3 @@
-#
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2013 Hewlett-Packard Development Company, L.P.
@@ -16,8 +15,19 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+##############################################################################
+##############################################################################
+##
+## DO NOT MODIFY THIS FILE
+##
+## This file is being graduated to the heattest library. Please make all
+## changes there, and only backport critical fixes here. - dhellmann
+##
+##############################################################################
+##############################################################################
 
 import fixtures
-import mox
+from six.moves import mox
 
 
 class MoxStubout(fixtures.Fixture):
@@ -1,4 +1,3 @@
-#
 # Copyright 2012 Red Hat, Inc.
 # Copyright 2013 IBM Corp.
 # All Rights Reserved.
@@ -29,7 +28,6 @@ import gettext
 import locale
 from logging import handlers
 import os
-import re
 
 from babel import localedata
 import six
@@ -249,47 +247,22 @@ class Message(six.text_type):
         if other is None:
             params = (other,)
         elif isinstance(other, dict):
-            params = self._trim_dictionary_parameters(other)
+            # Merge the dictionaries
+            # Copy each item in case one does not support deep copy.
+            params = {}
+            if isinstance(self.params, dict):
+                for key, val in self.params.items():
+                    params[key] = self._copy_param(val)
+            for key, val in other.items():
+                params[key] = self._copy_param(val)
         else:
             params = self._copy_param(other)
         return params
 
-    def _trim_dictionary_parameters(self, dict_param):
-        """Return a dict that only has matching entries in the msgid."""
-        # NOTE(luisg): Here we trim down the dictionary passed as parameters
-        # to avoid carrying a lot of unnecessary weight around in the message
-        # object, for example if someone passes in Message() % locals() but
-        # only some params are used, and additionally we prevent errors for
-        # non-deepcopyable objects by unicoding() them.
-
-        # Look for %(param) keys in msgid;
-        # Skip %% and deal with the case where % is first character on the line
-        keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
-
-        # If we don't find any %(param) keys but have a %s
-        if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
-            # Apparently the full dictionary is the parameter
-            params = self._copy_param(dict_param)
-        else:
-            params = {}
-            # Save our existing parameters as defaults to protect
-            # ourselves from losing values if we are called through an
-            # (erroneous) chain that builds a valid Message with
-            # arguments, and then does something like "msg % kwds"
-            # where kwds is an empty dictionary.
-            src = {}
-            if isinstance(self.params, dict):
-                src.update(self.params)
-            src.update(dict_param)
-            for key in keys:
-                params[key] = self._copy_param(src[key])
-
-        return params
-
     def _copy_param(self, param):
         try:
             return copy.deepcopy(param)
-        except TypeError:
+        except Exception:
             # Fallback to casting to unicode this will handle the
             # python code-like objects that can't be deep-copied
             return six.text_type(param)
@@ -301,13 +274,14 @@ class Message(six.text_type):
     def __radd__(self, other):
         return self.__add__(other)
 
-    def __str__(self):
-        # NOTE(luisg): Logging in python 2.6 tries to str() log records,
-        # and it expects specifically a UnicodeError in order to proceed.
-        msg = _('Message objects do not support str() because they may '
-                'contain non-ascii characters. '
-                'Please use unicode() or translate() instead.')
-        raise UnicodeError(msg)
+    if six.PY2:
+        def __str__(self):
+            # NOTE(luisg): Logging in python 2.6 tries to str() log records,
+            # and it expects specifically a UnicodeError in order to proceed.
+            msg = _('Message objects do not support str() because they may '
+                    'contain non-ascii characters. '
+                    'Please use unicode() or translate() instead.')
+            raise UnicodeError(msg)
 
 
 def get_available_languages(domain):
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -59,6 +58,13 @@ def import_module(import_str):
     return sys.modules[import_str]
 
 
+def import_versioned_module(version, submodule=None):
+    module = 'heat.v%s' % version
+    if submodule:
+        module = '.'.join((module, submodule))
+    return import_module(module)
+
+
 def try_import(import_str, default=None):
     """Try to import a module and if it fails return default."""
     try:
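An illustrative call, assuming a versioned package such as heat.v1.client exists on the import path:

    from heat.openstack.common import importutils

    # Equivalent to `import heat.v1.client`; returns that module object.
    client = importutils.import_versioned_module('1', submodule='client')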
@@ -1,4 +1,3 @@
-#
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2011 Justin Santa Barbara
@@ -37,17 +36,9 @@ import functools
 import inspect
 import itertools
 import json
-try:
-    import xmlrpclib
-except ImportError:
-    # NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3
-    #                 however the function and object call signatures
-    #                 remained the same. This whole try/except block should
-    #                 be removed and replaced with a call to six.moves once
-    #                 six 1.4.2 is released. See http://bit.ly/1bqrVzu
-    import xmlrpc.client as xmlrpclib
 
 import six
+import six.moves.xmlrpc_client as xmlrpclib
 
 from heat.openstack.common import gettextutils
 from heat.openstack.common import importutils
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -16,6 +15,7 @@
 
 import contextlib
 import errno
+import fcntl
 import functools
 import os
 import shutil
@@ -29,7 +29,7 @@ import weakref
 from oslo.config import cfg
 
 from heat.openstack.common import fileutils
-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _, _LE, _LI
 from heat.openstack.common import log as logging
 
 
@@ -41,7 +41,7 @@ util_opts = [
            help='Whether to disable inter-process locks'),
     cfg.StrOpt('lock_path',
                default=os.environ.get("HEAT_LOCK_PATH"),
-               help=('Directory to use for lock files.'))
+               help='Directory to use for lock files.')
 ]
@@ -53,7 +53,7 @@ def set_defaults(lock_path):
     cfg.set_defaults(util_opts, lock_path=lock_path)
 
 
-class _InterProcessLock(object):
+class _FileLock(object):
     """Lock implementation which allows multiple locks, working around
     issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
     not require any cleanup. Since the lock is always held on a file
@@ -75,12 +75,12 @@ class _InterProcessLock(object):
         self.lockfile = None
         self.fname = name
 
-    def __enter__(self):
+    def acquire(self):
         basedir = os.path.dirname(self.fname)
 
         if not os.path.exists(basedir):
             fileutils.ensure_tree(basedir)
-            LOG.info(_('Created lock path: %s'), basedir)
+            LOG.info(_LI('Created lock path: %s'), basedir)
 
         self.lockfile = open(self.fname, 'w')
 
@@ -91,24 +91,40 @@ class _InterProcessLock(object):
                 # Also upon reading the MSDN docs for locking(), it seems
                 # to have a laughable 10 attempts "blocking" mechanism.
                 self.trylock()
-                LOG.debug(_('Got file lock "%s"'), self.fname)
-                return self
+                LOG.debug('Got file lock "%s"', self.fname)
+                return True
             except IOError as e:
                 if e.errno in (errno.EACCES, errno.EAGAIN):
                     # external locks synchronise things like iptables
                     # updates - give it some time to prevent busy spinning
                     time.sleep(0.01)
                 else:
-                    raise
+                    raise threading.ThreadError(_("Unable to acquire lock on"
+                                                  " `%(filename)s` due to"
+                                                  " %(exception)s") %
+                                                {
+                                                    'filename': self.fname,
+                                                    'exception': e,
+                                                })
+
+    def __enter__(self):
+        self.acquire()
+        return self
 
-    def __exit__(self, exc_type, exc_val, exc_tb):
+    def release(self):
         try:
             self.unlock()
             self.lockfile.close()
+            LOG.debug('Released file lock "%s"', self.fname)
         except IOError:
-            LOG.exception(_("Could not release the acquired lock `%s`"),
+            LOG.exception(_LE("Could not release the acquired lock `%s`"),
                           self.fname)
-        LOG.debug(_('Released file lock "%s"'), self.fname)
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.release()
+
+    def exists(self):
+        return os.path.exists(self.fname)
 
     def trylock(self):
         raise NotImplementedError()
@@ -117,7 +133,7 @@ class _InterProcessLock(object):
         raise NotImplementedError()
 
 
-class _WindowsLock(_InterProcessLock):
+class _WindowsLock(_FileLock):
     def trylock(self):
         msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
 
@@ -125,7 +141,7 @@ class _WindowsLock(_InterProcessLock):
         msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
 
 
-class _PosixLock(_InterProcessLock):
+class _FcntlLock(_FileLock):
     def trylock(self):
         fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
 
@@ -133,35 +149,106 @@ class _PosixLock(_InterProcessLock):
         fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
 
 
+class _PosixLock(object):
+    def __init__(self, name):
+        # Hash the name because it's not valid to have POSIX semaphore
+        # names with things like / in them. Then use base64 to encode
+        # the digest() instead taking the hexdigest() because the
+        # result is shorter and most systems can't have shm sempahore
+        # names longer than 31 characters.
+        h = hashlib.sha1()
+        h.update(name.encode('ascii'))
+        self.name = str((b'/' + base64.urlsafe_b64encode(
+            h.digest())).decode('ascii'))
+
+    def acquire(self, timeout=None):
+        self.semaphore = posix_ipc.Semaphore(self.name,
+                                             flags=posix_ipc.O_CREAT,
+                                             initial_value=1)
+        self.semaphore.acquire(timeout)
+        return self
+
+    def __enter__(self):
+        self.acquire()
+        return self
+
+    def release(self):
+        self.semaphore.release()
+        self.semaphore.close()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.release()
+
+    def exists(self):
+        try:
+            semaphore = posix_ipc.Semaphore(self.name)
+        except posix_ipc.ExistentialError:
+            return False
+        else:
+            semaphore.close()
+        return True
+
+
 if os.name == 'nt':
     import msvcrt
     InterProcessLock = _WindowsLock
+    FileLock = _WindowsLock
 else:
-    import fcntl
+    import base64
+    import hashlib
+    import posix_ipc
     InterProcessLock = _PosixLock
+    FileLock = _FcntlLock
 
 _semaphores = weakref.WeakValueDictionary()
 _semaphores_lock = threading.Lock()
 
 
-def external_lock(name, lock_file_prefix=None):
-    with internal_lock(name):
-        LOG.debug(_('Attempting to grab external lock "%(lock)s"'),
-                  {'lock': name})
-
-        # NOTE(mikal): the lock name cannot contain directory
-        # separators
-        name = name.replace(os.sep, '_')
-        if lock_file_prefix:
-            sep = '' if lock_file_prefix.endswith('-') else '-'
-            name = '%s%s%s' % (lock_file_prefix, sep, name)
-
-        if not CONF.lock_path:
-            raise cfg.RequiredOptError('lock_path')
-
-        lock_file_path = os.path.join(CONF.lock_path, name)
-
-        return InterProcessLock(lock_file_path)
+def _get_lock_path(name, lock_file_prefix, lock_path=None):
+    # NOTE(mikal): the lock name cannot contain directory
+    # separators
+    name = name.replace(os.sep, '_')
+    if lock_file_prefix:
+        sep = '' if lock_file_prefix.endswith('-') else '-'
+        name = '%s%s%s' % (lock_file_prefix, sep, name)
+
+    local_lock_path = lock_path or CONF.lock_path
+
+    if not local_lock_path:
+        # NOTE(bnemec): Create a fake lock path for posix locks so we don't
+        # unnecessarily raise the RequiredOptError below.
+        if InterProcessLock is not _PosixLock:
+            raise cfg.RequiredOptError('lock_path')
+        local_lock_path = 'posixlock:/'
+
+    return os.path.join(local_lock_path, name)
+
+
+def external_lock(name, lock_file_prefix=None, lock_path=None):
+    LOG.debug('Attempting to grab external lock "%(lock)s"',
+              {'lock': name})
+
+    lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
+
+    # NOTE(bnemec): If an explicit lock_path was passed to us then it
+    # means the caller is relying on file-based locking behavior, so
+    # we can't use posix locks for those calls.
+    if lock_path:
+        return FileLock(lock_file_path)
+    return InterProcessLock(lock_file_path)
+
+
+def remove_external_lock_file(name, lock_file_prefix=None):
+    """Remove a external lock file when it's not used anymore
+    This will be helpful when we have a lot of lock files
+    """
+    with internal_lock(name):
+        lock_file_path = _get_lock_path(name, lock_file_prefix)
+        try:
+            os.remove(lock_file_path)
+        except OSError:
+            LOG.info(_LI('Failed to remove file %(file)s'),
+                     {'file': lock_file_path})
+
+
 def internal_lock(name):
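For intuition, the semaphore-name hashing above maps an arbitrary lock name onto a short, slash-free identifier; a standalone sketch of the same arithmetic:

    import base64
    import hashlib

    h = hashlib.sha1()
    h.update('heat-engine'.encode('ascii'))
    name = (b'/' + base64.urlsafe_b64encode(h.digest())).decode('ascii')
    print(name, len(name))  # 29 chars: within the ~31-char limit noted above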
@@ -172,12 +259,12 @@ def internal_lock(name):
             sem = threading.Semaphore()
             _semaphores[name] = sem
 
-    LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
+    LOG.debug('Got semaphore "%(lock)s"', {'lock': name})
     return sem
 
 
 @contextlib.contextmanager
-def lock(name, lock_file_prefix=None, external=False):
+def lock(name, lock_file_prefix=None, external=False, lock_path=None):
     """Context based lock
 
     This function yields a `threading.Semaphore` instance (if we don't use
@@ -192,15 +279,17 @@ def lock(name, lock_file_prefix=None, external=False):
     workers both run a a method decorated with @synchronized('mylock',
     external=True), only one of them will execute at a time.
     """
-    if external and not CONF.disable_process_locking:
-        lock = external_lock(name, lock_file_prefix)
-    else:
-        lock = internal_lock(name)
-    with lock:
-        yield lock
+    int_lock = internal_lock(name)
+    with int_lock:
+        if external and not CONF.disable_process_locking:
+            ext_lock = external_lock(name, lock_file_prefix, lock_path)
+            with ext_lock:
+                yield ext_lock
+        else:
+            yield int_lock
 
 
-def synchronized(name, lock_file_prefix=None, external=False):
+def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
     """Synchronization decorator.
 
     Decorating a method like so::
@@ -228,12 +317,12 @@ def synchronized(name, lock_file_prefix=None, external=False):
         @functools.wraps(f)
         def inner(*args, **kwargs):
             try:
-                with lock(name, lock_file_prefix, external):
-                    LOG.debug(_('Got semaphore / lock "%(function)s"'),
+                with lock(name, lock_file_prefix, external, lock_path):
+                    LOG.debug('Got semaphore / lock "%(function)s"',
                               {'function': f.__name__})
                     return f(*args, **kwargs)
             finally:
-                LOG.debug(_('Semaphore / lock released "%(function)s"'),
+                LOG.debug('Semaphore / lock released "%(function)s"',
                           {'function': f.__name__})
         return inner
     return wrap
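A short sketch of the decorator with the new lock_path argument (the function name and path are illustrative):

    from heat.openstack.common import lockutils


    @lockutils.synchronized('stack-update', lock_file_prefix='heat-',
                            external=True, lock_path='/var/lock/heat')
    def update_stack(stack_id):
        # Only one process on the host runs this body at a time. Passing
        # an explicit lock_path forces file-based locking rather than the
        # new POSIX-semaphore default.
        pass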
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
@@ -16,7 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-"""Openstack logging handler.
+"""OpenStack logging handler.
 
 This module adds to logging functionality by adding the option to specify
 a context object when calling the various log methods. If the context object
@@ -116,10 +115,21 @@ logging_cli_opts = [
                  '--log-file paths'),
     cfg.BoolOpt('use-syslog',
                 default=False,
-                help='Use syslog for logging.'),
+                help='Use syslog for logging. '
+                     'Existing syslog format is DEPRECATED during I, '
+                     'and then will be changed in J to honor RFC5424'),
+    cfg.BoolOpt('use-syslog-rfc-format',
+                # TODO(bogdando) remove or use True after existing
+                #    syslog format deprecation in J
+                default=False,
+                help='(Optional) Use syslog rfc5424 format for logging. '
+                     'If enabled, will add APP-NAME (RFC5424) before the '
+                     'MSG part of the syslog message. The old format '
+                     'without APP-NAME is deprecated in I, '
+                     'and will be removed in J.'),
     cfg.StrOpt('syslog-log-facility',
                default='LOG_USER',
-               help='syslog facility to receive log lines')
+               help='Syslog facility to receive log lines')
 ]
 
 generic_log_opts = [
@@ -133,18 +143,18 @@ log_opts = [
                default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                        '%(name)s [%(request_id)s %(user_identity)s] '
                        '%(instance)s%(message)s',
-               help='format string to use for log messages with context'),
+               help='Format string to use for log messages with context'),
     cfg.StrOpt('logging_default_format_string',
                default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                        '%(name)s [-] %(instance)s%(message)s',
-               help='format string to use for log messages without context'),
+               help='Format string to use for log messages without context'),
     cfg.StrOpt('logging_debug_format_suffix',
                default='%(funcName)s %(pathname)s:%(lineno)d',
-               help='data to append to log format when level is DEBUG'),
+               help='Data to append to log format when level is DEBUG'),
     cfg.StrOpt('logging_exception_prefix',
                default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                        '%(instance)s',
-               help='prefix each line of exception output with this format'),
+               help='Prefix each line of exception output with this format'),
     cfg.ListOpt('default_log_levels',
                 default=[
                     'amqp=WARN',
@@ -153,15 +163,17 @@ log_opts = [
                     'qpid=WARN',
                     'sqlalchemy=WARN',
                     'suds=INFO',
+                    'oslo.messaging=INFO',
                     'iso8601=WARN',
+                    'requests.packages.urllib3.connectionpool=WARN'
                 ],
-                help='list of logger=LEVEL pairs'),
+                help='List of logger=LEVEL pairs'),
     cfg.BoolOpt('publish_errors',
                 default=False,
-                help='publish error events'),
+                help='Publish error events'),
     cfg.BoolOpt('fatal_deprecations',
                 default=False,
-                help='make deprecations fatal'),
+                help='Make deprecations fatal'),

     # NOTE(mikal): there are two options here because sometimes we are handed
     # a full instance (and could include more information), and other times we
@@ -293,18 +305,39 @@ class ContextAdapter(BaseLoggerAdapter):
         self.logger = logger
         self.project = project_name
         self.version = version_string
+        self._deprecated_messages_sent = dict()

     @property
     def handlers(self):
         return self.logger.handlers

     def deprecated(self, msg, *args, **kwargs):
+        """Call this method when a deprecated feature is used.
+
+        If the system is configured for fatal deprecations then the message
+        is logged at the 'critical' level and :class:`DeprecatedConfig` will
+        be raised.
+
+        Otherwise, the message will be logged (once) at the 'warn' level.
+
+        :raises: :class:`DeprecatedConfig` if the system is configured for
+                 fatal deprecations.
+
+        """
         stdmsg = _("Deprecated: %s") % msg
         if CONF.fatal_deprecations:
             self.critical(stdmsg, *args, **kwargs)
             raise DeprecatedConfig(msg=stdmsg)
-        else:
-            self.warn(stdmsg, *args, **kwargs)
+
+        # Using a list because a tuple with dict can't be stored in a set.
+        sent_args = self._deprecated_messages_sent.setdefault(msg, list())
+
+        if args in sent_args:
+            # Already logged this message, so don't log it again.
+            return
+
+        sent_args.append(args)
+        self.warn(stdmsg, *args, **kwargs)

     def process(self, msg, kwargs):
         # NOTE(mrodden): catch any Message/other object and
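The net effect of the new _deprecated_messages_sent bookkeeping is once-per-(message, args) warning semantics. A minimal sketch of the resulting behavior, assuming an adapter obtained from this module's getLogger (the message text and arguments are hypothetical):

    adapter = getLogger('heat.demo')                      # a ContextAdapter
    adapter.deprecated('option %s is deprecated', 'foo')  # logged at WARN
    adapter.deprecated('option %s is deprecated', 'foo')  # suppressed: same msg and args
    adapter.deprecated('option %s is deprecated', 'bar')  # logged: new args for same msg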
@@ -325,7 +358,7 @@ class ContextAdapter(BaseLoggerAdapter):
             extra.update(_dictify_context(context))

         instance = kwargs.pop('instance', None)
-        instance_uuid = (extra.get('instance_uuid', None) or
+        instance_uuid = (extra.get('instance_uuid') or
                          kwargs.pop('instance_uuid', None))
         instance_extra = ''
         if instance:
@@ -421,12 +454,12 @@ def _load_log_config(log_config_append):
         raise LogConfigError(log_config_append, str(exc))


-def setup(product_name):
+def setup(product_name, version='unknown'):
     """Setup logging."""
     if CONF.log_config_append:
         _load_log_config(CONF.log_config_append)
     else:
-        _setup_logging_from_conf()
+        _setup_logging_from_conf(product_name, version)
     sys.excepthook = _create_logging_excepthook(product_name)

@@ -460,15 +493,32 @@ def _find_facility_from_conf():
     return facility


-def _setup_logging_from_conf():
+class RFCSysLogHandler(logging.handlers.SysLogHandler):
+    def __init__(self, *args, **kwargs):
+        self.binary_name = _get_binary_name()
+        super(RFCSysLogHandler, self).__init__(*args, **kwargs)
+
+    def format(self, record):
+        msg = super(RFCSysLogHandler, self).format(record)
+        msg = self.binary_name + ' ' + msg
+        return msg
+
+
+def _setup_logging_from_conf(project, version):
     log_root = getLogger(None).logger
     for handler in log_root.handlers:
         log_root.removeHandler(handler)

     if CONF.use_syslog:
         facility = _find_facility_from_conf()
-        syslog = logging.handlers.SysLogHandler(address='/dev/log',
-                                                facility=facility)
+        # TODO(bogdando) use the format provided by RFCSysLogHandler
+        # after existing syslog format deprecation in J
+        if CONF.use_syslog_rfc_format:
+            syslog = RFCSysLogHandler(address='/dev/log',
+                                      facility=facility)
+        else:
+            syslog = logging.handlers.SysLogHandler(address='/dev/log',
+                                                    facility=facility)
         log_root.addHandler(syslog)

     logpath = _get_log_file_path()
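As a rough sketch of what RFCSysLogHandler changes, assuming the running binary resolves to heat-engine via _get_binary_name (the record contents are hypothetical):

    import logging

    handler = RFCSysLogHandler(address='/dev/log')
    record = logging.LogRecord('heat.engine', logging.INFO, __file__, 1,
                               'starting up', None, None)
    print(handler.format(record))
    # 'heat-engine starting up' -- the binary name is prepended as the
    # RFC5424 APP-NAME before the MSG part; the plain SysLogHandler would
    # emit just 'starting up'.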
@@ -502,7 +552,9 @@ def _setup_logging_from_conf():
             log_root.info('Deprecated: log_format is now deprecated and will '
                           'be removed in the next release')
         else:
-            handler.setFormatter(ContextFormatter(datefmt=datefmt))
+            handler.setFormatter(ContextFormatter(project=project,
+                                                  version=version,
+                                                  datefmt=datefmt))

     if CONF.debug:
         log_root.setLevel(logging.DEBUG)
@@ -560,18 +612,50 @@ class ContextFormatter(logging.Formatter):
     For information about what variables are available for the formatter see:
     http://docs.python.org/library/logging.html#formatter

+    If available, uses the context value stored in TLS - local.store.context
+
     """

+    def __init__(self, *args, **kwargs):
+        """Initialize ContextFormatter instance
+
+        Takes additional keyword arguments which can be used in the message
+        format string.
+
+        :keyword project: project name
+        :type project: string
+        :keyword version: project version
+        :type version: string
+
+        """
+
+        self.project = kwargs.pop('project', 'unknown')
+        self.version = kwargs.pop('version', 'unknown')
+
+        logging.Formatter.__init__(self, *args, **kwargs)
+
     def format(self, record):
         """Uses contextstring if request_id is set, otherwise default."""
+
+        # store project info
+        record.project = self.project
+        record.version = self.version
+
+        # store request info
+        context = getattr(local.store, 'context', None)
+        if context:
+            d = _dictify_context(context)
+            for k, v in d.items():
+                setattr(record, k, v)
+
         # NOTE(sdague): default the fancier formatting params
         # to an empty string so we don't throw an exception if
         # they get used
-        for key in ('instance', 'color'):
+        for key in ('instance', 'color', 'user_identity'):
             if key not in record.__dict__:
                 record.__dict__[key] = ''

-        if record.__dict__.get('request_id', None):
+        if record.__dict__.get('request_id'):
             self._fmt = CONF.logging_context_format_string
         else:
             self._fmt = CONF.logging_default_format_string
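A short usage sketch: the new keyword arguments make %(project)s and %(version)s available to the configured format strings (the values below are hypothetical):

    formatter = ContextFormatter(project='heat', version='2014.1',
                                 datefmt='%Y-%m-%d %H:%M:%S')
    # Every record run through this formatter carries record.project and
    # record.version, so a format string such as
    #     '%(asctime)s %(project)s %(version)s %(levelname)s %(message)s'
    # works as logging_default_format_string.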
@@ -1,4 +1,3 @@
-#
 # Copyright 2013 IBM Corp.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -28,4 +27,4 @@ class PublishErrorsHandler(logging.Handler):
         notifier.api.notify(None, 'error.publisher',
                             'error_notification',
                             notifier.api.ERROR,
-                            dict(error=record.msg))
+                            dict(error=record.getMessage()))
@@ -1,4 +1,3 @@
-#
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2011 Justin Santa Barbara
@@ -21,7 +20,7 @@ import sys
 from eventlet import event
 from eventlet import greenthread

-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _LE, _LW
 from heat.openstack.common import log as logging
 from heat.openstack.common import timeutils

@@ -80,14 +79,14 @@ class FixedIntervalLoopingCall(LoopingCallBase):
                         break
                     delay = interval - timeutils.delta_seconds(start, end)
                     if delay <= 0:
-                        LOG.warn(_('task run outlasted interval by %s sec') %
+                        LOG.warn(_LW('task run outlasted interval by %s sec') %
                                  -delay)
                     greenthread.sleep(delay if delay > 0 else 0)
             except LoopingCallDone as e:
                 self.stop()
                 done.send(e.retvalue)
             except Exception:
-                LOG.exception(_('in fixed duration looping call'))
+                LOG.exception(_LE('in fixed duration looping call'))
                 done.send_exception(*sys.exc_info())
                 return
             else:
@@ -127,14 +126,14 @@ class DynamicLoopingCall(LoopingCallBase):

                     if periodic_interval_max is not None:
                         idle = min(idle, periodic_interval_max)
-                    LOG.debug(_('Dynamic looping call sleeping for %.02f '
-                                'seconds'), idle)
+                    LOG.debug('Dynamic looping call sleeping for %.02f '
+                              'seconds', idle)
                     greenthread.sleep(idle)
             except LoopingCallDone as e:
                 self.stop()
                 done.send(e.retvalue)
             except Exception:
-                LOG.exception(_('in dynamic looping call'))
+                LOG.exception(_LE('in dynamic looping call'))
                 done.send_exception(*sys.exc_info())
                 return
             else:
@@ -1,4 +1,3 @@
-#
 # Copyright 2012 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -18,7 +17,17 @@
 Network-related utilities and helper functions.
 """

-from heat.openstack.common.py3kcompat import urlutils
+# TODO(jd) Use six.moves once
+# https://bitbucket.org/gutworth/six/pull-request/28
+# is merged
+try:
+    import urllib.parse
+    SplitResult = urllib.parse.SplitResult
+except ImportError:
+    import urlparse
+    SplitResult = urlparse.SplitResult
+
+from six.moves.urllib import parse


 def parse_host_port(address, default_port=None):
@@ -65,16 +74,35 @@ def parse_host_port(address, default_port=None):
     return (host, None if port is None else int(port))


+class ModifiedSplitResult(SplitResult):
+    """Split results class for urlsplit."""
+
+    # NOTE(dims): The functions below are needed for Python 2.6.x.
+    # We can remove these when we drop support for 2.6.x.
+    @property
+    def hostname(self):
+        netloc = self.netloc.split('@', 1)[-1]
+        host, port = parse_host_port(netloc)
+        return host
+
+    @property
+    def port(self):
+        netloc = self.netloc.split('@', 1)[-1]
+        host, port = parse_host_port(netloc)
+        return port
+
+
 def urlsplit(url, scheme='', allow_fragments=True):
     """Parse a URL using urlparse.urlsplit(), splitting query and fragments.
     This function papers over Python issue9374 when needed.

     The parameters are the same as urlparse.urlsplit.
     """
-    scheme, netloc, path, query, fragment = urlutils.urlsplit(
+    scheme, netloc, path, query, fragment = parse.urlsplit(
         url, scheme, allow_fragments)
     if allow_fragments and '#' in path:
         path, fragment = path.split('#', 1)
     if '?' in path:
         path, query = path.split('?', 1)
-    return urlutils.SplitResult(scheme, netloc, path, query, fragment)
+    return ModifiedSplitResult(scheme, netloc,
+                               path, query, fragment)
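As a usage sketch, the ported urlsplit keeps query/fragment splitting working for non-http schemes (Python issue9374) and, through ModifiedSplitResult, hostname/port access on Python 2.6 (the URL below is made up):

    result = urlsplit('qpid://broker.example.com:5672/path?key=val#frag')
    print(result.hostname, result.port)    # broker.example.com 5672
    print(result.query, result.fragment)   # key=val frag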
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -20,7 +19,7 @@ import uuid
 from oslo.config import cfg

 from heat.openstack.common import context
-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _, _LE
 from heat.openstack.common import importutils
 from heat.openstack.common import jsonutils
 from heat.openstack.common import log as logging
@@ -143,9 +142,9 @@ def notify(context, publisher_id, event_type, priority, payload):
         try:
             driver.notify(context, msg)
         except Exception as e:
-            LOG.exception(_("Problem '%(e)s' attempting to "
+            LOG.exception(_LE("Problem '%(e)s' attempting to "
                               "send to notification system. "
                               "Payload=%(payload)s")
                           % dict(e=e, payload=payload))


@@ -162,8 +161,8 @@ def _get_drivers():
             driver = importutils.import_module(notification_driver)
             _drivers[notification_driver] = driver
         except ImportError:
-            LOG.exception(_("Failed to load notifier %s. "
+            LOG.exception(_LE("Failed to load notifier %s. "
                               "These notifications will not be sent.") %
                           notification_driver)
     return _drivers.values()

@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -1,4 +1,3 @@
-#
 # Copyright 2013 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -14,10 +13,10 @@
 # under the License.

 """
-A temporary helper which emulates heat.messaging.Notifier.
+A temporary helper which emulates oslo.messaging.Notifier.

 This helper method allows us to do the tedious porting to the new Notifier API
-as a standalone commit so that the commit which switches us to heat.messaging
+as a standalone commit so that the commit which switches us to oslo.messaging
 is smaller and easier to review. This file will be removed as part of that
 commit.
 """
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -17,7 +16,7 @@
 from oslo.config import cfg

 from heat.openstack.common import context as req_context
-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _LE
 from heat.openstack.common import log as logging
 from heat.openstack.common import rpc

@@ -43,6 +42,6 @@ def notify(context, message):
     try:
         rpc.notify(context, topic, message)
     except Exception:
-        LOG.exception(_("Could not send notification to %(topic)s. "
+        LOG.exception(_LE("Could not send notification to %(topic)s. "
                           "Payload=%(message)s"),
                       {"topic": topic, "message": message})
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -19,7 +18,7 @@
 from oslo.config import cfg

 from heat.openstack.common import context as req_context
-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _LE
 from heat.openstack.common import log as logging
 from heat.openstack.common import rpc

@@ -49,6 +48,6 @@ def notify(context, message):
     try:
         rpc.notify(context, topic, message, envelope=True)
     except Exception:
-        LOG.exception(_("Could not send notification to %(topic)s. "
+        LOG.exception(_LE("Could not send notification to %(topic)s. "
                           "Payload=%(message)s"),
                       {"topic": topic, "message": message})
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -1,4 +1,3 @@
-#
 # Copyright (c) 2012 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -47,6 +46,27 @@ policy rule::

     project_id:%(project_id)s and not role:dunce

+It is possible to perform policy checks on the following user
+attributes (obtained through the token): user_id, domain_id or
+project_id::
+
+    domain_id:<some_value>
+
+Attributes sent along with API calls can be used by the policy engine
+(on the right side of the expression), by using the following syntax::
+
+    <some_value>:user.id
+
+Contextual attributes of objects identified by their IDs are loaded
+from the database. They are also available to the policy engine and
+can be checked through the `target` keyword::
+
+    <some_value>:target.role.name
+
+All these attributes (related to users, API calls, and context) can be
+checked against each other or against constants, be it literals (True,
+<a_number>) or strings.
+
 Finally, two special policy checks should be mentioned; the policy
 check "@" will always accept an access, and the policy check "!" will
 always reject an access. (Note that if a rule is either the empty
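To make the expanded docstring concrete, a minimal end-to-end sketch of loading and enforcing one rule (the rule name, target, and creds are hypothetical):

    enforcer = Enforcer(use_conf=False)
    enforcer.set_rules(Rules.load_json(
        '{"owner_only": "project_id:%(project_id)s"}'))
    print(enforcer.enforce('owner_only',
                           {'project_id': 'p1'},    # target attributes
                           {'project_id': 'p1'}))   # creds from the token -> True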
@@ -56,16 +76,18 @@ as it allows particular rules to be explicitly disabled.
 """

 import abc
+import ast
 import re

 from oslo.config import cfg
 import six
+import six.moves.urllib.parse as urlparse
+import six.moves.urllib.request as urlrequest

 from heat.openstack.common import fileutils
-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _, _LE
 from heat.openstack.common import jsonutils
 from heat.openstack.common import log as logging
-from heat.openstack.common.py3kcompat import urlutils


 policy_opts = [
@@ -119,11 +141,16 @@ class Rules(dict):

         # If the default rule isn't actually defined, do something
         # reasonably intelligent
-        if not self.default_rule or self.default_rule not in self:
+        if not self.default_rule:
             raise KeyError(key)

         if isinstance(self.default_rule, BaseCheck):
             return self.default_rule
+
+        # We need to check this or we can get infinite recursion
+        if self.default_rule not in self:
+            raise KeyError(key)
+
         elif isinstance(self.default_rule, six.string_types):
             return self[self.default_rule]

@@ -155,27 +182,31 @@ class Enforcer(object):
                       is called this will be overwritten.
     :param default_rule: Default rule to use, CONF.default_rule will
                          be used if none is specified.
+    :param use_conf: Whether to load rules from cache or config file.
     """

-    def __init__(self, policy_file=None, rules=None, default_rule=None):
+    def __init__(self, policy_file=None, rules=None,
+                 default_rule=None, use_conf=True):
         self.rules = Rules(rules, default_rule)
         self.default_rule = default_rule or CONF.policy_default_rule

         self.policy_path = None
         self.policy_file = policy_file or CONF.policy_file
+        self.use_conf = use_conf

-    def set_rules(self, rules, overwrite=True):
+    def set_rules(self, rules, overwrite=True, use_conf=False):
         """Create a new Rules object based on the provided dict of rules.

         :param rules: New rules to use. It should be an instance of dict.
         :param overwrite: Whether to overwrite current rules or update them
                           with the new rules.
+        :param use_conf: Whether to reload rules from cache or config file.
         """

         if not isinstance(rules, dict):
             raise TypeError(_("Rules must be an instance of dict or Rules, "
                               "got %s instead") % type(rules))
+        self.use_conf = use_conf
         if overwrite:
             self.rules = Rules(rules, self.default_rule)
         else:
@@ -195,15 +226,19 @@ class Enforcer(object):
         :param force_reload: Whether to overwrite current rules.
         """

-        if not self.policy_path:
-            self.policy_path = self._get_policy_path()
+        if force_reload:
+            self.use_conf = force_reload

-        reloaded, data = fileutils.read_cached_file(self.policy_path,
-                                                    force_reload=force_reload)
-        if reloaded or not self.rules:
-            rules = Rules.load_json(data, self.default_rule)
-            self.set_rules(rules)
-            LOG.debug(_("Rules successfully reloaded"))
+        if self.use_conf:
+            if not self.policy_path:
+                self.policy_path = self._get_policy_path()
+
+            reloaded, data = fileutils.read_cached_file(
+                self.policy_path, force_reload=force_reload)
+            if reloaded or not self.rules:
+                rules = Rules.load_json(data, self.default_rule)
+                self.set_rules(rules)
+                LOG.debug("Rules successfully reloaded")

     def _get_policy_path(self):
         """Locate the policy json data file.
@@ -249,7 +284,7 @@ class Enforcer(object):

         # NOTE(flaper87): Not logging target or creds to avoid
         # potential security issues.
-        LOG.debug(_("Rule %s will be now enforced") % rule)
+        LOG.debug("Rule %s will be now enforced" % rule)

         self.load_rules()

@@ -264,7 +299,7 @@ class Enforcer(object):
             # Evaluate the rule
             result = self.rules[rule](target, creds, self)
         except KeyError:
-            LOG.debug(_("Rule [%s] doesn't exist") % rule)
+            LOG.debug("Rule [%s] doesn't exist" % rule)
             # If the rule doesn't exist, fail closed
             result = False

@@ -472,7 +507,7 @@ def _parse_check(rule):
     try:
         kind, match = rule.split(':', 1)
     except Exception:
-        LOG.exception(_("Failed to understand rule %s") % rule)
+        LOG.exception(_LE("Failed to understand rule %s") % rule)
         # If the rule is invalid, we'll fail closed
         return FalseCheck()

@@ -482,7 +517,7 @@ def _parse_check(rule):
     elif None in _checks:
         return _checks[None](kind, match)
     else:
-        LOG.error(_("No handler for matches of kind %s") % kind)
+        LOG.error(_LE("No handler for matches of kind %s") % kind)
         return FalseCheck()


@@ -752,7 +787,7 @@ def _parse_text_rule(rule):
         return state.result
     except ValueError:
         # Couldn't parse the rule
-        LOG.exception(_("Failed to understand rule %r") % rule)
+        LOG.exception(_LE("Failed to understand rule %r") % rule)

         # Fail closed
         return FalseCheck()
@@ -825,8 +860,8 @@ class HttpCheck(Check):
         url = ('http:' + self.match) % target
         data = {'target': jsonutils.dumps(target),
                 'credentials': jsonutils.dumps(creds)}
-        post_data = urlutils.urlencode(data)
-        f = urlutils.urlopen(url, post_data)
+        post_data = urlparse.urlencode(data)
+        f = urlrequest.urlopen(url, post_data)
         return f.read() == "True"


@@ -839,6 +874,8 @@ class GenericCheck(Check):

         tenant:%(tenant_id)s
         role:compute:admin
+        True:%(user.enabled)s
+        'Member':%(role.name)s
     """

     # TODO(termie): do dict inspection via dot syntax
@@ -849,6 +886,12 @@ class GenericCheck(Check):
             # present in Target return false
             return False

-        if self.kind in creds:
-            return match == six.text_type(creds[self.kind])
-        return False
+        try:
+            # Try to interpret self.kind as a literal
+            leftval = ast.literal_eval(self.kind)
+        except ValueError:
+            try:
+                leftval = creds[self.kind]
+            except KeyError:
+                return False
+        return match == six.text_type(leftval)
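A sketch of what the literal branch buys: the left side of a generic check may now be a Python literal instead of a creds key (the rule and target below are made up; GenericCheck ignores its enforcer argument, so None is passed):

    check = _parse_check('True:%(enabled)s')   # yields GenericCheck('True', '%(enabled)s')
    # ast.literal_eval('True') produces the literal True, so the check compares
    # six.text_type(True) == 'True' against the interpolated right-hand side
    # and passes whenever target['enabled'] is True, regardless of creds.
    print(check({'enabled': True}, {}, None))  # True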
@@ -1,4 +1,3 @@
-#
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -1,4 +1,3 @@
-#
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -38,7 +37,7 @@ import six


 from heat.openstack.common import excutils
-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _, _LE
 from heat.openstack.common import local
 from heat.openstack.common import log as logging
 from heat.openstack.common.rpc import common as rpc_common
@@ -73,7 +72,7 @@ class Pool(pools.Pool):

     # TODO(comstud): Timeout connections not used in a while
     def create(self):
-        LOG.debug(_('Pool creating new connection'))
+        LOG.debug('Pool creating new connection')
         return self.connection_cls(self.conf)

     def empty(self):
@@ -175,7 +174,7 @@ class ConnectionContext(rpc_common.Connection):
                                           ack_on_error)

     def consume_in_thread(self):
-        self.connection.consume_in_thread()
+        return self.connection.consume_in_thread()

     def __getattr__(self, key):
         """Proxy all other calls to the Connection instance."""
@@ -288,7 +287,7 @@ def unpack_context(conf, msg):
     context_dict['reply_q'] = msg.pop('_reply_q', None)
     context_dict['conf'] = conf
     ctx = RpcContext.from_dict(context_dict)
-    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
+    rpc_common._safe_log(LOG.debug, 'unpacked context: %s', ctx.to_dict())
     return ctx


@@ -340,7 +339,7 @@ def _add_unique_id(msg):
     """Add unique_id for checking duplicate messages."""
     unique_id = uuid.uuid4().hex
     msg.update({UNIQUE_ID: unique_id})
-    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
+    LOG.debug('UNIQUE_ID is %s.' % (unique_id))


 class _ThreadPoolWithWait(object):
@@ -433,7 +432,7 @@ class ProxyCallback(_ThreadPoolWithWait):
         # the previous context is stored in local.store.context
         if hasattr(local.store, 'context'):
             del local.store.context
-        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
+        rpc_common._safe_log(LOG.debug, 'received %s', message_data)
         self.msg_id_cache.check_duplicate_message(message_data)
         ctxt = unpack_context(self.conf, message_data)
         method = message_data.get('method')
@@ -470,7 +469,7 @@ class ProxyCallback(_ThreadPoolWithWait):
             # This final None tells multicall that it is done.
             ctxt.reply(ending=True, connection_pool=self.connection_pool)
         except rpc_common.ClientException as e:
-            LOG.debug(_('Expected exception during message handling (%s)') %
+            LOG.debug('Expected exception during message handling (%s)' %
                       e._exc_info[1])
             ctxt.reply(None, e._exc_info,
                        connection_pool=self.connection_pool,
@@ -478,7 +477,7 @@ class ProxyCallback(_ThreadPoolWithWait):
         except Exception:
             # sys.exc_info() is deleted by LOG.exception().
             exc_info = sys.exc_info()
-            LOG.error(_('Exception during message handling'),
+            LOG.error(_LE('Exception during message handling'),
                       exc_info=exc_info)
             ctxt.reply(None, exc_info, connection_pool=self.connection_pool)

@@ -552,10 +551,10 @@ _reply_proxy_create_sem = semaphore.Semaphore()

 def multicall(conf, context, topic, msg, timeout, connection_pool):
     """Make a call that returns multiple times."""
-    LOG.debug(_('Making synchronous call on %s ...'), topic)
+    LOG.debug('Making synchronous call on %s ...', topic)
     msg_id = uuid.uuid4().hex
     msg.update({'_msg_id': msg_id})
-    LOG.debug(_('MSG_ID is %s') % (msg_id))
+    LOG.debug('MSG_ID is %s' % (msg_id))
     _add_unique_id(msg)
     pack_context(msg, context)

@@ -581,7 +580,7 @@ def call(conf, context, topic, msg, timeout, connection_pool):

 def cast(conf, context, topic, msg, connection_pool):
     """Sends a message on a topic without waiting for a response."""
-    LOG.debug(_('Making asynchronous cast on %s...'), topic)
+    LOG.debug('Making asynchronous cast on %s...', topic)
     _add_unique_id(msg)
     pack_context(msg, context)
     with ConnectionContext(conf, connection_pool) as conn:
@@ -590,7 +589,7 @@ def cast(conf, context, topic, msg, connection_pool):

 def fanout_cast(conf, context, topic, msg, connection_pool):
     """Sends a message on a fanout exchange without waiting for a response."""
-    LOG.debug(_('Making asynchronous fanout cast...'))
+    LOG.debug('Making asynchronous fanout cast...')
     _add_unique_id(msg)
     pack_context(msg, context)
     with ConnectionContext(conf, connection_pool) as conn:
@@ -618,7 +617,7 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg,

 def notify(conf, context, topic, msg, connection_pool, envelope):
     """Sends a notification event on a topic."""
-    LOG.debug(_('Sending %(event_type)s on %(topic)s'),
+    LOG.debug('Sending %(event_type)s on %(topic)s',
               dict(event_type=msg.get('event_type'),
                    topic=topic))
     _add_unique_id(msg)
@@ -1,4 +1,3 @@
-#
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -23,7 +22,7 @@ import traceback
 from oslo.config import cfg
 import six

-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _, _LE
 from heat.openstack.common import importutils
 from heat.openstack.common import jsonutils
 from heat.openstack.common import local
@@ -86,7 +85,7 @@ class RPCException(Exception):
             except Exception:
                 # kwargs doesn't match a variable in the message
                 # log the issue and the kwargs
-                LOG.exception(_('Exception in string format operation'))
+                LOG.exception(_LE('Exception in string format operation'))
                 for name, value in six.iteritems(kwargs):
                     LOG.error("%s: %s" % (name, value))
                 # at least get the core message out if something happened
@@ -290,7 +289,7 @@ def serialize_remote_exception(failure_info, log_failure=True):
     tb = traceback.format_exception(*failure_info)
     failure = failure_info[1]
     if log_failure:
-        LOG.error(_("Returning exception %s to caller"),
+        LOG.error(_LE("Returning exception %s to caller"),
                   six.text_type(failure))
         LOG.error(tb)

@@ -1,4 +1,3 @@
-#
 # Copyright 2012 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -141,8 +140,8 @@ def multicall(conf, context, topic, msg, timeout=None):
     if not method:
         return
     args = msg.get('args', {})
-    version = msg.get('version', None)
-    namespace = msg.get('namespace', None)
+    version = msg.get('version')
+    namespace = msg.get('namespace')

     try:
         consumer = CONSUMERS[topic][0]
@@ -186,8 +185,8 @@ def fanout_cast(conf, context, topic, msg):
     if not method:
         return
     args = msg.get('args', {})
-    version = msg.get('version', None)
-    namespace = msg.get('namespace', None)
+    version = msg.get('version')
+    namespace = msg.get('namespace')

     for consumer in CONSUMERS.get(topic, []):
         try:
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -30,7 +29,7 @@ from oslo.config import cfg
 import six

 from heat.openstack.common import excutils
-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _, _LE, _LI
 from heat.openstack.common import network_utils
 from heat.openstack.common.rpc import amqp as rpc_amqp
 from heat.openstack.common.rpc import common as rpc_common
@@ -39,9 +38,9 @@ from heat.openstack.common import sslutils
 kombu_opts = [
     cfg.StrOpt('kombu_ssl_version',
                default='',
-               help='SSL version to use (valid only if SSL enabled). '
-                    'valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may '
-                    'be available on some distributions'
+               help='If SSL is enabled, the SSL version to use. Valid '
+                    'values are TLSv1, SSLv23 and SSLv3. SSLv2 might '
+                    'be available on some distributions.'
                ),
     cfg.StrOpt('kombu_ssl_keyfile',
                default='',
@@ -51,8 +50,8 @@ kombu_opts = [
                help='SSL cert file (valid only if SSL enabled)'),
     cfg.StrOpt('kombu_ssl_ca_certs',
                default='',
-               help=('SSL certification authority file '
-                     '(valid only if SSL enabled)')),
+               help='SSL certification authority file '
+                    '(valid only if SSL enabled)'),
     cfg.StrOpt('rabbit_host',
                default='localhost',
                help='The RabbitMQ broker address where a single node is used'),
@@ -64,33 +63,33 @@ kombu_opts = [
                help='RabbitMQ HA cluster host:port pairs'),
     cfg.BoolOpt('rabbit_use_ssl',
                 default=False,
-                help='connect over SSL for RabbitMQ'),
+                help='Connect over SSL for RabbitMQ'),
     cfg.StrOpt('rabbit_userid',
                default='guest',
-               help='the RabbitMQ userid'),
+               help='The RabbitMQ userid'),
     cfg.StrOpt('rabbit_password',
                default='guest',
-               help='the RabbitMQ password',
+               help='The RabbitMQ password',
                secret=True),
     cfg.StrOpt('rabbit_virtual_host',
                default='/',
-               help='the RabbitMQ virtual host'),
+               help='The RabbitMQ virtual host'),
     cfg.IntOpt('rabbit_retry_interval',
                default=1,
-               help='how frequently to retry connecting with RabbitMQ'),
+               help='How frequently to retry connecting with RabbitMQ'),
     cfg.IntOpt('rabbit_retry_backoff',
                default=2,
-               help='how long to backoff for between retries when connecting '
+               help='How long to backoff for between retries when connecting '
                     'to RabbitMQ'),
     cfg.IntOpt('rabbit_max_retries',
                default=0,
-               help='maximum retries with trying to connect to RabbitMQ '
-                    '(the default of 0 implies an infinite retry count)'),
+               help='Maximum number of RabbitMQ connection retries. '
+                    'Default is 0 (infinite retry count)'),
     cfg.BoolOpt('rabbit_ha_queues',
                 default=False,
-                help='use H/A queues in RabbitMQ (x-ha-policy: all).'
-                     'You need to wipe RabbitMQ database when '
-                     'changing this option.'),
+                help='Use HA queues in RabbitMQ (x-ha-policy: all). '
+                     'If you change this option, you must wipe the '
+                     'RabbitMQ database.'),

 ]

@@ -154,12 +153,12 @@ class ConsumerBase(object):
             callback(msg)
         except Exception:
             if self.ack_on_error:
-                LOG.exception(_("Failed to process message"
+                LOG.exception(_LE("Failed to process message"
                                   " ... skipping it."))
                 message.ack()
             else:
-                LOG.exception(_("Failed to process message"
+                LOG.exception(_LE("Failed to process message"
                                   " ... will requeue."))
                 message.requeue()
         else:
             message.ack()
@@ -496,7 +495,7 @@ class Connection(object):
         be handled by the caller.
         """
         if self.connection:
-            LOG.info(_("Reconnecting to AMQP server on "
+            LOG.info(_LI("Reconnecting to AMQP server on "
                          "%(hostname)s:%(port)d") % params)
             try:
                 self.connection.release()
@@ -518,7 +517,7 @@ class Connection(object):
         self.channel._new_queue('ae.undeliver')
         for consumer in self.consumers:
             consumer.reconnect(self.channel)
-        LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
+        LOG.info(_LI('Connected to AMQP server on %(hostname)s:%(port)d') %
                  params)

     def reconnect(self):
@@ -569,9 +568,9 @@ class Connection(object):
                 sleep_time = min(sleep_time, self.interval_max)

             log_info['sleep_time'] = sleep_time
-            LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
+            LOG.error(_LE('AMQP server on %(hostname)s:%(port)d is '
                           'unreachable: %(err_str)s. Trying again in '
                           '%(sleep_time)d seconds.') % log_info)
             time.sleep(sleep_time)

     def ensure(self, error_callback, method, *args, **kwargs):
@@ -623,7 +622,7 @@ class Connection(object):

         def _connect_error(exc):
             log_info = {'topic': topic, 'err_str': str(exc)}
-            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
+            LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
                           "%(err_str)s") % log_info)

         def _declare_consumer():
@@ -641,11 +640,11 @@ class Connection(object):

         def _error_callback(exc):
             if isinstance(exc, socket.timeout):
-                LOG.debug(_('Timed out waiting for RPC response: %s') %
+                LOG.debug('Timed out waiting for RPC response: %s' %
                           str(exc))
                 raise rpc_common.Timeout()
             else:
-                LOG.exception(_('Failed to consume message from queue: %s') %
+                LOG.exception(_LE('Failed to consume message from queue: %s') %
                               str(exc))
                 info['do_consume'] = True

@@ -684,7 +683,7 @@ class Connection(object):

         def _error_callback(exc):
             log_info = {'topic': topic, 'err_str': str(exc)}
-            LOG.exception(_("Failed to publish message to topic "
+            LOG.exception(_LE("Failed to publish message to topic "
                               "'%(topic)s': %(err_str)s") % log_info)

         def _publish():
@ -1,4 +1,3 @@
|
|||||||
#
|
|
||||||
# Copyright 2011 OpenStack Foundation
|
# Copyright 2011 OpenStack Foundation
|
||||||
# Copyright 2011 - 2012, Red Hat, Inc.
|
# Copyright 2011 - 2012, Red Hat, Inc.
|
||||||
#
|
#
|
||||||
@ -24,7 +23,7 @@ from oslo.config import cfg
|
|||||||
import six
|
import six
|
||||||
|
|
||||||
from heat.openstack.common import excutils
|
from heat.openstack.common import excutils
|
||||||
from heat.openstack.common.gettextutils import _
|
from heat.openstack.common.gettextutils import _, _LE, _LI
|
||||||
from heat.openstack.common import importutils
|
from heat.openstack.common import importutils
|
||||||
from heat.openstack.common import jsonutils
|
from heat.openstack.common import jsonutils
|
||||||
from heat.openstack.common import log as logging
|
from heat.openstack.common import log as logging
|
||||||
@@ -189,7 +188,7 @@ class ConsumerBase(object):
                 msg = rpc_common.deserialize_msg(message.content)
                 self.callback(msg)
             except Exception:
-                LOG.exception(_("Failed to process message... skipping it."))
+                LOG.exception(_LE("Failed to process message... skipping it."))
             finally:
                 # TODO(sandy): Need support for optional ack_on_error.
                 self.session.acknowledge(message)
@@ -505,7 +504,7 @@ class Connection(object):
         if self.connection.opened():
             try:
                 self.connection.close()
-            except qpid_exceptions.ConnectionError:
+            except qpid_exceptions.MessagingError:
                 pass
 
         broker = self.brokers[next(self.next_broker_indices)]
@@ -513,15 +512,15 @@ class Connection(object):
             try:
                 self.connection_create(broker)
                 self.connection.open()
-            except qpid_exceptions.ConnectionError as e:
+            except qpid_exceptions.MessagingError as e:
                 msg_dict = dict(e=e, delay=delay)
-                msg = _("Unable to connect to AMQP server: %(e)s. "
+                msg = _LE("Unable to connect to AMQP server: %(e)s. "
                           "Sleeping %(delay)s seconds") % msg_dict
                 LOG.error(msg)
                 time.sleep(delay)
-                delay = min(2 * delay, 60)
+                delay = min(delay + 1, 5)
             else:
-                LOG.info(_('Connected to AMQP server on %s'), broker)
+                LOG.info(_LI('Connected to AMQP server on %s'), broker)
                 break
 
         self.session = self.connection.session()
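The reconnect delay above changes from exponential backoff capped at 60 seconds to linear backoff capped at 5 seconds, so a recovered broker is retried much sooner. An illustrative comparison of the two schedules (not part of the diff; the delay starts at 1 as in the surrounding code):

    def delays(step, n=8):
        delay, out = 1, []
        for _ in range(n):
            out.append(delay)
            delay = step(delay)
        return out

    old = delays(lambda d: min(2 * d, 60))  # 1, 2, 4, 8, 16, 32, 60, 60
    new = delays(lambda d: min(d + 1, 5))   # 1, 2, 3, 4, 5, 5, 5, 5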
@@ -534,14 +533,14 @@ class Connection(object):
                 consumer.reconnect(self.session)
                 self._register_consumer(consumer)
 
-            LOG.debug(_("Re-established AMQP queues"))
+            LOG.debug("Re-established AMQP queues")
 
     def ensure(self, error_callback, method, *args, **kwargs):
         while True:
             try:
                 return method(*args, **kwargs)
             except (qpid_exceptions.Empty,
-                    qpid_exceptions.ConnectionError) as e:
+                    qpid_exceptions.MessagingError) as e:
                 if error_callback:
                     error_callback(e)
                 self.reconnect()
@@ -573,7 +572,7 @@ class Connection(object):
         """
        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
-            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
+            LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
                      "%(err_str)s") % log_info)
 
        def _declare_consumer():
@@ -588,11 +587,11 @@ class Connection(object):
 
         def _error_callback(exc):
             if isinstance(exc, qpid_exceptions.Empty):
-                LOG.debug(_('Timed out waiting for RPC response: %s') %
+                LOG.debug('Timed out waiting for RPC response: %s' %
                           str(exc))
                 raise rpc_common.Timeout()
             else:
-                LOG.exception(_('Failed to consume message from queue: %s') %
+                LOG.exception(_LE('Failed to consume message from queue: %s') %
                               str(exc))
 
         def _consume():
@@ -600,7 +599,7 @@ class Connection(object):
             try:
                 self._lookup_consumer(nxt_receiver).consume()
             except Exception:
-                LOG.exception(_("Error processing message. Skipping it."))
+                LOG.exception(_LE("Error processing message. Skipping it."))
 
         for iteration in itertools.count(0):
             if limit and iteration >= limit:
@@ -627,7 +626,7 @@ class Connection(object):
 
         def _connect_error(exc):
             log_info = {'topic': topic, 'err_str': str(exc)}
-            LOG.exception(_("Failed to publish message to topic "
+            LOG.exception(_LE("Failed to publish message to topic "
                           "'%(topic)s': %(err_str)s") % log_info)
 
         def _publisher_send():
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 Cloudscaling Group, Inc
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -28,7 +27,7 @@ import six
 from six import moves
 
 from heat.openstack.common import excutils
-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _, _LE, _LI
 from heat.openstack.common import importutils
 from heat.openstack.common import jsonutils
 from heat.openstack.common.rpc import common as rpc_common
@@ -94,12 +93,12 @@ def _serialize(data):
         return jsonutils.dumps(data, ensure_ascii=True)
     except TypeError:
         with excutils.save_and_reraise_exception():
-            LOG.error(_("JSON serialization failed."))
+            LOG.error(_LE("JSON serialization failed."))
 
 
 def _deserialize(data):
     """Deserialization wrapper."""
-    LOG.debug(_("Deserializing: %s"), data)
+    LOG.debug("Deserializing: %s", data)
     return jsonutils.loads(data)
 
 
@@ -134,9 +133,9 @@ class ZmqSocket(object):
         str_data = {'addr': addr, 'type': self.socket_s(),
                     'subscribe': subscribe, 'bind': bind}
 
-        LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
-        LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
-        LOG.debug(_("-> bind: %(bind)s"), str_data)
+        LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
+        LOG.debug("-> Subscribed to %(subscribe)s", str_data)
+        LOG.debug("-> bind: %(bind)s", str_data)
 
         try:
             if bind:
@@ -156,7 +155,7 @@ class ZmqSocket(object):
         """Subscribe."""
         if not self.can_sub:
             raise RPCException("Cannot subscribe on this socket.")
-        LOG.debug(_("Subscribing to %s"), msg_filter)
+        LOG.debug("Subscribing to %s", msg_filter)
 
         try:
             self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
@@ -193,7 +192,7 @@ class ZmqSocket(object):
             # it would be much worse if some of the code calling this
             # were to fail. For now, lets log, and later evaluate
             # if we can safely raise here.
-            LOG.error(_("ZeroMQ socket could not be closed."))
+            LOG.error(_LE("ZeroMQ socket could not be closed."))
         self.sock = None
 
     def recv(self, **kwargs):
@@ -265,7 +264,7 @@ class InternalContext(object):
 
     def _get_response(self, ctx, proxy, topic, data):
         """Process a curried message and cast the result to topic."""
-        LOG.debug(_("Running func with context: %s"), ctx.to_dict())
+        LOG.debug("Running func with context: %s", ctx.to_dict())
         data.setdefault('version', None)
         data.setdefault('args', {})
 
@@ -278,13 +277,13 @@ class InternalContext(object):
             # ignore these since they are just from shutdowns
             pass
         except rpc_common.ClientException as e:
-            LOG.debug(_("Expected exception during message handling (%s)") %
+            LOG.debug("Expected exception during message handling (%s)" %
                       e._exc_info[1])
             return {'exc':
                     rpc_common.serialize_remote_exception(e._exc_info,
                                                           log_failure=False)}
         except Exception:
-            LOG.error(_("Exception during message handling"))
+            LOG.error(_LE("Exception during message handling"))
             return {'exc':
                     rpc_common.serialize_remote_exception(sys.exc_info())}
 
@@ -303,7 +302,7 @@ class InternalContext(object):
                           self._get_response(ctx, proxy, topic, payload),
                           ctx.replies)
 
-            LOG.debug(_("Sending reply"))
+            LOG.debug("Sending reply")
             _multi_send(_cast, ctx, topic, {
                 'method': '-process_reply',
                 'args': {
@@ -337,7 +336,7 @@ class ConsumerBase(object):
         # processed internally. (non-valid method name)
         method = data.get('method')
         if not method:
-            LOG.error(_("RPC message did not include method."))
+            LOG.error(_LE("RPC message did not include method."))
             return
 
         # Internal method
@@ -369,7 +368,7 @@ class ZmqBaseReactor(ConsumerBase):
     def register(self, proxy, in_addr, zmq_type_in,
                  in_bind=True, subscribe=None):
 
-        LOG.info(_("Registering reactor"))
+        LOG.info(_LI("Registering reactor"))
 
         if zmq_type_in not in (zmq.PULL, zmq.SUB):
             raise RPCException("Bad input socktype")
@@ -381,12 +380,12 @@ class ZmqBaseReactor(ConsumerBase):
         self.proxies[inq] = proxy
         self.sockets.append(inq)
 
-        LOG.info(_("In reactor registered"))
+        LOG.info(_LI("In reactor registered"))
 
     def consume_in_thread(self):
         @excutils.forever_retry_uncaught_exceptions
         def _consume(sock):
-            LOG.info(_("Consuming socket"))
+            LOG.info(_LI("Consuming socket"))
             while True:
                 self.consume(sock)
 
@@ -436,7 +435,7 @@ class ZmqProxy(ZmqBaseReactor):
 
         if topic not in self.topic_proxy:
             def publisher(waiter):
-                LOG.info(_("Creating proxy for topic: %s"), topic)
+                LOG.info(_LI("Creating proxy for topic: %s"), topic)
 
                 try:
                     # The topic is received over the network,
@@ -474,14 +473,14 @@ class ZmqProxy(ZmqBaseReactor):
             try:
                 wait_sock_creation.wait()
             except RPCException:
-                LOG.error(_("Topic socket file creation failed."))
+                LOG.error(_LE("Topic socket file creation failed."))
                 return
 
         try:
             self.topic_proxy[topic].put_nowait(data)
         except eventlet.queue.Full:
-            LOG.error(_("Local per-topic backlog buffer full for topic "
+            LOG.error(_LE("Local per-topic backlog buffer full for topic "
                         "%(topic)s. Dropping message.") % {'topic': topic})
 
     def consume_in_thread(self):
         """Runs the ZmqProxy service."""
@@ -496,8 +495,8 @@ class ZmqProxy(ZmqBaseReactor):
         except os.error:
             if not os.path.isdir(ipc_dir):
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_("Required IPC directory does not exist at"
+                    LOG.error(_LE("Required IPC directory does not exist at"
                                 " %s") % (ipc_dir, ))
         try:
             self.register(consumption_proxy,
                           consume_in,
@@ -505,11 +504,11 @@ class ZmqProxy(ZmqBaseReactor):
         except zmq.ZMQError:
             if os.access(ipc_dir, os.X_OK):
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_("Permission denied to IPC directory at"
+                    LOG.error(_LE("Permission denied to IPC directory at"
                                 " %s") % (ipc_dir, ))
             with excutils.save_and_reraise_exception():
-                LOG.error(_("Could not create ZeroMQ receiver daemon. "
+                LOG.error(_LE("Could not create ZeroMQ receiver daemon. "
                             "Socket may already be in use."))
 
         super(ZmqProxy, self).consume_in_thread()
 
@@ -542,7 +541,7 @@ class ZmqReactor(ZmqBaseReactor):
     def consume(self, sock):
         #TODO(ewindisch): use zero-copy (i.e. references, not copying)
         data = sock.recv()
-        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
+        LOG.debug("CONSUMER RECEIVED DATA: %s", data)
 
         proxy = self.proxies[sock]
 
@@ -561,7 +560,7 @@ class ZmqReactor(ZmqBaseReactor):
             # Unmarshal only after verifying the message.
             ctx = RpcContext.unmarshal(data[3])
         else:
-            LOG.error(_("ZMQ Envelope version unsupported or unknown."))
+            LOG.error(_LE("ZMQ Envelope version unsupported or unknown."))
             return
 
         self.pool.spawn_n(self.process, proxy, ctx, request)
@@ -589,14 +588,14 @@ class Connection(rpc_common.Connection):
             topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
 
         if topic in self.topics:
-            LOG.info(_("Skipping topic registration. Already registered."))
+            LOG.info(_LI("Skipping topic registration. Already registered."))
             return
 
         # Receive messages from (local) proxy
         inaddr = "ipc://%s/zmq_topic_%s" % \
             (CONF.rpc_zmq_ipc_dir, topic)
 
-        LOG.debug(_("Consumer is a zmq.%s"),
+        LOG.debug("Consumer is a zmq.%s",
                   ['PULL', 'SUB'][sock_type == zmq.SUB])
 
         self.reactor.register(proxy, inaddr, sock_type,
@@ -648,7 +647,7 @@ def _call(addr, context, topic, msg, timeout=None,
     # Replies always come into the reply service.
     reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
 
-    LOG.debug(_("Creating payload"))
+    LOG.debug("Creating payload")
     # Curry the original request into a reply method.
     mcontext = RpcContext.marshal(context)
     payload = {
@@ -661,7 +660,7 @@ def _call(addr, context, topic, msg, timeout=None,
         }
     }
 
-    LOG.debug(_("Creating queue socket for reply waiter"))
+    LOG.debug("Creating queue socket for reply waiter")
 
    # Messages arriving async.
    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
@@ -674,14 +673,14 @@ def _call(addr, context, topic, msg, timeout=None,
            zmq.SUB, subscribe=msg_id, bind=False
        )
 
-        LOG.debug(_("Sending cast"))
+        LOG.debug("Sending cast")
        _cast(addr, context, topic, payload, envelope)
 
-        LOG.debug(_("Cast sent; Waiting reply"))
+        LOG.debug("Cast sent; Waiting reply")
        # Blocks until receives reply
        msg = msg_waiter.recv()
-        LOG.debug(_("Received message: %s"), msg)
-        LOG.debug(_("Unpacking response"))
+        LOG.debug("Received message: %s", msg)
+        LOG.debug("Unpacking response")
 
        if msg[2] == 'cast':  # Legacy version
            raw_msg = _deserialize(msg[-1])[-1]
@@ -720,10 +719,10 @@ def _multi_send(method, context, topic, msg, timeout=None,
     Dispatches to the matchmaker and sends message to all relevant hosts.
     """
     conf = CONF
-    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
+    LOG.debug("%(msg)s" % {'msg': ' '.join(map(pformat, (topic, msg)))})
 
     queues = _get_matchmaker().queues(topic)
-    LOG.debug(_("Sending message(s) to: %s"), queues)
+    LOG.debug("Sending message(s) to: %s", queues)
 
     # Don't stack if we have no matchmaker results
     if not queues:
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 Cloudscaling Group, Inc
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -23,7 +22,7 @@ import contextlib
 import eventlet
 from oslo.config import cfg
 
-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _, _LI
 from heat.openstack.common import log as logging
 
 
@@ -214,7 +213,7 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
         self.hosts.discard(host)
         self.backend_unregister(key, '.'.join((key, host)))
 
-        LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
+        LOG.info(_LI("Matchmaker unregistered: %(key)s, %(host)s"),
                  {'key': key, 'host': host})
 
     def start_heartbeat(self):
@@ -1,4 +1,3 @@
-#
 # Copyright 2013 Cloudscaling Group, Inc
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -1,4 +1,3 @@
-#
 # Copyright 2011-2013 Cloudscaling Group, Inc
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -23,7 +22,7 @@ import json
 
 from oslo.config import cfg
 
-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _LW
 from heat.openstack.common import log as logging
 from heat.openstack.common.rpc import matchmaker as mm
 
@@ -54,9 +53,8 @@ class RingExchange(mm.Exchange):
         if ring:
             self.ring = ring
         else:
-            fh = open(CONF.matchmaker_ring.ringfile, 'r')
-            self.ring = json.load(fh)
-            fh.close()
+            with open(CONF.matchmaker_ring.ringfile, 'r') as fh:
+                self.ring = json.load(fh)
 
         self.ring0 = {}
         for k in self.ring.keys():
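With the change above, the ringfile handle is closed even if json.load() raises. The ringfile itself is a JSON object mapping topic keys to lists of candidate hosts; a hypothetical example of the shape RingExchange consumes (host names are made up):

    import json

    ring_json = '{"scheduler": ["host1", "host2"], "engine": ["host3"]}'
    ring = json.loads(ring_json)  # same shape json.load(fh) produces here
    assert ring['scheduler'] == ['host1', 'host2']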
@@ -74,8 +72,8 @@ class RoundRobinRingExchange(RingExchange):
     def run(self, key):
         if not self._ring_has(key):
             LOG.warn(
-                _("No key defining hosts for topic '%s', "
+                _LW("No key defining hosts for topic '%s', "
                   "see ringfile") % (key, )
             )
             return []
         host = next(self.ring0[key])
@@ -92,8 +90,8 @@ class FanoutRingExchange(RingExchange):
         nkey = key.split('fanout~')[1:][0]
         if not self._ring_has(nkey):
             LOG.warn(
-                _("No key defining hosts for topic '%s', "
+                _LW("No key defining hosts for topic '%s', "
                   "see ringfile") % (nkey, )
             )
             return []
         return map(lambda x: (key + '.' + x, x), self.ring[nkey])
@@ -1,4 +1,3 @@
-#
 # Copyright 2012-2013 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -1,4 +1,3 @@
-#
 # Copyright 2013 IBM Corp.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -1,4 +1,3 @@
-#
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -16,7 +15,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from heat.openstack.common.gettextutils import _
 from heat.openstack.common import log as logging
 from heat.openstack.common import rpc
 from heat.openstack.common.rpc import dispatcher as rpc_dispatcher
@@ -45,7 +43,7 @@ class Service(service.Service):
         super(Service, self).start()
 
         self.conn = rpc.create_connection(new=True)
-        LOG.debug(_("Creating Consumer connection for Service %s") %
+        LOG.debug("Creating Consumer connection for Service %s" %
                   self.topic)
 
         dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -1,4 +1,3 @@
-#
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2011 Justin Santa Barbara
@@ -24,7 +23,6 @@ import os
 import random
 import signal
 import sys
-import threading
 import time
 
 try:
@@ -36,12 +34,14 @@ except ImportError:
     UnsupportedOperation = None
 
 import eventlet
+from eventlet import event
 from oslo.config import cfg
 
 from heat.openstack.common import eventlet_backdoor
-from heat.openstack.common.gettextutils import _
+from heat.openstack.common.gettextutils import _LE, _LI, _LW
 from heat.openstack.common import importutils
 from heat.openstack.common import log as logging
+from heat.openstack.common import systemd
 from heat.openstack.common import threadgroup
 
 
@@ -164,7 +164,7 @@ class ServiceLauncher(Launcher):
         status = None
         signo = 0
 
-        LOG.debug(_('Full set of CONF:'))
+        LOG.debug('Full set of CONF:')
         CONF.log_opt_values(LOG, std_logging.DEBUG)
 
         try:
@@ -173,7 +173,7 @@ class ServiceLauncher(Launcher):
             super(ServiceLauncher, self).wait()
         except SignalExit as exc:
             signame = _signo_to_signame(exc.signo)
-            LOG.info(_('Caught %s, exiting'), signame)
+            LOG.info(_LI('Caught %s, exiting'), signame)
             status = exc.code
             signo = exc.signo
         except SystemExit as exc:
@@ -185,7 +185,7 @@ class ServiceLauncher(Launcher):
                 rpc.cleanup()
             except Exception:
                 # We're shutting down, so it doesn't matter at this point.
-                LOG.exception(_('Exception during rpc cleanup.'))
+                LOG.exception(_LE('Exception during rpc cleanup.'))
 
         return status, signo
 
@@ -236,7 +236,7 @@ class ProcessLauncher(object):
         # dies unexpectedly
         self.readpipe.read()
 
-        LOG.info(_('Parent process has died unexpectedly, exiting'))
+        LOG.info(_LI('Parent process has died unexpectedly, exiting'))
 
         sys.exit(1)
 
@@ -267,13 +267,13 @@ class ProcessLauncher(object):
             launcher.wait()
         except SignalExit as exc:
             signame = _signo_to_signame(exc.signo)
-            LOG.info(_('Caught %s, exiting'), signame)
+            LOG.info(_LI('Caught %s, exiting'), signame)
             status = exc.code
             signo = exc.signo
         except SystemExit as exc:
             status = exc.code
         except BaseException:
-            LOG.exception(_('Unhandled exception'))
+            LOG.exception(_LE('Unhandled exception'))
             status = 2
         finally:
             launcher.stop()
@@ -306,7 +306,7 @@ class ProcessLauncher(object):
         # start up quickly but ensure we don't fork off children that
         # die instantly too quickly.
         if time.time() - wrap.forktimes[0] < wrap.workers:
-            LOG.info(_('Forking too fast, sleeping'))
+            LOG.info(_LI('Forking too fast, sleeping'))
             time.sleep(1)
 
         wrap.forktimes.pop(0)
@@ -325,7 +325,7 @@ class ProcessLauncher(object):
 
             os._exit(status)
 
-        LOG.info(_('Started child %d'), pid)
+        LOG.info(_LI('Started child %d'), pid)
 
         wrap.children.add(pid)
         self.children[pid] = wrap
@@ -335,7 +335,7 @@ class ProcessLauncher(object):
     def launch_service(self, service, workers=1):
         wrap = ServiceWrapper(service, workers)
 
-        LOG.info(_('Starting %d workers'), wrap.workers)
+        LOG.info(_LI('Starting %d workers'), wrap.workers)
         while self.running and len(wrap.children) < wrap.workers:
             self._start_child(wrap)
 
@@ -352,15 +352,15 @@ class ProcessLauncher(object):
 
         if os.WIFSIGNALED(status):
             sig = os.WTERMSIG(status)
-            LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
+            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                      dict(pid=pid, sig=sig))
         else:
             code = os.WEXITSTATUS(status)
-            LOG.info(_('Child %(pid)s exited with status %(code)d'),
+            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                      dict(pid=pid, code=code))
 
         if pid not in self.children:
-            LOG.warning(_('pid %d not in child list'), pid)
+            LOG.warning(_LW('pid %d not in child list'), pid)
             return None
 
         wrap = self.children.pop(pid)
@@ -382,22 +382,25 @@ class ProcessLauncher(object):
     def wait(self):
         """Loop waiting on children to die and respawning as necessary."""
 
-        LOG.debug(_('Full set of CONF:'))
+        LOG.debug('Full set of CONF:')
         CONF.log_opt_values(LOG, std_logging.DEBUG)
 
-        while True:
-            self.handle_signal()
-            self._respawn_children()
-            if self.sigcaught:
-                signame = _signo_to_signame(self.sigcaught)
-                LOG.info(_('Caught %s, stopping children'), signame)
-                if not _is_sighup_and_daemon(self.sigcaught):
-                    break
-
-            for pid in self.children:
-                os.kill(pid, signal.SIGHUP)
-            self.running = True
-            self.sigcaught = None
+        try:
+            while True:
+                self.handle_signal()
+                self._respawn_children()
+                if self.sigcaught:
+                    signame = _signo_to_signame(self.sigcaught)
+                    LOG.info(_LI('Caught %s, stopping children'), signame)
+                    if not _is_sighup_and_daemon(self.sigcaught):
+                        break
+
+                for pid in self.children:
+                    os.kill(pid, signal.SIGHUP)
+                self.running = True
+                self.sigcaught = None
+        except eventlet.greenlet.GreenletExit:
+            LOG.info(_LI("Wait called after thread killed. Cleaning up."))
 
         for pid in self.children:
             try:
@@ -408,7 +411,7 @@ class ProcessLauncher(object):
 
         # Wait for children to die
         if self.children:
-            LOG.info(_('Waiting on %d children to exit'), len(self.children))
+            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
             while self.children:
                 self._wait_child()
 
@@ -420,10 +423,11 @@ class Service(object):
         self.tg = threadgroup.ThreadGroup(threads)
 
         # signal that the service is done shutting itself down:
-        self._done = threading.Event()
+        self._done = event.Event()
 
     def reset(self):
-        self._done = threading.Event()
+        # NOTE(Fengqian): docs for Event.reset() recommend against using it
+        self._done = event.Event()
 
     def start(self):
         pass
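The switch from threading.Event to eventlet's event.Event matters because an eventlet event cooperates with green threads and can only be fired once; hence the ready() guard before send() in the following hunk, and the fresh events created in reset(). A minimal sketch of the semantics relied on (assumes eventlet is installed):

    from eventlet import event

    done = event.Event()
    assert not done.ready()
    done.send()            # wake all waiters; a second send() would raise
    assert done.ready()
    if not done.ready():   # the guard used by Service.stop() below
        done.send()
    done.wait()            # returns immediately once the event was sent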
@@ -432,7 +436,8 @@ class Service(object):
         self.tg.stop()
         self.tg.wait()
         # Signal that service cleanup is done:
-        self._done.set()
+        if not self._done.ready():
+            self._done.send()
 
     def wait(self):
         self._done.wait()
@@ -443,7 +448,7 @@ class Services(object):
     def __init__(self):
         self.services = []
         self.tg = threadgroup.ThreadGroup()
-        self.done = threading.Event()
+        self.done = event.Event()
 
     def add(self, service):
         self.services.append(service)
@@ -457,7 +462,8 @@ class Services(object):
 
         # Each service has performed cleanup, now signal that the run_service
         # wrapper threads can now die:
-        self.done.set()
+        if not self.done.ready():
+            self.done.send()
 
         # reap threads:
         self.tg.stop()
@@ -467,7 +473,7 @@ class Services(object):
 
     def restart(self):
         self.stop()
-        self.done = threading.Event()
+        self.done = event.Event()
         for restart_service in self.services:
             restart_service.reset()
             self.tg.add_thread(self.run_service, restart_service, self.done)
@@ -482,14 +488,16 @@ class Services(object):
 
     """
     service.start()
+    systemd.notify_once()
     done.wait()
 
 
-def launch(service, workers=None):
-    if workers:
-        launcher = ProcessLauncher()
-        launcher.launch_service(service, workers=workers)
-    else:
+def launch(service, workers=1):
+    if workers is None or workers == 1:
         launcher = ServiceLauncher()
         launcher.launch_service(service)
+    else:
+        launcher = ProcessLauncher()
+        launcher.launch_service(service, workers=workers)
 
     return launcher
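launch() now defaults to one worker and inverts the branch: a single worker runs in-process under ServiceLauncher, while workers >= 2 forks children under ProcessLauncher. A hypothetical caller (import path assumed from this tree):

    from heat.openstack.common import service

    svc = service.Service()                       # any Service subclass
    launcher = service.launch(svc)                # one worker: in-process
    # launcher = service.launch(svc, workers=4)   # four forked children
    launcher.wait()                               # blocks until stopped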
@@ -1,4 +1,3 @@
-#
 # Copyright 2013 IBM Corp.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -25,15 +24,15 @@ ssl_opts = [
     cfg.StrOpt('ca_file',
                default=None,
                help="CA certificate file to use to verify "
-                    "connecting clients"),
+                    "connecting clients."),
     cfg.StrOpt('cert_file',
                default=None,
                help="Certificate file to use when starting "
-                    "the server securely"),
+                    "the server securely."),
     cfg.StrOpt('key_file',
                default=None,
                help="Private key file to use when starting "
-                    "the server securely"),
+                    "the server securely."),
 ]
 
 
@@ -1,4 +1,3 @@
-#
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -18,6 +17,7 @@
 System-level utilities and helper functions.
 """
 
+import math
 import re
 import sys
 import unicodedata
@@ -27,16 +27,21 @@ import six
 from heat.openstack.common.gettextutils import _
 
 
-# Used for looking up extensions of text
-# to their 'multiplied' byte amount
-BYTE_MULTIPLIERS = {
-    '': 1,
-    't': 1024 ** 4,
-    'g': 1024 ** 3,
-    'm': 1024 ** 2,
-    'k': 1024,
+UNIT_PREFIX_EXPONENT = {
+    'k': 1,
+    'K': 1,
+    'Ki': 1,
+    'M': 2,
+    'Mi': 2,
+    'G': 3,
+    'Gi': 3,
+    'T': 4,
+    'Ti': 4,
+}
+UNIT_SYSTEM_INFO = {
+    'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
+    'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
 }
-BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)')
 
 TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
 FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
@@ -93,7 +98,8 @@ def bool_from_string(subject, strict=False, default=False):
 
 
 def safe_decode(text, incoming=None, errors='strict'):
-    """Decodes incoming str using `incoming` if they're not already unicode.
+    """Decodes incoming text/bytes string using `incoming` if they're not
+       already unicode.
 
     :param incoming: Text's current encoding
     :param errors: Errors handling policy. See here for valid
@@ -102,7 +108,7 @@ def safe_decode(text, incoming=None, errors='strict'):
         representation of it.
     :raises TypeError: If text is not an instance of str
     """
-    if not isinstance(text, six.string_types):
+    if not isinstance(text, (six.string_types, six.binary_type)):
         raise TypeError("%s can't be decoded" % type(text))
 
     if isinstance(text, six.text_type):
@@ -132,7 +138,7 @@ def safe_decode(text, incoming=None, errors='strict'):
 
 def safe_encode(text, incoming=None,
                 encoding='utf-8', errors='strict'):
-    """Encodes incoming str/unicode using `encoding`.
+    """Encodes incoming text/bytes string using `encoding`.
 
     If incoming is not specified, text is expected to be encoded with
     current python's default encoding. (`sys.getdefaultencoding`)
@@ -145,7 +151,7 @@ def safe_encode(text, incoming=None,
         representation of it.
     :raises TypeError: If text is not an instance of str
     """
-    if not isinstance(text, six.string_types):
+    if not isinstance(text, (six.string_types, six.binary_type)):
         raise TypeError("%s can't be encoded" % type(text))
 
     if not incoming:
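Both type gates now accept six.binary_type explicitly because on Python 3 `str` no longer covers byte strings. A small behavior sketch of the updated check (assumes six is installed):

    import six

    def accepts(text):
        # mirrors the isinstance() gate used by safe_decode/safe_encode
        return isinstance(text, (six.string_types, six.binary_type))

    assert accepts(u'caf\xe9')      # unicode text passes on Python 2 and 3
    assert accepts(b'caf\xc3\xa9')  # bytes now also pass on Python 3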
@@ -153,49 +159,59 @@ def safe_encode(text, incoming=None,
                                    sys.getdefaultencoding())
 
     if isinstance(text, six.text_type):
-        if six.PY3:
-            return text.encode(encoding, errors).decode(incoming)
-        else:
-            return text.encode(encoding, errors)
+        return text.encode(encoding, errors)
     elif text and encoding != incoming:
         # Decode text before encoding it with `encoding`
         text = safe_decode(text, incoming, errors)
-        if six.PY3:
-            return text.encode(encoding, errors).decode(incoming)
-        else:
-            return text.encode(encoding, errors)
-
-    return text
+        return text.encode(encoding, errors)
+    else:
+        return text
 
 
-def to_bytes(text, default=0):
-    """Converts a string into an integer of bytes.
+def string_to_bytes(text, unit_system='IEC', return_int=False):
+    """Converts a string into an float representation of bytes.
 
-    Looks at the last characters of the text to determine
-    what conversion is needed to turn the input text into a byte number.
-    Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)
+    The units supported for IEC ::
+
+        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
+        KB, KiB, MB, MiB, GB, GiB, TB, TiB
+
+    The units supported for SI ::
+
+        kb(it), Mb(it), Gb(it), Tb(it)
+        kB, MB, GB, TB
+
+    Note that the SI unit system does not support capital letter 'K'
 
     :param text: String input for bytes size conversion.
-    :param default: Default return value when text is blank.
+    :param unit_system: Unit system for byte size conversion.
+    :param return_int: If True, returns integer representation of text
+                       in bytes. (default: decimal)
+    :returns: Numerical representation of text in bytes.
+    :raises ValueError: If text has an invalid value.
 
     """
-    match = BYTE_REGEX.search(text)
+    try:
+        base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
+    except KeyError:
+        msg = _('Invalid unit system: "%s"') % unit_system
+        raise ValueError(msg)
+    match = reg_ex.match(text)
     if match:
-        magnitude = int(match.group(1))
-        mult_key_org = match.group(2)
-        if not mult_key_org:
-            return magnitude
-    elif text:
-        msg = _('Invalid string format: %s') % text
-        raise TypeError(msg)
+        magnitude = float(match.group(1))
+        unit_prefix = match.group(2)
+        if match.group(3) in ['b', 'bit']:
+            magnitude /= 8
     else:
-        return default
-    mult_key = mult_key_org.lower().replace('b', '', 1)
-    multiplier = BYTE_MULTIPLIERS.get(mult_key)
-    if multiplier is None:
-        msg = _('Unknown byte multiplier: %s') % mult_key_org
-        raise TypeError(msg)
-    return magnitude * multiplier
+        msg = _('Invalid string format: %s') % text
+        raise ValueError(msg)
+    if not unit_prefix:
+        res = magnitude
+    else:
+        res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
+    if return_int:
+        return int(math.ceil(res))
+    return res
 
 
 def to_slug(value, incoming=None, errors="strict"):
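Worked values for the rewritten converter: IEC prefixes scale by powers of 1024, SI by powers of 1000, and a trailing 'b'/'bit' unit divides the magnitude by 8. Assuming the module path used throughout this tree:

    from heat.openstack.common.strutils import string_to_bytes

    assert string_to_bytes('1KB') == 1024.0   # IEC: 'K' and 'Ki' both mean 2**10
    assert string_to_bytes('1KiB') == 1024.0
    assert string_to_bytes('1kB', unit_system='SI') == 1000.0
    assert string_to_bytes('8Kb') == 1024.0   # bits are divided by 8
    assert string_to_bytes('1.5KB', return_int=True) == 1536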
104 heat/openstack/common/systemd.py Normal file
@@ -0,0 +1,104 @@
+# Copyright 2012-2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper module for systemd service readiness notification.
+"""
+
+import os
+import socket
+import sys
+
+from heat.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _abstractify(socket_name):
+    if socket_name.startswith('@'):
+        # abstract namespace socket
+        socket_name = '\0%s' % socket_name[1:]
+    return socket_name
+
+
+def _sd_notify(unset_env, msg):
+    notify_socket = os.getenv('NOTIFY_SOCKET')
+    if notify_socket:
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+        try:
+            sock.connect(_abstractify(notify_socket))
+            sock.sendall(msg)
+            if unset_env:
+                del os.environ['NOTIFY_SOCKET']
+        except EnvironmentError:
+            LOG.debug("Systemd notification failed", exc_info=True)
+        finally:
+            sock.close()
+
+
+def notify():
+    """Send notification to Systemd that service is ready.
+    For details see
+    http://www.freedesktop.org/software/systemd/man/sd_notify.html
+    """
+    _sd_notify(False, 'READY=1')
+
+
+def notify_once():
+    """Send notification once to Systemd that service is ready.
+    Systemd sets NOTIFY_SOCKET environment variable with the name of the
+    socket listening for notifications from services.
+    This method removes the NOTIFY_SOCKET environment variable to ensure
+    notification is sent only once.
+    """
+    _sd_notify(True, 'READY=1')
+
+
+def onready(notify_socket, timeout):
+    """Wait for systemd style notification on the socket.
+
+    :param notify_socket: local socket address
+    :type notify_socket:  string
+    :param timeout: socket timeout
+    :type timeout:  float
+    :returns: 0 service ready
+              1 service not ready
+              2 timeout occurred
+    """
+    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+    sock.settimeout(timeout)
+    sock.bind(_abstractify(notify_socket))
+    try:
+        msg = sock.recv(512)
+    except socket.timeout:
+        return 2
+    finally:
+        sock.close()
+    if 'READY=1' in msg:
+        return 0
+    else:
+        return 1
+
+
+if __name__ == '__main__':
+    # simple CLI for testing
+    if len(sys.argv) == 1:
+        notify()
+    elif len(sys.argv) >= 2:
+        timeout = float(sys.argv[1])
+        notify_socket = os.getenv('NOTIFY_SOCKET')
+        if notify_socket:
+            retval = onready(notify_socket, timeout)
+            sys.exit(retval)
@@ -1,4 +1,3 @@
-#
 # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
 # All Rights Reserved.
 #
@@ -14,10 +13,22 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+##############################################################################
+##############################################################################
+##
+## DO NOT MODIFY THIS FILE
+##
+## This file is being graduated to the heattest library. Please make all
+## changes there, and only backport critical fixes here. - dhellmann
+##
+##############################################################################
+##############################################################################
+
 """Common utilities used in testing"""
 
 import logging
 import os
+import tempfile
 
 import fixtures
 import testtools
@@ -35,6 +46,7 @@ class BaseTestCase(testtools.TestCase):
         self._fake_logs()
         self.useFixture(fixtures.NestedTempfile())
         self.useFixture(fixtures.TempHomeDir())
+        self.tempdirs = []
 
     def _set_timeout(self):
         test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
@@ -70,3 +82,18 @@ class BaseTestCase(testtools.TestCase):
             )
         else:
             logging.basicConfig(format=_LOG_FORMAT, level=level)
+
+    def create_tempfiles(self, files, ext='.conf'):
+        tempfiles = []
+        for (basename, contents) in files:
+            if not os.path.isabs(basename):
+                (fd, path) = tempfile.mkstemp(prefix=basename, suffix=ext)
+            else:
+                path = basename + ext
+                fd = os.open(path, os.O_CREAT | os.O_WRONLY)
+            tempfiles.append(path)
+            try:
+                os.write(fd, contents)
+            finally:
+                os.close(fd)
+        return tempfiles
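The create_tempfiles() helper added above writes each (basename, contents) pair to a '.conf' temp file and returns the generated paths. A hypothetical use inside a test case deriving from BaseTestCase:

    def test_loads_config(self):
        paths = self.create_tempfiles(
            [('my_conf', '[DEFAULT]\nverbose = True\n')])
        # paths[0] is the generated '*.conf' filename, ready to be passed
        # to whatever config loader the test exercises.
        self.assertTrue(paths[0].endswith('.conf'))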
@@ -1,4 +1,3 @@
-#
 # Copyright 2012 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -12,10 +11,10 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+import threading
 
 import eventlet
 from eventlet import greenpool
-from eventlet import greenthread
 
 from heat.openstack.common import log as logging
 from heat.openstack.common import loopingcall
@@ -52,7 +51,7 @@ class Thread(object):
 
 
 class ThreadGroup(object):
-    """The point of the ThreadGroup classis to:
+    """The point of the ThreadGroup class is to:
 
     * keep track of timers and greenthreads (making it easier to stop them
       when need be).
@@ -87,7 +86,7 @@ class ThreadGroup(object):
         self.threads.remove(thread)
 
     def stop(self):
-        current = greenthread.getcurrent()
+        current = threading.current_thread()
 
         # Iterate over a copy of self.threads so thread_done doesn't
         # modify the list while we're iterating
@@ -115,7 +114,7 @@ class ThreadGroup(object):
                 pass
             except Exception as ex:
                 LOG.exception(ex)
-        current = greenthread.getcurrent()
+        current = threading.current_thread()
 
         # Iterate over a copy of self.threads so thread_done doesn't
         # modify the list while we're iterating
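Replacing greenthread.getcurrent() with threading.current_thread() relies on eventlet's monkey patching: in a patched process, the threading module resolves to the green-thread-aware version, and in an unpatched one it still returns a valid current thread to compare against. A sketch of that assumption (eventlet required):

    import eventlet
    eventlet.monkey_patch()

    import threading

    # With the patch active, current_thread() identifies the running green
    # thread, which is what ThreadGroup.stop()/wait() compare against.
    print(threading.current_thread())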
|
@ -1,4 +1,3 @@
|
|||||||
#
|
|
||||||
# Copyright 2011 OpenStack Foundation.
|
# Copyright 2011 OpenStack Foundation.
|
||||||
# All Rights Reserved.
|
# All Rights Reserved.
|
||||||
#
|
#
|
||||||
|
@ -1,4 +1,3 @@
|
|||||||
#
|
|
||||||
# Copyright (c) 2012 Intel Corporation.
|
# Copyright (c) 2012 Intel Corporation.
|
||||||
# All Rights Reserved.
|
# All Rights Reserved.
|
||||||
#
|
#
|
||||||
|
@ -1,4 +1,3 @@
|
|||||||
#
|
|
||||||
# Copyright (c) 2013 OpenStack Foundation
|
# Copyright (c) 2013 OpenStack Foundation
|
||||||
# All Rights Reserved.
|
# All Rights Reserved.
|
||||||
#
|
#
|
||||||
|
@ -8,7 +8,6 @@ module=excutils
|
|||||||
module=fixture
|
module=fixture
|
||||||
module=gettextutils
|
module=gettextutils
|
||||||
module=importutils
|
module=importutils
|
||||||
module=install_venv_common
|
|
||||||
module=jsonutils
|
module=jsonutils
|
||||||
module=local
|
module=local
|
||||||
module=log
|
module=log
|
||||||
@ -24,7 +23,6 @@ module=timeutils
|
|||||||
module=uuidutils
|
module=uuidutils
|
||||||
module=config
|
module=config
|
||||||
module=strutils
|
module=strutils
|
||||||
module=py3kcompat
|
|
||||||
module=versionutils
|
module=versionutils
|
||||||
module=test
|
module=test
|
||||||
module=crypto
|
module=crypto
|
||||||
|
@ -11,6 +11,7 @@ oslo.config>=1.2.0
|
|||||||
paramiko>=1.9.0
|
paramiko>=1.9.0
|
||||||
PasteDeploy>=1.5.0
|
PasteDeploy>=1.5.0
|
||||||
pbr>=0.6,!=0.7,<1.0
|
pbr>=0.6,!=0.7,<1.0
|
||||||
|
posix_ipc
|
||||||
pycrypto>=2.6
|
pycrypto>=2.6
|
||||||
python-ceilometerclient>=1.0.6
|
python-ceilometerclient>=1.0.6
|
||||||
python-cinderclient>=1.0.6
|
python-cinderclient>=1.0.6
|
||||||
|
tools/config/check_uptodate.sh

@@ -1,15 +1,25 @@
-#!/bin/bash
-TEMPDIR=`mktemp -d /tmp/tmp.XXXXXXXXXX`
-trap "rm -rf $TEMPDIR" EXIT
-CFGFILE=heat.conf.sample
-GENERATOR=tools/config/generate_sample.sh
-mkdir -p $TEMPDIR/{a,b}/etc/heat
-cp etc/heat/$CFGFILE $TEMPDIR/a/etc/heat
-$GENERATOR -b ./ -p heat -o $TEMPDIR/b/etc/heat &> /dev/null
-if [ ! -f "$TEMPDIR/b/etc/heat/$CFGFILE" ]; then
-    echo "Error: $CFGFILE can not be generated by $GENERATOR." 1>&2
-    exit 2
-elif ! (cd $TEMPDIR; diff -Naur a b); then
-    echo "Error: $CFGFILE is not up to date, please run $GENERATOR." 1>&2
-    exit 42
+#!/usr/bin/env bash
+
+PROJECT_NAME=${PROJECT_NAME:-heat}
+CFGFILE_NAME=${PROJECT_NAME}.conf.sample
+
+if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then
+    CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
+elif [ -e etc/${CFGFILE_NAME} ]; then
+    CFGFILE=etc/${CFGFILE_NAME}
+else
+    echo "${0##*/}: can not find config file"
+    exit 1
+fi
+
+TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
+trap "rm -rf $TEMPDIR" EXIT
+
+tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
+
+if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
+then
+    echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
+    echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh."
+    exit 1
 fi
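A minimal sketch of how the rewritten check is invoked (the PROJECT_NAME override is illustrative, since it already defaults to heat):

    # Run from the top of the source tree; exits 1 if the sample is stale.
    PROJECT_NAME=heat tools/config/check_uptodate.sh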
tools/config/generate_sample.sh

@@ -4,8 +4,8 @@ print_hint() {
     echo "Try \`${0##*/} --help' for more information." >&2
 }

-PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:o: \
-                 --long help,base-dir:,package-name:,output-dir: -- "$@")
+PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o: \
+                 --long help,base-dir:,package-name:,output-dir:,module:,library: -- "$@")

 if [ $? != 0 ] ; then print_hint ; exit 1 ; fi

@@ -21,6 +21,8 @@ while true; do
             echo "-b, --base-dir=DIR        project base directory"
             echo "-p, --package-name=NAME   project package name"
             echo "-o, --output-dir=DIR      file output directory"
+            echo "-m, --module=MOD          extra python module to interrogate for options"
+            echo "-l, --library=LIB         extra library that registers options for discovery"
             exit 0
             ;;
         -b|--base-dir)
@@ -38,6 +40,16 @@ while true; do
             OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
             shift
             ;;
+        -m|--module)
+            shift
+            MODULES="$MODULES -m $1"
+            shift
+            ;;
+        -l|--library)
+            shift
+            LIBRARIES="$LIBRARIES -l $1"
+            shift
+            ;;
         --)
             break
             ;;
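With the new flags, a caller can pull in options from code the source-file scan would not find. A hypothetical invocation (the -m/-l values are examples only, not taken from this change):

    # Scan heat's sources, plus one extra module and one extra library.
    tools/config/generate_sample.sh -b ./ -p heat -o etc/heat \
        -m heat.common.config -l oslo.messaging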
@@ -53,7 +65,7 @@ then
     BASEDIR=$(cd "$BASEDIR" && pwd)
 fi

-PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}}
+PACKAGENAME=${PACKAGENAME:-$(python setup.py --name)}
 TARGETDIR=$BASEDIR/$PACKAGENAME
 if ! [ -d $TARGETDIR ]
 then
@@ -77,12 +89,20 @@ find $TARGETDIR -type f -name "*.pyc" -delete
 FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \
         -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)

-EXTRA_MODULES_FILE="`dirname $0`/oslo.config.generator.rc"
-if test -r "$EXTRA_MODULES_FILE"
+RC_FILE="`dirname $0`/oslo.config.generator.rc"
+if test -r "$RC_FILE"
 then
-    source "$EXTRA_MODULES_FILE"
+    source "$RC_FILE"
 fi

+for mod in ${HEAT_CONFIG_GENERATOR_EXTRA_MODULES}; do
+    MODULES="$MODULES -m $mod"
+done
+
+for lib in ${HEAT_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do
+    LIBRARIES="$LIBRARIES -l $lib"
+done
+
 export EVENTLET_NO_GREENDNS=yes

 OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
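Projects can persist those extras in tools/config/oslo.config.generator.rc, which the script sources before expanding the HEAT_CONFIG_GENERATOR_EXTRA_* variables. A hypothetical rc file (the values are illustrative):

    # Sourced by generate_sample.sh, so plain shell assignments suffice;
    # MODULEPATH could also be overridden here.
    HEAT_CONFIG_GENERATOR_EXTRA_MODULES="heat.common.config"
    HEAT_CONFIG_GENERATOR_EXTRA_LIBRARIES="oslo.messaging"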
@@ -90,7 +110,7 @@ OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
 DEFAULT_MODULEPATH=heat.openstack.common.config.generator
 MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
 OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
-python -m $MODULEPATH $FILES > $OUTPUTFILE
+python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE

 # Hook to allow projects to append custom config file snippets
 CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)