Merge "Synchronized Openstack Common"
This commit is contained in:
commit
dadff4654f
@@ -13,7 +13,19 @@ bind_port = 8082

 # Set up logging. Make sure the user has permissions to write to this file! To use syslog just set use_syslog parameter value to 'True'.
 log_file = /tmp/murano-api.log

+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and then will be changed in J to honor RFC5424
 use_syslog = False
+
+# (Optional) Use syslog rfc5424 format for logging. If
+# enabled, will add APP-NAME (RFC5424) before the MSG part of
+# the syslog message. The old format without APP-NAME is
+# deprecated in I, and will be removed in J.
+#use_syslog_rfc_format=false
+
+#Syslog facility to receive log lines
 syslog_log_facility = LOG_LOCAL0

 [database]
@@ -73,17 +73,9 @@ CONF.register_opts(reports_opts, group='reports')
 CONF.register_opts(rabbit_opts, group='rabbitmq')
 CONF.register_opts(db_opts, group='database')

-CONF.import_opt('verbose', 'muranoapi.openstack.common.log')
-CONF.import_opt('debug', 'muranoapi.openstack.common.log')
-CONF.import_opt('log_dir', 'muranoapi.openstack.common.log')
-CONF.import_opt('log_file', 'muranoapi.openstack.common.log')
-CONF.import_opt('log_config', 'muranoapi.openstack.common.log')
-CONF.import_opt('log_format', 'muranoapi.openstack.common.log')
-CONF.import_opt('log_date_format', 'muranoapi.openstack.common.log')
-CONF.import_opt('use_syslog', 'muranoapi.openstack.common.log')
-CONF.import_opt('syslog_log_facility', 'muranoapi.openstack.common.log')
+CONF.import_opt('connection',
+                'muranoapi.openstack.common.db.options',
+                group='database')

 cfg.set_defaults(log.log_opts,
                  default_log_levels=['qpid.messaging=INFO',
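Note: after this change the database connection option is defined by the synced oslo-incubator db.options module rather than registered locally; a minimal sketch of how such an option is then read (module path as in this tree, everything else illustrative):

    from oslo.config import cfg

    CONF = cfg.CONF
    # The option lives in muranoapi.openstack.common.db.options under the
    # [database] group; import_opt only ensures the defining module has
    # been imported before the option is accessed.
    CONF.import_opt('connection',
                    'muranoapi.openstack.common.db.options',
                    group='database')

    def get_connection_string():
        # Reads e.g. "mysql://user:pass@host/murano" from murano-api.conf
        return CONF.database.connection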
@@ -26,7 +26,7 @@ from migrate import exceptions as versioning_exceptions

 from muranoapi.common.config import CONF as conf
 from muranoapi.db import migrate_repo
-from muranoapi.openstack.common.db.sqlalchemy import session
+from muranoapi.openstack.common.db.sqlalchemy import session as db_session
 from muranoapi.openstack.common.gettextutils import _  # noqa
 from muranoapi.openstack.common import log as logging

@@ -34,13 +34,15 @@ log = logging.getLogger(__name__)


 def get_session(autocommit=True, expire_on_commit=False):
-    if not session._MAKER:
+    s = _create_facade_lazily().get_session(autocommit=autocommit,
+                                            expire_on_commit=expire_on_commit)
+    if s:
         if conf.database.auto_create:
             log.info(_('auto-creating DB'))
             _auto_create_db()
         else:
             log.info(_('not auto-creating DB'))
-    return session.get_session(autocommit, expire_on_commit)
+    return s

@@ -50,3 +52,17 @@ def _auto_create_db():
     except versioning_exceptions.DatabaseNotControlledError:
         versioning_api.version_control(conf.database.connection, repo_path)
     versioning_api.upgrade(conf.database.connection, repo_path)
+
+
+_FACADE = None
+
+
+def _create_facade_lazily():
+    global _FACADE
+
+    if _FACADE is None:
+        _FACADE = db_session.EngineFacade(
+            conf.database.connection,
+            **dict(conf.database.iteritems())
+        )
+    return _FACADE
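Note: the lazily created EngineFacade above is the oslo-incubator pattern for sharing one engine and sessionmaker per process. A minimal standalone sketch of the same pattern in plain SQLAlchemy (pre-2.0 era API, since this code predates SQLAlchemy 2.0; the sqlite URL is illustrative):

    import sqlalchemy
    from sqlalchemy.orm import sessionmaker

    _FACADE = None  # one (engine, sessionmaker) pair per process

    def _create_facade_lazily(connection='sqlite://'):
        global _FACADE
        if _FACADE is None:
            engine = sqlalchemy.create_engine(connection)
            _FACADE = (engine, sessionmaker(bind=engine))
        return _FACADE

    def get_session(autocommit=True, expire_on_commit=False):
        _, maker = _create_facade_lazily()
        return maker(autocommit=autocommit, expire_on_commit=expire_on_commit)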
@@ -0,0 +1,2 @@
+import six
+six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
@@ -1,6 +1,5 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 SINA Corporation
+# Copyright 2014 Cisco Systems, Inc.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -20,6 +19,7 @@

 from __future__ import print_function

+import argparse
 import imp
 import os
 import re
@@ -28,6 +28,8 @@ import sys
 import textwrap

 from oslo.config import cfg
+import six
+import stevedore.named

 from muranoapi.openstack.common import gettextutils
 from muranoapi.openstack.common import importutils
@@ -39,6 +41,7 @@ BOOLOPT = "BoolOpt"
 INTOPT = "IntOpt"
 FLOATOPT = "FloatOpt"
 LISTOPT = "ListOpt"
+DICTOPT = "DictOpt"
 MULTISTROPT = "MultiStrOpt"

 OPT_TYPES = {
@@ -47,11 +50,12 @@ OPT_TYPES = {
     INTOPT: 'integer value',
     FLOATOPT: 'floating point value',
     LISTOPT: 'list value',
+    DICTOPT: 'dict value',
     MULTISTROPT: 'multi valued',
 }

 OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
-                                              FLOATOPT, LISTOPT,
+                                              FLOATOPT, LISTOPT, DICTOPT,
                                               MULTISTROPT]))

 PY_EXT = ".py"
@@ -60,24 +64,55 @@ BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
 WORDWRAP_WIDTH = 60


-def generate(srcfiles):
+def generate(argv):
+    parser = argparse.ArgumentParser(
+        description='generate sample configuration file',
+    )
+    parser.add_argument('-m', dest='modules', action='append')
+    parser.add_argument('-l', dest='libraries', action='append')
+    parser.add_argument('srcfiles', nargs='*')
+    parsed_args = parser.parse_args(argv)
+
     mods_by_pkg = dict()
-    for filepath in srcfiles:
+    for filepath in parsed_args.srcfiles:
         pkg_name = filepath.split(os.sep)[1]
         mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
                             os.path.basename(filepath).split('.')[0]])
         mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
     # NOTE(lzyeval): place top level modules before packages
-    pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
-    pkg_names.sort()
-    ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
-    ext_names.sort()
+    pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
+    ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
     pkg_names.extend(ext_names)

     # opts_by_group is a mapping of group name to an options list
     # The options list is a list of (module, options) tuples
     opts_by_group = {'DEFAULT': []}

+    if parsed_args.modules:
+        for module_name in parsed_args.modules:
+            module = _import_module(module_name)
+            if module:
+                for group, opts in _list_opts(module):
+                    opts_by_group.setdefault(group, []).append((module_name,
+                                                                opts))
+
+    # Look for entry points defined in libraries (or applications) for
+    # option discovery, and include their return values in the output.
+    #
+    # Each entry point should be a function returning an iterable
+    # of pairs with the group name (or None for the default group)
+    # and the list of Opt instances for that group.
+    if parsed_args.libraries:
+        loader = stevedore.named.NamedExtensionManager(
+            'oslo.config.opts',
+            names=list(set(parsed_args.libraries)),
+            invoke_on_load=False,
+        )
+        for ext in loader:
+            for group, opts in ext.plugin():
+                opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
+                opt_list.append((ext.name, opts))
+
     for pkg_name in pkg_names:
         mods = mods_by_pkg.get(pkg_name)
         mods.sort()
@@ -87,14 +122,14 @@ def generate(srcfiles):

             mod_obj = _import_module(mod_str)
             if not mod_obj:
-                continue
+                raise RuntimeError("Unable to import module %s" % mod_str)

             for group, opts in _list_opts(mod_obj):
                 opts_by_group.setdefault(group, []).append((mod_str, opts))

     print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
-    for group, opts in opts_by_group.items():
-        print_group_opts(group, opts)
+    for group in sorted(opts_by_group.keys()):
+        print_group_opts(group, opts_by_group[group])


 def _import_module(mod_str):
@@ -104,17 +139,17 @@ def _import_module(mod_str):
             return sys.modules[mod_str[4:]]
         else:
             return importutils.import_module(mod_str)
-    except ImportError as ie:
-        sys.stderr.write("%s\n" % str(ie))
-        return None
-    except Exception:
+    except Exception as e:
+        sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
         return None


 def _is_in_group(opt, group):
     "Check if opt is in group."
-    for key, value in group._opts.items():
-        if value['opt'] == opt:
+    for value in group._opts.values():
+        # NOTE(llu): Temporary workaround for bug #1262148, wait until
+        # newly released oslo.config support '==' operator.
+        if not(value['opt'] != opt):
             return True
     return False
@@ -125,7 +160,7 @@ def _guess_groups(opt, mod_obj):
         return 'DEFAULT'

     # what other groups is it in?
-    for key, value in cfg.CONF.items():
+    for value in cfg.CONF.values():
         if isinstance(value, cfg.CONF.GroupAttr):
             if _is_in_group(opt, value._group):
                 return value._group.name
@@ -181,24 +216,24 @@ def _get_my_ip():
         return None


-def _sanitize_default(s):
+def _sanitize_default(name, value):
     """Set up a reasonably sensible default for pybasedir, my_ip and host."""
-    if s.startswith(sys.prefix):
+    if value.startswith(sys.prefix):
         # NOTE(jd) Don't use os.path.join, because it is likely to think the
         # second part is an absolute pathname and therefore drop the first
         # part.
-        s = os.path.normpath("/usr/" + s[len(sys.prefix):])
-    elif s.startswith(BASEDIR):
-        return s.replace(BASEDIR, '/usr/lib/python/site-packages')
-    elif BASEDIR in s:
-        return s.replace(BASEDIR, '')
-    elif s == _get_my_ip():
+        value = os.path.normpath("/usr/" + value[len(sys.prefix):])
+    elif value.startswith(BASEDIR):
+        return value.replace(BASEDIR, '/usr/lib/python/site-packages')
+    elif BASEDIR in value:
+        return value.replace(BASEDIR, '')
+    elif value == _get_my_ip():
         return '10.0.0.1'
-    elif s == socket.gethostname():
+    elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
         return 'muranoapi'
-    elif s.strip() != s:
-        return '"%s"' % s
-    return s
+    elif value.strip() != value:
+        return '"%s"' % value
+    return value


 def _print_opt(opt):
@@ -212,14 +247,24 @@ def _print_opt(opt):
     except (ValueError, AttributeError) as err:
         sys.stderr.write("%s\n" % str(err))
         sys.exit(1)
-    opt_help += ' (' + OPT_TYPES[opt_type] + ')'
+    opt_help = u'%s (%s)' % (opt_help,
+                             OPT_TYPES[opt_type])
     print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
+    if opt.deprecated_opts:
+        for deprecated_opt in opt.deprecated_opts:
+            if deprecated_opt.name:
+                deprecated_group = (deprecated_opt.group if
+                                    deprecated_opt.group else "DEFAULT")
+                print('# Deprecated group/name - [%s]/%s' %
+                      (deprecated_group,
+                       deprecated_opt.name))
     try:
         if opt_default is None:
             print('#%s=<None>' % opt_name)
         elif opt_type == STROPT:
-            assert(isinstance(opt_default, basestring))
-            print('#%s=%s' % (opt_name, _sanitize_default(opt_default)))
+            assert(isinstance(opt_default, six.string_types))
+            print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
+                                                          opt_default)))
         elif opt_type == BOOLOPT:
             assert(isinstance(opt_default, bool))
             print('#%s=%s' % (opt_name, str(opt_default).lower()))
@@ -233,6 +278,11 @@ def _print_opt(opt):
         elif opt_type == LISTOPT:
             assert(isinstance(opt_default, list))
             print('#%s=%s' % (opt_name, ','.join(opt_default)))
+        elif opt_type == DICTOPT:
+            assert(isinstance(opt_default, dict))
+            opt_default_strlist = [str(key) + ':' + str(value)
+                                   for (key, value) in opt_default.items()]
+            print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
         elif opt_type == MULTISTROPT:
             assert(isinstance(opt_default, list))
             if not opt_default:
@@ -246,9 +296,6 @@ def _print_opt(opt):


 def main():
-    if len(sys.argv) < 2:
-        print("usage: %s [srcfile]...\n" % sys.argv[0])
-        sys.exit(0)
     generate(sys.argv[1:])

 if __name__ == '__main__':
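Note: with argparse in place, the generator accepts module names (-m) and library entry-point names (-l) in addition to source files; a hedged sketch of the equivalent programmatic call (the module path of the generator and the arguments are illustrative):

    # Assumed location of the generator in this tree:
    from muranoapi.openstack.common.config import generator

    # Equivalent to running the script with:
    #   -m muranoapi.common.config -l oslo.messaging muranoapi/api/app.py
    generator.generate(['-m', 'muranoapi.common.config',
                        '-l', 'oslo.messaging',
                        'muranoapi/api/app.py'])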
@@ -98,3 +98,14 @@ def get_context_from_function_and_args(function, args, kwargs):
             return arg

     return None
+
+
+def is_user_context(context):
+    """Indicates if the request context is a normal user."""
+    if not context:
+        return False
+    if context.is_admin:
+        return False
+    if not context.user_id or not context.project_id:
+        return False
+    return True
@ -1,16 +0,0 @@
|
|||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2012 Cloudscaling Group, Inc
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2013 Rackspace Hosting
 # All Rights Reserved.
 #
@@ -17,90 +15,148 @@

 """Multiple DB API backend support.

-Supported configuration options:
-
-The following two parameters are in the 'database' group:
-`backend`: DB backend name or full module path to DB backend module.
-`use_tpool`: Enable thread pooling of DB API calls.
-
 A DB backend module should implement a method named 'get_backend' which
 takes no arguments. The method can return any object that implements DB
 API methods.
-
-*NOTE*: There are bugs in eventlet when using tpool combined with
-threading locks. The python logging module happens to use such locks. To
-work around this issue, be sure to specify thread=False with
-eventlet.monkey_patch().
-
-A bug for eventlet has been filed here:
-
-https://bitbucket.org/eventlet/eventlet/issue/137/
 """

 import functools
+import logging
+import threading
+import time

-from oslo.config import cfg
+from muranoapi.openstack.common.db import exception
+from muranoapi.openstack.common.gettextutils import _LE
 from muranoapi.openstack.common import importutils
-from muranoapi.openstack.common import lockutils


-db_opts = [
-    cfg.StrOpt('backend',
-               default='sqlalchemy',
-               deprecated_name='db_backend',
-               deprecated_group='DEFAULT',
-               help='The backend to use for db'),
-    cfg.BoolOpt('use_tpool',
-                default=False,
-                deprecated_name='dbapi_use_tpool',
-                deprecated_group='DEFAULT',
-                help='Enable the experimental use of thread pooling for '
-                     'all DB API calls')
-]
-
-CONF = cfg.CONF
-CONF.register_opts(db_opts, 'database')
+LOG = logging.getLogger(__name__)
+
+
+def safe_for_db_retry(f):
+    """Enable db-retry for decorated function, if config option enabled."""
+    f.__dict__['enable_retry'] = True
+    return f
+
+
+class wrap_db_retry(object):
+    """Retry db.api methods, if DBConnectionError() raised
+
+    Retry decorated db.api methods. If we enabled `use_db_reconnect`
+    in config, this decorator will be applied to all db.api functions,
+    marked with @safe_for_db_retry decorator.
+    Decorator catchs DBConnectionError() and retries function in a
+    loop until it succeeds, or until maximum retries count will be reached.
+    """
+
+    def __init__(self, retry_interval, max_retries, inc_retry_interval,
+                 max_retry_interval):
+        super(wrap_db_retry, self).__init__()
+
+        self.retry_interval = retry_interval
+        self.max_retries = max_retries
+        self.inc_retry_interval = inc_retry_interval
+        self.max_retry_interval = max_retry_interval
+
+    def __call__(self, f):
+        @functools.wraps(f)
+        def wrapper(*args, **kwargs):
+            next_interval = self.retry_interval
+            remaining = self.max_retries
+
+            while True:
+                try:
+                    return f(*args, **kwargs)
+                except exception.DBConnectionError as e:
+                    if remaining == 0:
+                        LOG.exception(_LE('DB exceeded retry limit.'))
+                        raise exception.DBError(e)
+                    if remaining != -1:
+                        remaining -= 1
+                        LOG.exception(_LE('DB connection error.'))
+                    # NOTE(vsergeyev): We are using patched time module, so
+                    #                  this effectively yields the execution
+                    #                  context to another green thread.
+                    time.sleep(next_interval)
+                    if self.inc_retry_interval:
+                        next_interval = min(
+                            next_interval * 2,
+                            self.max_retry_interval
+                        )
+        return wrapper


 class DBAPI(object):
-    def __init__(self, backend_mapping=None):
-        if backend_mapping is None:
-            backend_mapping = {}
-        self.__backend = None
-        self.__backend_mapping = backend_mapping
+    def __init__(self, backend_name, backend_mapping=None, lazy=False,
+                 **kwargs):
+        """Initialize the chosen DB API backend.
+
+        :param backend_name: name of the backend to load
+        :type backend_name: str
+
+        :param backend_mapping: backend name -> module/class to load mapping
+        :type backend_mapping: dict
+
+        :param lazy: load the DB backend lazily on the first DB API method call
+        :type lazy: bool
+
+        Keyword arguments:
+
+        :keyword use_db_reconnect: retry DB transactions on disconnect or not
+        :type use_db_reconnect: bool
+
+        :keyword retry_interval: seconds between transaction retries
+        :type retry_interval: int
+
+        :keyword inc_retry_interval: increase retry interval or not
+        :type inc_retry_interval: bool
+
+        :keyword max_retry_interval: max interval value between retries
+        :type max_retry_interval: int
+
+        :keyword max_retries: max number of retries before an error is raised
+        :type max_retries: int
+        """

-    @lockutils.synchronized('dbapi_backend', 'muranoapi-')
-    def __get_backend(self):
-        """Get the actual backend.  May be a module or an instance of
-        a class.  Doesn't matter to us.  We do this synchronized as it's
-        possible multiple greenthreads started very quickly trying to do
-        DB calls and eventlet can switch threads before self.__backend gets
-        assigned.
-        """
-        if self.__backend:
-            # Another thread assigned it
-            return self.__backend
-        backend_name = CONF.database.backend
-        self.__use_tpool = CONF.database.use_tpool
-        if self.__use_tpool:
-            from eventlet import tpool
-            self.__tpool = tpool
-        # Import the untranslated name if we don't have a
-        # mapping.
-        backend_path = self.__backend_mapping.get(backend_name,
-                                                  backend_name)
-        backend_mod = importutils.import_module(backend_path)
-        self.__backend = backend_mod.get_backend()
-        return self.__backend
+        self._backend = None
+        self._backend_name = backend_name
+        self._backend_mapping = backend_mapping or {}
+        self._lock = threading.Lock()
+
+        if not lazy:
+            self._load_backend()
+
+        self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
+        self.retry_interval = kwargs.get('retry_interval', 1)
+        self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
+        self.max_retry_interval = kwargs.get('max_retry_interval', 10)
+        self.max_retries = kwargs.get('max_retries', 20)
+
+    def _load_backend(self):
+        with self._lock:
+            if not self._backend:
+                # Import the untranslated name if we don't have a mapping
+                backend_path = self._backend_mapping.get(self._backend_name,
+                                                         self._backend_name)
+                backend_mod = importutils.import_module(backend_path)
+                self._backend = backend_mod.get_backend()

     def __getattr__(self, key):
-        backend = self.__backend or self.__get_backend()
-        attr = getattr(backend, key)
-        if not self.__use_tpool or not hasattr(attr, '__call__'):
-            return attr
+        if not self._backend:
+            self._load_backend()
+
+        attr = getattr(self._backend, key)
+        if not hasattr(attr, '__call__'):
+            return attr
+        # NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
+        #                  DB API methods, decorated with @safe_for_db_retry
+        #                  on disconnect.
+        if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
+            attr = wrap_db_retry(
+                retry_interval=self.retry_interval,
+                max_retries=self.max_retries,
+                inc_retry_interval=self.inc_retry_interval,
+                max_retry_interval=self.max_retry_interval)(attr)

-        def tpool_wrapper(*args, **kwargs):
-            return self.__tpool.execute(attr, *args, **kwargs)
-
-        functools.update_wrapper(tpool_wrapper, attr)
-        return tpool_wrapper
+        return attr
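Note: the new retry machinery is opt-in per function; a minimal sketch of how a backend module would use it (the module path given to DBAPI and the query function are illustrative):

    from muranoapi.openstack.common.db import api as db_api

    # --- hypothetical backend module content ---
    @db_api.safe_for_db_retry            # marks the function as retry-safe
    def get_environment(context, env_id):
        pass  # the actual DB query would go here

    def get_backend():
        import sys
        return sys.modules[__name__]     # DBAPI calls get_backend() on import

    # --- application side ---
    # With use_db_reconnect=True, calls to functions marked with
    # @safe_for_db_retry are transparently retried on DBConnectionError.
    IMPL = db_api.DBAPI('myservice.db.sqlalchemy.api',  # illustrative path
                        use_db_reconnect=True)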
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -18,14 +16,16 @@

 """DB related custom exceptions."""

-from muranoapi.openstack.common.gettextutils import _  # noqa
+import six
+
+from muranoapi.openstack.common.gettextutils import _


 class DBError(Exception):
     """Wraps an implementation specific exception."""
     def __init__(self, inner_exception=None):
         self.inner_exception = inner_exception
-        super(DBError, self).__init__(str(inner_exception))
+        super(DBError, self).__init__(six.text_type(inner_exception))


 class DBDuplicateEntry(DBError):
@@ -43,3 +43,14 @@ class DBDeadlock(DBError):
 class DBInvalidUnicodeParameter(Exception):
     message = _("Invalid Parameter: "
                 "Unicode is not supported by the current database.")
+
+
+class DbMigrationError(DBError):
+    """Wraps migration specific exception."""
+    def __init__(self, message=None):
+        super(DbMigrationError, self).__init__(message)
+
+
+class DBConnectionError(DBError):
+    """Wraps connection specific exception."""
+    pass
muranoapi/openstack/common/db/options.py (new file, 171 lines)
@@ -0,0 +1,171 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from oslo.config import cfg
+
+
+database_opts = [
+    cfg.StrOpt('sqlite_db',
+               deprecated_group='DEFAULT',
+               default='muranoapi.sqlite',
+               help='The file name to use with SQLite'),
+    cfg.BoolOpt('sqlite_synchronous',
+                deprecated_group='DEFAULT',
+                default=True,
+                help='If True, SQLite uses synchronous mode'),
+    cfg.StrOpt('backend',
+               default='sqlalchemy',
+               deprecated_name='db_backend',
+               deprecated_group='DEFAULT',
+               help='The backend to use for db'),
+    cfg.StrOpt('connection',
+               help='The SQLAlchemy connection string used to connect to the '
+                    'database',
+               secret=True,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
+                                                  group='DEFAULT'),
+                                cfg.DeprecatedOpt('sql_connection',
+                                                  group='DATABASE'),
+                                cfg.DeprecatedOpt('connection',
+                                                  group='sql'), ]),
+    cfg.StrOpt('mysql_sql_mode',
+               default='TRADITIONAL',
+               help='The SQL mode to be used for MySQL sessions. '
+                    'This option, including the default, overrides any '
+                    'server-set SQL mode. To use whatever SQL mode '
+                    'is set by the server configuration, '
+                    'set this to no value. Example: mysql_sql_mode='),
+    cfg.IntOpt('idle_timeout',
+               default=3600,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
+                                                  group='DEFAULT'),
+                                cfg.DeprecatedOpt('sql_idle_timeout',
+                                                  group='DATABASE'),
+                                cfg.DeprecatedOpt('idle_timeout',
+                                                  group='sql')],
+               help='Timeout before idle sql connections are reaped'),
+    cfg.IntOpt('min_pool_size',
+               default=1,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
+                                                  group='DEFAULT'),
+                                cfg.DeprecatedOpt('sql_min_pool_size',
+                                                  group='DATABASE')],
+               help='Minimum number of SQL connections to keep open in a '
+                    'pool'),
+    cfg.IntOpt('max_pool_size',
+               default=None,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
+                                                  group='DEFAULT'),
+                                cfg.DeprecatedOpt('sql_max_pool_size',
+                                                  group='DATABASE')],
+               help='Maximum number of SQL connections to keep open in a '
+                    'pool'),
+    cfg.IntOpt('max_retries',
+               default=10,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
+                                                  group='DEFAULT'),
+                                cfg.DeprecatedOpt('sql_max_retries',
+                                                  group='DATABASE')],
+               help='Maximum db connection retries during startup. '
+                    '(setting -1 implies an infinite retry count)'),
+    cfg.IntOpt('retry_interval',
+               default=10,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
+                                                  group='DEFAULT'),
+                                cfg.DeprecatedOpt('reconnect_interval',
+                                                  group='DATABASE')],
+               help='Interval between retries of opening a sql connection'),
+    cfg.IntOpt('max_overflow',
+               default=None,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
+                                                  group='DEFAULT'),
+                                cfg.DeprecatedOpt('sqlalchemy_max_overflow',
+                                                  group='DATABASE')],
+               help='If set, use this value for max_overflow with sqlalchemy'),
+    cfg.IntOpt('connection_debug',
+               default=0,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
+                                                  group='DEFAULT')],
+               help='Verbosity of SQL debugging information. 0=None, '
+                    '100=Everything'),
+    cfg.BoolOpt('connection_trace',
+                default=False,
+                deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
+                                                   group='DEFAULT')],
+                help='Add python stack traces to SQL as comment strings'),
+    cfg.IntOpt('pool_timeout',
+               default=None,
+               deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
+                                                  group='DATABASE')],
+               help='If set, use this value for pool_timeout with sqlalchemy'),
+    cfg.BoolOpt('use_db_reconnect',
+                default=False,
+                help='Enable the experimental use of database reconnect '
+                     'on connection lost'),
+    cfg.IntOpt('db_retry_interval',
+               default=1,
+               help='seconds between db connection retries'),
+    cfg.BoolOpt('db_inc_retry_interval',
+                default=True,
+                help='Whether to increase interval between db connection '
+                     'retries, up to db_max_retry_interval'),
+    cfg.IntOpt('db_max_retry_interval',
+               default=10,
+               help='max seconds between db connection retries, if '
+                    'db_inc_retry_interval is enabled'),
+    cfg.IntOpt('db_max_retries',
+               default=20,
+               help='maximum db connection retries before error is raised. '
+                    '(setting -1 implies an infinite retry count)'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(database_opts, 'database')
+
+
+def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
+                 max_overflow=None, pool_timeout=None):
+    """Set defaults for configuration variables."""
+    cfg.set_defaults(database_opts,
+                     connection=sql_connection,
+                     sqlite_db=sqlite_db)
+    # Update the QueuePool defaults
+    if max_pool_size is not None:
+        cfg.set_defaults(database_opts,
+                         max_pool_size=max_pool_size)
+    if max_overflow is not None:
+        cfg.set_defaults(database_opts,
+                         max_overflow=max_overflow)
+    if pool_timeout is not None:
+        cfg.set_defaults(database_opts,
+                         pool_timeout=pool_timeout)
+
+
+def list_opts():
+    """Returns a list of oslo.config options available in the library.
+
+    The returned list includes all oslo.config options which may be registered
+    at runtime by the library.
+
+    Each element of the list is a tuple. The first element is the name of the
+    group under which the list of elements in the second element will be
+    registered. A group name of None corresponds to the [DEFAULT] group in
+    config files.
+
+    The purpose of this is to allow tools like the Oslo sample config file
+    generator to discover the options exposed to users by this library.
+
+    :returns: a list of (group_name, opts) tuples
+    """
+    return [('database', copy.deepcopy(database_opts))]
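Note: a short sketch of how a service would consume this module, overriding pool defaults and letting the sample-config generator discover the options (the connection URL is illustrative):

    from muranoapi.openstack.common.db import options

    # Override library defaults before config files are parsed.
    options.set_defaults(sql_connection='sqlite:///murano.sqlite',
                         sqlite_db='murano.sqlite',
                         max_pool_size=10)

    # The generator's '-l' flag resolves an 'oslo.config.opts' entry point
    # to a callable like list_opts() and prints every option it returns.
    for group, opts in options.list_opts():
        print(group, [opt.name for opt in opts])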
@ -1,16 +0,0 @@
|
|||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2012 Cloudscaling Group, Inc
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
@@ -36,14 +36,23 @@
 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.

+import os
 import re

 from migrate.changeset import ansisql
 from migrate.changeset.databases import sqlite
+from migrate import exceptions as versioning_exceptions
+from migrate.versioning import api as versioning_api
+from migrate.versioning.repository import Repository
+import sqlalchemy
 from sqlalchemy.schema import UniqueConstraint

+from muranoapi.openstack.common.db import exception
+from muranoapi.openstack.common.gettextutils import _
+

 def _get_unique_constraints(self, table):
     """Retrieve information about existing unique constraints of the table
@@ -157,3 +166,103 @@ def patch_migrate():
         _visit_migrate_unique_constraint
     constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
                                 sqlite.SQLiteConstraintGenerator)
+
+
+def db_sync(engine, abs_path, version=None, init_version=0):
+    """Upgrade or downgrade a database.
+
+    Function runs the upgrade() or downgrade() functions in change scripts.
+
+    :param engine:       SQLAlchemy engine instance for a given database
+    :param abs_path:     Absolute path to migrate repository.
+    :param version:      Database will upgrade/downgrade until this version.
+                         If None - database will update to the latest
+                         available version.
+    :param init_version: Initial database version
+    """
+    if version is not None:
+        try:
+            version = int(version)
+        except ValueError:
+            raise exception.DbMigrationError(
+                message=_("version should be an integer"))
+
+    current_version = db_version(engine, abs_path, init_version)
+    repository = _find_migrate_repo(abs_path)
+    _db_schema_sanity_check(engine)
+    if version is None or version > current_version:
+        return versioning_api.upgrade(engine, repository, version)
+    else:
+        return versioning_api.downgrade(engine, repository,
+                                        version)
+
+
+def _db_schema_sanity_check(engine):
+    """Ensure all database tables were created with required parameters.
+
+    :param engine:  SQLAlchemy engine instance for a given database
+
+    """
+
+    if engine.name == 'mysql':
+        onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
+                        'from information_schema.TABLES '
+                        'where TABLE_SCHEMA=%s and '
+                        'TABLE_COLLATION NOT LIKE "%%utf8%%"')
+
+        table_names = [res[0] for res in engine.execute(onlyutf8_sql,
+                                                        engine.url.database)]
+        if len(table_names) > 0:
+            raise ValueError(_('Tables "%s" have non utf8 collation, '
+                               'please make sure all tables are CHARSET=utf8'
+                               ) % ','.join(table_names))
+
+
+def db_version(engine, abs_path, init_version):
+    """Show the current version of the repository.
+
+    :param engine:  SQLAlchemy engine instance for a given database
+    :param abs_path: Absolute path to migrate repository
+    :param version:  Initial database version
+    """
+    repository = _find_migrate_repo(abs_path)
+    try:
+        return versioning_api.db_version(engine, repository)
+    except versioning_exceptions.DatabaseNotControlledError:
+        meta = sqlalchemy.MetaData()
+        meta.reflect(bind=engine)
+        tables = meta.tables
+        if len(tables) == 0 or 'alembic_version' in tables:
+            db_version_control(engine, abs_path, version=init_version)
+            return versioning_api.db_version(engine, repository)
+        else:
+            raise exception.DbMigrationError(
+                message=_(
+                    "The database is not under version control, but has "
+                    "tables. Please stamp the current version of the schema "
+                    "manually."))
+
+
+def db_version_control(engine, abs_path, version=None):
+    """Mark a database as under this repository's version control.
+
+    Once a database is under version control, schema changes should
+    only be done via change scripts in this repository.
+
+    :param engine:  SQLAlchemy engine instance for a given database
+    :param abs_path: Absolute path to migrate repository
+    :param version:  Initial database version
+    """
+    repository = _find_migrate_repo(abs_path)
+    versioning_api.version_control(engine, repository, version)
+    return version
+
+
+def _find_migrate_repo(abs_path):
+    """Get the project's change script repository
+
+    :param abs_path: Absolute path to migrate repository
+    """
+    if not os.path.exists(abs_path):
+        raise exception.DbMigrationError("Path %s not found" % abs_path)
+    return Repository(abs_path)
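Note: a minimal sketch of driving these helpers from a service's manage command (the engine URL and repository path are illustrative):

    import sqlalchemy

    from muranoapi.openstack.common.db.sqlalchemy import migration

    engine = sqlalchemy.create_engine('sqlite:///murano.sqlite')  # illustrative
    repo = '/path/to/muranoapi/db/migrate_repo'                   # illustrative

    print(migration.db_version(engine, repo, init_version=0))  # current revision
    migration.db_sync(engine, repo)             # upgrade to latest version
    migration.db_sync(engine, repo, version=5)  # or pin to a specific revision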
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
@@ -28,7 +26,6 @@ from sqlalchemy import Column, Integer
 from sqlalchemy import DateTime
 from sqlalchemy.orm import object_mapper

-from muranoapi.openstack.common.db.sqlalchemy import session as sa
 from muranoapi.openstack.common import timeutils
@@ -36,18 +33,17 @@ class ModelBase(object):
     """Base class for models."""
     __table_initialized__ = False

-    def save(self, session=None):
+    def save(self, session):
         """Save this object."""
-        if not session:
-            session = sa.get_session()
         # NOTE(boris-42): This part of code should be look like:
-        #                   sesssion.add(self)
+        #                   session.add(self)
         #                   session.flush()
         #                 But there is a bug in sqlalchemy and eventlet that
         #                 raises NoneType exception if there is no running
         #                 transaction and rollback is called. As long as
         #                 sqlalchemy has this bug we have to create transaction
-        #                 explicity.
+        #                 explicitly.
         with session.begin(subtransactions=True):
             session.add(self)
             session.flush()
|
|||||||
def get(self, key, default=None):
|
def get(self, key, default=None):
|
||||||
return getattr(self, key, default)
|
return getattr(self, key, default)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def _extra_keys(self):
|
||||||
|
"""Specifies custom fields
|
||||||
|
|
||||||
|
Subclasses can override this property to return a list
|
||||||
|
of custom fields that should be included in their dict
|
||||||
|
representation.
|
||||||
|
|
||||||
|
For reference check tests/db/sqlalchemy/test_models.py
|
||||||
|
"""
|
||||||
|
return []
|
||||||
|
|
||||||
def __iter__(self):
|
def __iter__(self):
|
||||||
columns = dict(object_mapper(self).columns).keys()
|
columns = dict(object_mapper(self).columns).keys()
|
||||||
# NOTE(russellb): Allow models to specify other keys that can be looked
|
# NOTE(russellb): Allow models to specify other keys that can be looked
|
||||||
# up, beyond the actual db columns. An example would be the 'name'
|
# up, beyond the actual db columns. An example would be the 'name'
|
||||||
# property for an Instance.
|
# property for an Instance.
|
||||||
if hasattr(self, '_extra_keys'):
|
columns.extend(self._extra_keys)
|
||||||
columns.extend(self._extra_keys())
|
|
||||||
self._i = iter(columns)
|
self._i = iter(columns)
|
||||||
return self
|
return self
|
||||||
|
|
||||||
@@ -89,19 +96,19 @@ class ModelBase(object):
         joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
                        if not k[0] == '_'])
         local.update(joined)
-        return local.iteritems()
+        return six.iteritems(local)


 class TimestampMixin(object):
-    created_at = Column(DateTime, default=timeutils.utcnow)
-    updated_at = Column(DateTime, onupdate=timeutils.utcnow)
+    created_at = Column(DateTime, default=lambda: timeutils.utcnow())
+    updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())


 class SoftDeleteMixin(object):
     deleted_at = Column(DateTime)
     deleted = Column(Integer, default=0)

-    def soft_delete(self, session=None):
+    def soft_delete(self, session):
         """Mark this object as deleted."""
         self.deleted = self.id
         self.deleted_at = timeutils.utcnow()
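Note: a model in this style combines ModelBase with the mixins; a hedged sketch (the table and column names are purely illustrative):

    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base

    # Assuming the classes synced above are importable from this path.
    from muranoapi.openstack.common.db.sqlalchemy import models

    Base = declarative_base()

    class Environment(Base, models.ModelBase,
                      models.TimestampMixin, models.SoftDeleteMixin):
        __tablename__ = 'environment'
        id = sa.Column(sa.Integer, primary_key=True)
        name = sa.Column(sa.String(255))

    # save() and soft_delete() now require an explicit session:
    #   env = Environment(name='demo')
    #   env.save(session)
    #   env.soft_delete(session)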
muranoapi/openstack/common/db/sqlalchemy/provision.py (new file, 187 lines)
@@ -0,0 +1,187 @@
+# Copyright 2013 Mirantis.inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Provision test environment for specific DB backends"""
+
+import argparse
+import os
+import random
+import string
+
+from six import moves
+import sqlalchemy
+
+from muranoapi.openstack.common.db import exception as exc
+
+
+SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://')
+
+
+def _gen_credentials(*names):
+    """Generate credentials."""
+    auth_dict = {}
+    for name in names:
+        val = ''.join(random.choice(string.ascii_lowercase)
+                      for i in moves.range(10))
+        auth_dict[name] = val
+    return auth_dict
+
+
+def _get_engine(uri=SQL_CONNECTION):
+    """Engine creation
+
+    By default the uri is SQL_CONNECTION which is admin credentials.
+    Call the function without arguments to get admin connection. Admin
+    connection required to create temporary user and database for each
+    particular test. Otherwise use existing connection to recreate connection
+    to the temporary database.
+    """
+    return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool)
+
+
+def _execute_sql(engine, sql, driver):
+    """Initialize connection, execute sql query and close it."""
+    try:
+        with engine.connect() as conn:
+            if driver == 'postgresql':
+                conn.connection.set_isolation_level(0)
+            for s in sql:
+                conn.execute(s)
+    except sqlalchemy.exc.OperationalError:
+        msg = ('%s does not match database admin '
+               'credentials or database does not exist.')
+        raise exc.DBConnectionError(msg % SQL_CONNECTION)
+
+
+def create_database(engine):
+    """Provide temporary user and database for each particular test."""
+    driver = engine.name
+
+    auth = _gen_credentials('database', 'user', 'passwd')
+
+    sqls = {
+        'mysql': [
+            "drop database if exists %(database)s;",
+            "grant all on %(database)s.* to '%(user)s'@'localhost'"
+            " identified by '%(passwd)s';",
+            "create database %(database)s;",
+        ],
+        'postgresql': [
+            "drop database if exists %(database)s;",
+            "drop user if exists %(user)s;",
+            "create user %(user)s with password '%(passwd)s';",
+            "create database %(database)s owner %(user)s;",
+        ]
+    }
+
+    if driver == 'sqlite':
+        return 'sqlite:////tmp/%s' % auth['database']
+
+    try:
+        sql_rows = sqls[driver]
+    except KeyError:
+        raise ValueError('Unsupported RDBMS %s' % driver)
+    sql_query = map(lambda x: x % auth, sql_rows)
+
+    _execute_sql(engine, sql_query, driver)
+
+    params = auth.copy()
+    params['backend'] = driver
+    return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params
+
+
+def drop_database(engine, current_uri):
+    """Drop temporary database and user after each particular test."""
+    engine = _get_engine(current_uri)
+    admin_engine = _get_engine()
+    driver = engine.name
+    auth = {'database': engine.url.database, 'user': engine.url.username}
+
+    if driver == 'sqlite':
+        try:
+            os.remove(auth['database'])
+        except OSError:
+            pass
+        return
+
+    sqls = {
+        'mysql': [
+            "drop database if exists %(database)s;",
+            "drop user '%(user)s'@'localhost';",
+        ],
+        'postgresql': [
+            "drop database if exists %(database)s;",
+            "drop user if exists %(user)s;",
+        ]
+    }
+
+    try:
+        sql_rows = sqls[driver]
+    except KeyError:
+        raise ValueError('Unsupported RDBMS %s' % driver)
+    sql_query = map(lambda x: x % auth, sql_rows)
+
+    _execute_sql(admin_engine, sql_query, driver)
+
+
+def main():
+    """Controller to handle commands
+
+    ::create: Create test user and database with random names.
+    ::drop: Drop user and database created by previous command.
+    """
+    parser = argparse.ArgumentParser(
+        description='Controller to handle database creation and dropping'
+                    ' commands.',
+        epilog='Under normal circumstances is not used directly.'
+               ' Used in .testr.conf to automate test database creation'
+               ' and dropping processes.')
+    subparsers = parser.add_subparsers(
+        help='Subcommands to manipulate temporary test databases.')
+
+    create = subparsers.add_parser(
+        'create',
+        help='Create temporary test '
+             'databases and users.')
+    create.set_defaults(which='create')
+    create.add_argument(
+        'instances_count',
+        type=int,
+        help='Number of databases to create.')
+
+    drop = subparsers.add_parser(
+        'drop',
+        help='Drop temporary test databases and users.')
+    drop.set_defaults(which='drop')
+    drop.add_argument(
+        'instances',
+        nargs='+',
+        help='List of databases uri to be dropped.')
+
+    args = parser.parse_args()
+
+    engine = _get_engine()
+    which = args.which
+
+    if which == "create":
+        for i in range(int(args.instances_count)):
+            print(create_database(engine))
+    elif which == "drop":
+        for db in args.instances:
+            drop_database(engine, db)
+
+
+if __name__ == "__main__":
+    main()
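Note: this module doubles as a small CLI invoked from .testr.conf; a sketch of calling it programmatically instead (the env var is the one the module reads at import time, the MySQL URL is illustrative):

    import os

    # Must be set before the import, since SQL_CONNECTION is read then.
    os.environ['OS_TEST_DBAPI_ADMIN_CONNECTION'] = \
        'mysql://root:secret@localhost'  # illustrative admin credentials

    from muranoapi.openstack.common.db.sqlalchemy import provision

    admin = provision._get_engine()              # engine from the env var
    test_uri = provision.create_database(admin)  # temporary user + database
    try:
        pass  # run tests against test_uri here
    finally:
        provision.drop_database(admin, test_uri)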
(File diff suppressed because it is too large.)
muranoapi/openstack/common/db/sqlalchemy/test_base.py (new file, 149 lines)
@ -0,0 +1,149 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import functools
import os

import fixtures
import six

from muranoapi.openstack.common.db.sqlalchemy import session
from muranoapi.openstack.common.db.sqlalchemy import utils
from muranoapi.openstack.common import test


class DbFixture(fixtures.Fixture):
    """Basic database fixture.

    Allows running tests on various db backends, such as SQLite, MySQL and
    PostgreSQL. By default the sqlite backend is used. To override the
    default backend uri, set the env variable OS_TEST_DBAPI_CONNECTION
    with database admin credentials for the specific backend.
    """

    def _get_uri(self):
        return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://')

    def __init__(self, test):
        super(DbFixture, self).__init__()

        self.test = test

    def setUp(self):
        super(DbFixture, self).setUp()

        self.test.engine = session.create_engine(self._get_uri())
        self.test.sessionmaker = session.get_maker(self.test.engine)
        self.addCleanup(self.test.engine.dispose)


class DbTestCase(test.BaseTestCase):
    """Base class for testing of DB code.

    Uses `DbFixture`. Intended to be the main database test case, running
    all the tests on a given backend with a user defined uri. Backend
    specific tests should be decorated with the `backend_specific`
    decorator.
    """

    FIXTURE = DbFixture

    def setUp(self):
        super(DbTestCase, self).setUp()
        self.useFixture(self.FIXTURE(self))


ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']


def backend_specific(*dialects):
    """Decorator to skip backend specific tests on inappropriate engines.

    ::dialects: list of dialect names under which the test will be launched.
    """
    def wrap(f):
        @functools.wraps(f)
        def ins_wrap(self):
            if not set(dialects).issubset(ALLOWED_DIALECTS):
                raise ValueError(
                    "Please use allowed dialects: %s" % ALLOWED_DIALECTS)
            if self.engine.name not in dialects:
                msg = ('The test "%s" can be run '
                       'only on %s. Current engine is %s.')
                args = (f.__name__, ' '.join(dialects), self.engine.name)
                self.skip(msg % args)
            else:
                return f(self)
        return ins_wrap
    return wrap


@six.add_metaclass(abc.ABCMeta)
class OpportunisticFixture(DbFixture):
    """Base fixture to use default CI databases.

    The databases exist in the OpenStack CI infrastructure. For correct
    functioning in a local environment the databases must be created
    manually.
    """

    DRIVER = abc.abstractproperty(lambda: None)
    DBNAME = PASSWORD = USERNAME = 'openstack_citest'

    def _get_uri(self):
        return utils.get_connect_string(backend=self.DRIVER,
                                        user=self.USERNAME,
                                        passwd=self.PASSWORD,
                                        database=self.DBNAME)


@six.add_metaclass(abc.ABCMeta)
class OpportunisticTestCase(DbTestCase):
    """Base test case to use default CI databases.

    Subclasses of this test case run only when the openstack_citest
    database is available; otherwise the tests are skipped.
    """

    FIXTURE = abc.abstractproperty(lambda: None)

    def setUp(self):
        credentials = {
            'backend': self.FIXTURE.DRIVER,
            'user': self.FIXTURE.USERNAME,
            'passwd': self.FIXTURE.PASSWORD,
            'database': self.FIXTURE.DBNAME}

        if self.FIXTURE.DRIVER and not utils.is_backend_avail(**credentials):
            msg = '%s backend is not available.' % self.FIXTURE.DRIVER
            return self.skip(msg)

        super(OpportunisticTestCase, self).setUp()


class MySQLOpportunisticFixture(OpportunisticFixture):
    DRIVER = 'mysql'


class PostgreSQLOpportunisticFixture(OpportunisticFixture):
    DRIVER = 'postgresql'


class MySQLOpportunisticTestCase(OpportunisticTestCase):
    FIXTURE = MySQLOpportunisticFixture


class PostgreSQLOpportunisticTestCase(OpportunisticTestCase):
    FIXTURE = PostgreSQLOpportunisticFixture
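A hypothetical usage sketch (not part of the commit) showing how the fixture and decorator above combine; the class and test names are illustrative:

class DbApiTestCase(DbTestCase):

    @backend_specific('mysql', 'postgresql')
    def test_server_side_behaviour(self):
        # Skipped automatically when the fixture engine is SQLite.
        self.assertIn(self.engine.name, ('mysql', 'postgresql'))

    def test_engine_is_usable(self):
        # Runs on whatever backend OS_TEST_DBAPI_CONNECTION selects.
        self.assertEqual(1, self.engine.execute('SELECT 1').scalar())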
muranoapi/openstack/common/db/sqlalchemy/test_migrations.py (new file, 269 lines)
@ -0,0 +1,269 @@
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import logging
import os
import subprocess

import lockfile
from six import moves
from six.moves.urllib import parse
import sqlalchemy
import sqlalchemy.exc

from muranoapi.openstack.common.db.sqlalchemy import utils
from muranoapi.openstack.common.gettextutils import _LE
from muranoapi.openstack.common import test

LOG = logging.getLogger(__name__)


def _have_mysql(user, passwd, database):
    present = os.environ.get('TEST_MYSQL_PRESENT')
    if present is None:
        return utils.is_backend_avail(backend='mysql',
                                      user=user,
                                      passwd=passwd,
                                      database=database)
    return present.lower() in ('', 'true')


def _have_postgresql(user, passwd, database):
    present = os.environ.get('TEST_POSTGRESQL_PRESENT')
    if present is None:
        return utils.is_backend_avail(backend='postgres',
                                      user=user,
                                      passwd=passwd,
                                      database=database)
    return present.lower() in ('', 'true')


def _set_db_lock(lock_path=None, lock_prefix=None):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                path = lock_path or os.environ.get("MURANOAPI_LOCK_PATH")
                lock = lockfile.FileLock(os.path.join(path, lock_prefix))
                with lock:
                    LOG.debug('Got lock "%s"' % f.__name__)
                    return f(*args, **kwargs)
            finally:
                LOG.debug('Lock released "%s"' % f.__name__)
        return wrapper
    return decorator


class BaseMigrationTestCase(test.BaseTestCase):
    """Base class for testing of migration utils."""

    def __init__(self, *args, **kwargs):
        super(BaseMigrationTestCase, self).__init__(*args, **kwargs)

        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                                'test_migrations.conf')
        # Test machines can set the TEST_MIGRATIONS_CONF variable
        # to override the location of the config file for migration testing
        self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
                                               self.DEFAULT_CONFIG_FILE)
        self.test_databases = {}
        self.migration_api = None

    def setUp(self):
        super(BaseMigrationTestCase, self).setUp()

        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
        if os.path.exists(self.CONFIG_FILE_PATH):
            cp = moves.configparser.RawConfigParser()
            try:
                cp.read(self.CONFIG_FILE_PATH)
                defaults = cp.defaults()
                for key, value in defaults.items():
                    self.test_databases[key] = value
            except moves.configparser.ParsingError as e:
                self.fail("Failed to read test_migrations.conf config "
                          "file. Got error: %s" % e)
        else:
            self.fail("Failed to find test_migrations.conf config "
                      "file.")

        self.engines = {}
        for key, value in self.test_databases.items():
            self.engines[key] = sqlalchemy.create_engine(value)

        # We start each test case with a completely blank slate.
        self._reset_databases()

    def tearDown(self):
        # We destroy the test data store between each test case,
        # and recreate it, which ensures that we have no side-effects
        # from the tests
        self._reset_databases()
        super(BaseMigrationTestCase, self).tearDown()

    def execute_cmd(self, cmd=None):
        process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        output = process.communicate()[0]
        LOG.debug(output)
        self.assertEqual(0, process.returncode,
                         "Failed to run: %s\n%s" % (cmd, output))

    def _reset_pg(self, conn_pieces):
        (user,
         password,
         database,
         host) = utils.get_db_connection_info(conn_pieces)
        os.environ['PGPASSWORD'] = password
        os.environ['PGUSER'] = user
        # note(boris-42): We must create and drop database, we can't
        # drop database which we have connected to, so for such
        # operations there is a special database template1.
        sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
                  " '%(sql)s' -d template1")

        sql = ("drop database if exists %s;") % database
        droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
        self.execute_cmd(droptable)

        sql = ("create database %s;") % database
        createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
        self.execute_cmd(createtable)

        os.unsetenv('PGPASSWORD')
        os.unsetenv('PGUSER')

    @_set_db_lock(lock_prefix='migration_tests-')
    def _reset_databases(self):
        for key, engine in self.engines.items():
            conn_string = self.test_databases[key]
            conn_pieces = parse.urlparse(conn_string)
            engine.dispose()
            if conn_string.startswith('sqlite'):
                # We can just delete the SQLite database, which is
                # the easiest and cleanest solution
                db_path = conn_pieces.path.strip('/')
                if os.path.exists(db_path):
                    os.unlink(db_path)
                # No need to recreate the SQLite DB. SQLite will
                # create it for us if it's not there...
            elif conn_string.startswith('mysql'):
                # We can execute the MySQL client to destroy and re-create
                # the MYSQL database, which is easier and less error-prone
                # than using SQLAlchemy to do this via MetaData...trust me.
                (user, password, database, host) = \
                    utils.get_db_connection_info(conn_pieces)
                sql = ("drop database if exists %(db)s; "
                       "create database %(db)s;") % {'db': database}
                cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s "
                       "-e \"%(sql)s\"") % {'user': user, 'password': password,
                                            'host': host, 'sql': sql}
                self.execute_cmd(cmd)
            elif conn_string.startswith('postgresql'):
                self._reset_pg(conn_pieces)


class WalkVersionsMixin(object):
    def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
        # Determine latest version script from the repo, then
        # upgrade from 1 through to the latest, with no data
        # in the databases. This just checks that the schema itself
        # upgrades successfully.

        # Place the database under version control
        self.migration_api.version_control(engine, self.REPOSITORY,
                                           self.INIT_VERSION)
        self.assertEqual(self.INIT_VERSION,
                         self.migration_api.db_version(engine,
                                                       self.REPOSITORY))

        LOG.debug('latest version is %s' % self.REPOSITORY.latest)
        versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)

        for version in versions:
            # upgrade -> downgrade -> upgrade
            self._migrate_up(engine, version, with_data=True)
            if snake_walk:
                downgraded = self._migrate_down(
                    engine, version - 1, with_data=True)
                if downgraded:
                    self._migrate_up(engine, version)

        if downgrade:
            # Now walk it back down to 0 from the latest, testing
            # the downgrade paths.
            for version in reversed(versions):
                # downgrade -> upgrade -> downgrade
                downgraded = self._migrate_down(engine, version - 1)

                if snake_walk and downgraded:
                    self._migrate_up(engine, version)
                    self._migrate_down(engine, version - 1)

    def _migrate_down(self, engine, version, with_data=False):
        try:
            self.migration_api.downgrade(engine, self.REPOSITORY, version)
        except NotImplementedError:
            # NOTE(sirp): some migrations, namely release-level
            # migrations, don't support a downgrade.
            return False

        self.assertEqual(
            version, self.migration_api.db_version(engine, self.REPOSITORY))

        # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
        # version). So if we have any downgrade checks, they need to be run for
        # the previous (higher numbered) migration.
        if with_data:
            post_downgrade = getattr(
                self, "_post_downgrade_%03d" % (version + 1), None)
            if post_downgrade:
                post_downgrade(engine)

        return True

    def _migrate_up(self, engine, version, with_data=False):
        """migrate up to a new version of the db.

        We allow for data insertion and post checks at every
        migration version with special _pre_upgrade_### and
        _check_### functions in the main test.
        """
        # NOTE(sdague): try block is here because it's impossible to debug
        # where a failed data migration happens otherwise
        try:
            if with_data:
                data = None
                pre_upgrade = getattr(
                    self, "_pre_upgrade_%03d" % version, None)
                if pre_upgrade:
                    data = pre_upgrade(engine)

            self.migration_api.upgrade(engine, self.REPOSITORY, version)
            self.assertEqual(version,
                             self.migration_api.db_version(engine,
                                                           self.REPOSITORY))
            if with_data:
                check = getattr(self, "_check_%03d" % version, None)
                if check:
                    check(engine, data)
        except Exception:
            LOG.error(_LE("Failed to migrate to version %s on engine %s") %
                      (version, engine))
            raise
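A hypothetical wiring of the mixin (a sketch; the INIT_VERSION value and the repository construction are assumptions, though muranoapi.db.migrate_repo is imported elsewhere in this commit):

import os

from migrate.versioning import api as versioning_api
from migrate.versioning import repository

from muranoapi.db import migrate_repo


class TestWalkMigrations(BaseMigrationTestCase, WalkVersionsMixin):
    def setUp(self):
        super(TestWalkMigrations, self).setUp()
        self.migration_api = versioning_api  # migrate's upgrade/downgrade API
        self.REPOSITORY = repository.Repository(
            os.path.abspath(os.path.dirname(migrate_repo.__file__)))
        self.INIT_VERSION = 0

    def test_walk_versions(self):
        # Exercise every upgrade (and downgrade) script on each backend
        # configured in test_migrations.conf.
        for engine in self.engines.values():
            self._walk_versions(engine, snake_walk=True, downgrade=True)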
muranoapi/openstack/common/db/sqlalchemy/utils.py (220 changed lines; mode: Executable file → Normal file)
@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2010-2011 OpenStack Foundation.
@ -18,6 +16,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import logging
+import re
+
+from migrate.changeset import UniqueConstraint
 import sqlalchemy
 from sqlalchemy import Boolean
 from sqlalchemy import CheckConstraint
@ -28,6 +30,7 @@ from sqlalchemy import func
 from sqlalchemy import Index
 from sqlalchemy import Integer
 from sqlalchemy import MetaData
+from sqlalchemy import or_
 from sqlalchemy.sql.expression import literal_column
 from sqlalchemy.sql.expression import UpdateBase
 from sqlalchemy.sql import select
@ -35,15 +38,23 @@ from sqlalchemy import String
 from sqlalchemy import Table
 from sqlalchemy.types import NullType
 
-from muranoapi.openstack.common.gettextutils import _  # noqa
-from muranoapi.openstack.common import exception
-from muranoapi.openstack.common import log as logging
+from muranoapi.openstack.common import context as request_context
+from muranoapi.openstack.common.db.sqlalchemy import models
+from muranoapi.openstack.common.gettextutils import _, _LI, _LW
 from muranoapi.openstack.common import timeutils
 
 
 LOG = logging.getLogger(__name__)
 
+_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")
+
+
+def sanitize_db_url(url):
+    match = _DBURL_REGEX.match(url)
+    if match:
+        return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
+    return url
+
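Illustrative behaviour of sanitize_db_url above (a sketch, not part of the commit): credentials embedded in a connection URL are masked before the URL is logged.

#   >>> sanitize_db_url('mysql://murano:secret@localhost/murano')
#   'mysql://****:****@localhost/murano'
#   >>> sanitize_db_url('sqlite:///murano.db')  # no credentials, unchanged
#   'sqlite:///murano.db'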
 class InvalidSortKey(Exception):
     message = _("Sort key supplied was not valid.")
@ -85,7 +96,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
     if 'id' not in sort_keys:
         # TODO(justinsb): If this ever gives a false-positive, check
         # the actual primary key, rather than assuming its id
-        LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
+        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
 
     assert(not (sort_dir and sort_dirs))
 
@ -124,9 +135,9 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
 
     # Build up an array of sort criteria as in the docstring
     criteria_list = []
-    for i in range(0, len(sort_keys)):
+    for i in range(len(sort_keys)):
         crit_attrs = []
-        for j in range(0, i):
+        for j in range(i):
             model_attr = getattr(model, sort_keys[j])
             crit_attrs.append((model_attr == marker_values[j]))
 
@ -148,6 +159,94 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
     return query
 
 
+def _read_deleted_filter(query, db_model, read_deleted):
+    if 'deleted' not in db_model.__table__.columns:
+        raise ValueError(_("There is no `deleted` column in `%s` table. "
+                           "Project doesn't use soft-deleted feature.")
+                         % db_model.__name__)
+
+    default_deleted_value = db_model.__table__.c.deleted.default.arg
+    if read_deleted == 'no':
+        query = query.filter(db_model.deleted == default_deleted_value)
+    elif read_deleted == 'yes':
+        pass  # omit the filter to include deleted and active
+    elif read_deleted == 'only':
+        query = query.filter(db_model.deleted != default_deleted_value)
+    else:
+        raise ValueError(_("Unrecognized read_deleted value '%s'")
+                         % read_deleted)
+    return query
+
+
+def _project_filter(query, db_model, context, project_only):
+    if project_only and 'project_id' not in db_model.__table__.columns:
+        raise ValueError(_("There is no `project_id` column in `%s` table.")
+                         % db_model.__name__)
+
+    if request_context.is_user_context(context) and project_only:
+        if project_only == 'allow_none':
+            is_none = None
+            query = query.filter(or_(db_model.project_id == context.project_id,
+                                     db_model.project_id == is_none))
+        else:
+            query = query.filter(db_model.project_id == context.project_id)
+
+    return query
+
+
+def model_query(context, model, session, args=None, project_only=False,
+                read_deleted=None):
+    """Query helper that accounts for context's `read_deleted` field.
+
+    :param context: context to query under
+
+    :param model: Model to query. Must be a subclass of ModelBase.
+    :type model: models.ModelBase
+
+    :param session: The session to use.
+    :type session: sqlalchemy.orm.session.Session
+
+    :param args: Arguments to query. If None - model is used.
+    :type args: tuple
+
+    :param project_only: If present and context is user-type, then restrict
+                         query to match the context's project_id. If set to
+                         'allow_none', restriction includes project_id = None.
+    :type project_only: bool
+
+    :param read_deleted: If present, overrides context's read_deleted field.
+    :type read_deleted: bool
+
+    Usage:
+
+        result = (utils.model_query(context, models.Instance, session=session)
+                  .filter_by(uuid=instance_uuid)
+                  .all())
+
+        query = utils.model_query(
+            context, Node,
+            session=session,
+            args=(func.count(Node.id), func.sum(Node.ram))
+        ).filter_by(project_id=project_id)
+    """
+
+    if not read_deleted:
+        if hasattr(context, 'read_deleted'):
+            # NOTE(viktors): some projects use `read_deleted` attribute in
+            # their contexts instead of `show_deleted`.
+            read_deleted = context.read_deleted
+        else:
+            read_deleted = context.show_deleted
+
+    if not issubclass(model, models.ModelBase):
+        raise TypeError(_("model should be a subclass of ModelBase"))
+
+    query = session.query(model) if not args else session.query(*args)
+    query = _read_deleted_filter(query, model, read_deleted)
+    query = _project_filter(query, model, context, project_only)
+
+    return query
+
+
 def get_table(engine, name):
     """Returns an sqlalchemy table dynamically from db.
 
@ -174,6 +273,10 @@ def visit_insert_from_select(element, compiler, **kw):
             compiler.process(element.select))
 
 
+class ColumnError(Exception):
+    """Error raised when no column or an invalid column is found."""
+
+
 def _get_not_supported_column(col_name_col_instance, column_name):
     try:
         column = col_name_col_instance[column_name]
@ -181,16 +284,53 @@ def _get_not_supported_column(col_name_col_instance, column_name):
         msg = _("Please specify column %s in col_name_col_instance "
                 "param. It is required because column has unsupported "
                 "type by sqlite).")
-        raise exception.OpenstackException(message=msg % column_name)
+        raise ColumnError(msg % column_name)
 
     if not isinstance(column, Column):
         msg = _("col_name_col_instance param has wrong type of "
                 "column instance for column %s It should be instance "
                 "of sqlalchemy.Column.")
-        raise exception.OpenstackException(message=msg % column_name)
+        raise ColumnError(msg % column_name)
     return column
 
 
+def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
+                           **col_name_col_instance):
+    """Drop unique constraint from table.
+
+    This method drops UC from table and works for mysql, postgresql and sqlite.
+    In mysql and postgresql we are able to use "alter table" construction.
+    Sqlalchemy doesn't support some sqlite column types and replaces their
+    type with NullType in metadata. We process these columns and replace
+    NullType with the correct column type.
+
+    :param migrate_engine: sqlalchemy engine
+    :param table_name: name of table that contains uniq constraint.
+    :param uc_name: name of uniq constraint that will be dropped.
+    :param columns: columns that are in uniq constraint.
+    :param col_name_col_instance: contains pair column_name=column_instance.
+                                  column_instance is instance of Column. These
+                                  params are required only for columns that
+                                  have unsupported types by sqlite. For
+                                  example BigInteger.
+    """
+
+    meta = MetaData()
+    meta.bind = migrate_engine
+    t = Table(table_name, meta, autoload=True)
+
+    if migrate_engine.name == "sqlite":
+        override_cols = [
+            _get_not_supported_column(col_name_col_instance, col.name)
+            for col in t.columns
+            if isinstance(col.type, NullType)
+        ]
+        for col in override_cols:
+            t.columns.replace(col)
+
+    uc = UniqueConstraint(*columns, table=t, name=uc_name)
+    uc.drop()
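A hypothetical migration snippet using drop_unique_constraint (the table, constraint and column names are illustrative): on SQLite, any column that SQLAlchemy reflects as NullType (e.g. BigInteger) must be supplied explicitly through col_name_col_instance.

from sqlalchemy import BigInteger, Column

drop_unique_constraint(migrate_engine, 'downloads',
                       'uniq_downloads0file_id0user_id',
                       'file_id', 'user_id',
                       file_id=Column('file_id', BigInteger()))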
 def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                           use_soft_delete, *uc_column_names):
     """Drop all old rows having the same values for columns in uc_columns.
@ -227,8 +367,8 @@ def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
 
     rows_to_delete_select = select([table.c.id]).where(delete_condition)
     for row in migrate_engine.execute(rows_to_delete_select).fetchall():
-        LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
+        LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
                    "%(table)s") % dict(id=row[0], table=table_name))
 
     if use_soft_delete:
         delete_statement = table.update().\
@ -248,8 +388,7 @@ def _get_default_deleted_value(table):
         return 0
     if isinstance(table.c.id.type, String):
         return ""
-    raise exception.OpenstackException(
-        message=_("Unsupported id columns type"))
+    raise ColumnError(_("Unsupported id columns type"))
 
 
 def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
@ -319,7 +458,7 @@ def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
 
     constraints = [constraint.copy() for constraint in table.constraints]
 
-    meta = MetaData(bind=migrate_engine)
+    meta = table.metadata
     new_table = Table(table_name + "__tmp__", meta,
                       *(columns + constraints))
     new_table.create()
@ -448,3 +587,52 @@ def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
         where(new_table.c.deleted == deleted).\
         values(deleted=default_deleted_value).\
         execute()
+
+
+def get_connect_string(backend, database, user=None, passwd=None):
+    """Get database connection
+
+    Try to get a connection with a very specific set of values, if we get
+    these then we'll run the tests, otherwise they are skipped
+    """
+    args = {'backend': backend,
+            'user': user,
+            'passwd': passwd,
+            'database': database}
+    if backend == 'sqlite':
+        template = '%(backend)s:///%(database)s'
+    else:
+        template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
+    return template % args
+
+
+def is_backend_avail(backend, database, user=None, passwd=None):
+    try:
+        connect_uri = get_connect_string(backend=backend,
+                                         database=database,
+                                         user=user,
+                                         passwd=passwd)
+        engine = sqlalchemy.create_engine(connect_uri)
+        connection = engine.connect()
+    except Exception:
+        # intentionally catch all to handle exceptions even if we don't
+        # have any backend code loaded.
+        return False
+    else:
+        connection.close()
+        engine.dispose()
+        return True
+
+
+def get_db_connection_info(conn_pieces):
+    database = conn_pieces.path.strip('/')
+    loc_pieces = conn_pieces.netloc.split('@')
+    host = loc_pieces[1]
+
+    auth_pieces = loc_pieces[0].split(':')
+    user = auth_pieces[0]
+    password = ""
+    if len(auth_pieces) > 1:
+        password = auth_pieces[1].strip()
+
+    return (user, password, database, host)
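A sketch of the three helpers above in use (the variable names and URI are illustrative, not part of the commit):

from six.moves.urllib import parse

# Skip a test when the CI database is missing:
if not is_backend_avail('postgresql', database='openstack_citest',
                        user='openstack_citest',
                        passwd='openstack_citest'):
    self.skipTest('postgresql backend is not available')

# get_db_connection_info() expects a urlparse() result:
pieces = parse.urlparse('mysql://user:secret@127.0.0.1/testdb')
user, password, database, host = get_db_connection_info(pieces)
# -> ('user', 'secret', 'testdb', '127.0.0.1')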
muranoapi/openstack/common/eventlet_backdoor.py
@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2012 OpenStack Foundation.
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@ -31,7 +29,7 @@ import eventlet.backdoor
 import greenlet
 from oslo.config import cfg
 
-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _LI
 from muranoapi.openstack.common import log as logging
 
 help_for_backdoor_port = (
@ -66,7 +64,7 @@ def _dont_use_this():
 
 
 def _find_objects(t):
-    return filter(lambda o: isinstance(o, t), gc.get_objects())
+    return [o for o in gc.get_objects() if isinstance(o, t)]
 
 
 def _print_greenthreads():
@ -104,7 +102,7 @@ def _listen(host, start_port, end_port, listen_func):
         try:
             return listen_func((host, try_port))
         except socket.error as exc:
-            if (exc.errno != errno.EADDRINUSE or try_port >= end_port):
+            if exc.errno != errno.EADDRINUSE or try_port >= end_port:
                 raise
             try_port += 1
 
@ -138,8 +136,10 @@ def initialize_if_enabled():
     # In the case of backdoor port being zero, a port number is assigned by
     # listen(). In any case, pull the port number out here.
     port = sock.getsockname()[1]
-    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
-             {'port': port, 'pid': os.getpid()})
+    LOG.info(
+        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
+        {'port': port, 'pid': os.getpid()}
+    )
     eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                      locals=backdoor_locals)
     return port
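An illustrative configuration sketch: the backdoor is enabled through the backdoor_port option this module registers; the accepted value forms are described by help_for_backdoor_port in the context above (a single port, a "start:end" range, or 0 for an OS-assigned port).

# murano-api.conf (hypothetical fragment)
#   [DEFAULT]
#   backdoor_port = 3000:3100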
muranoapi/openstack/common/excutils.py
@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # Copyright 2012, Red Hat, Inc.
 #
@ -24,7 +22,9 @@ import sys
 import time
 import traceback
 
-from muranoapi.openstack.common.gettextutils import _  # noqa
+import six
+
+from muranoapi.openstack.common.gettextutils import _LE
 
 
 class save_and_reraise_exception(object):
@ -42,13 +42,13 @@ class save_and_reraise_exception(object):
 
     In some cases the caller may not want to re-raise the exception, and
     for those circumstances this context provides a reraise flag that
-    can be used to suppress the exception. For example:
+    can be used to suppress the exception. For example::
 
       except Exception:
           with save_and_reraise_exception() as ctxt:
               decide_if_need_reraise()
               if not should_be_reraised:
                   ctxt.reraise = False
     """
     def __init__(self):
         self.reraise = True
@ -59,13 +59,13 @@ class save_and_reraise_exception(object):
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         if exc_type is not None:
-            logging.error(_('Original exception being dropped: %s'),
+            logging.error(_LE('Original exception being dropped: %s'),
                           traceback.format_exception(self.type_,
                                                      self.value,
                                                      self.tb))
             return False
         if self.reraise:
-            raise self.type_, self.value, self.tb
+            six.reraise(self.type_, self.value, self.tb)
 
 
 def forever_retry_uncaught_exceptions(infunc):
@ -77,7 +77,8 @@ def forever_retry_uncaught_exceptions(infunc):
         try:
             return infunc(*args, **kwargs)
         except Exception as exc:
-            if exc.message == last_exc_message:
+            this_exc_message = six.u(str(exc))
+            if this_exc_message == last_exc_message:
                 exc_count += 1
             else:
                 exc_count = 1
@ -85,12 +86,12 @@ def forever_retry_uncaught_exceptions(infunc):
             # the exception message changes
             cur_time = int(time.time())
             if (cur_time - last_log_time > 60 or
-                    exc.message != last_exc_message):
+                    this_exc_message != last_exc_message):
                 logging.exception(
-                    _('Unexpected exception occurred %d time(s)... '
+                    _LE('Unexpected exception occurred %d time(s)... '
                       'retrying.') % exc_count)
                 last_log_time = cur_time
-                last_exc_message = exc.message
+                last_exc_message = this_exc_message
                 exc_count = 0
             # This should be a very rare event. In case it isn't, do
             # a sleep.
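A hypothetical consumer of the decorator above (the function and its body are illustrative): the wrapped loop is restarted after any uncaught exception, and repeated identical failures are logged at most once per minute.

@forever_retry_uncaught_exceptions
def _consume_messages(connection):
    while True:
        connection.drain_events()  # assumed to exist; may raise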
muranoapi/openstack/common/fileutils.py
@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@ -15,13 +13,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-
 import contextlib
 import errno
 import os
+import tempfile
 
 from muranoapi.openstack.common import excutils
-from muranoapi.openstack.common.gettextutils import _  # noqa
 from muranoapi.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
@ -61,7 +58,7 @@ def read_cached_file(filename, force_reload=False):
     cache_info = _FILE_CACHE.setdefault(filename, {})
 
     if not cache_info or mtime > cache_info.get('mtime', 0):
-        LOG.debug(_("Reloading cached file %s") % filename)
+        LOG.debug("Reloading cached file %s" % filename)
         with open(filename) as fap:
             cache_info['data'] = fap.read()
         cache_info['mtime'] = mtime
@ -69,33 +66,34 @@ def read_cached_file(filename, force_reload=False):
     return (reloaded, cache_info['data'])
 
 
-def delete_if_exists(path):
+def delete_if_exists(path, remove=os.unlink):
     """Delete a file, but ignore file not found error.
 
     :param path: File to delete
+    :param remove: Optional function to remove passed path
     """
 
     try:
-        os.unlink(path)
+        remove(path)
     except OSError as e:
-        if e.errno == errno.ENOENT:
-            return
-        else:
+        if e.errno != errno.ENOENT:
             raise
 
 
 @contextlib.contextmanager
-def remove_path_on_error(path):
+def remove_path_on_error(path, remove=delete_if_exists):
     """Protect code that wants to operate on PATH atomically.
     Any exception will cause PATH to be removed.
 
     :param path: File to work with
+    :param remove: Optional function to remove passed path
    """

     try:
         yield
     except Exception:
         with excutils.save_and_reraise_exception():
-            delete_if_exists(path)
+            remove(path)
 
 
 def file_open(*args, **kwargs):
@ -108,3 +106,30 @@ def file_open(*args, **kwargs):
     state at all (for unit tests)
     """
     return file(*args, **kwargs)
+
+
+def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
+    """Create temporary file or use existing file.
+
+    This util is needed for creating temporary file with
+    specified content, suffix and prefix. If path is not None,
+    it will be used for writing content. If the path doesn't
+    exist it'll be created.
+
+    :param content: content for temporary file.
+    :param path: same as parameter 'dir' for mkstemp
+    :param suffix: same as parameter 'suffix' for mkstemp
+    :param prefix: same as parameter 'prefix' for mkstemp
+
+    For example: it can be used in database tests for creating
+    configuration files.
+    """
+    if path:
+        ensure_tree(path)
+
+    (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
+    try:
+        os.write(fd, content)
+    finally:
+        os.close(fd)
+    return path
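An illustrative use of the two helpers above (a sketch, not part of the commit): write a throwaway config file for a test, then remove it, ignoring a missing file.

path = write_to_tempfile('[database]\nconnection = sqlite://\n',
                         suffix='.conf')
try:
    pass  # ... exercise code that reads the config at `path` ...
finally:
    delete_if_exists(path)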
muranoapi/openstack/common/gettextutils.py
@ -1,8 +1,6 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 Red Hat, Inc.
-# All Rights Reserved.
 # Copyright 2013 IBM Corp.
+# All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@ -25,23 +23,78 @@ Usual usage in an openstack.common module:
 """
 
 import copy
+import functools
 import gettext
-import logging.handlers
+import locale
+from logging import handlers
 import os
 import re
-import UserString
 
+from babel import localedata
 import six
 
 _localedir = os.environ.get('muranoapi'.upper() + '_LOCALEDIR')
 _t = gettext.translation('muranoapi', localedir=_localedir, fallback=True)
 
+# We use separate translation catalogs for each log level, so set up a
+# mapping between the log level name and the translator. The domain
+# for the log level is project_name + "-log-" + log_level so messages
+# for each level end up in their own catalog.
+_t_log_levels = dict(
+    (level, gettext.translation('muranoapi' + '-log-' + level,
+                                localedir=_localedir,
+                                fallback=True))
+    for level in ['info', 'warning', 'error', 'critical']
+)
+
+_AVAILABLE_LANGUAGES = {}
+USE_LAZY = False
+
+
+def enable_lazy():
+    """Convenience function for configuring _() to use lazy gettext
+
+    Call this at the start of execution to enable the gettextutils._
+    function to use lazy gettext functionality. This is useful if
+    your project is importing _ directly instead of using the
+    gettextutils.install() way of importing the _ function.
+    """
+    global USE_LAZY
+    USE_LAZY = True
+
+
 def _(msg):
-    return _t.ugettext(msg)
+    if USE_LAZY:
+        return Message(msg, domain='muranoapi')
+    else:
+        if six.PY3:
+            return _t.gettext(msg)
+        return _t.ugettext(msg)
+
+
+def _log_translation(msg, level):
+    """Build a single translation of a log message
+    """
+    if USE_LAZY:
+        return Message(msg, domain='muranoapi' + '-log-' + level)
+    else:
+        translator = _t_log_levels[level]
+        if six.PY3:
+            return translator.gettext(msg)
+        return translator.ugettext(msg)
+
+# Translators for log levels.
+#
+# The abbreviated names are meant to reflect the usual use of a short
+# name like '_'. The "L" is for "log" and the other letter comes from
+# the level.
+_LI = functools.partial(_log_translation, level='info')
+_LW = functools.partial(_log_translation, level='warning')
+_LE = functools.partial(_log_translation, level='error')
+_LC = functools.partial(_log_translation, level='critical')
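A sketch of the level-specific markers defined above (the message and env_id are illustrative): each marker routes its message to its own catalog ("muranoapi-log-info", "muranoapi-log-warning", ...), keeping log translations separate from user-facing ones.

#   LOG.info(_LI('Created environment %s') % env_id)
#   LOG.warning(_LW('Environment %s is already deleted') % env_id)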
|
def install(domain, lazy=False):
|
||||||
"""Install a _() function using the given translation domain.
|
"""Install a _() function using the given translation domain.
|
||||||
|
|
||||||
Given a translation domain, install a _() function using gettext's
|
Given a translation domain, install a _() function using gettext's
|
||||||
@ -51,209 +104,371 @@ def install(domain):
|
|||||||
overriding the default localedir (e.g. /usr/share/locale) using
|
overriding the default localedir (e.g. /usr/share/locale) using
|
||||||
a translation-domain-specific environment variable (e.g.
|
a translation-domain-specific environment variable (e.g.
|
||||||
NOVA_LOCALEDIR).
|
NOVA_LOCALEDIR).
|
||||||
|
|
||||||
|
:param domain: the translation domain
|
||||||
|
:param lazy: indicates whether or not to install the lazy _() function.
|
||||||
|
The lazy _() introduces a way to do deferred translation
|
||||||
|
of messages by installing a _ that builds Message objects,
|
||||||
|
instead of strings, which can then be lazily translated into
|
||||||
|
any available locale.
|
||||||
"""
|
"""
|
||||||
gettext.install(domain,
|
if lazy:
|
||||||
localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
|
# NOTE(mrodden): Lazy gettext functionality.
|
||||||
unicode=True)
|
#
|
||||||
|
# The following introduces a deferred way to do translations on
|
||||||
|
# messages in OpenStack. We override the standard _() function
|
||||||
|
# and % (format string) operation to build Message objects that can
|
||||||
|
# later be translated when we have more information.
|
||||||
|
def _lazy_gettext(msg):
|
||||||
|
"""Create and return a Message object.
|
||||||
|
|
||||||
|
Lazy gettext function for a given domain, it is a factory method
|
||||||
|
for a project/module to get a lazy gettext function for its own
|
||||||
|
translation domain (i.e. nova, glance, cinder, etc.)
|
||||||
|
|
||||||
"""
|
Message encapsulates a string so that we can translate
|
||||||
Lazy gettext functionality.
|
it later when needed.
|
||||||
|
"""
|
||||||
|
return Message(msg, domain=domain)
|
||||||
|
|
||||||
The following is an attempt to introduce a deferred way
|
from six import moves
|
||||||
to do translations on messages in OpenStack. We attempt to
|
moves.builtins.__dict__['_'] = _lazy_gettext
|
||||||
override the standard _() function and % (format string) operation
|
else:
|
||||||
to build Message objects that can later be translated when we have
|
localedir = '%s_LOCALEDIR' % domain.upper()
|
||||||
more information. Also included is an example LogHandler that
|
if six.PY3:
|
||||||
translates Messages to an associated locale, effectively allowing
|
gettext.install(domain,
|
||||||
many logs, each with their own locale.
|
localedir=os.environ.get(localedir))
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
def get_lazy_gettext(domain):
|
|
||||||
"""Assemble and return a lazy gettext function for a given domain.
|
|
||||||
|
|
||||||
Factory method for a project/module to get a lazy gettext function
|
|
||||||
for its own translation domain (i.e. nova, glance, cinder, etc.)
|
|
||||||
"""
|
|
||||||
|
|
||||||
def _lazy_gettext(msg):
|
|
||||||
"""Create and return a Message object.
|
|
||||||
|
|
||||||
Message encapsulates a string so that we can translate it later when
|
|
||||||
needed.
|
|
||||||
"""
|
|
||||||
return Message(msg, domain)
|
|
||||||
|
|
||||||
return _lazy_gettext
|
|
||||||
|
|
||||||
|
|
||||||
class Message(UserString.UserString, object):
|
|
||||||
"""Class used to encapsulate translatable messages."""
|
|
||||||
def __init__(self, msg, domain):
|
|
||||||
# _msg is the gettext msgid and should never change
|
|
||||||
self._msg = msg
|
|
||||||
self._left_extra_msg = ''
|
|
||||||
self._right_extra_msg = ''
|
|
||||||
self.params = None
|
|
||||||
self.locale = None
|
|
||||||
self.domain = domain
|
|
||||||
|
|
||||||
@property
|
|
||||||
def data(self):
|
|
||||||
# NOTE(mrodden): this should always resolve to a unicode string
|
|
||||||
# that best represents the state of the message currently
|
|
||||||
|
|
||||||
localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
|
|
||||||
if self.locale:
|
|
||||||
lang = gettext.translation(self.domain,
|
|
||||||
localedir=localedir,
|
|
||||||
languages=[self.locale],
|
|
||||||
fallback=True)
|
|
||||||
else:
|
else:
|
||||||
# use system locale for translations
|
gettext.install(domain,
|
||||||
lang = gettext.translation(self.domain,
|
localedir=os.environ.get(localedir),
|
||||||
localedir=localedir,
|
unicode=True)
|
||||||
fallback=True)
|
|
||||||
|
|
||||||
full_msg = (self._left_extra_msg +
|
|
||||||
lang.ugettext(self._msg) +
|
|
||||||
self._right_extra_msg)
|
|
||||||
|
|
||||||
if self.params is not None:
|
class Message(six.text_type):
|
||||||
full_msg = full_msg % self.params
|
"""A Message object is a unicode object that can be translated.
|
||||||
|
|
||||||
return six.text_type(full_msg)
|
Translation of Message is done explicitly using the translate() method.
|
||||||
|
For all non-translation intents and purposes, a Message is simply unicode,
|
||||||
|
and can be treated as such.
|
||||||
|
"""
|
||||||
|
|
||||||
def _save_dictionary_parameter(self, dict_param):
|
def __new__(cls, msgid, msgtext=None, params=None,
|
||||||
full_msg = self.data
|
domain='muranoapi', *args):
|
||||||
# look for %(blah) fields in string;
|
"""Create a new Message object.
|
||||||
# ignore %% and deal with the
|
|
||||||
# case where % is first character on the line
|
|
||||||
keys = re.findall('(?:[^%]|^)%\((\w*)\)[a-z]', full_msg)
|
|
||||||
|
|
||||||
# if we don't find any %(blah) blocks but have a %s
|
In order for translation to work gettext requires a message ID, this
|
||||||
if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
|
msgid will be used as the base unicode text. It is also possible
|
||||||
# apparently the full dictionary is the parameter
|
for the msgid and the base unicode text to be different by passing
|
||||||
params = copy.deepcopy(dict_param)
|
the msgtext parameter.
|
||||||
|
"""
|
||||||
|
# If the base msgtext is not given, we use the default translation
|
||||||
|
# of the msgid (which is in English) just in case the system locale is
|
||||||
|
# not English, so that the base text will be in that locale by default.
|
||||||
|
if not msgtext:
|
||||||
|
msgtext = Message._translate_msgid(msgid, domain)
|
||||||
|
# We want to initialize the parent unicode with the actual object that
|
||||||
|
# would have been plain unicode if 'Message' was not enabled.
|
||||||
|
msg = super(Message, cls).__new__(cls, msgtext)
|
||||||
|
msg.msgid = msgid
|
||||||
|
msg.domain = domain
|
||||||
|
msg.params = params
|
||||||
|
return msg
|
||||||
|
|
||||||
|
def translate(self, desired_locale=None):
|
||||||
|
"""Translate this message to the desired locale.
|
||||||
|
|
||||||
|
:param desired_locale: The desired locale to translate the message to,
|
||||||
|
if no locale is provided the message will be
|
||||||
|
translated to the system's default locale.
|
||||||
|
|
||||||
|
:returns: the translated message in unicode
|
||||||
|
"""
|
||||||
|
|
||||||
|
translated_message = Message._translate_msgid(self.msgid,
|
||||||
|
self.domain,
|
||||||
|
desired_locale)
|
||||||
|
if self.params is None:
|
||||||
|
# No need for more translation
|
||||||
|
return translated_message
|
||||||
|
|
||||||
|
# This Message object may have been formatted with one or more
|
||||||
|
# Message objects as substitution arguments, given either as a single
|
||||||
|
# argument, part of a tuple, or as one or more values in a dictionary.
|
||||||
|
# When translating this Message we need to translate those Messages too
|
||||||
|
translated_params = _translate_args(self.params, desired_locale)
|
||||||
|
|
||||||
|
translated_message = translated_message % translated_params
|
||||||
|
|
||||||
|
return translated_message
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _translate_msgid(msgid, domain, desired_locale=None):
|
||||||
|
if not desired_locale:
|
||||||
|
system_locale = locale.getdefaultlocale()
|
||||||
|
# If the system locale is not available to the runtime use English
|
||||||
|
if not system_locale[0]:
|
||||||
|
desired_locale = 'en_US'
|
||||||
|
else:
|
||||||
|
desired_locale = system_locale[0]
|
||||||
|
|
||||||
|
locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
|
||||||
|
lang = gettext.translation(domain,
|
||||||
|
localedir=locale_dir,
|
||||||
|
languages=[desired_locale],
|
||||||
|
fallback=True)
|
||||||
|
if six.PY3:
|
||||||
|
translator = lang.gettext
|
||||||
|
else:
|
||||||
|
translator = lang.ugettext
|
||||||
|
|
||||||
|
translated_message = translator(msgid)
|
||||||
|
return translated_message
|
||||||
|
|
||||||
|
def __mod__(self, other):
|
||||||
|
# When we mod a Message we want the actual operation to be performed
|
||||||
|
# by the parent class (i.e. unicode()), the only thing we do here is
|
||||||
|
# save the original msgid and the parameters in case of a translation
|
||||||
|
params = self._sanitize_mod_params(other)
|
||||||
|
unicode_mod = super(Message, self).__mod__(params)
|
||||||
|
modded = Message(self.msgid,
|
||||||
|
msgtext=unicode_mod,
|
||||||
|
params=params,
|
||||||
|
domain=self.domain)
|
||||||
|
return modded
|
||||||
|
|
||||||
|
def _sanitize_mod_params(self, other):
|
||||||
|
"""Sanitize the object being modded with this Message.
|
||||||
|
|
||||||
|
- Add support for modding 'None' so translation supports it
|
||||||
|
- Trim the modded object, which can be a large dictionary, to only
|
||||||
|
those keys that would actually be used in a translation
|
||||||
|
- Snapshot the object being modded, in case the message is
|
||||||
|
translated, it will be used as it was when the Message was created
|
||||||
|
"""
|
||||||
|
if other is None:
|
||||||
|
params = (other,)
|
||||||
|
elif isinstance(other, dict):
|
||||||
|
params = self._trim_dictionary_parameters(other)
|
||||||
|
else:
|
||||||
|
params = self._copy_param(other)
|
||||||
|
return params
|
||||||
|
|
||||||
|
def _trim_dictionary_parameters(self, dict_param):
|
||||||
|
"""Return a dict that only has matching entries in the msgid."""
|
||||||
|
# NOTE(luisg): Here we trim down the dictionary passed as parameters
|
||||||
|
# to avoid carrying a lot of unnecessary weight around in the message
|
||||||
|
# object, for example if someone passes in Message() % locals() but
|
||||||
|
# only some params are used, and additionally we prevent errors for
|
||||||
|
# non-deepcopyable objects by unicoding() them.
|
||||||
|
|
||||||
|
# Look for %(param) keys in msgid;
|
||||||
|
# Skip %% and deal with the case where % is first character on the line
|
||||||
|
keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
|
||||||
|
|
||||||
|
# If we don't find any %(param) keys but have a %s
|
||||||
|
if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
|
||||||
|
# Apparently the full dictionary is the parameter
|
||||||
|
params = self._copy_param(dict_param)
|
||||||
else:
|
else:
|
||||||
params = {}
|
params = {}
|
||||||
|
# Save our existing parameters as defaults to protect
|
||||||
|
# ourselves from losing values if we are called through an
|
||||||
|
# (erroneous) chain that builds a valid Message with
|
||||||
|
# arguments, and then does something like "msg % kwds"
|
||||||
|
# where kwds is an empty dictionary.
|
||||||
|
src = {}
|
||||||
|
if isinstance(self.params, dict):
|
||||||
|
src.update(self.params)
|
||||||
|
src.update(dict_param)
|
||||||
for key in keys:
|
for key in keys:
|
||||||
try:
|
params[key] = self._copy_param(src[key])
|
||||||
params[key] = copy.deepcopy(dict_param[key])
|
|
||||||
except TypeError:
|
|
||||||
# cast uncopyable thing to unicode string
|
|
||||||
params[key] = unicode(dict_param[key])
|
|
||||||
|
|
||||||
return params
|
return params
|
||||||
|
|
||||||
def _save_parameters(self, other):
|
def _copy_param(self, param):
|
||||||
# we check for None later to see if
|
try:
|
||||||
# we actually have parameters to inject,
|
return copy.deepcopy(param)
|
||||||
# so encapsulate if our parameter is actually None
|
except TypeError:
|
||||||
if other is None:
|
# Fallback to casting to unicode this will handle the
|
||||||
self.params = (other, )
|
# python code-like objects that can't be deep-copied
|
||||||
elif isinstance(other, dict):
|
return six.text_type(param)
|
||||||
self.params = self._save_dictionary_parameter(other)
|
|
||||||
else:
|
|
||||||
# fallback to casting to unicode,
|
|
||||||
# this will handle the problematic python code-like
|
|
||||||
# objects that cannot be deep-copied
|
|
||||||
try:
|
|
||||||
self.params = copy.deepcopy(other)
|
|
||||||
except TypeError:
|
|
||||||
self.params = unicode(other)
|
|
||||||
|
|
||||||
return self
|
|
||||||
|
|
||||||
# overrides to be more string-like
|
|
||||||
def __unicode__(self):
|
|
||||||
return self.data
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
return self.data.encode('utf-8')
|
|
||||||
|
|
||||||
def __getstate__(self):
|
|
||||||
to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
|
|
||||||
'domain', 'params', 'locale']
|
|
||||||
new_dict = self.__dict__.fromkeys(to_copy)
|
|
||||||
for attr in to_copy:
|
|
||||||
new_dict[attr] = copy.deepcopy(self.__dict__[attr])
|
|
||||||
|
|
||||||
return new_dict
|
|
||||||
|
|
||||||
def __setstate__(self, state):
|
|
||||||
for (k, v) in state.items():
|
|
||||||
setattr(self, k, v)
|
|
||||||
|
|
||||||
# operator overloads
|
|
||||||
def __add__(self, other):
|
def __add__(self, other):
|
||||||
copied = copy.deepcopy(self)
|
msg = _('Message objects do not support addition.')
|
||||||
copied._right_extra_msg += other.__str__()
|
raise TypeError(msg)
|
||||||
return copied
|
|
||||||
|
|
||||||
def __radd__(self, other):
|
def __radd__(self, other):
|
||||||
copied = copy.deepcopy(self)
|
return self.__add__(other)
|
||||||
copied._left_extra_msg += other.__str__()
|
|
||||||
return copied
|
|
||||||
|
|
||||||
def __mod__(self, other):
|
def __str__(self):
|
||||||
# do a format string to catch and raise
|
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
|
||||||
# any possible KeyErrors from missing parameters
|
# and it expects specifically a UnicodeError in order to proceed.
|
||||||
self.data % other
|
msg = _('Message objects do not support str() because they may '
|
||||||
copied = copy.deepcopy(self)
|
'contain non-ascii characters. '
|
||||||
return copied._save_parameters(other)
|
'Please use unicode() or translate() instead.')
|
||||||
|
raise UnicodeError(msg)
|
||||||
def __mul__(self, other):
|
|
||||||
return self.data * other
|
|
||||||
|
|
||||||
def __rmul__(self, other):
|
|
||||||
return other * self.data
|
|
||||||
|
|
||||||
def __getitem__(self, key):
|
|
||||||
return self.data[key]
|
|
||||||
|
|
||||||
def __getslice__(self, start, end):
|
|
||||||
return self.data.__getslice__(start, end)
|
|
||||||
|
|
||||||
def __getattribute__(self, name):
|
|
||||||
# NOTE(mrodden): handle lossy operations that we can't deal with yet
|
|
||||||
# These override the UserString implementation, since UserString
|
|
||||||
# uses our __class__ attribute to try and build a new message
|
|
||||||
# after running the inner data string through the operation.
|
|
||||||
# At that point, we have lost the gettext message id and can just
|
|
||||||
# safely resolve to a string instead.
|
|
||||||
ops = ['capitalize', 'center', 'decode', 'encode',
|
|
||||||
'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
|
|
||||||
'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
|
|
||||||
if name in ops:
|
|
||||||
return getattr(self.data, name)
|
|
||||||
else:
|
|
||||||
return UserString.UserString.__getattribute__(self, name)
|
|
||||||
|
|
||||||
|
|
||||||
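Taken together, the replacement methods make formatting non-destructive: `__mod__` returns a fresh Message that keeps the original msgid plus a trimmed snapshot of its parameters, so a single record can later be rendered in several locales. A minimal sketch of that flow (the volume id and locale are illustrative; with no catalog installed, `fallback=True` simply returns the English text):

    from muranoapi.openstack.common import gettextutils

    msg = gettextutils.Message('Snapshot of volume %(volume)s failed')
    # 'extra' is not referenced by the msgid, so _trim_dictionary_parameters
    # drops it; only {'volume': 'vol-0001'} travels with the new Message.
    msg = msg % {'volume': 'vol-0001', 'extra': 'discarded'}

    text_default = msg.translate()    # system default locale
    text_es = msg.translate('es_ES')  # re-rendered from msgid + saved params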
-class LocaleHandler(logging.Handler):
-    """Handler that can have a locale associated to translate Messages.
-
-    A quick example of how to utilize the Message class above.
-    LocaleHandler takes a locale and a target logging.Handler object
-    to forward LogRecord objects to after translating the internal Message.
+def get_available_languages(domain):
+    """Lists the available languages for the given translation domain.
+
+    :param domain: the domain to get languages for
+    """
+    if domain in _AVAILABLE_LANGUAGES:
+        return copy.copy(_AVAILABLE_LANGUAGES[domain])
+
+    localedir = '%s_LOCALEDIR' % domain.upper()
+    find = lambda x: gettext.find(domain,
+                                  localedir=os.environ.get(localedir),
+                                  languages=[x])
+
+    # NOTE(mrodden): en_US should always be available (and first in case
+    # order matters) since our in-line message strings are en_US
+    language_list = ['en_US']
+    # NOTE(luisg): Babel <1.0 used a function called list(), which was
+    # renamed to locale_identifiers() in >=1.0, the requirements master list
+    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
+    # this check when the master list updates to >=1.0, and update all projects
+    list_identifiers = (getattr(localedata, 'list', None) or
+                        getattr(localedata, 'locale_identifiers'))
+    locale_identifiers = list_identifiers()
+
+    for i in locale_identifiers:
+        if find(i) is not None:
+            language_list.append(i)
+
+    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
+    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
+    # are perfectly legitimate locales:
+    #     https://github.com/mitsuhiko/babel/issues/37
+    # In Babel 1.3 they fixed the bug and they support these locales, but
+    # they are still not explicitly "listed" by locale_identifiers().
+    # That is why we add the locales here explicitly if necessary so that
+    # they are listed as supported.
+    aliases = {'zh': 'zh_CN',
+               'zh_Hant_HK': 'zh_HK',
+               'zh_Hant': 'zh_TW',
+               'fil': 'tl_PH'}
+    for (locale, alias) in six.iteritems(aliases):
+        if locale in language_list and alias not in language_list:
+            language_list.append(alias)
+
+    _AVAILABLE_LANGUAGES[domain] = language_list
+    return copy.copy(language_list)
+
+
+def translate(obj, desired_locale=None):
+    """Gets the translated unicode representation of the given object.
+
+    If the object is not translatable it is returned as-is.
+    If the locale is None the object is translated to the system locale.
+
+    :param obj: the object to translate
+    :param desired_locale: the locale to translate the message to, if None the
+                           default system locale will be used
+    :returns: the translated object in unicode, or the original object if
+              it could not be translated
+    """
+    message = obj
+    if not isinstance(message, Message):
+        # If the object to translate is not already translatable,
+        # let's first get its unicode representation
+        message = six.text_type(obj)
+    if isinstance(message, Message):
+        # Even after unicoding() we still need to check if we are
+        # running with translatable unicode before translating
+        return message.translate(desired_locale)
+    return obj
+
+
+def _translate_args(args, desired_locale=None):
+    """Translates all the translatable elements of the given arguments object.
+
+    This method is used for translating the translatable values in method
+    arguments which include values of tuples or dictionaries.
+    If the object is not a tuple or a dictionary the object itself is
+    translated if it is translatable.
+
+    If the locale is None the object is translated to the system locale.
+
+    :param args: the args to translate
+    :param desired_locale: the locale to translate the args to, if None the
+                           default system locale will be used
+    :returns: a new args object with the translated contents of the original
+    """
+    if isinstance(args, tuple):
+        return tuple(translate(v, desired_locale) for v in args)
+    if isinstance(args, dict):
+        translated_dict = {}
+        for (k, v) in six.iteritems(args):
+            translated_v = translate(v, desired_locale)
+            translated_dict[k] = translated_v
+        return translated_dict
+    return translate(args, desired_locale)
+
+
+class TranslationHandler(handlers.MemoryHandler):
+    """Handler that translates records before logging them.
+
+    The TranslationHandler takes a locale and a target logging.Handler object
+    to forward LogRecord objects to after translating them. This handler
+    depends on Message objects being logged, instead of regular strings.
+
+    The handler can be configured declaratively in the logging.conf as follows:
+
+        [handlers]
+        keys = translatedlog, translator
+
+        [handler_translatedlog]
+        class = handlers.WatchedFileHandler
+        args = ('/var/log/api-localized.log',)
+        formatter = context
+
+        [handler_translator]
+        class = openstack.common.log.TranslationHandler
+        target = translatedlog
+        args = ('zh_CN',)
+
+    If the specified locale is not available in the system, the handler will
+    log in the default locale.
     """

-    def __init__(self, locale, target):
-        """Initialize a LocaleHandler
+    def __init__(self, locale=None, target=None):
+        """Initialize a TranslationHandler

         :param locale: locale to use for translating messages
         :param target: logging.Handler object to forward
                        LogRecord objects to after translation
         """
-        logging.Handler.__init__(self)
+        # NOTE(luisg): In order to allow this handler to be a wrapper for
+        # other handlers, such as a FileHandler, and still be able to
+        # configure it using logging.conf, this handler has to extend
+        # MemoryHandler because only the MemoryHandlers' logging.conf
+        # parsing is implemented such that it accepts a target handler.
+        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
         self.locale = locale
-        self.target = target
+
+    def setFormatter(self, fmt):
+        self.target.setFormatter(fmt)

     def emit(self, record):
-        if isinstance(record.msg, Message):
-            # set the locale and resolve to a string
-            record.msg.locale = self.locale
+        # We save the message from the original record to restore it
+        # after translation, so other handlers are not affected by this
+        original_msg = record.msg
+        original_args = record.args
+
+        try:
+            self._translate_and_log_record(record)
+        finally:
+            record.msg = original_msg
+            record.args = original_args
+
+    def _translate_and_log_record(self, record):
+        record.msg = translate(record.msg, self.locale)
+
+        # In addition to translating the message, we also need to translate
+        # arguments that were passed to the log method that were not part
+        # of the main message e.g., log.info(_('Some message %s'), this_one))
+        record.args = _translate_args(record.args, self.locale)

         self.target.emit(record)
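For completeness, the same wiring can be done in code rather than through logging.conf; a sketch (the file path and locale are illustrative):

    import logging

    from muranoapi.openstack.common import gettextutils

    target = logging.FileHandler('/tmp/api-zh.log')
    handler = gettextutils.TranslationHandler(locale='zh_CN', target=target)

    logger = logging.getLogger('demo')
    logger.addHandler(handler)
    # emit() translates the record, forwards it to the target, then restores
    # the original msg/args so sibling handlers are unaffected.
    logger.error(gettextutils.Message('Connection to %(host)s lost')
                 % {'host': '192.0.2.1'})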
muranoapi/openstack/common/importutils.py

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -60,6 +58,13 @@ def import_module(import_str):
     return sys.modules[import_str]


+def import_versioned_module(version, submodule=None):
+    module = 'muranoapi.v%s' % version
+    if submodule:
+        module = '.'.join((module, submodule))
+    return import_module(module)
+
+
 def try_import(import_str, default=None):
     """Try to import a module and if it fails return default."""
     try:
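The new `import_versioned_module` helper just builds a dotted path from a version number, and `try_import` (whose body is elided above) is what jsonutils now uses to make netaddr optional. A usage sketch ('muranoapi.v1.client' is assumed to exist purely for illustration):

    from muranoapi.openstack.common import importutils

    # Imports and returns the 'muranoapi.v1.client' module object
    client = importutils.import_versioned_module(1, 'client')

    # Returns None (the default) instead of raising ImportError when absent
    netaddr = importutils.try_import('netaddr')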
muranoapi/openstack/common/jsonutils.py

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2011 Justin Santa Barbara
@@ -38,14 +36,15 @@ import functools
 import inspect
 import itertools
 import json
-import types
-import xmlrpclib
-
-import netaddr
 import six
+import six.moves.xmlrpc_client as xmlrpclib

+from muranoapi.openstack.common import gettextutils
+from muranoapi.openstack.common import importutils
 from muranoapi.openstack.common import timeutils

+netaddr = importutils.try_import("netaddr")
+
 _nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                      inspect.isfunction, inspect.isgeneratorfunction,
@@ -53,7 +52,8 @@ _nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                      inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                      inspect.isabstract]

-_simple_types = (types.NoneType, int, basestring, bool, float, long)
+_simple_types = (six.string_types + six.integer_types
+                 + (type(None), bool, float))


 def to_primitive(value, convert_instances=False, convert_datetime=True,
@@ -118,7 +118,7 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
                          level=level,
                          max_depth=max_depth)
     if isinstance(value, dict):
-        return dict((k, recursive(v)) for k, v in value.iteritems())
+        return dict((k, recursive(v)) for k, v in six.iteritems(value))
     elif isinstance(value, (list, tuple)):
         return [recursive(lv) for lv in value]

@@ -130,6 +130,8 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,

     if convert_datetime and isinstance(value, datetime.datetime):
         return timeutils.strtime(value)
+    elif isinstance(value, gettextutils.Message):
+        return value.data
     elif hasattr(value, 'iteritems'):
         return recursive(dict(value.iteritems()), level=level + 1)
     elif hasattr(value, '__iter__'):
@@ -138,7 +140,7 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
         # Likely an instance of something. Watch for cycles.
         # Ignore class member vars.
         return recursive(value.__dict__, level=level + 1)
-    elif isinstance(value, netaddr.IPAddress):
+    elif netaddr and isinstance(value, netaddr.IPAddress):
         return six.text_type(value)
     else:
         if any(test(value) for test in _nasty_type_tests):
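With netaddr resolved through `try_import`, `to_primitive` now degrades gracefully when the library is missing, and the new branch serializes lazy-translation Messages via their underlying data. A sketch of the instance-conversion path (the class below is hypothetical):

    from muranoapi.openstack.common import jsonutils

    class Flavor(object):
        def __init__(self):
            self.name = 'm1.small'
            self.ram = 2048

    # Falls through the dict/list/datetime checks and, because
    # convert_instances=True, recurses into the instance's __dict__:
    jsonutils.to_primitive(Flavor(), convert_instances=True)
    # => {'name': 'm1.small', 'ram': 2048}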
muranoapi/openstack/common/local.py

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -15,16 +13,15 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-"""Greenthread local storage of variables using weak references"""
+"""Local storage of variables using weak references"""

+import threading
 import weakref

-from eventlet import corolocal
-

-class WeakLocal(corolocal.local):
+class WeakLocal(threading.local):
     def __getattribute__(self, attr):
-        rval = corolocal.local.__getattribute__(self, attr)
+        rval = super(WeakLocal, self).__getattribute__(attr)
         if rval:
             # NOTE(mikal): this bit is confusing. What is stored is a weak
             # reference, not the value itself. We therefore need to lookup
@@ -34,7 +31,7 @@ class WeakLocal(corolocal.local):

     def __setattr__(self, attr, value):
         value = weakref.ref(value)
-        return corolocal.local.__setattr__(self, attr, value)
+        return super(WeakLocal, self).__setattr__(attr, value)


 # NOTE(mikal): the name "store" should be deprecated in the future
@@ -45,4 +42,4 @@ store = WeakLocal()
 # "strong" store will hold a reference to the object so that it never falls out
 # of scope.
 weak_store = WeakLocal()
-strong_store = corolocal.local
+strong_store = threading.local()
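The swap from eventlet's corolocal to threading.local keeps the weak-reference semantics intact: `store` drops values once nothing else references them, while `strong_store` pins them. A sketch (names below are illustrative):

    from muranoapi.openstack.common import local

    class Context(object):
        pass

    ctx = Context()
    local.store.context = ctx          # stored internally as weakref.ref(ctx)
    assert local.store.context is ctx  # dereferenced on attribute access

    local.strong_store.request_id = 'req-1'  # plain thread-local, pinned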
muranoapi/openstack/common/lockutils.py

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -15,20 +13,23 @@
 # License for the specific language governing permissions and limitations
 # under the License.

 import contextlib
 import errno
+import fcntl
 import functools
 import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import threading
 import time
 import weakref

-from eventlet import semaphore
 from oslo.config import cfg

 from muranoapi.openstack.common import fileutils
-from muranoapi.openstack.common.gettextutils import _  # noqa
-from muranoapi.openstack.common import local
+from muranoapi.openstack.common.gettextutils import _, _LE, _LI
 from muranoapi.openstack.common import log as logging


@@ -39,6 +40,7 @@ util_opts = [
     cfg.BoolOpt('disable_process_locking', default=False,
                 help='Whether to disable inter-process locks'),
     cfg.StrOpt('lock_path',
+               default=os.environ.get("MURANOAPI_LOCK_PATH"),
                help=('Directory to use for lock files.'))
 ]

@@ -51,7 +53,7 @@ def set_defaults(lock_path):
     cfg.set_defaults(util_opts, lock_path=lock_path)


-class _InterProcessLock(object):
+class _FileLock(object):
     """Lock implementation which allows multiple locks, working around
     issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
     not require any cleanup. Since the lock is always held on a file
@@ -73,7 +75,13 @@ class _InterProcessLock(object):
         self.lockfile = None
         self.fname = name

-    def __enter__(self):
+    def acquire(self):
+        basedir = os.path.dirname(self.fname)
+
+        if not os.path.exists(basedir):
+            fileutils.ensure_tree(basedir)
+            LOG.info(_LI('Created lock path: %s'), basedir)
+
         self.lockfile = open(self.fname, 'w')

         while True:
@@ -83,23 +91,41 @@
                 # Also upon reading the MSDN docs for locking(), it seems
                 # to have a laughable 10 attempts "blocking" mechanism.
                 self.trylock()
-                return self
+                LOG.debug('Got file lock "%s"', self.fname)
+                return True
             except IOError as e:
                 if e.errno in (errno.EACCES, errno.EAGAIN):
                     # external locks synchronise things like iptables
                     # updates - give it some time to prevent busy spinning
                     time.sleep(0.01)
                 else:
-                    raise
+                    raise threading.ThreadError(_("Unable to acquire lock on"
+                                                  " `%(filename)s` due to"
+                                                  " %(exception)s") %
+                                                {
+                                                    'filename': self.fname,
+                                                    'exception': e,
+                                                })

-    def __exit__(self, exc_type, exc_val, exc_tb):
+    def __enter__(self):
+        self.acquire()
+        return self
+
+    def release(self):
         try:
             self.unlock()
             self.lockfile.close()
+            LOG.debug('Released file lock "%s"', self.fname)
         except IOError:
-            LOG.exception(_("Could not release the acquired lock `%s`"),
+            LOG.exception(_LE("Could not release the acquired lock `%s`"),
                           self.fname)

+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.release()
+
+    def exists(self):
+        return os.path.exists(self.fname)
+
     def trylock(self):
         raise NotImplementedError()

@@ -107,7 +133,7 @@
         raise NotImplementedError()


-class _WindowsLock(_InterProcessLock):
+class _WindowsLock(_FileLock):
     def trylock(self):
         msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

@@ -115,7 +141,7 @@
         msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)


-class _PosixLock(_InterProcessLock):
+class _FcntlLock(_FileLock):
     def trylock(self):
         fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

@@ -123,94 +149,144 @@ class _PosixLock(_InterProcessLock):
         fcntl.lockf(self.lockfile, fcntl.LOCK_UN)

+class _PosixLock(object):
+    def __init__(self, name):
+        # Hash the name because it's not valid to have POSIX semaphore
+        # names with things like / in them. Then use base64 to encode
+        # the digest() instead taking the hexdigest() because the
+        # result is shorter and most systems can't have shm sempahore
+        # names longer than 31 characters.
+        h = hashlib.sha1()
+        h.update(name.encode('ascii'))
+        self.name = str((b'/' + base64.urlsafe_b64encode(
+            h.digest())).decode('ascii'))
+
+    def acquire(self, timeout=None):
+        self.semaphore = posix_ipc.Semaphore(self.name,
+                                             flags=posix_ipc.O_CREAT,
+                                             initial_value=1)
+        self.semaphore.acquire(timeout)
+        return self
+
+    def __enter__(self):
+        self.acquire()
+        return self
+
+    def release(self):
+        self.semaphore.release()
+        self.semaphore.close()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.release()
+
+    def exists(self):
+        try:
+            semaphore = posix_ipc.Semaphore(self.name)
+        except posix_ipc.ExistentialError:
+            return False
+        else:
+            semaphore.close()
+        return True
+
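The 31-character limit cited in the comment is why the digest is base64-encoded rather than hex: a SHA-1 digest is 20 bytes, urlsafe_b64encode turns that into 28 characters, and the leading '/' makes the semaphore name a fixed 29 characters. A standalone sketch of the derivation:

    import base64
    import hashlib

    h = hashlib.sha1()
    h.update('mylock'.encode('ascii'))
    name = str((b'/' + base64.urlsafe_b64encode(h.digest())).decode('ascii'))
    assert len(name) == 29  # always under the 31-character ceiling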
 if os.name == 'nt':
     import msvcrt
     InterProcessLock = _WindowsLock
+    FileLock = _WindowsLock
 else:
-    import fcntl
+    import base64
+    import hashlib
+    import posix_ipc
     InterProcessLock = _PosixLock
+    FileLock = _FcntlLock

 _semaphores = weakref.WeakValueDictionary()
+_semaphores_lock = threading.Lock()
+
+
+def _get_lock_path(name, lock_file_prefix, lock_path=None):
+    # NOTE(mikal): the lock name cannot contain directory
+    # separators
+    name = name.replace(os.sep, '_')
+    if lock_file_prefix:
+        sep = '' if lock_file_prefix.endswith('-') else '-'
+        name = '%s%s%s' % (lock_file_prefix, sep, name)
+
+    local_lock_path = lock_path or CONF.lock_path
+
+    if not local_lock_path:
+        # NOTE(bnemec): Create a fake lock path for posix locks so we don't
+        # unnecessarily raise the RequiredOptError below.
+        if InterProcessLock is not _PosixLock:
+            raise cfg.RequiredOptError('lock_path')
+        local_lock_path = 'posixlock:/'
+
+    return os.path.join(local_lock_path, name)
+
+
+def external_lock(name, lock_file_prefix=None, lock_path=None):
+    LOG.debug('Attempting to grab external lock "%(lock)s"',
+              {'lock': name})
+
+    lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
+
+    # NOTE(bnemec): If an explicit lock_path was passed to us then it
+    # means the caller is relying on file-based locking behavior, so
+    # we can't use posix locks for those calls.
+    if lock_path:
+        return FileLock(lock_file_path)
+    return InterProcessLock(lock_file_path)
+
+
+def remove_external_lock_file(name, lock_file_prefix=None):
+    """Remove a external lock file when it's not used anymore
+    This will be helpful when we have a lot of lock files
+    """
+    with internal_lock(name):
+        lock_file_path = _get_lock_path(name, lock_file_prefix)
+        try:
+            os.remove(lock_file_path)
+        except OSError:
+            LOG.info(_LI('Failed to remove file %(file)s'),
+                     {'file': lock_file_path})
+
+
+def internal_lock(name):
+    with _semaphores_lock:
+        try:
+            sem = _semaphores[name]
+        except KeyError:
+            sem = threading.Semaphore()
+            _semaphores[name] = sem
+
+    LOG.debug('Got semaphore "%(lock)s"', {'lock': name})
+    return sem
+
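Put together, `_get_lock_path` flattens the lock name and applies the prefix before joining it with the configured directory; a sketch of the resulting path (all values illustrative):

    # A prefix without a trailing '-' gets one inserted, and os.sep in the
    # name is replaced by '_' first, so with CONF.lock_path='/var/lock/murano':
    #
    #     _get_lock_path('image/download', 'murano')
    #     => '/var/lock/murano/murano-image_download'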
 @contextlib.contextmanager
 def lock(name, lock_file_prefix=None, external=False, lock_path=None):
     """Context based lock

-    This function yields a `semaphore.Semaphore` instance unless external is
+    This function yields a `threading.Semaphore` instance (if we don't use
+    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
     True, in which case, it'll yield an InterProcessLock instance.

     :param lock_file_prefix: The lock_file_prefix argument is used to provide
     lock files on disk with a meaningful prefix.

     :param external: The external keyword argument denotes whether this lock
     should work across multiple processes. This means that if two different
     workers both run a a method decorated with @synchronized('mylock',
     external=True), only one of them will execute at a time.
-
-    :param lock_path: The lock_path keyword argument is used to specify a
-    special location for external lock files to live. If nothing is set, then
-    CONF.lock_path is used as a default.
     """
-    # NOTE(soren): If we ever go natively threaded, this will be racy.
-    #              See http://stackoverflow.com/questions/5390569/dyn
-    #              amically-allocating-and-destroying-mutexes
-    sem = _semaphores.get(name, semaphore.Semaphore())
-    if name not in _semaphores:
-        # this check is not racy - we're already holding ref locally
-        # so GC won't remove the item and there was no IO switch
-        # (only valid in greenthreads)
-        _semaphores[name] = sem
-
-    with sem:
-        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
-
-        # NOTE(mikal): I know this looks odd
-        if not hasattr(local.strong_store, 'locks_held'):
-            local.strong_store.locks_held = []
-        local.strong_store.locks_held.append(name)
-
-        try:
-            if external and not CONF.disable_process_locking:
-                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
-                          {'lock': name})
-
-                # We need a copy of lock_path because it is non-local
-                local_lock_path = lock_path or CONF.lock_path
-                if not local_lock_path:
-                    raise cfg.RequiredOptError('lock_path')
-
-                if not os.path.exists(local_lock_path):
-                    fileutils.ensure_tree(local_lock_path)
-                    LOG.info(_('Created lock path: %s'), local_lock_path)
-
-                def add_prefix(name, prefix):
-                    if not prefix:
-                        return name
-                    sep = '' if prefix.endswith('-') else '-'
-                    return '%s%s%s' % (prefix, sep, name)
-
-                # NOTE(mikal): the lock name cannot contain directory
-                # separators
-                lock_file_name = add_prefix(name.replace(os.sep, '_'),
-                                            lock_file_prefix)
-
-                lock_file_path = os.path.join(local_lock_path, lock_file_name)
-
-                try:
-                    lock = InterProcessLock(lock_file_path)
-                    with lock as lock:
-                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
-                                  {'lock': name, 'path': lock_file_path})
-                        yield lock
-                finally:
-                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
-                              {'lock': name, 'path': lock_file_path})
-            else:
-                yield sem
-
-        finally:
-            local.strong_store.locks_held.remove(name)
+    int_lock = internal_lock(name)
+    with int_lock:
+        if external and not CONF.disable_process_locking:
+            ext_lock = external_lock(name, lock_file_prefix, lock_path)
+            with ext_lock:
+                yield ext_lock
+        else:
+            yield int_lock
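The rewritten `lock()` is now just a composition of the two primitives above, and the caller-facing contract is unchanged; a usage sketch (the guarded function is hypothetical):

    from muranoapi.openstack.common import lockutils

    def rebuild_index():  # hypothetical critical section
        pass

    # Serialized across threads via the internal semaphore, and across
    # processes via a file lock or POSIX semaphore because external=True:
    with lockutils.lock('index', lock_file_prefix='murano-', external=True):
        rebuild_index()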
 def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
@@ -240,13 +316,14 @@ def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
     def wrap(f):
         @functools.wraps(f)
         def inner(*args, **kwargs):
-            with lock(name, lock_file_prefix, external, lock_path):
-                LOG.debug(_('Got semaphore / lock "%(function)s"'),
-                          {'function': f.__name__})
-                return f(*args, **kwargs)
-
-            LOG.debug(_('Semaphore / lock released "%(function)s"'),
-                      {'function': f.__name__})
+            try:
+                with lock(name, lock_file_prefix, external, lock_path):
+                    LOG.debug('Got semaphore / lock "%(function)s"',
+                              {'function': f.__name__})
+                    return f(*args, **kwargs)
+            finally:
+                LOG.debug('Semaphore / lock released "%(function)s"',
+                          {'function': f.__name__})
         return inner
     return wrap

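The try/finally restructuring matters because the old code placed the "released" debug line after a `return`, so it was never executed; it is now emitted even when the wrapped function raises. Decorator usage is unchanged (sketch, hypothetical function):

    @lockutils.synchronized('report-generation', external=True)
    def build_report():
        ...  # only one worker at a time gets here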
@@ -274,3 +351,27 @@ def synchronized_with_prefix(lock_file_prefix):
     """

     return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
+
+
+def main(argv):
+    """Create a dir for locks and pass it to command from arguments
+
+    If you run this:
+    python -m openstack.common.lockutils python setup.py testr <etc>
+
+    a temporary directory will be created for all your locks and passed to all
+    your tests in an environment variable. The temporary dir will be deleted
+    afterwards and the return value will be preserved.
+    """
+
+    lock_dir = tempfile.mkdtemp()
+    os.environ["MURANOAPI_LOCK_PATH"] = lock_dir
+    try:
+        ret_val = subprocess.call(argv[1:])
+    finally:
+        shutil.rmtree(lock_dir, ignore_errors=True)
+    return ret_val
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv))
muranoapi/openstack/common/log.py

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
@@ -17,7 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-"""Openstack logging handler.
+"""OpenStack logging handler.

 This module adds to logging functionality by adding the option to specify
 a context object when calling the various log methods.  If the context object
@@ -35,13 +33,15 @@ import logging
 import logging.config
 import logging.handlers
 import os
+import re
 import sys
 import traceback

 from oslo.config import cfg
+import six
 from six import moves

-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _
 from muranoapi.openstack.common import importutils
 from muranoapi.openstack.common import jsonutils
 from muranoapi.openstack.common import local
@@ -49,6 +49,24 @@ from muranoapi.openstack.common import local

 _DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

+_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
+
+# NOTE(ldbragst): Let's build a list of regex objects using the list of
+# _SANITIZE_KEYS we already have. This way, we only have to add the new key
+# to the list of _SANITIZE_KEYS and we can generate regular expressions
+# for XML and JSON automatically.
+_SANITIZE_PATTERNS = []
+_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
+                    r'(<%(key)s>).*?(</%(key)s>)',
+                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
+                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
+
+for key in _SANITIZE_KEYS:
+    for pattern in _FORMAT_PATTERNS:
+        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
+        _SANITIZE_PATTERNS.append(reg_ex)
+
+
 common_cli_opts = [
     cfg.BoolOpt('debug',
                 short='d',
@@ -63,11 +81,13 @@
 ]

 logging_cli_opts = [
-    cfg.StrOpt('log-config',
+    cfg.StrOpt('log-config-append',
                metavar='PATH',
-               help='If this option is specified, the logging configuration '
-                    'file specified is used and overrides any other logging '
-                    'options specified. Please see the Python logging module '
+               deprecated_name='log-config',
+               help='The name of logging configuration file. It does not '
+                    'disable existing loggers, but just appends specified '
+                    'logging configuration to any other existing logging '
+                    'options. Please see the Python logging module '
                     'documentation for details on logging configuration '
                     'files.'),
     cfg.StrOpt('log-format',
@@ -95,10 +115,21 @@
                     '--log-file paths'),
     cfg.BoolOpt('use-syslog',
                 default=False,
-                help='Use syslog for logging.'),
+                help='Use syslog for logging. '
+                     'Existing syslog format is DEPRECATED during I, '
+                     'and then will be changed in J to honor RFC5424'),
+    cfg.BoolOpt('use-syslog-rfc-format',
+                # TODO(bogdando) remove or use True after existing
+                #    syslog format deprecation in J
+                default=False,
+                help='(Optional) Use syslog rfc5424 format for logging. '
+                     'If enabled, will add APP-NAME (RFC5424) before the '
+                     'MSG part of the syslog message. The old format '
+                     'without APP-NAME is deprecated in I, '
+                     'and will be removed in J.'),
     cfg.StrOpt('syslog-log-facility',
                default='LOG_USER',
-               help='syslog facility to receive log lines')
+               help='Syslog facility to receive log lines')
 ]

 generic_log_opts = [
@@ -110,36 +141,38 @@ generic_log_opts = [
 log_opts = [
     cfg.StrOpt('logging_context_format_string',
                default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
-                       '%(name)s [%(request_id)s %(user)s %(tenant)s] '
+                       '%(name)s [%(request_id)s %(user_identity)s] '
                        '%(instance)s%(message)s',
-               help='format string to use for log messages with context'),
+               help='Format string to use for log messages with context'),
     cfg.StrOpt('logging_default_format_string',
                default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                        '%(name)s [-] %(instance)s%(message)s',
-               help='format string to use for log messages without context'),
+               help='Format string to use for log messages without context'),
     cfg.StrOpt('logging_debug_format_suffix',
                default='%(funcName)s %(pathname)s:%(lineno)d',
-               help='data to append to log format when level is DEBUG'),
+               help='Data to append to log format when level is DEBUG'),
     cfg.StrOpt('logging_exception_prefix',
                default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                        '%(instance)s',
-               help='prefix each line of exception output with this format'),
+               help='Prefix each line of exception output with this format'),
     cfg.ListOpt('default_log_levels',
                 default=[
+                    'amqp=WARN',
                     'amqplib=WARN',
-                    'sqlalchemy=WARN',
                     'boto=WARN',
+                    'qpid=WARN',
+                    'sqlalchemy=WARN',
                     'suds=INFO',
-                    'keystone=INFO',
-                    'eventlet.wsgi.server=WARN'
+                    'iso8601=WARN',
+                    'requests.packages.urllib3.connectionpool=WARN'
                 ],
-                help='list of logger=LEVEL pairs'),
+                help='List of logger=LEVEL pairs'),
     cfg.BoolOpt('publish_errors',
                 default=False,
-                help='publish error events'),
+                help='Publish error events'),
     cfg.BoolOpt('fatal_deprecations',
                 default=False,
-                help='make deprecations fatal'),
+                help='Make deprecations fatal'),

     # NOTE(mikal): there are two options here because sometimes we are handed
     # a full instance (and could include more information), and other times we
@@ -207,6 +240,42 @@ def _get_log_file_path(binary=None):
         binary = binary or _get_binary_name()
         return '%s.log' % (os.path.join(logdir, binary),)

+    return None
+
+
+def mask_password(message, secret="***"):
+    """Replace password with 'secret' in message.
+
+    :param message: The string which includes security information.
+    :param secret: value with which to replace passwords.
+    :returns: The unicode value of message with the password fields masked.
+
+    For example:
+
+    >>> mask_password("'adminPass' : 'aaaaa'")
+    "'adminPass' : '***'"
+    >>> mask_password("'admin_pass' : 'aaaaa'")
+    "'admin_pass' : '***'"
+    >>> mask_password('"password" : "aaaaa"')
+    '"password" : "***"'
+    >>> mask_password("'original_password' : 'aaaaa'")
+    "'original_password' : '***'"
+    >>> mask_password("u'original_password' : u'aaaaa'")
+    "u'original_password' : u'***'"
+    """
+    message = six.text_type(message)
+
+    # NOTE(ldbragst): Check to see if anything in message contains any key
+    # specified in _SANITIZE_KEYS, if not then just return the message since
+    # we don't have to mask any passwords.
+    if not any(key in message for key in _SANITIZE_KEYS):
+        return message
+
+    secret = r'\g<1>' + secret + r'\g<2>'
+    for pattern in _SANITIZE_PATTERNS:
+        message = re.sub(pattern, secret, message)
+    return message
+
+
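The doctests above only exercise the key/value forms; the second entry in `_FORMAT_PATTERNS` gives the same masking for XML payloads, for example (a sketch, value illustrative):

    >>> mask_password('<adminPass>SuperSecret</adminPass>')
    '<adminPass>***</adminPass>'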
 class BaseLoggerAdapter(logging.LoggerAdapter):

@@ -235,20 +304,48 @@ class ContextAdapter(BaseLoggerAdapter):
         self.logger = logger
         self.project = project_name
         self.version = version_string
+        self._deprecated_messages_sent = dict()

     @property
     def handlers(self):
         return self.logger.handlers

     def deprecated(self, msg, *args, **kwargs):
+        """Call this method when a deprecated feature is used.
+
+        If the system is configured for fatal deprecations then the message
+        is logged at the 'critical' level and :class:`DeprecatedConfig` will
+        be raised.
+
+        Otherwise, the message will be logged (once) at the 'warn' level.
+
+        :raises: :class:`DeprecatedConfig` if the system is configured for
+                 fatal deprecations.
+
+        """
         stdmsg = _("Deprecated: %s") % msg
         if CONF.fatal_deprecations:
             self.critical(stdmsg, *args, **kwargs)
             raise DeprecatedConfig(msg=stdmsg)
-        else:
-            self.warn(stdmsg, *args, **kwargs)
+
+        # Using a list because a tuple with dict can't be stored in a set.
+        sent_args = self._deprecated_messages_sent.setdefault(msg, list())
+
+        if args in sent_args:
+            # Already logged this message, so don't log it again.
+            return
+
+        sent_args.append(args)
+        self.warn(stdmsg, *args, **kwargs)

     def process(self, msg, kwargs):
+        # NOTE(mrodden): catch any Message/other object and
+        #                coerce to unicode before they can get
+        #                to the python logging and possibly
+        #                cause string encoding trouble
+        if not isinstance(msg, six.string_types):
+            msg = six.text_type(msg)
+
         if 'extra' not in kwargs:
             kwargs['extra'] = {}
         extra = kwargs['extra']
@@ -260,18 +357,20 @@ class ContextAdapter(BaseLoggerAdapter):
             extra.update(_dictify_context(context))

         instance = kwargs.pop('instance', None)
+        instance_uuid = (extra.get('instance_uuid') or
+                         kwargs.pop('instance_uuid', None))
         instance_extra = ''
         if instance:
             instance_extra = CONF.instance_format % instance
-        else:
-            instance_uuid = kwargs.pop('instance_uuid', None)
-            if instance_uuid:
-                instance_extra = (CONF.instance_uuid_format
-                                  % {'uuid': instance_uuid})
-        extra.update({'instance': instance_extra})
+        elif instance_uuid:
+            instance_extra = (CONF.instance_uuid_format
+                              % {'uuid': instance_uuid})
+        extra['instance'] = instance_extra

-        extra.update({"project": self.project})
-        extra.update({"version": self.version})
+        extra.setdefault('user_identity', kwargs.pop('user_identity', None))
+
+        extra['project'] = self.project
+        extra['version'] = self.version
         extra['extra'] = extra.copy()
         return msg, kwargs

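Callers see two behavioral changes from this hunk: repeated `deprecated()` calls with the same message and args are only logged once, and `instance_uuid` may now arrive either as a kwarg or pre-set in `extra`. A sketch (the uuid value is illustrative):

    from muranoapi.openstack.common import log as logging

    LOG = logging.getLogger(__name__)  # a ContextAdapter

    LOG.warn('rebooting',
             instance_uuid='9f8e7d6c-aaaa-bbbb-cccc-123456789abc')
    # -> message is prefixed via CONF.instance_uuid_format,
    #    i.e. '[instance: <uuid>] rebooting'

    LOG.deprecated('old option %s in use', 'foo')
    LOG.deprecated('old option %s in use', 'foo')  # suppressed this time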
@@ -285,7 +384,7 @@ class JSONFormatter(logging.Formatter):
     def formatException(self, ei, strip_newlines=True):
         lines = traceback.format_exception(*ei)
         if strip_newlines:
-            lines = [itertools.ifilter(
+            lines = [moves.filter(
                 lambda x: x,
                 line.rstrip().splitlines()) for line in lines]
             lines = list(itertools.chain(*lines))
@@ -323,11 +422,13 @@

 def _create_logging_excepthook(product_name):
-    def logging_excepthook(type, value, tb):
+    def logging_excepthook(exc_type, value, tb):
         extra = {}
-        if CONF.verbose:
-            extra['exc_info'] = (type, value, tb)
-        getLogger(product_name).critical(str(value), **extra)
+        if CONF.verbose or CONF.debug:
+            extra['exc_info'] = (exc_type, value, tb)
+        getLogger(product_name).critical(
+            "".join(traceback.format_exception_only(exc_type, value)),
+            **extra)
     return logging_excepthook


@@ -344,19 +445,20 @@ class LogConfigError(Exception):
         err_msg=self.err_msg)


-def _load_log_config(log_config):
+def _load_log_config(log_config_append):
     try:
-        logging.config.fileConfig(log_config)
+        logging.config.fileConfig(log_config_append,
+                                  disable_existing_loggers=False)
     except moves.configparser.Error as exc:
-        raise LogConfigError(log_config, str(exc))
+        raise LogConfigError(log_config_append, str(exc))


-def setup(product_name):
+def setup(product_name, version='unknown'):
     """Setup logging."""
-    if CONF.log_config:
-        _load_log_config(CONF.log_config)
+    if CONF.log_config_append:
+        _load_log_config(CONF.log_config_append)
     else:
-        _setup_logging_from_conf()
+        _setup_logging_from_conf(product_name, version)
     sys.excepthook = _create_logging_excepthook(product_name)

@ -390,15 +492,32 @@ def _find_facility_from_conf():
     return facility
 
 
-def _setup_logging_from_conf():
+class RFCSysLogHandler(logging.handlers.SysLogHandler):
+    def __init__(self, *args, **kwargs):
+        self.binary_name = _get_binary_name()
+        super(RFCSysLogHandler, self).__init__(*args, **kwargs)
+
+    def format(self, record):
+        msg = super(RFCSysLogHandler, self).format(record)
+        msg = self.binary_name + ' ' + msg
+        return msg
+
+
+def _setup_logging_from_conf(project, version):
     log_root = getLogger(None).logger
     for handler in log_root.handlers:
         log_root.removeHandler(handler)
 
     if CONF.use_syslog:
         facility = _find_facility_from_conf()
-        syslog = logging.handlers.SysLogHandler(address='/dev/log',
-                                                facility=facility)
+        # TODO(bogdando) use the format provided by RFCSysLogHandler
+        #   after existing syslog format deprecation in J
+        if CONF.use_syslog_rfc_format:
+            syslog = RFCSysLogHandler(address='/dev/log',
+                                      facility=facility)
+        else:
+            syslog = logging.handlers.SysLogHandler(address='/dev/log',
+                                                    facility=facility)
         log_root.addHandler(syslog)
 
     logpath = _get_log_file_path()
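Note: with use_syslog_rfc_format=True the new handler prepends the process name as the RFC 5424 APP-NAME before the MSG part. A rough standalone equivalent (os.path.basename(sys.argv[0]) stands in for the module's _get_binary_name()):

    import logging
    import logging.handlers
    import os
    import sys

    class RFC5424Handler(logging.handlers.SysLogHandler):
        """Prepend APP-NAME so RFC 5424 collectors can parse the message."""
        def __init__(self, *args, **kwargs):
            self.binary_name = os.path.basename(sys.argv[0])
            logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)

        def format(self, record):
            msg = logging.handlers.SysLogHandler.format(self, record)
            return self.binary_name + ' ' + msg

    # a record "foo" is then emitted roughly as "murano-api foo" instead of "foo"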
@ -410,7 +529,7 @@ def _setup_logging_from_conf():
             streamlog = ColorHandler()
         log_root.addHandler(streamlog)
 
-    elif not CONF.log_file:
+    elif not logpath:
         # pass sys.stdout as a positional argument
         # python2.6 calls the argument strm, in 2.7 it's stream
         streamlog = logging.StreamHandler(sys.stdout)
@ -432,7 +551,9 @@ def _setup_logging_from_conf():
             log_root.info('Deprecated: log_format is now deprecated and will '
                           'be removed in the next release')
         else:
-            handler.setFormatter(ContextFormatter(datefmt=datefmt))
+            handler.setFormatter(ContextFormatter(project=project,
+                                                  version=version,
+                                                  datefmt=datefmt))
 
     if CONF.debug:
         log_root.setLevel(logging.DEBUG)
@ -476,7 +597,7 @@ class WritableLogger(object):
         self.level = level
 
     def write(self, msg):
-        self.logger.log(self.level, msg)
+        self.logger.log(self.level, msg.rstrip())
 
 
 class ContextFormatter(logging.Formatter):
@ -490,18 +611,50 @@ class ContextFormatter(logging.Formatter):
     For information about what variables are available for the formatter see:
     http://docs.python.org/library/logging.html#formatter
 
+    If available, uses the context value stored in TLS - local.store.context
+
     """
 
+    def __init__(self, *args, **kwargs):
+        """Initialize ContextFormatter instance
+
+        Takes additional keyword arguments which can be used in the message
+        format string.
+
+        :keyword project: project name
+        :type project: string
+        :keyword version: project version
+        :type version: string
+
+        """
+
+        self.project = kwargs.pop('project', 'unknown')
+        self.version = kwargs.pop('version', 'unknown')
+
+        logging.Formatter.__init__(self, *args, **kwargs)
+
     def format(self, record):
         """Uses contextstring if request_id is set, otherwise default."""
-        # NOTE(sdague): default the fancier formating params
+        # store project info
+        record.project = self.project
+        record.version = self.version
+
+        # store request info
+        context = getattr(local.store, 'context', None)
+        if context:
+            d = _dictify_context(context)
+            for k, v in d.items():
+                setattr(record, k, v)
+
+        # NOTE(sdague): default the fancier formatting params
         # to an empty string so we don't throw an exception if
         # they get used
         for key in ('instance', 'color'):
             if key not in record.__dict__:
                 record.__dict__[key] = ''
 
-        if record.__dict__.get('request_id', None):
+        if record.__dict__.get('request_id'):
             self._fmt = CONF.logging_context_format_string
         else:
             self._fmt = CONF.logging_default_format_string
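Note: the new project/version keywords make %(project)s and %(version)s usable in the configured format strings. A minimal self-contained sketch of the same mechanism (the format string below is illustrative, not the oslo default):

    import logging
    import sys

    class ProjectFormatter(logging.Formatter):
        """Same idea as the ContextFormatter change above, stdlib only."""
        def __init__(self, *args, **kwargs):
            self.project = kwargs.pop('project', 'unknown')
            self.version = kwargs.pop('version', 'unknown')
            logging.Formatter.__init__(self, *args, **kwargs)

        def format(self, record):
            record.project = self.project
            record.version = self.version
            return logging.Formatter.format(self, record)

    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(ProjectFormatter(
        '%(asctime)s %(project)s %(version)s %(levelname)s %(message)s',
        project='muranoapi', version='2014.1'))
    log = logging.getLogger('demo')
    log.addHandler(handler)
    log.error('boom')  # -> "... muranoapi 2014.1 ERROR boom"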
@ -510,7 +663,7 @@ class ContextFormatter(logging.Formatter):
                 CONF.logging_debug_format_suffix):
             self._fmt += " " + CONF.logging_debug_format_suffix
 
-        # Cache this on the record, Logger will respect our formated copy
+        # Cache this on the record, Logger will respect our formatted copy
         if record.exc_info:
             record.exc_text = self.formatException(record.exc_info, record)
         return logging.Formatter.format(self, record)
@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2011 Justin Santa Barbara
@ -22,7 +20,7 @@ import sys
 from eventlet import event
 from eventlet import greenthread
 
-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _LE, _LW
 from muranoapi.openstack.common import log as logging
 from muranoapi.openstack.common import timeutils
@ -81,14 +79,14 @@ class FixedIntervalLoopingCall(LoopingCallBase):
                         break
                     delay = interval - timeutils.delta_seconds(start, end)
                     if delay <= 0:
-                        LOG.warn(_('task run outlasted interval by %s sec') %
+                        LOG.warn(_LW('task run outlasted interval by %s sec') %
                                  -delay)
                     greenthread.sleep(delay if delay > 0 else 0)
             except LoopingCallDone as e:
                 self.stop()
                 done.send(e.retvalue)
             except Exception:
-                LOG.exception(_('in fixed duration looping call'))
+                LOG.exception(_LE('in fixed duration looping call'))
                 done.send_exception(*sys.exc_info())
                 return
             else:
@ -128,14 +126,14 @@ class DynamicLoopingCall(LoopingCallBase):
 
                     if periodic_interval_max is not None:
                         idle = min(idle, periodic_interval_max)
-                    LOG.debug(_('Dynamic looping call sleeping for %.02f '
-                                'seconds'), idle)
+                    LOG.debug('Dynamic looping call sleeping for %.02f '
+                              'seconds', idle)
                     greenthread.sleep(idle)
             except LoopingCallDone as e:
                 self.stop()
                 done.send(e.retvalue)
             except Exception:
-                LOG.exception(_('in dynamic looping call'))
+                LOG.exception(_LE('in dynamic looping call'))
                 done.send_exception(*sys.exc_info())
                 return
             else:
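Note: both looping-call classes run the wrapped function in an eventlet green thread; typical usage looks like the sketch below (assumes eventlet is installed, as this module requires):

    from muranoapi.openstack.common import loopingcall

    def _poll():
        print('tick')

    timer = loopingcall.FixedIntervalLoopingCall(_poll)
    # _poll runs every 2 seconds; the _LW warning above fires when one
    # run outlasts the interval
    timer.start(interval=2.0)
    timer.wait()  # blocks until stop() or the callback raises LoopingCallDone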
@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 OpenStack Foundation.
 # All Rights Reserved.
 #
@ -19,7 +17,17 @@
 Network-related utilities and helper functions.
 """
 
-import urlparse
+# TODO(jd) Use six.moves once
+# https://bitbucket.org/gutworth/six/pull-request/28
+# is merged
+try:
+    import urllib.parse
+    SplitResult = urllib.parse.SplitResult
+except ImportError:
+    import urlparse
+    SplitResult = urlparse.SplitResult
+
+from six.moves.urllib import parse
 
 
 def parse_host_port(address, default_port=None):
@ -72,10 +80,10 @@ def urlsplit(url, scheme='', allow_fragments=True):
 
     The parameters are the same as urlparse.urlsplit.
     """
-    scheme, netloc, path, query, fragment = urlparse.urlsplit(
+    scheme, netloc, path, query, fragment = parse.urlsplit(
         url, scheme, allow_fragments)
     if allow_fragments and '#' in path:
         path, fragment = path.split('#', 1)
     if '?' in path:
         path, query = path.split('?', 1)
-    return urlparse.SplitResult(scheme, netloc, path, query, fragment)
+    return SplitResult(scheme, netloc, path, query, fragment)
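Note: this custom urlsplit exists because older Pythons do not split query/fragment for unrecognized schemes such as rabbit://. A quick illustration of what it returns (the URL is made up):

    from muranoapi.openstack.common import network_utils

    parts = network_utils.urlsplit('rabbit://user:pass@host:5672/vhost?x=1')
    print(parts.scheme)  # 'rabbit'
    print(parts.netloc)  # 'user:pass@host:5672'
    print(parts.path)    # '/vhost'
    print(parts.query)   # 'x=1'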
@ -1,14 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
@ -19,7 +19,7 @@ import uuid
 from oslo.config import cfg
 
 from muranoapi.openstack.common import context
-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _, _LE
 from muranoapi.openstack.common import importutils
 from muranoapi.openstack.common import jsonutils
 from muranoapi.openstack.common import log as logging
@ -142,9 +142,9 @@ def notify(context, publisher_id, event_type, priority, payload):
         try:
             driver.notify(context, msg)
         except Exception as e:
-            LOG.exception(_("Problem '%(e)s' attempting to "
-                            "send to notification system. "
-                            "Payload=%(payload)s")
+            LOG.exception(_LE("Problem '%(e)s' attempting to "
+                              "send to notification system. "
+                              "Payload=%(payload)s")
                           % dict(e=e, payload=payload))
@ -161,8 +161,8 @@ def _get_drivers():
             driver = importutils.import_module(notification_driver)
             _drivers[notification_driver] = driver
         except ImportError:
-            LOG.exception(_("Failed to load notifier %s. "
-                            "These notifications will not be sent.") %
+            LOG.exception(_LE("Failed to load notifier %s. "
+                              "These notifications will not be sent.") %
                           notification_driver)
     return _drivers.values()
@ -25,7 +25,7 @@ CONF = cfg.CONF
 def notify(_context, message):
     """Notifies the recipient of the desired event given the model.
 
-    Log notifications using openstack's default logging system.
+    Log notifications using OpenStack's default logging system.
     """
 
     priority = message.get('priority',

muranoapi/openstack/common/notifier/proxy.py (new file, 77 lines)
@ -0,0 +1,77 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A temporary helper which emulates muranoapi.messaging.Notifier.
+
+This helper method allows us to do the tedious porting to the new Notifier API
+as a standalone commit so that the commit which switches us to
+muranoapi.messaging is smaller and easier to review. This file will be removed
+as part of that commit.
+"""
+
+from oslo.config import cfg
+
+from muranoapi.openstack.common.notifier import api as notifier_api
+
+CONF = cfg.CONF
+
+
+class Notifier(object):
+
+    def __init__(self, publisher_id):
+        super(Notifier, self).__init__()
+        self.publisher_id = publisher_id
+
+    _marker = object()
+
+    def prepare(self, publisher_id=_marker):
+        ret = self.__class__(self.publisher_id)
+        if publisher_id is not self._marker:
+            ret.publisher_id = publisher_id
+        return ret
+
+    def _notify(self, ctxt, event_type, payload, priority):
+        notifier_api.notify(ctxt,
+                            self.publisher_id,
+                            event_type,
+                            priority,
+                            payload)
+
+    def audit(self, ctxt, event_type, payload):
+        # No audit in old notifier.
+        self._notify(ctxt, event_type, payload, 'INFO')
+
+    def debug(self, ctxt, event_type, payload):
+        self._notify(ctxt, event_type, payload, 'DEBUG')
+
+    def info(self, ctxt, event_type, payload):
+        self._notify(ctxt, event_type, payload, 'INFO')
+
+    def warn(self, ctxt, event_type, payload):
+        self._notify(ctxt, event_type, payload, 'WARN')
+
+    warning = warn
+
+    def error(self, ctxt, event_type, payload):
+        self._notify(ctxt, event_type, payload, 'ERROR')
+
+    def critical(self, ctxt, event_type, payload):
+        self._notify(ctxt, event_type, payload, 'CRITICAL')
+
+
+def get_notifier(service=None, host=None, publisher_id=None):
+    if not publisher_id:
+        publisher_id = "%s.%s" % (service, host or CONF.host)
+    return Notifier(publisher_id)
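Note: the shim mirrors the muranoapi.messaging Notifier API it emulates; intended usage looks roughly like this (service name, event types, and payload keys are illustrative only):

    from muranoapi.openstack.common.notifier import proxy

    notifier = proxy.get_notifier(service='murano', host='api-node-1')
    notifier.info({}, 'environment.create', {'environment_id': 'abc123'})

    # prepare() clones the notifier, optionally swapping the publisher id
    audit = notifier.prepare(publisher_id='murano.audit')
    audit.audit({}, 'session.deploy', {'session_id': 'def456'})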
@ -16,7 +16,7 @@
 from oslo.config import cfg
 
 from muranoapi.openstack.common import context as req_context
-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _LE
 from muranoapi.openstack.common import log as logging
 from muranoapi.openstack.common import rpc
@ -24,7 +24,7 @@ LOG = logging.getLogger(__name__)
 
 notification_topic_opt = cfg.ListOpt(
     'notification_topics', default=['notifications', ],
-    help='AMQP topic used for openstack notifications')
+    help='AMQP topic used for OpenStack notifications')
 
 CONF = cfg.CONF
 CONF.register_opt(notification_topic_opt)
@ -42,5 +42,6 @@ def notify(context, message):
         try:
             rpc.notify(context, topic, message)
         except Exception:
-            LOG.exception(_("Could not send notification to %(topic)s. "
-                            "Payload=%(message)s"), locals())
+            LOG.exception(_LE("Could not send notification to %(topic)s. "
+                              "Payload=%(message)s"),
+                          {"topic": topic, "message": message})
@ -18,7 +18,7 @@
 from oslo.config import cfg
 
 from muranoapi.openstack.common import context as req_context
-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _LE
 from muranoapi.openstack.common import log as logging
 from muranoapi.openstack.common import rpc
@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__)
 
 notification_topic_opt = cfg.ListOpt(
     'topics', default=['notifications', ],
-    help='AMQP topic(s) used for openstack notifications')
+    help='AMQP topic(s) used for OpenStack notifications')
 
 opt_group = cfg.OptGroup(name='rpc_notifier2',
                          title='Options for rpc_notifier2')
@ -48,5 +48,6 @@ def notify(context, message):
         try:
             rpc.notify(context, topic, message, envelope=True)
         except Exception:
-            LOG.exception(_("Could not send notification to %(topic)s. "
-                            "Payload=%(message)s"), locals())
+            LOG.exception(_LE("Could not send notification to %(topic)s. "
+                              "Payload=%(message)s"),
+                          {"topic": topic, "message": message})
@ -13,7 +13,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-
 NOTIFICATIONS = []
 
 
@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@ -25,13 +23,9 @@ For some wrappers that add message versioning to rpc, see:
     rpc.proxy
 """
 
-import inspect
-
 from oslo.config import cfg
 
-from muranoapi.openstack.common.gettextutils import _  # noqa
 from muranoapi.openstack.common import importutils
-from muranoapi.openstack.common import local
 from muranoapi.openstack.common import log as logging
 
@ -56,13 +50,12 @@ rpc_opts = [
                help='Seconds to wait before a cast expires (TTL). '
                     'Only supported by impl_zmq.'),
     cfg.ListOpt('allowed_rpc_exception_modules',
-                default=['muranoapi.openstack.common.exception',
-                         'nova.exception',
+                default=['nova.exception',
                          'cinder.exception',
                          'exceptions',
                          ],
                 help='Modules of exceptions that are permitted to be recreated'
-                     'upon receiving exception data from an rpc call.'),
+                     ' upon receiving exception data from an rpc call.'),
     cfg.BoolOpt('fake_rabbit',
                 default=False,
                 help='If passed, use a fake RabbitMQ provider'),
@ -96,24 +89,7 @@ def create_connection(new=True):
     return _get_impl().create_connection(CONF, new=new)
 
 
-def _check_for_lock():
-    if not CONF.debug:
-        return None
-
-    if ((hasattr(local.strong_store, 'locks_held')
-         and local.strong_store.locks_held)):
-        stack = ' :: '.join([frame[3] for frame in inspect.stack()])
-        LOG.warn(_('A RPC is being made while holding a lock. The locks '
-                   'currently held are %(locks)s. This is probably a bug. '
-                   'Please report it. Include the following: [%(stack)s].'),
-                 {'locks': local.strong_store.locks_held,
-                  'stack': stack})
-        return True
-
-    return False
-
-
-def call(context, topic, msg, timeout=None, check_for_lock=False):
+def call(context, topic, msg, timeout=None):
     """Invoke a remote method that returns something.
 
     :param context: Information that identifies the user that has made this
@ -127,16 +103,12 @@ def call(context, topic, msg, timeout=None, check_for_lock=False):
                     "args" : dict_of_kwargs }
     :param timeout: int, number of seconds to use for a response timeout.
                     If set, this overrides the rpc_response_timeout option.
-    :param check_for_lock: if True, a warning is emitted if a RPC call is made
-                    with a lock held.
 
     :returns: A dict from the remote method.
 
     :raises: openstack.common.rpc.common.Timeout if a complete response
              is not received before the timeout is reached.
     """
-    if check_for_lock:
-        _check_for_lock()
     return _get_impl().call(CONF, context, topic, msg, timeout)
 
@ -179,7 +151,7 @@ def fanout_cast(context, topic, msg):
     return _get_impl().fanout_cast(CONF, context, topic, msg)
 
 
-def multicall(context, topic, msg, timeout=None, check_for_lock=False):
+def multicall(context, topic, msg, timeout=None):
     """Invoke a remote method and get back an iterator.
 
     In this case, the remote method will be returning multiple values in
@ -197,8 +169,6 @@ def multicall(context, topic, msg, timeout=None, check_for_lock=False):
                     "args" : dict_of_kwargs }
     :param timeout: int, number of seconds to use for a response timeout.
                     If set, this overrides the rpc_response_timeout option.
-    :param check_for_lock: if True, a warning is emitted if a RPC call is made
-                    with a lock held.
 
     :returns: An iterator.  The iterator will yield a tuple (N, X) where N is
               an index that starts at 0 and increases by one for each value
@ -208,8 +178,6 @@ def multicall(context, topic, msg, timeout=None, check_for_lock=False):
     :raises: openstack.common.rpc.common.Timeout if a complete response
              is not received before the timeout is reached.
     """
-    if check_for_lock:
-        _check_for_lock()
     return _get_impl().multicall(CONF, context, topic, msg, timeout)
 
@ -228,7 +196,7 @@ def notify(context, topic, msg, envelope=False):
 
 
 def cleanup():
-    """Clean up resoruces in use by implementation.
+    """Clean up resources in use by implementation.
 
     Clean up any resources that have been allocated by the RPC implementation.
     This is typically open connections to a messaging service. This function
@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@ -20,9 +18,9 @@
 """
 Shared code between AMQP based openstack.common.rpc implementations.
 
-The code in this module is shared between the rpc implemenations based on AMQP.
-Specifically, this includes impl_kombu and impl_qpid.  impl_carrot also uses
-AMQP, but is deprecated and predates this code.
+The code in this module is shared between the rpc implementations based on
+AMQP. Specifically, this includes impl_kombu and impl_qpid.  impl_carrot also
+uses AMQP, but is deprecated and predates this code.
 """
 
 import collections
@ -35,9 +33,11 @@ from eventlet import pools
 from eventlet import queue
 from eventlet import semaphore
 from oslo.config import cfg
+import six
+
 
 from muranoapi.openstack.common import excutils
-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _, _LE
 from muranoapi.openstack.common import local
 from muranoapi.openstack.common import log as logging
 from muranoapi.openstack.common.rpc import common as rpc_common
@ -72,7 +72,7 @@ class Pool(pools.Pool):
 
     # TODO(comstud): Timeout connections not used in a while
    def create(self):
-        LOG.debug(_('Pool creating new connection'))
+        LOG.debug('Pool creating new connection')
         return self.connection_cls(self.conf)
 
     def empty(self):
@ -189,7 +189,7 @@ class ReplyProxy(ConnectionContext):
     def __init__(self, conf, connection_pool):
         self._call_waiters = {}
         self._num_call_waiters = 0
-        self._num_call_waiters_wrn_threshhold = 10
+        self._num_call_waiters_wrn_threshold = 10
         self._reply_q = 'reply_' + uuid.uuid4().hex
         super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
         self.declare_direct_consumer(self._reply_q, self._process_data)
@ -208,11 +208,11 @@ class ReplyProxy(ConnectionContext):
 
     def add_call_waiter(self, waiter, msg_id):
         self._num_call_waiters += 1
-        if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
+        if self._num_call_waiters > self._num_call_waiters_wrn_threshold:
             LOG.warn(_('Number of call waiters is greater than warning '
-                       'threshhold: %d. There could be a MulticallProxyWaiter '
-                       'leak.') % self._num_call_waiters_wrn_threshhold)
-            self._num_call_waiters_wrn_threshhold *= 2
+                       'threshold: %d. There could be a MulticallProxyWaiter '
+                       'leak.') % self._num_call_waiters_wrn_threshold)
+            self._num_call_waiters_wrn_threshold *= 2
         self._call_waiters[msg_id] = waiter
 
     def del_call_waiter(self, msg_id):
@ -241,7 +241,7 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
         _add_unique_id(msg)
         # If a reply_q exists, add the msg_id to the reply and pass the
         # reply_q to direct_send() to use it as the response queue.
-        # Otherwise use the msg_id for backward compatibilty.
+        # Otherwise use the msg_id for backward compatibility.
         if reply_q:
             msg['_msg_id'] = msg_id
             conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
@ -287,7 +287,7 @@ def unpack_context(conf, msg):
     context_dict['reply_q'] = msg.pop('_reply_q', None)
     context_dict['conf'] = conf
     ctx = RpcContext.from_dict(context_dict)
-    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
+    rpc_common._safe_log(LOG.debug, 'unpacked context: %s', ctx.to_dict())
     return ctx
 
@ -300,8 +300,14 @@ def pack_context(msg, context):
     for args at some point.
 
     """
-    context_d = dict([('_context_%s' % key, value)
-                      for (key, value) in context.to_dict().iteritems()])
+    if isinstance(context, dict):
+        context_d = dict([('_context_%s' % key, value)
+                          for (key, value) in six.iteritems(context)])
+    else:
+        context_d = dict([('_context_%s' % key, value)
+                          for (key, value) in
+                          six.iteritems(context.to_dict())])
+
     msg.update(context_d)
 
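Note: pack_context() flattens the context into the message under a '_context_' prefix so unpack_context() on the consumer side can rebuild it; the new branch simply accepts a plain dict as well as a context object. In effect:

    msg = {'method': 'deploy', 'args': {}}
    ctx = {'user_id': 'u1', 'request_id': 'req-42'}  # plain-dict context
    msg.update(('_context_%s' % k, v) for k, v in ctx.items())
    print(msg)
    # {'method': 'deploy', 'args': {},
    #  '_context_user_id': 'u1', '_context_request_id': 'req-42'}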
@ -333,7 +339,7 @@ def _add_unique_id(msg):
     """Add unique_id for checking duplicate messages."""
     unique_id = uuid.uuid4().hex
     msg.update({UNIQUE_ID: unique_id})
-    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
+    LOG.debug('UNIQUE_ID is %s.' % (unique_id))
 
 
 class _ThreadPoolWithWait(object):
@ -359,22 +365,43 @@ class CallbackWrapper(_ThreadPoolWithWait):
     Allows it to be invoked in a green thread.
     """
 
-    def __init__(self, conf, callback, connection_pool):
+    def __init__(self, conf, callback, connection_pool,
+                 wait_for_consumers=False):
         """Initiates CallbackWrapper object.
 
         :param conf: cfg.CONF instance
         :param callback: a callable (probably a function)
         :param connection_pool: connection pool as returned by
                                 get_connection_pool()
+        :param wait_for_consumers: wait for all green threads to
+                                   complete and raise the last
+                                   caught exception, if any.
+
         """
         super(CallbackWrapper, self).__init__(
             conf=conf,
             connection_pool=connection_pool,
         )
         self.callback = callback
+        self.wait_for_consumers = wait_for_consumers
+        self.exc_info = None
+
+    def _wrap(self, message_data, **kwargs):
+        """Wrap the callback invocation to catch exceptions.
+        """
+        try:
+            self.callback(message_data, **kwargs)
+        except Exception:
+            self.exc_info = sys.exc_info()
 
     def __call__(self, message_data):
-        self.pool.spawn_n(self.callback, message_data)
+        self.exc_info = None
+        self.pool.spawn_n(self._wrap, message_data)
+
+        if self.wait_for_consumers:
+            self.pool.waitall()
+            if self.exc_info:
+                six.reraise(self.exc_info[1], None, self.exc_info[2])
 
 
 class ProxyCallback(_ThreadPoolWithWait):
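Note: the wait_for_consumers flag turns the fire-and-forget spawn into a synchronous dispatch that surfaces callback failures to the caller. The control flow in isolation (the pool and callback below are stand-ins, not murano code):

    import sys
    import eventlet

    pool = eventlet.GreenPool(64)
    exc_info = [None]

    def _wrap(callback, data):
        try:
            callback(data)
        except Exception:
            exc_info[0] = sys.exc_info()  # remember instead of dying silently

    def dispatch(callback, data, wait_for_consumers=False):
        pool.spawn_n(_wrap, callback, data)
        if wait_for_consumers:
            pool.waitall()             # let every green thread finish
            if exc_info[0]:
                raise exc_info[0][1]   # re-raise the last caught exception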
@ -405,7 +432,7 @@ class ProxyCallback(_ThreadPoolWithWait):
         # the previous context is stored in local.store.context
         if hasattr(local.store, 'context'):
             del local.store.context
-        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
+        rpc_common._safe_log(LOG.debug, 'received %s', message_data)
         self.msg_id_cache.check_duplicate_message(message_data)
         ctxt = unpack_context(self.conf, message_data)
         method = message_data.get('method')
@ -442,7 +469,7 @@ class ProxyCallback(_ThreadPoolWithWait):
             # This final None tells multicall that it is done.
             ctxt.reply(ending=True, connection_pool=self.connection_pool)
         except rpc_common.ClientException as e:
-            LOG.debug(_('Expected exception during message handling (%s)') %
+            LOG.debug('Expected exception during message handling (%s)' %
                       e._exc_info[1])
             ctxt.reply(None, e._exc_info,
                        connection_pool=self.connection_pool,
@ -450,7 +477,7 @@ class ProxyCallback(_ThreadPoolWithWait):
         except Exception:
             # sys.exc_info() is deleted by LOG.exception().
             exc_info = sys.exc_info()
-            LOG.error(_('Exception during message handling'),
+            LOG.error(_LE('Exception during message handling'),
                       exc_info=exc_info)
             ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
@ -524,10 +551,10 @@ _reply_proxy_create_sem = semaphore.Semaphore()
 
 def multicall(conf, context, topic, msg, timeout, connection_pool):
     """Make a call that returns multiple times."""
-    LOG.debug(_('Making synchronous call on %s ...'), topic)
+    LOG.debug('Making synchronous call on %s ...', topic)
     msg_id = uuid.uuid4().hex
     msg.update({'_msg_id': msg_id})
-    LOG.debug(_('MSG_ID is %s') % (msg_id))
+    LOG.debug('MSG_ID is %s' % (msg_id))
     _add_unique_id(msg)
     pack_context(msg, context)
 
@ -553,7 +580,7 @@ def call(conf, context, topic, msg, timeout, connection_pool):
 
 def cast(conf, context, topic, msg, connection_pool):
     """Sends a message on a topic without waiting for a response."""
-    LOG.debug(_('Making asynchronous cast on %s...'), topic)
+    LOG.debug('Making asynchronous cast on %s...', topic)
     _add_unique_id(msg)
     pack_context(msg, context)
     with ConnectionContext(conf, connection_pool) as conn:
@ -562,7 +589,7 @@ def cast(conf, context, topic, msg, connection_pool):
 
 def fanout_cast(conf, context, topic, msg, connection_pool):
     """Sends a message on a fanout exchange without waiting for a response."""
-    LOG.debug(_('Making asynchronous fanout cast...'))
+    LOG.debug('Making asynchronous fanout cast...')
     _add_unique_id(msg)
     pack_context(msg, context)
     with ConnectionContext(conf, connection_pool) as conn:
@ -590,7 +617,7 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg,
 
 def notify(conf, context, topic, msg, connection_pool, envelope):
     """Sends a notification event on a topic."""
-    LOG.debug(_('Sending %(event_type)s on %(topic)s'),
+    LOG.debug('Sending %(event_type)s on %(topic)s',
               dict(event_type=msg.get('event_type'),
                    topic=topic))
     _add_unique_id(msg)
@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@ -24,17 +22,19 @@ import traceback
 from oslo.config import cfg
 import six
 
-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _, _LE
 from muranoapi.openstack.common import importutils
 from muranoapi.openstack.common import jsonutils
 from muranoapi.openstack.common import local
 from muranoapi.openstack.common import log as logging
+from muranoapi.openstack.common import versionutils
 
 
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
 
+_RPC_ENVELOPE_VERSION = '2.0'
 '''RPC Envelope Version.
 
 This version number applies to the top level structure of messages sent out.
@ -47,7 +47,7 @@ This version number applies to the message envelope that is used in the
 serialization done inside the rpc layer.  See serialize_msg() and
 deserialize_msg().
 
-The current message format (version 2.0) is very simple.  It is:
+The current message format (version 2.0) is very simple.  It is::
 
     {
         'oslo.version': <RPC Envelope Version as a String>,
@ -65,7 +65,6 @@ We will JSON encode the application message payload.  The message envelope,
 which includes the JSON encoded application message body, will be passed down
 to the messaging libraries as a dict.
 '''
-_RPC_ENVELOPE_VERSION = '2.0'
 
 _VERSION_KEY = 'oslo.version'
 _MESSAGE_KEY = 'oslo.message'
@ -86,8 +85,8 @@ class RPCException(Exception):
             except Exception:
                 # kwargs doesn't match a variable in the message
                 # log the issue and the kwargs
-                LOG.exception(_('Exception in string format operation'))
-                for name, value in kwargs.iteritems():
+                LOG.exception(_LE('Exception in string format operation'))
+                for name, value in six.iteritems(kwargs):
                     LOG.error("%s: %s" % (name, value))
                 # at least get the core message out if something happened
                 message = self.msg_fmt
@ -265,11 +264,15 @@ def _safe_log(log_func, msg, msg_data):
 
     def _fix_passwords(d):
         """Sanitizes the password fields in the dictionary."""
-        for k in d.iterkeys():
+        for k in six.iterkeys(d):
             if k.lower().find('password') != -1:
                 d[k] = '<SANITIZED>'
             elif k.lower() in SANITIZE:
                 d[k] = '<SANITIZED>'
+            elif isinstance(d[k], list):
+                for e in d[k]:
+                    if isinstance(e, dict):
+                        _fix_passwords(e)
             elif isinstance(d[k], dict):
                 _fix_passwords(d[k])
         return d
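Note: the new elif adds recursion into lists, so password fields nested in a list of dicts are masked too (previously only dict values recursed). A self-contained sketch; the SANITIZE list below is a stand-in for the module-level constant:

    import six

    SANITIZE = ['auth_token']

    def _fix_passwords(d):
        for k in six.iterkeys(d):
            if k.lower().find('password') != -1 or k.lower() in SANITIZE:
                d[k] = '<SANITIZED>'
            elif isinstance(d[k], list):
                for e in d[k]:
                    if isinstance(e, dict):
                        _fix_passwords(e)
            elif isinstance(d[k], dict):
                _fix_passwords(d[k])
        return d

    msg = {'args': {'servers': [{'admin_password': 'secret'}]}}
    print(_fix_passwords(msg))
    # {'args': {'servers': [{'admin_password': '<SANITIZED>'}]}}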
@ -286,7 +289,7 @@ def serialize_remote_exception(failure_info, log_failure=True):
     tb = traceback.format_exception(*failure_info)
     failure = failure_info[1]
     if log_failure:
-        LOG.error(_("Returning exception %s to caller"),
+        LOG.error(_LE("Returning exception %s to caller"),
                   six.text_type(failure))
         LOG.error(tb)
 
@ -441,19 +444,15 @@ def client_exceptions(*exceptions):
     return outer
 
 
+# TODO(sirp): we should deprecate this in favor of
+# using `versionutils.is_compatible` directly
 def version_is_compatible(imp_version, version):
     """Determine whether versions are compatible.
 
     :param imp_version: The version implemented
     :param version: The version requested by an incoming message.
     """
-    version_parts = version.split('.')
-    imp_version_parts = imp_version.split('.')
-    if int(version_parts[0]) != int(imp_version_parts[0]):  # Major
-        return False
-    if int(version_parts[1]) > int(imp_version_parts[1]):  # Minor
-        return False
-    return True
+    return versionutils.is_compatible(version, imp_version)
 
 
 def serialize_msg(raw_msg):
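Note: the compatibility rule delegated to versionutils.is_compatible is unchanged: same major version, and the requested minor must not exceed the implemented one. Re-stated minimally below (a paraphrase, not the versionutils source):

    def is_compatible(requested, current):
        req = [int(p) for p in requested.split('.')]
        cur = [int(p) for p in current.split('.')]
        if req[0] != cur[0]:      # major must match exactly
            return False
        return req[1] <= cur[1]   # requested minor may not be newer

    assert is_compatible('1.1', '1.3')       # older request, newer impl: OK
    assert not is_compatible('1.4', '1.3')   # request newer than impl
    assert not is_compatible('2.0', '1.3')   # major mismatch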
@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -83,6 +81,8 @@ On the client side, the same changes should be made as in example 1.  The
 minimum version that supports the new parameter should be specified.
 """
 
+import six
+
 from muranoapi.openstack.common.rpc import common as rpc_common
 from muranoapi.openstack.common.rpc import serializer as rpc_serializer
@ -121,7 +121,7 @@ class RpcDispatcher(object):
         :returns: A new set of deserialized args
         """
         new_kwargs = dict()
-        for argname, arg in kwargs.iteritems():
+        for argname, arg in six.iteritems(kwargs):
             new_kwargs[argname] = self.serializer.deserialize_entity(context,
                                                                      arg)
         return new_kwargs
@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -13,6 +11,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+
 """Fake RPC implementation which calls proxy methods directly with no
 queues.  Casts will block, but this is very useful for tests.
 """
@ -26,6 +25,7 @@ import json
 import time
 
 import eventlet
+import six
 
 from muranoapi.openstack.common.rpc import common as rpc_common
@ -69,7 +69,7 @@ class Consumer(object):
             # Caller might have called ctxt.reply() manually
             for (reply, failure) in ctxt._response:
                 if failure:
-                    raise failure[0], failure[1], failure[2]
+                    six.reraise(failure[0], failure[1], failure[2])
                 res.append(reply)
             #  if ending not 'sent'...we might have more data to
             #  return from the function itself
@ -140,13 +140,13 @@ def multicall(conf, context, topic, msg, timeout=None):
     if not method:
         return
     args = msg.get('args', {})
-    version = msg.get('version', None)
-    namespace = msg.get('namespace', None)
+    version = msg.get('version')
+    namespace = msg.get('namespace')
 
     try:
         consumer = CONSUMERS[topic][0]
     except (KeyError, IndexError):
-        return iter([None])
+        raise rpc_common.Timeout("No consumers available")
     else:
         return consumer.call(context, version, method, namespace, args,
                              timeout)
@ -185,8 +185,8 @@ def fanout_cast(conf, context, topic, msg):
     if not method:
         return
     args = msg.get('args', {})
-    version = msg.get('version', None)
-    namespace = msg.get('namespace', None)
+    version = msg.get('version')
+    namespace = msg.get('namespace')
 
     for consumer in CONSUMERS.get(topic, []):
         try:
@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -28,9 +26,10 @@ import kombu.connection
 import kombu.entity
 import kombu.messaging
 from oslo.config import cfg
+import six
 
 from muranoapi.openstack.common import excutils
-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _, _LE, _LI
 from muranoapi.openstack.common import network_utils
 from muranoapi.openstack.common.rpc import amqp as rpc_amqp
 from muranoapi.openstack.common.rpc import common as rpc_common
@ -39,9 +38,9 @@ from muranoapi.openstack.common import sslutils
 kombu_opts = [
     cfg.StrOpt('kombu_ssl_version',
                default='',
-               help='SSL version to use (valid only if SSL enabled). '
-                    'valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may '
-                    'be available on some distributions'
+               help='If SSL is enabled, the SSL version to use. Valid '
+                    'values are TLSv1, SSLv23 and SSLv3. SSLv2 might '
+                    'be available on some distributions.'
                ),
     cfg.StrOpt('kombu_ssl_keyfile',
                default='',
@ -64,33 +63,33 @@ kombu_opts = [
                help='RabbitMQ HA cluster host:port pairs'),
     cfg.BoolOpt('rabbit_use_ssl',
                 default=False,
-                help='connect over SSL for RabbitMQ'),
+                help='Connect over SSL for RabbitMQ'),
     cfg.StrOpt('rabbit_userid',
                default='guest',
-               help='the RabbitMQ userid'),
+               help='The RabbitMQ userid'),
     cfg.StrOpt('rabbit_password',
                default='guest',
-               help='the RabbitMQ password',
+               help='The RabbitMQ password',
                secret=True),
     cfg.StrOpt('rabbit_virtual_host',
                default='/',
-               help='the RabbitMQ virtual host'),
+               help='The RabbitMQ virtual host'),
     cfg.IntOpt('rabbit_retry_interval',
                default=1,
-               help='how frequently to retry connecting with RabbitMQ'),
+               help='How frequently to retry connecting with RabbitMQ'),
     cfg.IntOpt('rabbit_retry_backoff',
                default=2,
-               help='how long to backoff for between retries when connecting '
+               help='How long to backoff for between retries when connecting '
                     'to RabbitMQ'),
     cfg.IntOpt('rabbit_max_retries',
                default=0,
-               help='maximum retries with trying to connect to RabbitMQ '
-                    '(the default of 0 implies an infinite retry count)'),
+               help='Maximum number of RabbitMQ connection retries. '
+                    'Default is 0 (infinite retry count)'),
     cfg.BoolOpt('rabbit_ha_queues',
                 default=False,
-                help='use H/A queues in RabbitMQ (x-ha-policy: all).'
-                     'You need to wipe RabbitMQ database when '
-                     'changing this option.'),
+                help='Use HA queues in RabbitMQ (x-ha-policy: all). '
+                     'If you change this option, you must wipe the '
+                     'RabbitMQ database.'),
 
 ]
 
@@ -146,29 +145,23 @@ class ConsumerBase(object):
         Messages that are processed without exception are ack'ed.
 
         If the message processing generates an exception, it will be
-        ack'ed if ack_on_error=True. Otherwise it will be .reject()'ed.
-        Rejection is better than waiting for the message to timeout.
-        Rejected messages are immediately requeued.
+        ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
         """
 
-        ack_msg = False
         try:
             msg = rpc_common.deserialize_msg(message.payload)
             callback(msg)
-            ack_msg = True
         except Exception:
             if self.ack_on_error:
-                ack_msg = True
-                LOG.exception(_("Failed to process message"
-                                " ... skipping it."))
+                LOG.exception(_LE("Failed to process message"
+                                  " ... skipping it."))
+                message.ack()
             else:
-                LOG.exception(_("Failed to process message"
-                                " ... will requeue."))
-        finally:
-            if ack_msg:
-                message.ack()
-            else:
-                message.reject()
+                LOG.exception(_LE("Failed to process message"
+                                  " ... will requeue."))
+                message.requeue()
+        else:
+            message.ack()
 
     def consume(self, *args, **kwargs):
         """Actually declare the consumer on the amqp channel. This will
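The consumer rewrite above is the behavioral core of this hunk: success and failure are now separated with try/except/else, and a failed message is requeue()'d back to the broker instead of reject()'ed, while ack-on-error still acks and drops it. A minimal sketch of the same control flow, assuming a kombu-style message object exposing ack() and requeue() (the names `handle` and `callback` are stand-ins, not names from this module):

    def handle(message, callback, ack_on_error=True):
        # Same ack/requeue pattern as the hunk above.
        try:
            callback(message.payload)
        except Exception:
            if ack_on_error:
                message.ack()      # drop the poisoned message
            else:
                message.requeue()  # hand it back for redelivery
        else:
            message.ack()          # normal success path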
@@ -452,7 +445,7 @@ class Connection(object):
             'virtual_host': self.conf.rabbit_virtual_host,
         }
 
-        for sp_key, value in server_params.iteritems():
+        for sp_key, value in six.iteritems(server_params):
             p_key = server_params_to_kombu_params.get(sp_key, sp_key)
             params[p_key] = value
 
@@ -490,12 +483,8 @@ class Connection(object):
             # future with this?
             ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
 
-        if not ssl_params:
-            # Just have the default behavior
-            return True
-        else:
-            # Return the extended behavior
-            return ssl_params
+        # Return the extended behavior or just have the default behavior
+        return ssl_params or True
 
     def _connect(self, params):
         """Connect to rabbit. Re-establish any queues that may have
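The collapsed return works because an empty dict is falsy in Python: `ssl_params or True` evaluates to the populated options dict when any kombu SSL option was set, and to plain `True` (enable SSL with defaults) otherwise. For example:

    >>> {} or True
    True
    >>> {'cert_reqs': 2} or True
    {'cert_reqs': 2}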
@@ -503,7 +492,7 @@ class Connection(object):
         be handled by the caller.
         """
         if self.connection:
-            LOG.info(_("Reconnecting to AMQP server on "
+            LOG.info(_LI("Reconnecting to AMQP server on "
                      "%(hostname)s:%(port)d") % params)
             try:
                 self.connection.release()
@@ -525,7 +514,7 @@ class Connection(object):
             self.channel._new_queue('ae.undeliver')
         for consumer in self.consumers:
             consumer.reconnect(self.channel)
-        LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
+        LOG.info(_LI('Connected to AMQP server on %(hostname)s:%(port)d') %
                  params)
 
     def reconnect(self):
@@ -576,9 +565,9 @@ class Connection(object):
                 sleep_time = min(sleep_time, self.interval_max)
 
                 log_info['sleep_time'] = sleep_time
-                LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
+                LOG.error(_LE('AMQP server on %(hostname)s:%(port)d is '
                             'unreachable: %(err_str)s. Trying again in '
                             '%(sleep_time)d seconds.') % log_info)
                 time.sleep(sleep_time)
 
     def ensure(self, error_callback, method, *args, **kwargs):
@@ -630,12 +619,12 @@ class Connection(object):
 
         def _connect_error(exc):
             log_info = {'topic': topic, 'err_str': str(exc)}
-            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
+            LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
                       "%(err_str)s") % log_info)
 
         def _declare_consumer():
             consumer = consumer_cls(self.conf, self.channel, topic, callback,
-                                    self.consumer_num.next())
+                                    six.next(self.consumer_num))
             self.consumers.append(consumer)
             return consumer
 
@@ -648,11 +637,11 @@ class Connection(object):
 
         def _error_callback(exc):
             if isinstance(exc, socket.timeout):
-                LOG.debug(_('Timed out waiting for RPC response: %s') %
+                LOG.debug('Timed out waiting for RPC response: %s' %
                           str(exc))
                 raise rpc_common.Timeout()
             else:
-                LOG.exception(_('Failed to consume message from queue: %s') %
+                LOG.exception(_LE('Failed to consume message from queue: %s') %
                               str(exc))
                 info['do_consume'] = True
 
@@ -691,7 +680,7 @@ class Connection(object):
 
         def _error_callback(exc):
             log_info = {'topic': topic, 'err_str': str(exc)}
-            LOG.exception(_("Failed to publish message to topic "
+            LOG.exception(_LE("Failed to publish message to topic "
                           "'%(topic)s': %(err_str)s") % log_info)
 
         def _publish():
@@ -742,7 +731,7 @@ class Connection(object):
         it = self.iterconsume(limit=limit)
         while True:
             try:
-                it.next()
+                six.next(it)
             except StopIteration:
                 return
 
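The `it.next()` to `six.next(it)` and `iteritems` to `six.iteritems()` substitutions in this file are the Python 3 portability half of the sync: `iterator.next()` and `dict.iteritems()` exist only on Python 2. A short sketch of the portable forms:

    import six

    it = iter([1, 2, 3])
    print(six.next(it))  # 1 on both Python 2 and Python 3

    params = {'hostname': 'localhost', 'port': 5672}  # example values
    for key, value in six.iteritems(params):  # .iteritems() on 2, .items() on 3
        print(key, value)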
@@ -793,6 +782,7 @@ class Connection(object):
             callback=callback,
             connection_pool=rpc_amqp.get_connection_pool(self.conf,
                                                          Connection),
+            wait_for_consumers=not ack_on_error
         )
         self.proxy_callbacks.append(callback_wrapper)
         self.declare_topic_consumer(
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation
 # Copyright 2011 - 2012, Red Hat, Inc.
 #
@@ -18,14 +16,14 @@
 import functools
 import itertools
 import time
-import uuid
 
 import eventlet
 import greenlet
 from oslo.config import cfg
+import six
 
 from muranoapi.openstack.common import excutils
-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _, _LE, _LI
 from muranoapi.openstack.common import importutils
 from muranoapi.openstack.common import jsonutils
 from muranoapi.openstack.common import log as logging
@@ -67,6 +65,17 @@ qpid_opts = [
     cfg.BoolOpt('qpid_tcp_nodelay',
                 default=True,
                 help='Disable Nagle algorithm'),
+    # NOTE(russellb) If any additional versions are added (beyond 1 and 2),
+    # this file could probably use some additional refactoring so that the
+    # differences between each version are split into different classes.
+    cfg.IntOpt('qpid_topology_version',
+               default=1,
+               help="The qpid topology version to use. Version 1 is what "
+                    "was originally used by impl_qpid. Version 2 includes "
+                    "some backwards-incompatible changes that allow broker "
+                    "federation to work. Users should update to version 2 "
+                    "when they are able to take everything down, as it "
+                    "requires a clean break."),
 ]
 
 cfg.CONF.register_opts(qpid_opts)
@@ -74,10 +83,17 @@ cfg.CONF.register_opts(qpid_opts)
 JSON_CONTENT_TYPE = 'application/json; charset=utf8'
 
 
+def raise_invalid_topology_version(conf):
+    msg = (_("Invalid value for qpid_topology_version: %d") %
+           conf.qpid_topology_version)
+    LOG.error(msg)
+    raise Exception(msg)
+
+
 class ConsumerBase(object):
     """Consumer base class."""
 
-    def __init__(self, session, callback, node_name, node_opts,
+    def __init__(self, conf, session, callback, node_name, node_opts,
                  link_name, link_opts):
         """Declare a queue on an amqp session.
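One wrinkle worth flagging: `raise_invalid_topology_version` is declared with a required `conf` parameter, yet every call site in the hunks below invokes it with no arguments, so an invalid `qpid_topology_version` would actually surface as a `TypeError` rather than the intended `Exception`. A corrected sketch (hypothetical, not part of this commit) could default to the global config:

    from oslo.config import cfg

    def raise_invalid_topology_version(conf=None):
        # Fall back to the global CONF so argument-less calls still work;
        # assumes a module-level LOG as in the file above.
        conf = conf or cfg.CONF
        msg = ("Invalid value for qpid_topology_version: %d" %
               conf.qpid_topology_version)
        LOG.error(msg)
        raise Exception(msg)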
@@ -95,34 +111,48 @@ class ConsumerBase(object):
         self.receiver = None
         self.session = None
 
-        addr_opts = {
-            "create": "always",
-            "node": {
-                "type": "topic",
-                "x-declare": {
+        if conf.qpid_topology_version == 1:
+            addr_opts = {
+                "create": "always",
+                "node": {
+                    "type": "topic",
+                    "x-declare": {
+                        "durable": True,
+                        "auto-delete": True,
+                    },
+                },
+                "link": {
                     "durable": True,
-                    "auto-delete": True,
+                    "x-declare": {
+                        "durable": False,
+                        "auto-delete": True,
+                        "exclusive": False,
+                    },
                 },
-            },
-            "link": {
-                "name": link_name,
-                "durable": True,
-                "x-declare": {
-                    "durable": False,
-                    "auto-delete": True,
-                    "exclusive": False,
+            }
+            addr_opts["node"]["x-declare"].update(node_opts)
+        elif conf.qpid_topology_version == 2:
+            addr_opts = {
+                "link": {
+                    "x-declare": {
+                        "auto-delete": True,
+                        "exclusive": False,
+                    },
                 },
-            },
-        }
-        addr_opts["node"]["x-declare"].update(node_opts)
+            }
+        else:
+            raise_invalid_topology_version()
+
         addr_opts["link"]["x-declare"].update(link_opts)
+        if link_name:
+            addr_opts["link"]["name"] = link_name
 
         self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
 
         self.connect(session)
 
     def connect(self, session):
-        """Declare the reciever on connect."""
+        """Declare the receiver on connect."""
         self._declare_receiver(session)
 
     def reconnect(self, session):
@@ -158,7 +188,7 @@ class ConsumerBase(object):
             msg = rpc_common.deserialize_msg(message.content)
             self.callback(msg)
         except Exception:
-            LOG.exception(_("Failed to process message... skipping it."))
+            LOG.exception(_LE("Failed to process message... skipping it."))
         finally:
             # TODO(sandy): Need support for optional ack_on_error.
             self.session.acknowledge(message)
@@ -181,16 +211,26 @@ class DirectConsumer(ConsumerBase):
         'callback' is the callback to call when messages are received
         """
 
-        super(DirectConsumer, self).__init__(
-            session, callback,
-            "%s/%s" % (msg_id, msg_id),
-            {"type": "direct"},
-            msg_id,
-            {
-                "auto-delete": conf.amqp_auto_delete,
-                "exclusive": True,
-                "durable": conf.amqp_durable_queues,
-            })
+        link_opts = {
+            "auto-delete": conf.amqp_auto_delete,
+            "exclusive": True,
+            "durable": conf.amqp_durable_queues,
+        }
+
+        if conf.qpid_topology_version == 1:
+            node_name = "%s/%s" % (msg_id, msg_id)
+            node_opts = {"type": "direct"}
+            link_name = msg_id
+        elif conf.qpid_topology_version == 2:
+            node_name = "amq.direct/%s" % msg_id
+            node_opts = {}
+            link_name = None
+        else:
+            raise_invalid_topology_version()
+
+        super(DirectConsumer, self).__init__(conf, session, callback,
+                                             node_name, node_opts, link_name,
+                                             link_opts)
 
 
 class TopicConsumer(ConsumerBase):
@@ -208,14 +248,20 @@ class TopicConsumer(ConsumerBase):
         """
 
         exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
-        super(TopicConsumer, self).__init__(
-            session, callback,
-            "%s/%s" % (exchange_name, topic),
-            {}, name or topic,
-            {
-                "auto-delete": conf.amqp_auto_delete,
-                "durable": conf.amqp_durable_queues,
-            })
+        link_opts = {
+            "auto-delete": conf.amqp_auto_delete,
+            "durable": conf.amqp_durable_queues,
+        }
+
+        if conf.qpid_topology_version == 1:
+            node_name = "%s/%s" % (exchange_name, topic)
+        elif conf.qpid_topology_version == 2:
+            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
+        else:
+            raise_invalid_topology_version()
+
+        super(TopicConsumer, self).__init__(conf, session, callback, node_name,
+                                            {}, name or topic, link_opts)
 
 
 class FanoutConsumer(ConsumerBase):
@@ -230,52 +276,53 @@ class FanoutConsumer(ConsumerBase):
         """
         self.conf = conf
 
-        super(FanoutConsumer, self).__init__(
-            session, callback,
-            "%s_fanout" % topic,
-            {"durable": False, "type": "fanout"},
-            "%s_fanout_%s" % (topic, uuid.uuid4().hex),
-            {"exclusive": True})
+        link_opts = {"exclusive": True}
 
-    def reconnect(self, session):
-        topic = self.get_node_name().rpartition('_fanout')[0]
-        params = {
-            'session': session,
-            'topic': topic,
-            'callback': self.callback,
-        }
+        if conf.qpid_topology_version == 1:
+            node_name = "%s_fanout" % topic
+            node_opts = {"durable": False, "type": "fanout"}
+        elif conf.qpid_topology_version == 2:
+            node_name = "amq.topic/fanout/%s" % topic
+            node_opts = {}
+        else:
+            raise_invalid_topology_version()
 
-        self.__init__(conf=self.conf, **params)
-
-        super(FanoutConsumer, self).reconnect(session)
+        super(FanoutConsumer, self).__init__(conf, session, callback,
+                                             node_name, node_opts, None,
+                                             link_opts)
 
 
 class Publisher(object):
     """Base Publisher class."""
 
-    def __init__(self, session, node_name, node_opts=None):
+    def __init__(self, conf, session, node_name, node_opts=None):
         """Init the Publisher class with the exchange_name, routing_key,
         and other options
         """
         self.sender = None
         self.session = session
 
-        addr_opts = {
-            "create": "always",
-            "node": {
-                "type": "topic",
-                "x-declare": {
-                    "durable": False,
-                    # auto-delete isn't implemented for exchanges in qpid,
-                    # but put in here anyway
-                    "auto-delete": True,
-                },
-            },
-        }
-        if node_opts:
-            addr_opts["node"]["x-declare"].update(node_opts)
+        if conf.qpid_topology_version == 1:
+            addr_opts = {
+                "create": "always",
+                "node": {
+                    "type": "topic",
+                    "x-declare": {
+                        "durable": False,
+                        # auto-delete isn't implemented for exchanges in qpid,
+                        # but put in here anyway
+                        "auto-delete": True,
+                    },
+                },
+            }
+            if node_opts:
+                addr_opts["node"]["x-declare"].update(node_opts)
 
-        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
+            self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
+        elif conf.qpid_topology_version == 2:
+            self.address = node_name
+        else:
+            raise_invalid_topology_version()
 
         self.reconnect(session)
 
@@ -319,39 +366,73 @@ class DirectPublisher(Publisher):
     """Publisher class for 'direct'."""
     def __init__(self, conf, session, msg_id):
         """Init a 'direct' publisher."""
-        super(DirectPublisher, self).__init__(session, msg_id,
-                                              {"type": "Direct"})
+
+        if conf.qpid_topology_version == 1:
+            node_name = msg_id
+            node_opts = {"type": "direct"}
+        elif conf.qpid_topology_version == 2:
+            node_name = "amq.direct/%s" % msg_id
+            node_opts = {}
+        else:
+            raise_invalid_topology_version()
+
+        super(DirectPublisher, self).__init__(conf, session, node_name,
+                                              node_opts)
 
 
 class TopicPublisher(Publisher):
     """Publisher class for 'topic'."""
     def __init__(self, conf, session, topic):
-        """init a 'topic' publisher.
+        """Init a 'topic' publisher.
         """
         exchange_name = rpc_amqp.get_control_exchange(conf)
-        super(TopicPublisher, self).__init__(session,
-                                             "%s/%s" % (exchange_name, topic))
+
+        if conf.qpid_topology_version == 1:
+            node_name = "%s/%s" % (exchange_name, topic)
+        elif conf.qpid_topology_version == 2:
+            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
+        else:
+            raise_invalid_topology_version()
+
+        super(TopicPublisher, self).__init__(conf, session, node_name)
 
 
 class FanoutPublisher(Publisher):
     """Publisher class for 'fanout'."""
     def __init__(self, conf, session, topic):
-        """init a 'fanout' publisher.
+        """Init a 'fanout' publisher.
         """
-        super(FanoutPublisher, self).__init__(
-            session,
-            "%s_fanout" % topic, {"type": "fanout"})
+
+        if conf.qpid_topology_version == 1:
+            node_name = "%s_fanout" % topic
+            node_opts = {"type": "fanout"}
+        elif conf.qpid_topology_version == 2:
+            node_name = "amq.topic/fanout/%s" % topic
+            node_opts = {}
+        else:
+            raise_invalid_topology_version()
+
+        super(FanoutPublisher, self).__init__(conf, session, node_name,
+                                              node_opts)
 
 
 class NotifyPublisher(Publisher):
     """Publisher class for notifications."""
     def __init__(self, conf, session, topic):
-        """init a 'topic' publisher.
+        """Init a 'topic' publisher.
         """
         exchange_name = rpc_amqp.get_control_exchange(conf)
-        super(NotifyPublisher, self).__init__(session,
-                                              "%s/%s" % (exchange_name, topic),
-                                              {"durable": True})
+        node_opts = {"durable": True}
+
+        if conf.qpid_topology_version == 1:
+            node_name = "%s/%s" % (exchange_name, topic)
+        elif conf.qpid_topology_version == 2:
+            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
+        else:
+            raise_invalid_topology_version()
+
+        super(NotifyPublisher, self).__init__(conf, session, node_name,
+                                              node_opts)
 
 
 class Connection(object):
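The practical difference between the two topology versions is the node address handed to qpid.messaging: version 1 declares ad-hoc exchanges inline (with JSON x-declare options appended after a semicolon), while version 2 targets the broker's built-in amq.direct and amq.topic exchanges so broker federation can work. Illustrative node names, using example values for the exchange, topic and message id:

    exchange_name, topic, msg_id = 'openstack', 'reports', 'abc123'

    v1_topic = '%s/%s' % (exchange_name, topic)                  # 'openstack/reports'
    v2_topic = 'amq.topic/topic/%s/%s' % (exchange_name, topic)  # standard topic exchange
    v2_fanout = 'amq.topic/fanout/%s' % topic                    # 'amq.topic/fanout/reports'
    v2_direct = 'amq.direct/%s' % msg_id                         # 'amq.direct/abc123'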
@@ -431,13 +512,13 @@ class Connection(object):
                 self.connection.open()
             except qpid_exceptions.ConnectionError as e:
                 msg_dict = dict(e=e, delay=delay)
-                msg = _("Unable to connect to AMQP server: %(e)s. "
+                msg = _LE("Unable to connect to AMQP server: %(e)s. "
                         "Sleeping %(delay)s seconds") % msg_dict
                 LOG.error(msg)
                 time.sleep(delay)
                 delay = min(2 * delay, 60)
             else:
-                LOG.info(_('Connected to AMQP server on %s'), broker)
+                LOG.info(_LI('Connected to AMQP server on %s'), broker)
                 break
 
         self.session = self.connection.session()
@@ -446,11 +527,11 @@ class Connection(object):
         consumers = self.consumers
         self.consumers = {}
 
-        for consumer in consumers.itervalues():
+        for consumer in six.itervalues(consumers):
             consumer.reconnect(self.session)
             self._register_consumer(consumer)
 
-        LOG.debug(_("Re-established AMQP queues"))
+        LOG.debug("Re-established AMQP queues")
 
     def ensure(self, error_callback, method, *args, **kwargs):
         while True:
@@ -489,7 +570,7 @@ class Connection(object):
         """
         def _connect_error(exc):
             log_info = {'topic': topic, 'err_str': str(exc)}
-            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
+            LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
                       "%(err_str)s") % log_info)
 
         def _declare_consumer():
@@ -504,11 +585,11 @@ class Connection(object):
 
         def _error_callback(exc):
             if isinstance(exc, qpid_exceptions.Empty):
-                LOG.debug(_('Timed out waiting for RPC response: %s') %
+                LOG.debug('Timed out waiting for RPC response: %s' %
                           str(exc))
                 raise rpc_common.Timeout()
             else:
-                LOG.exception(_('Failed to consume message from queue: %s') %
+                LOG.exception(_LE('Failed to consume message from queue: %s') %
                               str(exc))
 
         def _consume():
@@ -516,7 +597,7 @@ class Connection(object):
             try:
                 self._lookup_consumer(nxt_receiver).consume()
             except Exception:
-                LOG.exception(_("Error processing message. Skipping it."))
+                LOG.exception(_LE("Error processing message. Skipping it."))
 
         for iteration in itertools.count(0):
             if limit and iteration >= limit:
@@ -543,7 +624,7 @@ class Connection(object):
 
         def _connect_error(exc):
             log_info = {'topic': topic, 'err_str': str(exc)}
-            LOG.exception(_("Failed to publish message to topic "
+            LOG.exception(_LE("Failed to publish message to topic "
                           "'%(topic)s': %(err_str)s") % log_info)
 
         def _publisher_send():
@@ -604,7 +685,7 @@ class Connection(object):
         it = self.iterconsume(limit=limit)
         while True:
             try:
-                it.next()
+                six.next(it)
             except StopIteration:
                 return
 
@@ -665,6 +746,7 @@ class Connection(object):
             callback=callback,
             connection_pool=rpc_amqp.get_connection_pool(self.conf,
                                                          Connection),
+            wait_for_consumers=not ack_on_error
         )
         self.proxy_callbacks.append(callback_wrapper)
 
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 Cloudscaling Group, Inc
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -25,9 +23,11 @@ import uuid
 import eventlet
 import greenlet
 from oslo.config import cfg
+import six
+from six import moves
 
 from muranoapi.openstack.common import excutils
-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _, _LE, _LI
 from muranoapi.openstack.common import importutils
 from muranoapi.openstack.common import jsonutils
 from muranoapi.openstack.common.rpc import common as rpc_common
@@ -80,7 +80,7 @@ CONF = cfg.CONF
 CONF.register_opts(zmq_opts)
 
 ZMQ_CTX = None  # ZeroMQ Context, must be global.
-matchmaker = None  # memoized matchmaker object
+matchmaker = None  # memorized matchmaker object
 
 
 def _serialize(data):
@@ -93,12 +93,12 @@ def _serialize(data):
         return jsonutils.dumps(data, ensure_ascii=True)
     except TypeError:
         with excutils.save_and_reraise_exception():
-            LOG.error(_("JSON serialization failed."))
+            LOG.error(_LE("JSON serialization failed."))
 
 
 def _deserialize(data):
     """Deserialization wrapper."""
-    LOG.debug(_("Deserializing: %s"), data)
+    LOG.debug("Deserializing: %s", data)
     return jsonutils.loads(data)
 
 
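The pattern repeated throughout these logging hunks follows the oslo i18n guidelines of that cycle: messages at error/info/warning level keep a translation marker but switch to the level-specific helpers (`_LE`, `_LI`, `_LW`), while debug messages drop translation entirely and, where possible, pass arguments lazily so the string is only formatted when debug logging is enabled. In miniature (assuming the module-level LOG and gettextutils imports shown above):

    LOG.debug("Deserializing: %s", data)           # untranslated, lazy args
    LOG.error(_LE("JSON serialization failed."))   # translated at ERROR level
    LOG.info(_LI("Connected to AMQP server on %s"), broker)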
@@ -133,9 +133,9 @@ class ZmqSocket(object):
         str_data = {'addr': addr, 'type': self.socket_s(),
                     'subscribe': subscribe, 'bind': bind}
 
-        LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
-        LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
-        LOG.debug(_("-> bind: %(bind)s"), str_data)
+        LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
+        LOG.debug("-> Subscribed to %(subscribe)s", str_data)
+        LOG.debug("-> bind: %(bind)s", str_data)
 
         try:
             if bind:
@@ -155,7 +155,7 @@ class ZmqSocket(object):
         """Subscribe."""
         if not self.can_sub:
             raise RPCException("Cannot subscribe on this socket.")
-        LOG.debug(_("Subscribing to %s"), msg_filter)
+        LOG.debug("Subscribing to %s", msg_filter)
 
         try:
             self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
@@ -192,7 +192,7 @@ class ZmqSocket(object):
             # it would be much worse if some of the code calling this
             # were to fail. For now, lets log, and later evaluate
             # if we can safely raise here.
-            LOG.error("ZeroMQ socket could not be closed.")
+            LOG.error(_LE("ZeroMQ socket could not be closed."))
         self.sock = None
 
     def recv(self, **kwargs):
@@ -221,7 +221,7 @@ class ZmqClient(object):
             return
 
         rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
-        zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
+        zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items())
         self.outq.send(map(bytes,
                        (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
 
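`reduce` is the one builtin touched here that disappeared in Python 3 (it moved to `functools`); `six.moves.reduce` resolves to the right object on either interpreter. Equivalent standalone sketch:

    from six import moves

    envelope = {'k1': 'v1', 'k2': 'v2'}  # example payload
    flat = moves.reduce(lambda x, y: x + y, envelope.items())
    print(flat)  # a single flattened tuple, e.g. ('k1', 'v1', 'k2', 'v2')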
@@ -264,7 +264,7 @@ class InternalContext(object):
 
     def _get_response(self, ctx, proxy, topic, data):
         """Process a curried message and cast the result to topic."""
-        LOG.debug(_("Running func with context: %s"), ctx.to_dict())
+        LOG.debug("Running func with context: %s", ctx.to_dict())
         data.setdefault('version', None)
         data.setdefault('args', {})
 
@@ -277,13 +277,13 @@ class InternalContext(object):
             # ignore these since they are just from shutdowns
             pass
         except rpc_common.ClientException as e:
-            LOG.debug(_("Expected exception during message handling (%s)") %
+            LOG.debug("Expected exception during message handling (%s)" %
                       e._exc_info[1])
             return {'exc':
                     rpc_common.serialize_remote_exception(e._exc_info,
                                                           log_failure=False)}
         except Exception:
-            LOG.error(_("Exception during message handling"))
+            LOG.error(_LE("Exception during message handling"))
             return {'exc':
                     rpc_common.serialize_remote_exception(sys.exc_info())}
 
@@ -302,7 +302,7 @@ class InternalContext(object):
             self._get_response(ctx, proxy, topic, payload),
             ctx.replies)
 
-        LOG.debug(_("Sending reply"))
+        LOG.debug("Sending reply")
         _multi_send(_cast, ctx, topic, {
             'method': '-process_reply',
             'args': {
@@ -336,7 +336,7 @@ class ConsumerBase(object):
         # processed internally. (non-valid method name)
         method = data.get('method')
         if not method:
-            LOG.error(_("RPC message did not include method."))
+            LOG.error(_LE("RPC message did not include method."))
             return
 
         # Internal method
@@ -368,7 +368,7 @@ class ZmqBaseReactor(ConsumerBase):
     def register(self, proxy, in_addr, zmq_type_in,
                  in_bind=True, subscribe=None):
 
-        LOG.info(_("Registering reactor"))
+        LOG.info(_LI("Registering reactor"))
 
         if zmq_type_in not in (zmq.PULL, zmq.SUB):
             raise RPCException("Bad input socktype")
@@ -380,11 +380,12 @@ class ZmqBaseReactor(ConsumerBase):
         self.proxies[inq] = proxy
         self.sockets.append(inq)
 
-        LOG.info(_("In reactor registered"))
+        LOG.info(_LI("In reactor registered"))
 
     def consume_in_thread(self):
+        @excutils.forever_retry_uncaught_exceptions
         def _consume(sock):
-            LOG.info(_("Consuming socket"))
+            LOG.info(_LI("Consuming socket"))
             while True:
                 self.consume(sock)
 
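The newly applied `@excutils.forever_retry_uncaught_exceptions` decorator keeps the consuming green thread alive if `self.consume(sock)` ever raises something unexpected; previously an uncaught exception would kill the loop silently. A simplified stand-in for what such a decorator does (the real implementation in openstack.common.excutils also de-duplicates repeated error logs):

    import functools
    import time
    import traceback

    def forever_retry_uncaught_exceptions(func):
        # Sketch only: retry the wrapped callable forever, logging failures.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            while True:
                try:
                    return func(*args, **kwargs)
                except Exception:
                    traceback.print_exc()
                    time.sleep(1)  # avoid a tight crash loop
        return wrapper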
@@ -434,7 +435,7 @@ class ZmqProxy(ZmqBaseReactor):
 
         if topic not in self.topic_proxy:
             def publisher(waiter):
-                LOG.info(_("Creating proxy for topic: %s"), topic)
+                LOG.info(_LI("Creating proxy for topic: %s"), topic)
 
                 try:
                     # The topic is received over the network,
@@ -472,14 +473,14 @@ class ZmqProxy(ZmqBaseReactor):
             try:
                 wait_sock_creation.wait()
             except RPCException:
-                LOG.error(_("Topic socket file creation failed."))
+                LOG.error(_LE("Topic socket file creation failed."))
                 return
 
         try:
             self.topic_proxy[topic].put_nowait(data)
         except eventlet.queue.Full:
-            LOG.error(_("Local per-topic backlog buffer full for topic "
+            LOG.error(_LE("Local per-topic backlog buffer full for topic "
                         "%(topic)s. Dropping message.") % {'topic': topic})
 
     def consume_in_thread(self):
         """Runs the ZmqProxy service."""
@@ -494,8 +495,8 @@ class ZmqProxy(ZmqBaseReactor):
         except os.error:
             if not os.path.isdir(ipc_dir):
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_("Required IPC directory does not exist at"
+                    LOG.error(_LE("Required IPC directory does not exist at"
                                 " %s") % (ipc_dir, ))
         try:
             self.register(consumption_proxy,
                           consume_in,
@@ -503,11 +504,11 @@ class ZmqProxy(ZmqBaseReactor):
         except zmq.ZMQError:
             if os.access(ipc_dir, os.X_OK):
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_("Permission denied to IPC directory at"
+                    LOG.error(_LE("Permission denied to IPC directory at"
                                 " %s") % (ipc_dir, ))
             with excutils.save_and_reraise_exception():
-                LOG.error(_("Could not create ZeroMQ receiver daemon. "
+                LOG.error(_LE("Could not create ZeroMQ receiver daemon. "
                             "Socket may already be in use."))
 
         super(ZmqProxy, self).consume_in_thread()
@@ -522,8 +523,8 @@ def unflatten_envelope(packenv):
     h = {}
     try:
         while True:
-            k = i.next()
-            h[k] = i.next()
+            k = six.next(i)
+            h[k] = six.next(i)
     except StopIteration:
         return h
 
@@ -540,7 +541,7 @@ class ZmqReactor(ZmqBaseReactor):
     def consume(self, sock):
         #TODO(ewindisch): use zero-copy (i.e. references, not copying)
         data = sock.recv()
-        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
+        LOG.debug("CONSUMER RECEIVED DATA: %s", data)
 
         proxy = self.proxies[sock]
 
@@ -559,7 +560,7 @@ class ZmqReactor(ZmqBaseReactor):
             # Unmarshal only after verifying the message.
             ctx = RpcContext.unmarshal(data[3])
         else:
-            LOG.error(_("ZMQ Envelope version unsupported or unknown."))
+            LOG.error(_LE("ZMQ Envelope version unsupported or unknown."))
             return
 
         self.pool.spawn_n(self.process, proxy, ctx, request)
@@ -587,14 +588,14 @@ class Connection(rpc_common.Connection):
             topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
 
         if topic in self.topics:
-            LOG.info(_("Skipping topic registration. Already registered."))
+            LOG.info(_LI("Skipping topic registration. Already registered."))
             return
 
         # Receive messages from (local) proxy
         inaddr = "ipc://%s/zmq_topic_%s" % \
             (CONF.rpc_zmq_ipc_dir, topic)
 
-        LOG.debug(_("Consumer is a zmq.%s"),
+        LOG.debug("Consumer is a zmq.%s",
                   ['PULL', 'SUB'][sock_type == zmq.SUB])
 
         self.reactor.register(proxy, inaddr, sock_type,
@@ -646,7 +647,7 @@ def _call(addr, context, topic, msg, timeout=None,
     # Replies always come into the reply service.
     reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
 
-    LOG.debug(_("Creating payload"))
+    LOG.debug("Creating payload")
     # Curry the original request into a reply method.
     mcontext = RpcContext.marshal(context)
     payload = {
@@ -659,7 +660,7 @@ def _call(addr, context, topic, msg, timeout=None,
         }
     }
 
-    LOG.debug(_("Creating queue socket for reply waiter"))
+    LOG.debug("Creating queue socket for reply waiter")
 
     # Messages arriving async.
     # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
@@ -672,14 +673,14 @@ def _call(addr, context, topic, msg, timeout=None,
             zmq.SUB, subscribe=msg_id, bind=False
         )
 
-        LOG.debug(_("Sending cast"))
+        LOG.debug("Sending cast")
         _cast(addr, context, topic, payload, envelope)
 
-        LOG.debug(_("Cast sent; Waiting reply"))
+        LOG.debug("Cast sent; Waiting reply")
         # Blocks until receives reply
         msg = msg_waiter.recv()
-        LOG.debug(_("Received message: %s"), msg)
-        LOG.debug(_("Unpacking response"))
+        LOG.debug("Received message: %s", msg)
+        LOG.debug("Unpacking response")
 
         if msg[2] == 'cast':  # Legacy version
             raw_msg = _deserialize(msg[-1])[-1]
@@ -718,10 +719,10 @@ def _multi_send(method, context, topic, msg, timeout=None,
     Dispatches to the matchmaker and sends message to all relevant hosts.
     """
     conf = CONF
-    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
+    LOG.debug("%(msg)s" % {'msg': ' '.join(map(pformat, (topic, msg)))})
 
     queues = _get_matchmaker().queues(topic)
-    LOG.debug(_("Sending message(s) to: %s"), queues)
+    LOG.debug("Sending message(s) to: %s", queues)
 
     # Don't stack if we have no matchmaker results
     if not queues:
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 Cloudscaling Group, Inc
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,6 +11,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+
 """
 The MatchMaker classes should except a Topic or Fanout exchange key and
 return keys for direct exchanges, per (approximate) AMQP parlance.
@@ -23,7 +22,7 @@ import contextlib
 import eventlet
 from oslo.config import cfg
 
-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _, _LI
 from muranoapi.openstack.common import log as logging
 
 
@@ -92,7 +91,7 @@ class MatchMakerBase(object):
         """Acknowledge that a key.host is alive.
 
         Used internally for updating heartbeats, but may also be used
-        publically to acknowledge a system is alive (i.e. rpc message
+        publicly to acknowledge a system is alive (i.e. rpc message
         successfully sent to host)
         """
         pass
@@ -174,7 +173,7 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
         """Acknowledge that a host.topic is alive.
 
         Used internally for updating heartbeats, but may also be used
-        publically to acknowledge a system is alive (i.e. rpc message
+        publicly to acknowledge a system is alive (i.e. rpc message
         successfully sent to host)
         """
         raise NotImplementedError("Must implement ack_alive")
@@ -214,7 +213,7 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
         self.hosts.discard(host)
         self.backend_unregister(key, '.'.join((key, host)))
 
-        LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
+        LOG.info(_LI("Matchmaker unregistered: %(key)s, %(host)s"),
                  {'key': key, 'host': host})
 
     def start_heartbeat(self):
@@ -248,9 +247,7 @@ class DirectBinding(Binding):
     that it maps directly to a host, thus direct.
     """
     def test(self, key):
-        if '.' in key:
-            return True
-        return False
+        return '.' in key
 
 
 class TopicBinding(Binding):
@@ -262,17 +259,13 @@ class TopicBinding(Binding):
     matches that of a direct exchange.
     """
     def test(self, key):
-        if '.' not in key:
-            return True
-        return False
+        return '.' not in key
 
 
 class FanoutBinding(Binding):
     """Match on fanout keys, where key starts with 'fanout.' string."""
     def test(self, key):
-        if key.startswith('fanout~'):
-            return True
-        return False
+        return key.startswith('fanout~')
 
 
 class StubExchange(Exchange):
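The three Binding rewrites above are pure simplifications: an `if cond: return True ... return False` ladder is just `return cond` when the condition already evaluates to a boolean. For instance:

    def is_fanout(key):
        # equivalent to: if key.startswith('fanout~'): return True / return False
        return key.startswith('fanout~')

    assert is_fanout('fanout~host1') and not is_fanout('topic.host1')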
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2013 Cloudscaling Group, Inc
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,6 +11,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+
 """
 The MatchMaker classes should accept a Topic or Fanout exchange key and
 return keys for direct exchanges, per (approximate) AMQP parlance.
@@ -95,7 +94,7 @@ class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
         if not redis:
             raise ImportError("Failed to import module redis.")
 
-        self.redis = redis.StrictRedis(
+        self.redis = redis.Redis(
             host=CONF.matchmaker_redis.host,
             port=CONF.matchmaker_redis.port,
             password=CONF.matchmaker_redis.password)
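A note on this hunk: in the redis-py client, `StrictRedis` follows the official Redis command signatures, while the legacy `Redis` class keeps backwards-compatible argument orders for a few commands (notably `setex`, `lrem` and `zadd`); the switch here presumably favors compatibility with older deployments. Construction is identical either way (host and port below are example values):

    import redis

    client = redis.Redis(host='127.0.0.1', port=6379, password=None)
    client.ping()  # same call with redis.StrictRedis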
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011-2013 Cloudscaling Group, Inc
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,6 +11,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+
 """
 The MatchMaker classes should except a Topic or Fanout exchange key and
 return keys for direct exchanges, per (approximate) AMQP parlance.
@@ -23,7 +22,7 @@ import json
 
 from oslo.config import cfg
 
-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _LW
 from muranoapi.openstack.common import log as logging
 from muranoapi.openstack.common.rpc import matchmaker as mm
 
@@ -54,18 +53,15 @@ class RingExchange(mm.Exchange):
         if ring:
             self.ring = ring
         else:
-            fh = open(CONF.matchmaker_ring.ringfile, 'r')
-            self.ring = json.load(fh)
-            fh.close()
+            with open(CONF.matchmaker_ring.ringfile, 'r') as fh:
+                self.ring = json.load(fh)
 
         self.ring0 = {}
         for k in self.ring.keys():
             self.ring0[k] = itertools.cycle(self.ring[k])
 
     def _ring_has(self, key):
-        if key in self.ring0:
-            return True
-        return False
+        return key in self.ring0
 
 
 class RoundRobinRingExchange(RingExchange):
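Replacing the open/load/close triple with a `with` block guarantees the ringfile handle is closed even when `json.load` raises. The same pattern in isolation (the path argument is an example):

    import json

    def load_ring(path):
        # The file is closed on normal exit *and* if json.load() raises.
        with open(path, 'r') as fh:
            return json.load(fh)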
@@ -76,8 +72,8 @@ class RoundRobinRingExchange(RingExchange):
     def run(self, key):
         if not self._ring_has(key):
             LOG.warn(
-                _("No key defining hosts for topic '%s', "
-                  "see ringfile") % (key, )
+                _LW("No key defining hosts for topic '%s', "
+                    "see ringfile") % (key, )
             )
             return []
         host = next(self.ring0[key])
@@ -94,8 +90,8 @@ class FanoutRingExchange(RingExchange):
         nkey = key.split('fanout~')[1:][0]
         if not self._ring_has(nkey):
             LOG.warn(
-                _("No key defining hosts for topic '%s', "
-                  "see ringfile") % (nkey, )
+                _LW("No key defining hosts for topic '%s', "
+                    "see ringfile") % (nkey, )
             )
             return []
         return map(lambda x: (key + '.' + x, x), self.ring[nkey])
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012-2013 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -21,6 +19,7 @@ For more information about rpc API version numbers, see:
     rpc/dispatcher.py
 """
 
+import six
 
 from muranoapi.openstack.common import rpc
 from muranoapi.openstack.common.rpc import common as rpc_common
@@ -36,7 +35,7 @@ class RpcProxy(object):
     rpc API.
     """
 
-    # The default namespace, which can be overriden in a subclass.
+    # The default namespace, which can be overridden in a subclass.
     RPC_API_NAMESPACE = None
 
     def __init__(self, topic, default_version, version_cap=None,
@@ -100,7 +99,7 @@ class RpcProxy(object):
         :returns: A new set of serialized arguments
         """
         new_kwargs = dict()
-        for argname, arg in kwargs.iteritems():
+        for argname, arg in six.iteritems(kwargs):
             new_kwargs[argname] = self.serializer.serialize_entity(context,
                                                                    arg)
         return new_kwargs
@@ -16,10 +16,12 @@
 
 import abc
 
+import six
+
 
+@six.add_metaclass(abc.ABCMeta)
 class Serializer(object):
     """Generic (de-)serialization definition base class."""
-    __metaclass__ = abc.ABCMeta
 
     @abc.abstractmethod
     def serialize_entity(self, context, entity):
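The `__metaclass__` attribute is honored only by Python 2; Python 3 silently ignores it, so without this change the Serializer ABC would stop enforcing its abstract methods there. `six.add_metaclass` rebuilds the class with the metaclass in a way both interpreters respect. Minimal sketch:

    import abc

    import six

    @six.add_metaclass(abc.ABCMeta)
    class Base(object):
        @abc.abstractmethod
        def run(self):
            """Subclasses must implement run()."""

    # Instantiating Base() now raises TypeError on Python 2 and 3 alike.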
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -17,7 +15,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from muranoapi.openstack.common.gettextutils import _  # noqa
 from muranoapi.openstack.common import log as logging
 from muranoapi.openstack.common import rpc
 from muranoapi.openstack.common.rpc import dispatcher as rpc_dispatcher
@ -46,7 +43,7 @@ class Service(service.Service):
|
|||||||
super(Service, self).start()
|
super(Service, self).start()
|
||||||
|
|
||||||
self.conn = rpc.create_connection(new=True)
|
self.conn = rpc.create_connection(new=True)
|
||||||
LOG.debug(_("Creating Consumer connection for Service %s") %
|
LOG.debug("Creating Consumer connection for Service %s" %
|
||||||
self.topic)
|
self.topic)
|
||||||
|
|
||||||
dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
|
dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
|
||||||
|
muranoapi/openstack/common/rpc/zmq_receiver.py (3 changes; Executable file → Normal file)
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2011 Justin Santa Barbara
@@ -20,21 +18,30 @@
 """Generic Node base class for all workers that run on hosts."""
 
 import errno
+import logging as std_logging
 import os
 import random
 import signal
 import sys
 import time
 
+try:
+    # Importing just the symbol here because the io module does not
+    # exist in Python 2.6.
+    from io import UnsupportedOperation  # noqa
+except ImportError:
+    # Python 2.6
+    UnsupportedOperation = None
+
 import eventlet
 from eventlet import event
-import logging as std_logging
 from oslo.config import cfg
 
 from muranoapi.openstack.common import eventlet_backdoor
-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _LE, _LI, _LW
 from muranoapi.openstack.common import importutils
 from muranoapi.openstack.common import log as logging
+from muranoapi.openstack.common import systemd
 from muranoapi.openstack.common import threadgroup
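The guarded import of `UnsupportedOperation` is the standard fallback pattern for symbols that exist only on some interpreter versions: try the import, and on `ImportError` bind a sentinel so later code can still name the exception. A generic sketch of the same idea:

    try:
        from io import UnsupportedOperation  # noqa
    except ImportError:
        # Bind a sentinel so the name always exists; except clauses
        # written against it are effectively dead on interpreters
        # where the import failed.
        UnsupportedOperation = None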
@@ -43,6 +50,53 @@ CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
 
+def _sighup_supported():
+    return hasattr(signal, 'SIGHUP')
+
+
+def _is_daemon():
+    # The process group for a foreground process will match the
+    # process group of the controlling terminal. If those values do
+    # not match, or ioctl() fails on the stdout file handle, we assume
+    # the process is running in the background as a daemon.
+    # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
+    try:
+        is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
+    except OSError as err:
+        if err.errno == errno.ENOTTY:
+            # Assume we are a daemon because there is no terminal.
+            is_daemon = True
+        else:
+            raise
+    except UnsupportedOperation:
+        # Could not get the fileno for stdout, so we must be a daemon.
+        is_daemon = True
+    return is_daemon
+
+
+def _is_sighup_and_daemon(signo):
+    if not (_sighup_supported() and signo == signal.SIGHUP):
+        # Avoid checking if we are a daemon, because the signal isn't
+        # SIGHUP.
+        return False
+    return _is_daemon()
+
+
+def _signo_to_signame(signo):
+    signals = {signal.SIGTERM: 'SIGTERM',
+               signal.SIGINT: 'SIGINT'}
+    if _sighup_supported():
+        signals[signal.SIGHUP] = 'SIGHUP'
+    return signals[signo]
+
+
+def _set_signals_handler(handler):
+    signal.signal(signal.SIGTERM, handler)
+    signal.signal(signal.SIGINT, handler)
+    if _sighup_supported():
+        signal.signal(signal.SIGHUP, handler)
+
+
 class Launcher(object):
     """Launch one or more services and wait for them to complete."""
 
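These helpers consolidate the SIGTERM/SIGINT/SIGHUP wiring that was previously duplicated across the launchers; SIGHUP is only touched where the platform defines it (it is absent on Windows, for example). A toy demonstration of the same dispatch-table idea (POSIX-only; the handler and names are illustrative):

    import os
    import signal

    # Map the signals the launchers care about to readable names,
    # adding SIGHUP only where the platform provides it.
    signames = {signal.SIGTERM: 'SIGTERM', signal.SIGINT: 'SIGINT'}
    if hasattr(signal, 'SIGHUP'):
        signames[signal.SIGHUP] = 'SIGHUP'


    def handler(signo, frame):
        print('caught %s' % signames.get(signo, signo))


    for signo in signames:
        signal.signal(signo, handler)

    os.kill(os.getpid(), signal.SIGINT)  # prints: caught SIGINT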
@@ -81,6 +135,15 @@ class Launcher(object):
         """
         self.services.wait()
 
+    def restart(self):
+        """Reload config files and restart service.
+
+        :returns: None
+
+        """
+        cfg.CONF.reload_config_files()
+        self.services.restart()
+
 
 class SignalExit(SystemExit):
     def __init__(self, signo, exccode=1):
@@ -91,26 +154,28 @@ class SignalExit(SystemExit):
 class ServiceLauncher(Launcher):
     def _handle_signal(self, signo, frame):
         # Allow the process to be killed again and die from natural causes
-        signal.signal(signal.SIGTERM, signal.SIG_DFL)
-        signal.signal(signal.SIGINT, signal.SIG_DFL)
+        _set_signals_handler(signal.SIG_DFL)
 
         raise SignalExit(signo)
 
-    def wait(self):
-        signal.signal(signal.SIGTERM, self._handle_signal)
-        signal.signal(signal.SIGINT, self._handle_signal)
+    def handle_signal(self):
+        _set_signals_handler(self._handle_signal)
 
-        LOG.debug(_('Full set of CONF:'))
+    def _wait_for_exit_or_signal(self, ready_callback=None):
+        status = None
+        signo = 0
+
+        LOG.debug('Full set of CONF:')
         CONF.log_opt_values(LOG, std_logging.DEBUG)
 
-        status = None
         try:
+            if ready_callback:
+                ready_callback()
             super(ServiceLauncher, self).wait()
         except SignalExit as exc:
-            signame = {signal.SIGTERM: 'SIGTERM',
-                       signal.SIGINT: 'SIGINT'}[exc.signo]
-            LOG.info(_('Caught %s, exiting'), signame)
+            signame = _signo_to_signame(exc.signo)
+            LOG.info(_LI('Caught %s, exiting'), signame)
             status = exc.code
+            signo = exc.signo
         except SystemExit as exc:
             status = exc.code
         finally:
@@ -120,8 +185,17 @@ class ServiceLauncher(Launcher):
                 rpc.cleanup()
             except Exception:
                 # We're shutting down, so it doesn't matter at this point.
-                LOG.exception(_('Exception during rpc cleanup.'))
-        return status
+                LOG.exception(_LE('Exception during rpc cleanup.'))
+
+        return status, signo
+
+    def wait(self, ready_callback=None):
+        while True:
+            self.handle_signal()
+            status, signo = self._wait_for_exit_or_signal(ready_callback)
+            if not _is_sighup_and_daemon(signo):
+                return status
+            self.restart()
 
 
 class ServiceWrapper(object):
@@ -133,43 +207,82 @@ class ServiceWrapper(object):
 
 
 class ProcessLauncher(object):
-    def __init__(self):
+    def __init__(self, wait_interval=0.01):
+        """Constructor.
+
+        :param wait_interval: The interval to sleep for between checks
+                              of child process exit.
+        """
         self.children = {}
         self.sigcaught = None
         self.running = True
+        self.wait_interval = wait_interval
         rfd, self.writepipe = os.pipe()
         self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
+        self.handle_signal()
 
-        signal.signal(signal.SIGTERM, self._handle_signal)
-        signal.signal(signal.SIGINT, self._handle_signal)
+    def handle_signal(self):
+        _set_signals_handler(self._handle_signal)
 
     def _handle_signal(self, signo, frame):
         self.sigcaught = signo
         self.running = False
 
         # Allow the process to be killed again and die from natural causes
-        signal.signal(signal.SIGTERM, signal.SIG_DFL)
-        signal.signal(signal.SIGINT, signal.SIG_DFL)
+        _set_signals_handler(signal.SIG_DFL)
 
     def _pipe_watcher(self):
         # This will block until the write end is closed when the parent
         # dies unexpectedly
         self.readpipe.read()
 
-        LOG.info(_('Parent process has died unexpectedly, exiting'))
+        LOG.info(_LI('Parent process has died unexpectedly, exiting'))
 
         sys.exit(1)
 
-    def _child_process(self, service):
+    def _child_process_handle_signal(self):
        # Setup child signal handlers differently
        def _sigterm(*args):
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)
 
+        def _sighup(*args):
+            signal.signal(signal.SIGHUP, signal.SIG_DFL)
+            raise SignalExit(signal.SIGHUP)
+
         signal.signal(signal.SIGTERM, _sigterm)
+        if _sighup_supported():
+            signal.signal(signal.SIGHUP, _sighup)
         # Block SIGINT and let the parent send us a SIGTERM
         signal.signal(signal.SIGINT, signal.SIG_IGN)
 
+    def _child_wait_for_exit_or_signal(self, launcher):
+        status = 0
+        signo = 0
+
+        # NOTE(johannes): All exceptions are caught to ensure this
+        # doesn't fallback into the loop spawning children. It would
+        # be bad for a child to spawn more children.
+        try:
+            launcher.wait()
+        except SignalExit as exc:
+            signame = _signo_to_signame(exc.signo)
+            LOG.info(_LI('Caught %s, exiting'), signame)
+            status = exc.code
+            signo = exc.signo
+        except SystemExit as exc:
+            status = exc.code
+        except BaseException:
+            LOG.exception(_LE('Unhandled exception'))
+            status = 2
+        finally:
+            launcher.stop()
+
+        return status, signo
+
+    def _child_process(self, service):
+        self._child_process_handle_signal()
+
         # Reopen the eventlet hub to make sure we don't share an epoll
         # fd with parent and/or siblings, which would be bad
         eventlet.hubs.use_hub()
@@ -184,7 +297,7 @@ class ProcessLauncher(object):
 
         launcher = Launcher()
         launcher.launch_service(service)
-        launcher.wait()
+        return launcher
 
     def _start_child(self, wrap):
         if len(wrap.forktimes) > wrap.workers:
@@ -193,7 +306,7 @@ class ProcessLauncher(object):
             # start up quickly but ensure we don't fork off children that
             # die instantly too quickly.
             if time.time() - wrap.forktimes[0] < wrap.workers:
-                LOG.info(_('Forking too fast, sleeping'))
+                LOG.info(_LI('Forking too fast, sleeping'))
                 time.sleep(1)
 
             wrap.forktimes.pop(0)
@@ -202,28 +315,17 @@ class ProcessLauncher(object):
 
         pid = os.fork()
         if pid == 0:
-            # NOTE(johannes): All exceptions are caught to ensure this
-            # doesn't fallback into the loop spawning children. It would
-            # be bad for a child to spawn more children.
-            status = 0
-            try:
-                self._child_process(wrap.service)
-            except SignalExit as exc:
-                signame = {signal.SIGTERM: 'SIGTERM',
-                           signal.SIGINT: 'SIGINT'}[exc.signo]
-                LOG.info(_('Caught %s, exiting'), signame)
-                status = exc.code
-            except SystemExit as exc:
-                status = exc.code
-            except BaseException:
-                LOG.exception(_('Unhandled exception'))
-                status = 2
-            finally:
-                wrap.service.stop()
+            launcher = self._child_process(wrap.service)
+            while True:
+                self._child_process_handle_signal()
+                status, signo = self._child_wait_for_exit_or_signal(launcher)
+                if not _is_sighup_and_daemon(signo):
+                    break
+                launcher.restart()
 
             os._exit(status)
 
-        LOG.info(_('Started child %d'), pid)
+        LOG.info(_LI('Started child %d'), pid)
 
         wrap.children.add(pid)
         self.children[pid] = wrap
@@ -233,7 +335,7 @@ class ProcessLauncher(object):
     def launch_service(self, service, workers=1):
         wrap = ServiceWrapper(service, workers)
 
-        LOG.info(_('Starting %d workers'), wrap.workers)
+        LOG.info(_LI('Starting %d workers'), wrap.workers)
         while self.running and len(wrap.children) < wrap.workers:
             self._start_child(wrap)
 
@@ -250,43 +352,55 @@ class ProcessLauncher(object):
 
         if os.WIFSIGNALED(status):
             sig = os.WTERMSIG(status)
-            LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
+            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                      dict(pid=pid, sig=sig))
         else:
             code = os.WEXITSTATUS(status)
-            LOG.info(_('Child %(pid)s exited with status %(code)d'),
+            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                      dict(pid=pid, code=code))
 
         if pid not in self.children:
-            LOG.warning(_('pid %d not in child list'), pid)
+            LOG.warning(_LW('pid %d not in child list'), pid)
             return None
 
         wrap = self.children.pop(pid)
         wrap.children.remove(pid)
         return wrap
 
-    def wait(self):
-        """Loop waiting on children to die and respawning as necessary."""
-
-        LOG.debug(_('Full set of CONF:'))
-        CONF.log_opt_values(LOG, std_logging.DEBUG)
-
+    def _respawn_children(self):
         while self.running:
             wrap = self._wait_child()
             if not wrap:
                 # Yield to other threads if no children have exited
                 # Sleep for a short time to avoid excessive CPU usage
                 # (see bug #1095346)
-                eventlet.greenthread.sleep(.01)
+                eventlet.greenthread.sleep(self.wait_interval)
                 continue
 
             while self.running and len(wrap.children) < wrap.workers:
                 self._start_child(wrap)
 
-            if self.sigcaught:
-                signame = {signal.SIGTERM: 'SIGTERM',
-                           signal.SIGINT: 'SIGINT'}[self.sigcaught]
-                LOG.info(_('Caught %s, stopping children'), signame)
+    def wait(self):
+        """Loop waiting on children to die and respawning as necessary."""
+
+        LOG.debug('Full set of CONF:')
+        CONF.log_opt_values(LOG, std_logging.DEBUG)
+
+        try:
+            while True:
+                self.handle_signal()
+                self._respawn_children()
+                if self.sigcaught:
+                    signame = _signo_to_signame(self.sigcaught)
+                    LOG.info(_LI('Caught %s, stopping children'), signame)
+                if not _is_sighup_and_daemon(self.sigcaught):
+                    break
+
+                for pid in self.children:
+                    os.kill(pid, signal.SIGHUP)
+                self.running = True
+                self.sigcaught = None
+        except eventlet.greenlet.GreenletExit:
+            LOG.info(_LI("Wait called after thread killed. Cleaning up."))
 
         for pid in self.children:
             try:
@@ -297,7 +411,7 @@ class ProcessLauncher(object):
 
         # Wait for children to die
         if self.children:
-            LOG.info(_('Waiting on %d children to exit'), len(self.children))
+            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
             while self.children:
                 self._wait_child()
 
@@ -311,6 +425,10 @@ class Service(object):
         # signal that the service is done shutting itself down:
         self._done = event.Event()
 
+    def reset(self):
+        # NOTE(Fengqian): docs for Event.reset() recommend against using it
+        self._done = event.Event()
+
     def start(self):
         pass
 
@@ -353,6 +471,13 @@ class Services(object):
     def wait(self):
         self.tg.wait()
 
+    def restart(self):
+        self.stop()
+        self.done = event.Event()
+        for restart_service in self.services:
+            restart_service.reset()
+            self.tg.add_thread(self.run_service, restart_service, self.done)
+
     @staticmethod
     def run_service(service, done):
         """Service start wrapper.
@@ -363,14 +488,16 @@ class Services(object):
 
         """
         service.start()
+        systemd.notify_once()
         done.wait()
 
 
-def launch(service, workers=None):
-    if workers:
-        launcher = ProcessLauncher()
-        launcher.launch_service(service, workers=workers)
-    else:
+def launch(service, workers=1):
+    if workers is None or workers == 1:
         launcher = ServiceLauncher()
         launcher.launch_service(service)
+    else:
+        launcher = ProcessLauncher()
+        launcher.launch_service(service, workers=workers)
 
     return launcher
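Note that `launch()` also flips its default: `workers` now defaults to 1, and a single worker gets the in-process `ServiceLauncher`, while anything greater forks one child per worker via `ProcessLauncher`. The dispatch rule in isolation (stub strings stand in for the launcher classes):

    def pick_launcher(workers=1):
        # Mirrors the new launch() dispatch: one worker (or None)
        # stays in-process, more than one forks child workers.
        if workers is None or workers == 1:
            return 'ServiceLauncher'
        return 'ProcessLauncher(workers=%d)' % workers

    print(pick_launcher())           # ServiceLauncher
    print(pick_launcher(workers=4))  # ProcessLauncher(workers=4)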
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2013 IBM Corp.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,22 +17,22 @@ import ssl
 
 from oslo.config import cfg
 
-from muranoapi.openstack.common.gettextutils import _  # noqa
+from muranoapi.openstack.common.gettextutils import _
 
 
 ssl_opts = [
     cfg.StrOpt('ca_file',
                default=None,
                help="CA certificate file to use to verify "
-                    "connecting clients"),
+                    "connecting clients."),
     cfg.StrOpt('cert_file',
                default=None,
                help="Certificate file to use when starting "
-                    "the server securely"),
+                    "the server securely."),
     cfg.StrOpt('key_file',
                default=None,
                help="Private key file to use when starting "
-                    "the server securely"),
+                    "the server securely."),
 ]
muranoapi/openstack/common/systemd.py (new file, 104 lines)
@@ -0,0 +1,104 @@
+# Copyright 2012-2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper module for systemd service readiness notification.
+"""
+
+import os
+import socket
+import sys
+
+from muranoapi.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _abstractify(socket_name):
+    if socket_name.startswith('@'):
+        # abstract namespace socket
+        socket_name = '\0%s' % socket_name[1:]
+    return socket_name
+
+
+def _sd_notify(unset_env, msg):
+    notify_socket = os.getenv('NOTIFY_SOCKET')
+    if notify_socket:
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+        try:
+            sock.connect(_abstractify(notify_socket))
+            sock.sendall(msg)
+            if unset_env:
+                del os.environ['NOTIFY_SOCKET']
+        except EnvironmentError:
+            LOG.debug("Systemd notification failed", exc_info=True)
+        finally:
+            sock.close()
+
+
+def notify():
+    """Send notification to Systemd that service is ready.
+
+    For details see
+    http://www.freedesktop.org/software/systemd/man/sd_notify.html
+    """
+    _sd_notify(False, 'READY=1')
+
+
+def notify_once():
+    """Send notification once to Systemd that service is ready.
+
+    Systemd sets NOTIFY_SOCKET environment variable with the name of the
+    socket listening for notifications from services.
+    This method removes the NOTIFY_SOCKET environment variable to ensure
+    notification is sent only once.
+    """
+    _sd_notify(True, 'READY=1')
+
+
+def onready(notify_socket, timeout):
+    """Wait for systemd style notification on the socket.
+
+    :param notify_socket: local socket address
+    :type notify_socket:  string
+    :param timeout:       socket timeout
+    :type timeout:        float
+    :returns:             0 service ready
+                          1 service not ready
+                          2 timeout occurred
+    """
+    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+    sock.settimeout(timeout)
+    sock.bind(_abstractify(notify_socket))
+    try:
+        msg = sock.recv(512)
+    except socket.timeout:
+        return 2
+    finally:
+        sock.close()
+    if 'READY=1' in msg:
+        return 0
+    else:
+        return 1
+
+
+if __name__ == '__main__':
+    # simple CLI for testing
+    if len(sys.argv) == 1:
+        notify()
+    elif len(sys.argv) >= 2:
+        timeout = float(sys.argv[1])
+        notify_socket = os.getenv('NOTIFY_SOCKET')
+        if notify_socket:
+            retval = onready(notify_socket, timeout)
+            sys.exit(retval)
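The module speaks the sd_notify readiness protocol directly over the notification socket, so no systemd client library is needed. One way to exercise both ends of it from a test, assuming a POSIX system (the socket name and the fork-based arrangement are illustrative, not from the murano code):

    import os
    import time

    from muranoapi.openstack.common import systemd

    # Play the systemd role ourselves: the parent listens on an
    # abstract-namespace socket while a forked child acts as the
    # service and reports readiness.
    os.environ['NOTIFY_SOCKET'] = '@test_notify'

    if os.fork() == 0:
        time.sleep(0.2)         # crude: give the parent time to bind
        systemd.notify_once()   # sends 'READY=1', unsets NOTIFY_SOCKET
        os._exit(0)

    print(systemd.onready('@test_notify', timeout=2.0))  # 0 means ready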
muranoapi/openstack/common/test.py (new file, 99 lines)
@@ -0,0 +1,99 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+##############################################################################
+##############################################################################
+##
+## DO NOT MODIFY THIS FILE
+##
+## This file is being graduated to the muranoapitest library. Please make all
+## changes there, and only backport critical fixes here. - dhellmann
+##
+##############################################################################
+##############################################################################
+
+"""Common utilities used in testing"""
+
+import logging
+import os
+import tempfile
+
+import fixtures
+import testtools
+
+_TRUE_VALUES = ('True', 'true', '1', 'yes')
+_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
+
+
+class BaseTestCase(testtools.TestCase):
+
+    def setUp(self):
+        super(BaseTestCase, self).setUp()
+        self._set_timeout()
+        self._fake_output()
+        self._fake_logs()
+        self.useFixture(fixtures.NestedTempfile())
+        self.useFixture(fixtures.TempHomeDir())
+        self.tempdirs = []
+
+    def _set_timeout(self):
+        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
+        try:
+            test_timeout = int(test_timeout)
+        except ValueError:
+            # If timeout value is invalid do not set a timeout.
+            test_timeout = 0
+        if test_timeout > 0:
+            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
+
+    def _fake_output(self):
+        if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
+            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
+            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
+        if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
+            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
+            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
+
+    def _fake_logs(self):
+        if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
+            level = logging.DEBUG
+        else:
+            level = logging.INFO
+        capture_logs = os.environ.get('OS_LOG_CAPTURE') in _TRUE_VALUES
+        if capture_logs:
+            self.useFixture(
+                fixtures.FakeLogger(
+                    format=_LOG_FORMAT,
+                    level=level,
+                    nuke_handlers=capture_logs,
+                )
+            )
+        else:
+            logging.basicConfig(format=_LOG_FORMAT, level=level)
+
+    def create_tempfiles(self, files, ext='.conf'):
+        tempfiles = []
+        for (basename, contents) in files:
+            if not os.path.isabs(basename):
+                (fd, path) = tempfile.mkstemp(prefix=basename, suffix=ext)
+            else:
+                path = basename + ext
+                fd = os.open(path, os.O_CREAT | os.O_WRONLY)
+            tempfiles.append(path)
+            try:
+                os.write(fd, contents)
+            finally:
+                os.close(fd)
+        return tempfiles
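Typical downstream usage of the new base class, as a minimal sketch (the test case itself is illustrative):

    from muranoapi.openstack.common import test


    class TempConfigTest(test.BaseTestCase):
        # BaseTestCase wires up the timeout, stdout/stderr capture and
        # log fixtures; create_tempfiles() yields throwaway config files.
        def test_create_tempfiles(self):
            paths = self.create_tempfiles([('sample', '[DEFAULT]\n')])
            with open(paths[0]) as f:
                self.assertIn('[DEFAULT]', f.read())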
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,10 +11,10 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+import threading
 
 import eventlet
 from eventlet import greenpool
-from eventlet import greenthread
 
 from muranoapi.openstack.common import log as logging
 from muranoapi.openstack.common import loopingcall
@@ -48,9 +46,12 @@ class Thread(object):
     def wait(self):
         return self.thread.wait()
 
+    def link(self, func, *args, **kwargs):
+        self.thread.link(func, *args, **kwargs)
+
 
 class ThreadGroup(object):
-    """The point of the ThreadGroup classis to:
+    """The point of the ThreadGroup class is to:
 
     * keep track of timers and greenthreads (making it easier to stop them
       when need be).
@@ -79,13 +80,17 @@ class ThreadGroup(object):
         gt = self.pool.spawn(callback, *args, **kwargs)
         th = Thread(gt, self)
         self.threads.append(th)
+        return th
 
     def thread_done(self, thread):
         self.threads.remove(thread)
 
     def stop(self):
-        current = greenthread.getcurrent()
-        for x in self.threads:
+        current = threading.current_thread()
+        # Iterate over a copy of self.threads so thread_done doesn't
+        # modify the list while we're iterating
+        for x in self.threads[:]:
             if x is current:
                 # don't kill the current thread.
                 continue
@@ -109,8 +114,11 @@ class ThreadGroup(object):
                 pass
             except Exception as ex:
                 LOG.exception(ex)
-        current = greenthread.getcurrent()
-        for x in self.threads:
+        current = threading.current_thread()
+        # Iterate over a copy of self.threads so thread_done doesn't
+        # modify the list while we're iterating
+        for x in self.threads[:]:
             if x is current:
                 continue
             try:
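Two things change in these loops: the current thread is now obtained through `threading.current_thread()` (which, on a monkey-patched eventlet runtime, still resolves to the running green thread), and iteration happens over a copy of `self.threads`, since `thread_done()` removes entries from that list as threads finish. Why the copy matters, in miniature:

    threads = ['a', 'b', 'c']

    # Removing from a list while iterating it skips elements;
    # iterating a snapshot makes concurrent removal safe.
    for t in threads[:]:
        threads.remove(t)

    print(threads)  # [] -- without the [:] copy, 'b' would survive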
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -21,6 +19,7 @@ Time related utilities and helper functions.
 
 import calendar
 import datetime
+import time
 
 import iso8601
 import six
@@ -49,9 +48,9 @@ def parse_isotime(timestr):
     try:
         return iso8601.parse_date(timestr)
     except iso8601.ParseError as e:
-        raise ValueError(e.message)
+        raise ValueError(six.text_type(e))
     except TypeError as e:
-        raise ValueError(e.message)
+        raise ValueError(six.text_type(e))
 
 
 def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
@@ -78,6 +77,9 @@ def is_older_than(before, seconds):
     """Return True if before is older than seconds."""
     if isinstance(before, six.string_types):
         before = parse_strtime(before).replace(tzinfo=None)
+    else:
+        before = before.replace(tzinfo=None)
+
     return utcnow() - before > datetime.timedelta(seconds=seconds)
 
 
@@ -85,11 +87,19 @@ def is_newer_than(after, seconds):
     """Return True if after is newer than seconds."""
     if isinstance(after, six.string_types):
         after = parse_strtime(after).replace(tzinfo=None)
+    else:
+        after = after.replace(tzinfo=None)
+
     return after - utcnow() > datetime.timedelta(seconds=seconds)
 
 
 def utcnow_ts():
     """Timestamp version of our utcnow function."""
+    if utcnow.override_time is None:
+        # NOTE(kgriffs): This is several times faster
+        # than going through calendar.timegm(...)
+        return int(time.time())
+
     return calendar.timegm(utcnow().timetuple())
 
 
@@ -104,19 +114,22 @@ def utcnow():
 
 
 def iso8601_from_timestamp(timestamp):
-    """Returns a iso8601 formated date from timestamp."""
+    """Returns an iso8601 formatted date from timestamp."""
     return isotime(datetime.datetime.utcfromtimestamp(timestamp))
 
 
 utcnow.override_time = None
 
 
-def set_time_override(override_time=datetime.datetime.utcnow()):
+def set_time_override(override_time=None):
     """Overrides utils.utcnow.
 
     Make it return a constant time or a list thereof, one at a time.
+
+    :param override_time: datetime instance or list thereof. If not
+                          given, defaults to the current UTC time.
     """
-    utcnow.override_time = override_time
+    utcnow.override_time = override_time or datetime.datetime.utcnow()
 
 
 def advance_time_delta(timedelta):
@@ -169,6 +182,15 @@ def delta_seconds(before, after):
     datetime objects (as a float, to microsecond resolution).
     """
     delta = after - before
+    return total_seconds(delta)
+
+
+def total_seconds(delta):
+    """Return the total seconds of datetime.timedelta object.
+
+    Compute total seconds of datetime.timedelta, datetime.timedelta
+    doesn't have method total_seconds in Python2.6, calculate it manually.
+    """
     try:
         return delta.total_seconds()
     except AttributeError:
@@ -179,8 +201,8 @@ def delta_seconds(before, after):
 def is_soon(dt, window):
     """Determines if time is going to happen in the next window seconds.
 
-    :params dt: the time
-    :params window: minimum seconds to remain to consider the time not soon
+    :param dt: the time
+    :param window: minimum seconds to remain to consider the time not soon
 
     :return: True if expiration is within the given duration
     """
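The `set_time_override()` fix deserves a note: the old signature evaluated `datetime.datetime.utcnow()` once, at import time, because Python evaluates default arguments when the `def` statement executes, so every override without an explicit argument froze tests at module-load time. The standard demonstration of the pitfall:

    import datetime
    import time


    def bad(ts=datetime.datetime.utcnow()):    # evaluated once, at def time
        return ts


    def good(ts=None):                         # evaluated on every call
        return ts or datetime.datetime.utcnow()


    first = bad()
    time.sleep(0.01)
    print(bad() == first)   # True  -- stuck at definition time
    print(good() == first)  # False -- fresh value per call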
muranoapi/openstack/common/versionutils.py (new file, 148 lines)
@@ -0,0 +1,148 @@
+# Copyright (c) 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helpers for comparing version strings.
+"""
+
+import functools
+import pkg_resources
+
+from muranoapi.openstack.common.gettextutils import _
+from muranoapi.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class deprecated(object):
+    """A decorator to mark callables as deprecated.
+
+    This decorator logs a deprecation message when the callable it decorates
+    is used. The message will include the release where the callable was
+    deprecated, the release where it may be removed and possibly an optional
+    replacement.
+
+    Examples:
+
+    1. Specifying the required deprecated release
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE)
+    ... def a(): pass
+
+    2. Specifying a replacement:
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
+    ... def b(): pass
+
+    3. Specifying the release where the functionality may be removed:
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
+    ... def c(): pass
+
+    """
+
+    FOLSOM = 'F'
+    GRIZZLY = 'G'
+    HAVANA = 'H'
+    ICEHOUSE = 'I'
+
+    _RELEASES = {
+        'F': 'Folsom',
+        'G': 'Grizzly',
+        'H': 'Havana',
+        'I': 'Icehouse',
+    }
+
+    _deprecated_msg_with_alternative = _(
+        '%(what)s is deprecated as of %(as_of)s in favor of '
+        '%(in_favor_of)s and may be removed in %(remove_in)s.')
+
+    _deprecated_msg_no_alternative = _(
+        '%(what)s is deprecated as of %(as_of)s and may be '
+        'removed in %(remove_in)s. It will not be superseded.')
+
+    def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
+        """Initialize decorator
+
+        :param as_of: the release deprecating the callable. Constants
+            are defined in this class for convenience.
+        :param in_favor_of: the replacement for the callable (optional)
+        :param remove_in: an integer specifying how many releases to wait
+            before removing (default: 2)
+        :param what: name of the thing being deprecated (default: the
+            callable's name)
+
+        """
+        self.as_of = as_of
+        self.in_favor_of = in_favor_of
+        self.remove_in = remove_in
+        self.what = what
+
+    def __call__(self, func):
+        if not self.what:
+            self.what = func.__name__ + '()'
+
+        @functools.wraps(func)
+        def wrapped(*args, **kwargs):
+            msg, details = self._build_message()
+            LOG.deprecated(msg, details)
+            return func(*args, **kwargs)
+        return wrapped
+
+    def _get_safe_to_remove_release(self, release):
+        # TODO(dstanek): this method will have to be reimplemented once
+        #    we get to the X release because once we get to the Y
+        #    release, what is Y+2?
+        new_release = chr(ord(release) + self.remove_in)
+        if new_release in self._RELEASES:
+            return self._RELEASES[new_release]
+        else:
+            return new_release
+
+    def _build_message(self):
+        details = dict(what=self.what,
+                       as_of=self._RELEASES[self.as_of],
+                       remove_in=self._get_safe_to_remove_release(self.as_of))
+
+        if self.in_favor_of:
+            details['in_favor_of'] = self.in_favor_of
+            msg = self._deprecated_msg_with_alternative
+        else:
+            msg = self._deprecated_msg_no_alternative
+        return msg, details
+
+
+def is_compatible(requested_version, current_version, same_major=True):
+    """Determine whether `requested_version` is satisfied by
+    `current_version`; in other words, `current_version` is >=
+    `requested_version`.
+
+    :param requested_version: version to check for compatibility
+    :param current_version: version to check against
+    :param same_major: if True, the major version must be identical between
+        `requested_version` and `current_version`. This is used when a
+        major-version difference indicates incompatibility between the two
+        versions. Since this is the common-case in practice, the default is
+        True.
+    :returns: True if compatible, False if not
+    """
+    requested_parts = pkg_resources.parse_version(requested_version)
+    current_parts = pkg_resources.parse_version(current_version)
+
+    if same_major and (requested_parts[0] != current_parts[0]):
+        return False
+
+    return current_parts >= requested_parts
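For orientation, this is how the new module is meant to be consumed, following its own docstring examples; `old_api`/`new_api` are hypothetical names:

    from muranoapi.openstack.common import versionutils


    @versionutils.deprecated(as_of=versionutils.deprecated.ICEHOUSE,
                             in_favor_of='new_api()')
    def old_api():
        # Each call logs a deprecation message naming Icehouse as the
        # deprecating release and new_api() as the replacement.
        return 42


    # is_compatible() treats a version as satisfied when the current
    # version is >= the requested one, optionally pinning the major.
    print(versionutils.is_compatible('1.1', '1.3'))                    # True
    print(versionutils.is_compatible('1.1', '2.0'))                    # False
    print(versionutils.is_compatible('1.1', '2.0', same_major=False))  # True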
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2013 IBM Corp.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -12,6 +12,7 @@ boto>=2.12.0,!=2.13.0
 sqlalchemy-migrate>=0.8.2,!=0.8.4
 httplib2>=0.7.5
 kombu>=2.4.8
+lockfile>=0.8
 pycrypto>=2.6
 iso8601>=0.1.8
 six>=1.5.2
@@ -10,7 +10,7 @@ openstack.nose_plugin>=0.7
 nosehtmloutput>=0.0.3
 sphinx>=1.1.2,<1.2
 requests>=1.1
-testtools>=0.9.32
+testtools>=0.9.34
 mock>=1.0
 
 # Optional packages that should be installed when testing
tools/config/check_uptodate.sh (new executable file, 25 lines)
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+PROJECT_NAME=${PROJECT_NAME:-murano}
+CFGFILE_NAME=murano-api.conf.sample
+
+if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then
+    CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
+elif [ -e etc/${CFGFILE_NAME} ]; then
+    CFGFILE=etc/${CFGFILE_NAME}
+else
+    echo "${0##*/}: can not find config file"
+    exit 1
+fi
+
+TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
+trap "rm -rf $TEMPDIR" EXIT
+
+tools/config/generate_sample.sh -b ./ -p muranoapi -o ${TEMPDIR}
+
+if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
+then
+    echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
+    echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh."
+    exit 1
+fi
@@ -4,8 +4,8 @@ print_hint() {
     echo "Try \`${0##*/} --help' for more information." >&2
 }
 
-PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:o: \
-                 --long help,base-dir:,package-name:,output-dir: -- "$@")
+PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o: \
+                 --long help,base-dir:,package-name:,output-dir:,module:,library: -- "$@")
 
 if [ $? != 0 ] ; then print_hint ; exit 1 ; fi
 
@@ -18,9 +18,11 @@ while true; do
             echo ""
             echo "options:"
             echo "-h, --help                show brief help"
-            echo "-b, --base-dir=DIR        Project base directory (required)"
-            echo "-p, --package-name=NAME   Project package name"
-            echo "-o, --output-dir=DIR      File output directory"
+            echo "-b, --base-dir=DIR        project base directory"
+            echo "-p, --package-name=NAME   project package name"
+            echo "-o, --output-dir=DIR      file output directory"
+            echo "-m, --module=MOD          extra python module to interrogate for options"
+            echo "-l, --library=LIB         extra library that registers options for discovery"
             exit 0
             ;;
         -b|--base-dir)
@@ -38,32 +40,80 @@ while true; do
             OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
             shift
             ;;
+        -m|--module)
+            shift
+            MODULES="$MODULES -m $1"
+            shift
+            ;;
+        -l|--library)
+            shift
+            LIBRARIES="$LIBRARIES -l $1"
+            shift
+            ;;
         --)
             break
             ;;
     esac
 done
 
-if [ -z $BASEDIR ] || ! [ -d $BASEDIR ]
+BASEDIR=${BASEDIR:-`pwd`}
+if ! [ -d $BASEDIR ]
 then
     echo "${0##*/}: missing project base directory" >&2 ; print_hint ; exit 1
+elif [[ $BASEDIR != /* ]]
+then
+    BASEDIR=$(cd "$BASEDIR" && pwd)
 fi
 
 PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}}
+TARGETDIR=$BASEDIR/$PACKAGENAME
+if ! [ -d $TARGETDIR ]
+then
+    echo "${0##*/}: invalid project package name" >&2 ; print_hint ; exit 1
+fi
 
 OUTPUTDIR=${OUTPUTDIR:-$BASEDIR/etc}
-if ! [ -d $OUTPUTDIR ]
+# NOTE(bnemec): Some projects put their sample config in etc/,
+# some in etc/$PACKAGENAME/
+if [ -d $OUTPUTDIR/$PACKAGENAME ]
+then
+    OUTPUTDIR=$OUTPUTDIR/$PACKAGENAME
+elif ! [ -d $OUTPUTDIR ]
 then
     echo "${0##*/}: cannot access \`$OUTPUTDIR': No such file or directory" >&2
     exit 1
 fi
 
 BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
-FILES=$(find $BASEDIR/$PACKAGENAME -type f -name "*.py" ! -path "*/tests/*" \
+find $TARGETDIR -type f -name "*.pyc" -delete
+FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \
         -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)
 
+RC_FILE="`dirname $0`/oslo.config.generator.rc"
+if test -r "$RC_FILE"
+then
+    source "$RC_FILE"
+fi
+
+for mod in ${MURANOAPI_CONFIG_GENERATOR_EXTRA_MODULES}; do
+    MODULES="$MODULES -m $mod"
+done
+
+for lib in ${MURANOAPI_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do
+    LIBRARIES="$LIBRARIES -l $lib"
+done
+
 export EVENTLET_NO_GREENDNS=yes
 
-MODULEPATH=muranoapi.openstack.common.config.generator
+OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
+[ "$OS_VARS" ] && eval "unset \$OS_VARS"
+DEFAULT_MODULEPATH=muranoapi.openstack.common.config.generator
+MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
 OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
-python -m $MODULEPATH $FILES > $OUTPUTFILE
+python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE
+
+# Hook to allow projects to append custom config file snippets
+CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)
+for CONCAT_FILE in $CONCAT_FILES; do
+    cat $CONCAT_FILE >> $OUTPUTFILE
+done
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2013 OpenStack Foundation
 # Copyright 2013 IBM Corp.
 #
@@ -114,15 +112,12 @@ class InstallVenv(object):
         print('Installing dependencies with pip (this can take a while)...')
 
         # First things first, make sure our venv has the latest pip and
-        # setuptools.
-        self.pip_install('pip>=1.3')
+        # setuptools and pbr
+        self.pip_install('pip>=1.4')
         self.pip_install('setuptools')
+        self.pip_install('pbr')
 
-        self.pip_install('-r', self.requirements)
-        self.pip_install('-r', self.test_requirements)
-
-    def post_process(self):
-        self.get_distro().post_process()
+        self.pip_install('-r', self.requirements, '-r', self.test_requirements)
 
     def parse_args(self, argv):
         """Parses command-line arguments."""
@@ -156,14 +151,6 @@ class Distro(InstallVenv):
               ' requires virtualenv, please install it using your'
               ' favorite package management tool' % self.project)
 
-    def post_process(self):
-        """Any distribution-specific post-processing gets done here.
-
-        In particular, this is useful for applying patches to code inside
-        the venv.
-        """
-        pass
-
 
 class Fedora(Distro):
     """This covers all Fedora-based distributions.
@@ -175,10 +162,6 @@ class Fedora(Distro):
         return self.run_command_with_code(['rpm', '-q', pkg],
                                           check_exit_code=False)[1] == 0
 
-    def apply_patch(self, originalfile, patchfile):
-        self.run_command(['patch', '-N', originalfile, patchfile],
-                         check_exit_code=False)
-
     def install_virtualenv(self):
         if self.check_cmd('virtualenv'):
             return
@@ -187,26 +170,3 @@ class Fedora(Distro):
             self.die("Please install 'python-virtualenv'.")
 
         super(Fedora, self).install_virtualenv()
-
-    def post_process(self):
-        """Workaround for a bug in eventlet.
-
-        This currently affects RHEL6.1, but the fix can safely be
-        applied to all RHEL and Fedora distributions.
-
-        This can be removed when the fix is applied upstream.
-
-        Nova: https://bugs.launchpad.net/nova/+bug/884915
-        Upstream: https://bitbucket.org/eventlet/eventlet/issue/89
-        RHEL: https://bugzilla.redhat.com/958868
-        """
-
-        # Install "patch" program if it's not there
-        if not self.check_pkg('patch'):
-            self.die("Please install 'patch'.")
-
-        # Apply the eventlet patch
-        self.apply_patch(os.path.join(self.venv, 'lib', self.py_version,
-                                      'site-packages',
-                                      'eventlet/green/subprocess.py'),
-                         'contrib/redhat-eventlet.patch')