Revert "import openstack.common from oslo-incubator"
This reverts commit f8d710517b.
parent f8d710517b
commit b6c2efaed6
@@ -1,24 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This ensures the openstack namespace is defined
try:
    import pkg_resources
    pkg_resources.declare_namespace(__name__)
except ImportError:
    import pkgutil
    __path__ = pkgutil.extend_path(__path__, __name__)
@@ -1,19 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# TODO(jaypipes) Code in this module is intended to be ported to the eventual
# openstack-common library
@@ -1,44 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Authentication related utilities and helper functions.
"""


def auth_str_equal(provided, known):
    """Constant-time string comparison.

    :param provided: the first string
    :param known: the second string

    :return: True if the strings are equal.

    This function takes two strings and compares them. It is intended to be
    used when doing a comparison for authentication purposes to help guard
    against timing attacks. When using the function for this purpose, always
    provide the user-provided password as the first argument. The time this
    function will take is always a factor of the length of this string.
    """
    result = 0
    p_len = len(provided)
    k_len = len(known)
    for i in range(p_len):
        a = ord(provided[i]) if i < p_len else 0
        b = ord(known[i]) if i < k_len else 0
        result |= a ^ b
    return (p_len == k_len) & (result == 0)
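For reference, a minimal usage sketch of the reverted helper (hypothetical caller code, not part of this commit): the loop always walks the full user-supplied string, so the running time depends on the attacker-controlled input's length, not on where the first mismatch occurs.

    from openstack.common.authutils import auth_str_equal

    stored_token = 'secret-token'         # hypothetical stored secret
    user_supplied_token = 'secret-token'  # hypothetical user input

    # Always pass the user-provided value first, per the docstring.
    assert auth_str_equal(user_supplied_token, stored_token)
    assert not auth_str_equal('wrong', stored_token)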
@@ -1,242 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

r"""
When using the global cfg.CONF object, it is quite common for a module
to require the existence of configuration options registered by other
modules.

For example, if module 'foo' registers the 'blaa' option and the module
'bar' uses the 'blaa' option then 'bar' might do:

  import foo

  print(CONF.blaa)

However, it's completely non-obvious why foo is being imported (is it
unused, can we remove the import) and where the 'blaa' option comes from.

The CONF.import_opt() method allows such a dependency to be explicitly
declared:

  CONF.import_opt('blaa', 'foo')
  print(CONF.blaa)

However, import_opt() has a weakness - if 'bar' imports 'foo' using the
import builtin and doesn't use import_opt() to import 'blaa', then 'blaa'
can still be used without problems. Similarly, where multiple options
are registered by a module imported via import_opt(), a lazy programmer
can get away with only declaring a dependency on a single option.

The ConfigFilter class provides a way to ensure that options are not
available unless they have been registered in the module or imported using
import_opt() e.g. with:

  CONF = ConfigFilter(cfg.CONF)
  CONF.import_opt('blaa', 'foo')
  print(CONF.blaa)

no options other than 'blaa' are available via CONF.
"""

import collections
import itertools

from oslo.config import cfg


class ConfigFilter(collections.Mapping):

    """A helper class which wraps a ConfigOpts object.

    ConfigFilter enforces the explicit declaration of dependencies on external
    options.
    """

    def __init__(self, conf):
        """Construct a ConfigFilter object.

        :param conf: a ConfigOpts object
        """
        self._conf = conf
        self._opts = set()
        self._groups = dict()

    def __getattr__(self, name):
        """Look up an option value.

        :param name: the opt name (or 'dest', more precisely)
        :returns: the option value (after string substitution) or a GroupAttr
        :raises: NoSuchOptError,ConfigFileValueError,TemplateSubstitutionError
        """
        if name in self._groups:
            return self._groups[name]
        if name not in self._opts:
            raise cfg.NoSuchOptError(name)
        return getattr(self._conf, name)

    def __getitem__(self, key):
        """Look up an option value."""
        return getattr(self, key)

    def __contains__(self, key):
        """Return True if key is the name of a registered opt or group."""
        return key in self._opts or key in self._groups

    def __iter__(self):
        """Iterate over all registered opt and group names."""
        return itertools.chain(self._opts, self._groups.keys())

    def __len__(self):
        """Return the number of options and option groups."""
        return len(self._opts) + len(self._groups)

    def register_opt(self, opt, group=None):
        """Register an option schema.

        :param opt: an instance of an Opt sub-class
        :param group: an optional OptGroup object or group name
        :return: False if the opt was already registered, True otherwise
        :raises: DuplicateOptError
        """
        if not self._conf.register_opt(opt, group):
            return False

        self._register_opt(opt.dest, group)
        return True

    def register_opts(self, opts, group=None):
        """Register multiple option schemas at once."""
        for opt in opts:
            self.register_opt(opt, group)

    def register_cli_opt(self, opt, group=None):
        """Register a CLI option schema.

        :param opt: an instance of an Opt sub-class
        :param group: an optional OptGroup object or group name
        :return: False if the opt was already registered, True otherwise
        :raises: DuplicateOptError, ArgsAlreadyParsedError
        """
        if not self._conf.register_cli_opt(opt, group):
            return False

        self._register_opt(opt.dest, group)
        return True

    def register_cli_opts(self, opts, group=None):
        """Register multiple CLI option schemas at once."""
        for opt in opts:
            self.register_cli_opt(opt, group)

    def register_group(self, group):
        """Register an option group.

        :param group: an OptGroup object
        """
        self._conf.register_group(group)
        self._get_group(group.name)

    def import_opt(self, opt_name, module_str, group=None):
        """Import an option definition from a module.

        :param opt_name: the name/dest of the opt
        :param module_str: the name of a module to import
        :param group: an optional OptGroup object or group name
        :raises: NoSuchOptError, NoSuchGroupError
        """
        self._conf.import_opt(opt_name, module_str, group)
        self._register_opt(opt_name, group)

    def import_group(self, group, module_str):
        """Import an option group from a module.

        Note that this allows access to all options registered with
        the group whether or not those options were registered by
        the given module.

        :param group: an OptGroup object or group name
        :param module_str: the name of a module to import
        :raises: ImportError, NoSuchGroupError
        """
        self._conf.import_group(group, module_str)
        group = self._get_group(group)
        group._all_opts = True

    def _register_opt(self, opt_name, group):
        if group is None:
            self._opts.add(opt_name)
            return True
        else:
            group = self._get_group(group)
            return group._register_opt(opt_name)

    def _get_group(self, group_or_name):
        if isinstance(group_or_name, cfg.OptGroup):
            group_name = group_or_name.name
        else:
            group_name = group_or_name

        if group_name in self._groups:
            return self._groups[group_name]
        else:
            group = self.GroupAttr(self._conf, group_name)
            self._groups[group_name] = group
            return group

    class GroupAttr(collections.Mapping):

        """Helper class to wrap a group object.

        Represents the option values of a group as a mapping and attributes.
        """

        def __init__(self, conf, group):
            """Construct a GroupAttr object.

            :param conf: a ConfigOpts object
            :param group: an OptGroup object
            """
            self._conf = conf
            self._group = group
            self._opts = set()
            self._all_opts = False

        def __getattr__(self, name):
            """Look up an option value."""
            if not self._all_opts and name not in self._opts:
                raise cfg.NoSuchOptError(name)
            return getattr(self._conf[self._group], name)

        def __getitem__(self, key):
            """Look up an option value."""
            return getattr(self, key)

        def __contains__(self, key):
            """Return True if key is the name of a registered opt or group."""
            return key in self._opts

        def __iter__(self):
            """Iterate over all registered opt and group names."""
            for key in self._opts:
                yield key

        def __len__(self):
            """Return the number of options and option groups."""
            return len(self._opts)

        def _register_opt(self, opt_name):
            self._opts.add(opt_name)
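A short sketch of the enforcement behaviour the docstring describes, using the reverted ConfigFilter class (hypothetical option name; assumes oslo.config is importable, as the module itself does): an option registered on the underlying ConfigOpts but never declared through the filter raises NoSuchOptError.

    from oslo.config import cfg

    conf = cfg.ConfigOpts()
    conf.register_opt(cfg.StrOpt('blaa', default='x'))

    filtered = ConfigFilter(conf)
    print(conf.blaa)      # 'x' - visible on the raw ConfigOpts
    try:
        filtered.blaa     # never declared via the filter
    except cfg.NoSuchOptError:
        print('hidden until register_opt()/import_opt() declares it')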
@@ -1,63 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import inspect


class MissingArgs(Exception):

    def __init__(self, missing):
        self.missing = missing

    def __str__(self):
        if len(self.missing) == 1:
            return "An argument is missing"
        else:
            return ("%(num)d arguments are missing" %
                    dict(num=len(self.missing)))


def validate_args(fn, *args, **kwargs):
    """Check that the supplied args are sufficient for calling a function.

    >>> validate_args(lambda a: None)
    Traceback (most recent call last):
        ...
    MissingArgs: An argument is missing
    >>> validate_args(lambda a, b, c, d: None, 0, c=1)
    Traceback (most recent call last):
        ...
    MissingArgs: 2 arguments are missing

    :param fn: the function to check
    :param args: the positional arguments supplied
    :param kwargs: the keyword arguments supplied
    """
    argspec = inspect.getargspec(fn)

    num_defaults = len(argspec.defaults or [])
    required_args = argspec.args[:len(argspec.args) - num_defaults]

    def isbound(method):
        return getattr(method, 'im_self', None) is not None

    if isbound(fn):
        required_args.pop(0)

    missing = [arg for arg in required_args if arg not in kwargs]
    missing = missing[len(args):]
    if missing:
        raise MissingArgs(missing)
@@ -1,255 +0,0 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 SINA Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Zhongyue Luo, SINA Corporation.
#
"""Extracts OpenStack config option info from module(s)."""

import imp
import os
import re
import socket
import sys
import textwrap

from oslo.config import cfg

from openstack.common import gettextutils
from openstack.common import importutils

gettextutils.install('oslo')

STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
MULTISTROPT = "MultiStrOpt"

OPT_TYPES = {
    STROPT: 'string value',
    BOOLOPT: 'boolean value',
    INTOPT: 'integer value',
    FLOATOPT: 'floating point value',
    LISTOPT: 'list value',
    MULTISTROPT: 'multi valued',
}

OPTION_COUNT = 0
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
                                              FLOATOPT, LISTOPT,
                                              MULTISTROPT]))

PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                       "../../../../"))
WORDWRAP_WIDTH = 60


def generate(srcfiles):
    mods_by_pkg = dict()
    for filepath in srcfiles:
        pkg_name = filepath.split(os.sep)[1]
        mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
                            os.path.basename(filepath).split('.')[0]])
        mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
    # NOTE(lzyeval): place top level modules before packages
    pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
    pkg_names.sort()
    ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
    ext_names.sort()
    pkg_names.extend(ext_names)

    # opts_by_group is a mapping of group name to an options list
    # The options list is a list of (module, options) tuples
    opts_by_group = {'DEFAULT': []}

    for pkg_name in pkg_names:
        mods = mods_by_pkg.get(pkg_name)
        mods.sort()
        for mod_str in mods:
            if mod_str.endswith('.__init__'):
                mod_str = mod_str[:mod_str.rfind(".")]

            mod_obj = _import_module(mod_str)
            if not mod_obj:
                continue

            for group, opts in _list_opts(mod_obj):
                opts_by_group.setdefault(group, []).append((mod_str, opts))

    print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
    for group, opts in opts_by_group.items():
        print_group_opts(group, opts)

    print "# Total option count: %d" % OPTION_COUNT


def _import_module(mod_str):
    try:
        if mod_str.startswith('bin.'):
            imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
            return sys.modules[mod_str[4:]]
        else:
            return importutils.import_module(mod_str)
    except ImportError as ie:
        sys.stderr.write("%s\n" % str(ie))
        return None
    except Exception:
        return None


def _is_in_group(opt, group):
    "Check if opt is in group."
    for key, value in group._opts.items():
        if value['opt'] == opt:
            return True
    return False


def _guess_groups(opt, mod_obj):
    # is it in the DEFAULT group?
    if _is_in_group(opt, cfg.CONF):
        return 'DEFAULT'

    # what other groups is it in?
    for key, value in cfg.CONF.items():
        if isinstance(value, cfg.CONF.GroupAttr):
            if _is_in_group(opt, value._group):
                return value._group.name

    raise RuntimeError(
        "Unable to find group for option %s, "
        "maybe it's defined twice in the same group?"
        % opt.name
    )


def _list_opts(obj):
    def is_opt(o):
        return (isinstance(o, cfg.Opt) and
                not isinstance(o, cfg.SubCommandOpt))

    opts = list()
    for attr_str in dir(obj):
        attr_obj = getattr(obj, attr_str)
        if is_opt(attr_obj):
            opts.append(attr_obj)
        elif (isinstance(attr_obj, list) and
              all(map(lambda x: is_opt(x), attr_obj))):
            opts.extend(attr_obj)

    ret = {}
    for opt in opts:
        ret.setdefault(_guess_groups(opt, obj), []).append(opt)
    return ret.items()


def print_group_opts(group, opts_by_module):
    print "[%s]" % group
    print
    global OPTION_COUNT
    for mod, opts in opts_by_module:
        OPTION_COUNT += len(opts)
        print '#'
        print '# Options defined in %s' % mod
        print '#'
        print
        for opt in opts:
            _print_opt(opt)
        print


def _get_my_ip():
    try:
        csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        csock.connect(('8.8.8.8', 80))
        (addr, port) = csock.getsockname()
        csock.close()
        return addr
    except socket.error:
        return None


def _sanitize_default(s):
    """Set up a reasonably sensible default for pybasedir, my_ip and host."""
    if s.startswith(BASEDIR):
        return s.replace(BASEDIR, '/usr/lib/python/site-packages')
    elif BASEDIR in s:
        return s.replace(BASEDIR, '')
    elif s == _get_my_ip():
        return '10.0.0.1'
    elif s == socket.gethostname():
        return 'oslo'
    elif s.strip() != s:
        return '"%s"' % s
    return s


def _print_opt(opt):
    opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
    if not opt_help:
        sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
        opt_help = ""
    opt_type = None
    try:
        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
    except (ValueError, AttributeError) as err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    opt_help += ' (' + OPT_TYPES[opt_type] + ')'
    print '#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))
    try:
        if opt_default is None:
            print '#%s=<None>' % opt_name
        elif opt_type == STROPT:
            assert(isinstance(opt_default, basestring))
            print '#%s=%s' % (opt_name, _sanitize_default(opt_default))
        elif opt_type == BOOLOPT:
            assert(isinstance(opt_default, bool))
            print '#%s=%s' % (opt_name, str(opt_default).lower())
        elif opt_type == INTOPT:
            assert(isinstance(opt_default, int) and
                   not isinstance(opt_default, bool))
            print '#%s=%s' % (opt_name, opt_default)
        elif opt_type == FLOATOPT:
            assert(isinstance(opt_default, float))
            print '#%s=%s' % (opt_name, opt_default)
        elif opt_type == LISTOPT:
            assert(isinstance(opt_default, list))
            print '#%s=%s' % (opt_name, ','.join(opt_default))
        elif opt_type == MULTISTROPT:
            assert(isinstance(opt_default, list))
            if not opt_default:
                opt_default = ['']
            for default in opt_default:
                print '#%s=%s' % (opt_name, default)
        print
    except Exception:
        sys.stderr.write('Error in option "%s"\n' % opt_name)
        sys.exit(1)


def main():
    if len(sys.argv) < 2:
        print "usage: %s [srcfile]...\n" % sys.argv[0]
        sys.exit(0)
    generate(sys.argv[1:])

if __name__ == '__main__':
    main()
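For orientation, the print statements above emit a commented sample configuration of roughly this shape (illustrative output for a hypothetical module with a single option that has no default; not captured from a real run):

    [DEFAULT]

    #
    # Options defined in mypkg.opts
    #

    # help text for my_opt (string value)
    #my_opt=<None>

    # Total option count: 1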
@@ -1,83 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Simple class that stores security context information in the web request.

Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""

import itertools

from openstack.common import uuidutils


def generate_request_id():
    return 'req-%s' % uuidutils.generate_uuid()


class RequestContext(object):

    """Helper class to represent useful information about a request context.

    Stores information about the security context under which the user
    accesses the system, as well as additional request information.
    """

    def __init__(self, auth_token=None, user=None, tenant=None,
                 is_admin=False, read_only=False, show_deleted=False,
                 request_id=None):
        self.auth_token = auth_token
        self.user = user
        self.tenant = tenant
        self.is_admin = is_admin
        self.read_only = read_only
        self.show_deleted = show_deleted
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id

    def to_dict(self):
        return {'user': self.user,
                'tenant': self.tenant,
                'is_admin': self.is_admin,
                'read_only': self.read_only,
                'show_deleted': self.show_deleted,
                'auth_token': self.auth_token,
                'request_id': self.request_id}


def get_admin_context(show_deleted="no"):
    context = RequestContext(None,
                             tenant=None,
                             is_admin=True,
                             show_deleted=show_deleted)
    return context


def get_context_from_function_and_args(function, args, kwargs):
    """Find an arg of type RequestContext and return it.

    This is useful in a couple of decorators where we don't
    know much about the function we're wrapping.
    """

    for arg in itertools.chain(kwargs.values(), args):
        if isinstance(arg, RequestContext):
            return arg

    return None
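To illustrate the round trip the reverted class supports (a hypothetical snippet with made-up values, not part of this commit):

    ctxt = RequestContext(user='alice', tenant='demo')
    assert ctxt.request_id.startswith('req-')

    # to_dict() gives a serializable view, e.g. for notification payloads
    payload = ctxt.to_dict()
    assert payload['user'] == 'alice'

    admin = get_admin_context()
    assert admin.is_admin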
@@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@@ -1,106 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Multiple DB API backend support.

Supported configuration options:

The following two parameters are in the 'database' group:
`backend`: DB backend name or full module path to DB backend module.
`use_tpool`: Enable thread pooling of DB API calls.

A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.

*NOTE*: There are bugs in eventlet when using tpool combined with
threading locks. The python logging module happens to use such locks. To
work around this issue, be sure to specify thread=False with
eventlet.monkey_patch().

A bug for eventlet has been filed here:

https://bitbucket.org/eventlet/eventlet/issue/137/
"""
import functools

from oslo.config import cfg

from openstack.common import importutils
from openstack.common import lockutils


db_opts = [
    cfg.StrOpt('backend',
               default='sqlalchemy',
               deprecated_name='db_backend',
               deprecated_group='DEFAULT',
               help='The backend to use for db'),
    cfg.BoolOpt('use_tpool',
                default=False,
                deprecated_name='dbapi_use_tpool',
                deprecated_group='DEFAULT',
                help='Enable the experimental use of thread pooling for '
                     'all DB API calls')
]

CONF = cfg.CONF
CONF.register_opts(db_opts, 'database')


class DBAPI(object):
    def __init__(self, backend_mapping=None):
        if backend_mapping is None:
            backend_mapping = {}
        self.__backend = None
        self.__backend_mapping = backend_mapping

    @lockutils.synchronized('dbapi_backend', 'oslo-')
    def __get_backend(self):
        """Get the actual backend.  May be a module or an instance of
        a class.  Doesn't matter to us.  We do this synchronized as it's
        possible multiple greenthreads started very quickly trying to do
        DB calls and eventlet can switch threads before self.__backend gets
        assigned.
        """
        if self.__backend:
            # Another thread assigned it
            return self.__backend
        backend_name = CONF.database.backend
        self.__use_tpool = CONF.database.use_tpool
        if self.__use_tpool:
            from eventlet import tpool
            self.__tpool = tpool
        # Import the untranslated name if we don't have a
        # mapping.
        backend_path = self.__backend_mapping.get(backend_name,
                                                  backend_name)
        backend_mod = importutils.import_module(backend_path)
        self.__backend = backend_mod.get_backend()
        return self.__backend

    def __getattr__(self, key):
        backend = self.__backend or self.__get_backend()
        attr = getattr(backend, key)
        if not self.__use_tpool or not hasattr(attr, '__call__'):
            return attr

        def tpool_wrapper(*args, **kwargs):
            return self.__tpool.execute(attr, *args, **kwargs)

        functools.update_wrapper(tpool_wrapper, attr)
        return tpool_wrapper
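For context, the contract described in the module docstring ("a method named 'get_backend' which takes no arguments") can be satisfied by a backend module as small as the following sketch; the module path and function names are hypothetical, shown only to illustrate the reverted API.

    # Hypothetical backend module, importable as e.g. 'mypkg.db.impl'.
    # DBAPI resolves the 'backend' option (or a backend_mapping entry)
    # to this module path, imports it, and calls get_backend().
    import sys


    def get_foo(context, foo_id):
        # a real backend would query the database here
        return {'id': foo_id}


    def get_backend():
        # any object exposing the DB API methods will do; the module
        # itself is the simplest choice
        return sys.modules[__name__]

A caller would then construct DBAPI(backend_mapping={'mybackend': 'mypkg.db.impl'}) and set backend=mybackend in the [database] group, with every attribute lookup on the DBAPI instance delegating to the loaded module.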
@@ -1,45 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""DB related custom exceptions."""

from openstack.common.gettextutils import _


class DBError(Exception):
    """Wraps an implementation specific exception."""
    def __init__(self, inner_exception=None):
        self.inner_exception = inner_exception
        super(DBError, self).__init__(str(inner_exception))


class DBDuplicateEntry(DBError):
    """Wraps an implementation specific exception."""
    def __init__(self, columns=[], inner_exception=None):
        self.columns = columns
        super(DBDuplicateEntry, self).__init__(inner_exception)


class DBDeadlock(DBError):
    def __init__(self, inner_exception=None):
        super(DBDeadlock, self).__init__(inner_exception)


class DBInvalidUnicodeParameter(Exception):
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")
@@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@@ -1,106 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""

from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper

from openstack.common.db.sqlalchemy.session import get_session
from openstack.common import timeutils


class ModelBase(object):
    """Base class for models."""
    __table_initialized__ = False

    def save(self, session=None):
        """Save this object."""
        if not session:
            session = get_session()
        # NOTE(boris-42): This part of the code should look like:
        #                     session.add(self)
        #                     session.flush()
        #                 But there is a bug in sqlalchemy and eventlet that
        #                 raises NoneType exception if there is no running
        #                 transaction and rollback is called. As long as
        #                 sqlalchemy has this bug we have to create the
        #                 transaction explicitly.
        with session.begin(subtransactions=True):
            session.add(self)
            session.flush()

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __getitem__(self, key):
        return getattr(self, key)

    def get(self, key, default=None):
        return getattr(self, key, default)

    def __iter__(self):
        columns = dict(object_mapper(self).columns).keys()
        # NOTE(russellb): Allow models to specify other keys that can be
        # looked up, beyond the actual db columns. An example would be the
        # 'name' property for an Instance.
        if hasattr(self, '_extra_keys'):
            columns.extend(self._extra_keys())
        self._i = iter(columns)
        return self

    def next(self):
        n = self._i.next()
        return n, getattr(self, n)

    def update(self, values):
        """Make the model object behave like a dict."""
        for k, v in values.iteritems():
            setattr(self, k, v)

    def iteritems(self):
        """Make the model object behave like a dict.

        Includes attributes from joins.
        """
        local = dict(self)
        joined = dict([(k, v) for k, v in self.__dict__.iteritems()
                       if not k[0] == '_'])
        local.update(joined)
        return local.iteritems()


class TimestampMixin(object):
    created_at = Column(DateTime, default=timeutils.utcnow)
    updated_at = Column(DateTime, onupdate=timeutils.utcnow)


class SoftDeleteMixin(object):
    deleted_at = Column(DateTime)
    deleted = Column(Integer, default=0)

    def soft_delete(self, session=None):
        """Mark this object as deleted."""
        self.deleted = self.id
        self.deleted_at = timeutils.utcnow()
        self.save(session=session)
@@ -1,784 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Session Handling for SQLAlchemy backend.
|
||||
|
||||
Initializing:
|
||||
|
||||
* Call set_defaults with the minimal of the following kwargs:
|
||||
sql_connection, sqlite_db
|
||||
|
||||
Example:
|
||||
|
||||
session.set_defaults(
|
||||
sql_connection="sqlite:///var/lib/oslo/sqlite.db",
|
||||
sqlite_db="/var/lib/oslo/sqlite.db")
|
||||
|
||||
Recommended ways to use sessions within this framework:
|
||||
|
||||
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
|
||||
model_query() will implicitly use a session when called without one
|
||||
supplied. This is the ideal situation because it will allow queries
|
||||
to be automatically retried if the database connection is interrupted.
|
||||
|
||||
Note: Automatic retry will be enabled in a future patch.
|
||||
|
||||
It is generally fine to issue several queries in a row like this. Even though
|
||||
they may be run in separate transactions and/or separate sessions, each one
|
||||
will see the data from the prior calls. If needed, undo- or rollback-like
|
||||
functionality should be handled at a logical level. For an example, look at
|
||||
the code around quotas and reservation_rollback().
|
||||
|
||||
Examples:
|
||||
|
||||
def get_foo(context, foo):
|
||||
return model_query(context, models.Foo).\
|
||||
filter_by(foo=foo).\
|
||||
first()
|
||||
|
||||
def update_foo(context, id, newfoo):
|
||||
model_query(context, models.Foo).\
|
||||
filter_by(id=id).\
|
||||
update({'foo': newfoo})
|
||||
|
||||
def create_foo(context, values):
|
||||
foo_ref = models.Foo()
|
||||
foo_ref.update(values)
|
||||
foo_ref.save()
|
||||
return foo_ref
|
||||
|
||||
|
||||
* Within the scope of a single method, keeping all the reads and writes within
|
||||
the context managed by a single session. In this way, the session's __exit__
|
||||
handler will take care of calling flush() and commit() for you.
|
||||
If using this approach, you should not explicitly call flush() or commit().
|
||||
Any error within the context of the session will cause the session to emit
|
||||
a ROLLBACK. If the connection is dropped before this is possible, the
|
||||
database will implicitly rollback the transaction.
|
||||
|
||||
Note: statements in the session scope will not be automatically retried.
|
||||
|
||||
If you create models within the session, they need to be added, but you
|
||||
do not need to call model.save()
|
||||
|
||||
def create_many_foo(context, foos):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
for foo in foos:
|
||||
foo_ref = models.Foo()
|
||||
foo_ref.update(foo)
|
||||
session.add(foo_ref)
|
||||
|
||||
def update_bar(context, foo_id, newbar):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
foo_ref = model_query(context, models.Foo, session).\
|
||||
filter_by(id=foo_id).\
|
||||
first()
|
||||
model_query(context, models.Bar, session).\
|
||||
filter_by(id=foo_ref['bar_id']).\
|
||||
update({'bar': newbar})
|
||||
|
||||
Note: update_bar is a trivially simple example of using "with session.begin".
|
||||
Whereas create_many_foo is a good example of when a transaction is needed,
|
||||
it is always best to use as few queries as possible. The two queries in
|
||||
update_bar can be better expressed using a single query which avoids
|
||||
the need for an explicit transaction. It can be expressed like so:
|
||||
|
||||
def update_bar(context, foo_id, newbar):
|
||||
subq = model_query(context, models.Foo.id).\
|
||||
filter_by(id=foo_id).\
|
||||
limit(1).\
|
||||
subquery()
|
||||
model_query(context, models.Bar).\
|
||||
filter_by(id=subq.as_scalar()).\
|
||||
update({'bar': newbar})
|
||||
|
||||
For reference, this emits approximagely the following SQL statement:
|
||||
|
||||
UPDATE bar SET bar = ${newbar}
|
||||
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
|
||||
|
||||
* Passing an active session between methods. Sessions should only be passed
|
||||
to private methods. The private method must use a subtransaction; otherwise
|
||||
SQLAlchemy will throw an error when you call session.begin() on an existing
|
||||
transaction. Public methods should not accept a session parameter and should
|
||||
not be involved in sessions within the caller's scope.
|
||||
|
||||
Note that this incurs more overhead in SQLAlchemy than the above means
|
||||
due to nesting transactions, and it is not possible to implicitly retry
|
||||
failed database operations when using this approach.
|
||||
|
||||
This also makes code somewhat more difficult to read and debug, because a
|
||||
single database transaction spans more than one method. Error handling
|
||||
becomes less clear in this situation. When this is needed for code clarity,
|
||||
it should be clearly documented.
|
||||
|
||||
def myfunc(foo):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
# do some database things
|
||||
bar = _private_func(foo, session)
|
||||
return bar
|
||||
|
||||
def _private_func(foo, session=None):
|
||||
if not session:
|
||||
session = get_session()
|
||||
with session.begin(subtransaction=True):
|
||||
# do some other database things
|
||||
return bar
|
||||
|
||||
|
||||
There are some things which it is best to avoid:
|
||||
|
||||
* Don't keep a transaction open any longer than necessary.
|
||||
|
||||
This means that your "with session.begin()" block should be as short
|
||||
as possible, while still containing all the related calls for that
|
||||
transaction.
|
||||
|
||||
* Avoid "with_lockmode('UPDATE')" when possible.
|
||||
|
||||
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
|
||||
any rows, it will take a gap-lock. This is a form of write-lock on the
|
||||
"gap" where no rows exist, and prevents any other writes to that space.
|
||||
This can effectively prevent any INSERT into a table by locking the gap
|
||||
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
|
||||
has an overly broad WHERE clause, or doesn't properly use an index.
|
||||
|
||||
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
|
||||
number of rows matching a query, and if only one row is returned,
|
||||
then issue the SELECT FOR UPDATE.
|
||||
|
||||
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
|
||||
However, this can not be done until the "deleted" columns are removed and
|
||||
proper UNIQUE constraints are added to the tables.
|
||||
|
||||
|
||||
Enabling soft deletes:
|
||||
|
||||
* To use/enable soft-deletes, the SoftDeleteMixin must be added
|
||||
to your model class. For example:
|
||||
|
||||
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
|
||||
pass
|
||||
|
||||
|
||||
Efficient use of soft deletes:
|
||||
|
||||
* There are two possible ways to mark a record as deleted:
|
||||
model.soft_delete() and query.soft_delete().
|
||||
|
||||
model.soft_delete() method works with single already fetched entry.
|
||||
query.soft_delete() makes only one db request for all entries that correspond
|
||||
to query.
|
||||
|
||||
* In almost all cases you should use query.soft_delete(). Some examples:
|
||||
|
||||
def soft_delete_bar():
|
||||
count = model_query(BarModel).find(some_condition).soft_delete()
|
||||
if count == 0:
|
||||
raise Exception("0 entries were soft deleted")
|
||||
|
||||
def complex_soft_delete_with_synchronization_bar(session=None):
|
||||
if session is None:
|
||||
session = get_session()
|
||||
with session.begin(subtransactions=True):
|
||||
count = model_query(BarModel).\
|
||||
find(some_condition).\
|
||||
soft_delete(synchronize_session=True)
|
||||
# Here synchronize_session is required, because we
|
||||
# don't know what is going on in outer session.
|
||||
if count == 0:
|
||||
raise Exception("0 entries were soft deleted")
|
||||
|
||||
* There is only one situation where model.soft_delete() is appropriate: when
|
||||
you fetch a single record, work with it, and mark it as deleted in the same
|
||||
transaction.
|
||||
|
||||
def soft_delete_bar_model():
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
bar_ref = model_query(BarModel).find(some_condition).first()
|
||||
# Work with bar_ref
|
||||
bar_ref.soft_delete(session=session)
|
||||
|
||||
However, if you need to work with all entries that correspond to query and
|
||||
then soft delete them you should use query.soft_delete() method:
|
||||
|
||||
def soft_delete_multi_models():
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
query = model_query(BarModel, session=session).\
|
||||
find(some_condition)
|
||||
model_refs = query.all()
|
||||
# Work with model_refs
|
||||
query.soft_delete(synchronize_session=False)
|
||||
# synchronize_session=False should be set if there is no outer
|
||||
# session and these entries are not used after this.
|
||||
|
||||
When working with many rows, it is very important to use query.soft_delete,
|
||||
which issues a single query. Using model.soft_delete(), as in the following
|
||||
example, is very inefficient.
|
||||
|
||||
for bar_ref in bar_refs:
|
||||
bar_ref.soft_delete(session=session)
|
||||
# This will produce count(bar_refs) db requests.
|
||||
"""
|
||||
|
||||
import os.path
|
||||
import re
|
||||
import time
|
||||
|
||||
from eventlet import greenthread
|
||||
from oslo.config import cfg
|
||||
import six
|
||||
from sqlalchemy import exc as sqla_exc
|
||||
import sqlalchemy.interfaces
|
||||
from sqlalchemy.interfaces import PoolListener
|
||||
import sqlalchemy.orm
|
||||
from sqlalchemy.pool import NullPool, StaticPool
|
||||
from sqlalchemy.sql.expression import literal_column
|
||||
|
||||
from openstack.common.db import exception
|
||||
from openstack.common.gettextutils import _
|
||||
from openstack.common import log as logging
|
||||
from openstack.common import timeutils
|
||||
|
||||
sqlite_db_opts = [
|
||||
cfg.StrOpt('sqlite_db',
|
||||
default='oslo.sqlite',
|
||||
help='the filename to use with sqlite'),
|
||||
cfg.BoolOpt('sqlite_synchronous',
|
||||
default=True,
|
||||
help='If true, use synchronous mode for sqlite'),
|
||||
]
|
||||
|
||||
database_opts = [
|
||||
cfg.StrOpt('connection',
|
||||
default='sqlite:///' +
|
||||
os.path.abspath(os.path.join(os.path.dirname(__file__),
|
||||
'../', '$sqlite_db')),
|
||||
help='The SQLAlchemy connection string used to connect to the '
|
||||
'database',
|
||||
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
|
||||
group='DEFAULT'),
|
||||
cfg.DeprecatedOpt('sql_connection',
|
||||
group='DATABASE')],
|
||||
secret=True),
|
||||
cfg.StrOpt('slave_connection',
|
||||
default='',
|
||||
help='The SQLAlchemy connection string used to connect to the '
|
||||
'slave database',
|
||||
secret=True),
|
||||
cfg.IntOpt('idle_timeout',
|
||||
default=3600,
|
||||
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
|
||||
group='DEFAULT'),
|
||||
cfg.DeprecatedOpt('sql_idle_timeout',
|
||||
group='DATABASE')],
|
||||
help='timeout before idle sql connections are reaped'),
|
||||
cfg.IntOpt('min_pool_size',
|
||||
default=1,
|
||||
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
|
||||
group='DEFAULT'),
|
||||
cfg.DeprecatedOpt('sql_min_pool_size',
|
||||
group='DATABASE')],
|
||||
help='Minimum number of SQL connections to keep open in a '
|
||||
'pool'),
|
||||
cfg.IntOpt('max_pool_size',
|
||||
default=None,
|
||||
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
|
||||
group='DEFAULT'),
|
||||
cfg.DeprecatedOpt('sql_max_pool_size',
|
||||
group='DATABASE')],
|
||||
help='Maximum number of SQL connections to keep open in a '
|
||||
'pool'),
|
||||
cfg.IntOpt('max_retries',
|
||||
default=10,
|
||||
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
|
||||
group='DEFAULT'),
|
||||
cfg.DeprecatedOpt('sql_max_retries',
|
||||
group='DATABASE')],
|
||||
help='maximum db connection retries during startup. '
|
||||
'(setting -1 implies an infinite retry count)'),
|
||||
cfg.IntOpt('retry_interval',
|
||||
default=10,
|
||||
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
|
||||
group='DEFAULT'),
|
||||
cfg.DeprecatedOpt('reconnect_interval',
|
||||
group='DATABASE')],
|
||||
help='interval between retries of opening a sql connection'),
|
||||
cfg.IntOpt('max_overflow',
|
||||
default=None,
|
||||
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
|
||||
group='DEFAULT'),
|
||||
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
|
||||
group='DATABASE')],
|
||||
help='If set, use this value for max_overflow with sqlalchemy'),
|
||||
cfg.IntOpt('connection_debug',
|
||||
default=0,
|
||||
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
|
||||
group='DEFAULT')],
|
||||
help='Verbosity of SQL debugging information. 0=None, '
|
||||
'100=Everything'),
|
||||
cfg.BoolOpt('connection_trace',
|
||||
default=False,
|
||||
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
|
||||
group='DEFAULT')],
|
||||
help='Add python stack traces to SQL as comment strings'),
|
||||
cfg.IntOpt('pool_timeout',
|
||||
default=None,
|
||||
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
|
||||
group='DATABASE')],
|
||||
help='If set, use this value for pool_timeout with sqlalchemy'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(sqlite_db_opts)
|
||||
CONF.register_opts(database_opts, 'database')
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
_ENGINE = None
|
||||
_MAKER = None
|
||||
_SLAVE_ENGINE = None
|
||||
_SLAVE_MAKER = None
|
||||
|
||||
|
||||
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
|
||||
max_overflow=None, pool_timeout=None):
|
||||
"""Set defaults for configuration variables."""
|
||||
cfg.set_defaults(database_opts,
|
||||
connection=sql_connection)
|
||||
cfg.set_defaults(sqlite_db_opts,
|
||||
sqlite_db=sqlite_db)
|
||||
# Update the QueuePool defaults
|
||||
if max_pool_size is not None:
|
||||
cfg.set_defaults(database_opts,
|
||||
max_pool_size=max_pool_size)
|
||||
if max_overflow is not None:
|
||||
cfg.set_defaults(database_opts,
|
||||
max_overflow=max_overflow)
|
||||
if pool_timeout is not None:
|
||||
cfg.set_defaults(database_opts,
|
||||
pool_timeout=pool_timeout)
|
||||
|
||||
|
||||
def cleanup():
|
||||
global _ENGINE, _MAKER
|
||||
global _SLAVE_ENGINE, _SLAVE_MAKER
|
||||
|
||||
if _MAKER:
|
||||
_MAKER.close_all()
|
||||
_MAKER = None
|
||||
if _ENGINE:
|
||||
_ENGINE.dispose()
|
||||
_ENGINE = None
|
||||
if _SLAVE_MAKER:
|
||||
_SLAVE_MAKER.close_all()
|
||||
_SLAVE_MAKER = None
|
||||
if _SLAVE_ENGINE:
|
||||
_SLAVE_ENGINE.dispose()
|
||||
_SLAVE_ENGINE = None
|
||||
|
||||
|
||||
class SqliteForeignKeysListener(PoolListener):
|
||||
"""Ensures that the foreign key constraints are enforced in SQLite.
|
||||
|
||||
The foreign key constraints are disabled by default in SQLite,
|
||||
so the foreign key constraints will be enabled here for every
|
||||
database connection
|
||||
"""
|
||||
def connect(self, dbapi_con, con_record):
|
||||
dbapi_con.execute('pragma foreign_keys=ON')
|
||||
|
||||
|
||||
def get_session(autocommit=True, expire_on_commit=False,
|
||||
sqlite_fk=False, slave_session=False):
|
||||
"""Return a SQLAlchemy session."""
|
||||
global _MAKER
|
||||
global _SLAVE_MAKER
|
||||
maker = _MAKER
|
||||
|
||||
if slave_session:
|
||||
maker = _SLAVE_MAKER
|
||||
|
||||
if maker is None:
|
||||
engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session)
|
||||
maker = get_maker(engine, autocommit, expire_on_commit)
|
||||
|
||||
if slave_session:
|
||||
_SLAVE_MAKER = maker
|
||||
else:
|
||||
_MAKER = maker
|
||||
|
||||
session = maker()
|
||||
return session
|
||||
|
||||
|
||||
# note(boris-42): In current versions of DB backends unique constraint
|
||||
# violation messages follow the structure:
|
||||
#
|
||||
# sqlite:
|
||||
# 1 column - (IntegrityError) column c1 is not unique
|
||||
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
|
||||
#
|
||||
# postgres:
|
||||
# 1 column - (IntegrityError) duplicate key value violates unique
|
||||
# constraint "users_c1_key"
|
||||
# N columns - (IntegrityError) duplicate key value violates unique
|
||||
# constraint "name_of_our_constraint"
|
||||
#
|
||||
# mysql:
|
||||
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
|
||||
# 'c1'")
|
||||
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
|
||||
# with -' for key 'name_of_our_constraint'")
|
||||
_DUP_KEY_RE_DB = {
|
||||
"sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
|
||||
"postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
|
||||
"mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
|
||||
}
|
||||
|
||||
|
||||
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
|
||||
"""Raise exception if two entries are duplicated.
|
||||
|
||||
In this function will be raised DBDuplicateEntry exception if integrity
|
||||
error wrap unique constraint violation.
|
||||
"""
|
||||
|
||||
def get_columns_from_uniq_cons_or_name(columns):
|
||||
# note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
|
||||
# where `t` it is table name and columns `c1`, `c2`
|
||||
# are in UniqueConstraint.
|
||||
uniqbase = "uniq_"
|
||||
if not columns.startswith(uniqbase):
|
||||
if engine_name == "postgresql":
|
||||
return [columns[columns.index("_") + 1:columns.rindex("_")]]
|
||||
return [columns]
|
||||
return columns[len(uniqbase):].split("0")[1:]
|
||||
|
||||
if engine_name not in ["mysql", "sqlite", "postgresql"]:
|
||||
return
|
||||
|
||||
m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
|
||||
if not m:
|
||||
return
|
||||
columns = m.group(1)
|
||||
|
||||
if engine_name == "sqlite":
|
||||
columns = columns.strip().split(", ")
|
||||
else:
|
||||
columns = get_columns_from_uniq_cons_or_name(columns)
|
||||
raise exception.DBDuplicateEntry(columns, integrity_error)
|
||||
|
||||
|
||||
# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
#                     'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
    "mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}


def _raise_if_deadlock_error(operational_error, engine_name):
    """Raise exception on deadlock condition.

    Raise DBDeadlock exception if OperationalError contains a Deadlock
    condition.
    """
    re = _DEADLOCK_RE_DB.get(engine_name)
    if re is None:
        return
    m = re.match(operational_error.message)
    if not m:
        return
    raise exception.DBDeadlock(operational_error)


def _wrap_db_error(f):
    def _wrap(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except UnicodeEncodeError:
            raise exception.DBInvalidUnicodeParameter()
        # note(boris-42): We should catch unique constraint violation and
        # wrap it by our own DBDuplicateEntry exception. Unique constraint
        # violation is wrapped by IntegrityError.
        except sqla_exc.OperationalError as e:
            _raise_if_deadlock_error(e, get_engine().name)
            # NOTE(comstud): A lot of code is checking for OperationalError
            # so let's not wrap it for now.
            raise
        except sqla_exc.IntegrityError as e:
            # note(boris-42): SqlAlchemy doesn't unify errors from different
            # DBs so we must do this. Also in some tables (for example
            # instance_types) there is more than one unique constraint. This
            # means we should get the names of the columns whose values
            # violate the unique constraint from the error message.
            _raise_if_duplicate_entry_error(e, get_engine().name)
            raise exception.DBError(e)
        except Exception as e:
            LOG.exception(_('DB exception wrapped.'))
            raise exception.DBError(e)
    _wrap.func_name = f.func_name
    return _wrap
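
# A standalone sketch (invented names, no database required) of the same
# wrap-and-translate pattern that _wrap_db_error applies to the Session
# methods further below: every error a callable raises is surfaced as one
# well-known exception type.
def _example_wrap(f):
    def _wrapped(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except ValueError as e:    # stands in for the sqlalchemy exceptions
            raise RuntimeError(e)  # stands in for exception.DBError
    _wrapped.func_name = f.func_name
    return _wrapped
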
def get_engine(sqlite_fk=False, slave_engine=False):
    """Return a SQLAlchemy engine."""
    global _ENGINE
    global _SLAVE_ENGINE
    engine = _ENGINE
    db_uri = CONF.database.connection

    if slave_engine:
        engine = _SLAVE_ENGINE
        db_uri = CONF.database.slave_connection

    if engine is None:
        engine = create_engine(db_uri,
                               sqlite_fk=sqlite_fk)
        if slave_engine:
            _SLAVE_ENGINE = engine
        else:
            _ENGINE = engine

    return engine


def _synchronous_switch_listener(dbapi_conn, connection_rec):
    """Switch sqlite connections to non-synchronous mode."""
    dbapi_conn.execute("PRAGMA synchronous = OFF")


def _add_regexp_listener(dbapi_con, con_record):
    """Add REGEXP function to sqlite connections."""

    def regexp(expr, item):
        reg = re.compile(expr)
        return reg.search(six.text_type(item)) is not None
    dbapi_con.create_function('regexp', 2, regexp)


def _greenthread_yield(dbapi_con, con_record):
    """Ensure other greenthreads get a chance to be executed.

    Force a context switch. With common database backends (eg MySQLdb and
    sqlite), there is no implicit yield caused by network I/O since they are
    implemented by C libraries that eventlet cannot monkey patch.
    """
    greenthread.sleep(0)


def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
    """Ensures that MySQL connections checked out of the pool are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """
    try:
        dbapi_conn.cursor().execute('select 1')
    except dbapi_conn.OperationalError as ex:
        if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
            LOG.warn(_('Got mysql server has gone away: %s'), ex)
            raise sqla_exc.DisconnectionError("Database server went away")
        else:
            raise


def _is_db_connection_error(args):
    """Return True if error in connecting to db."""
    # NOTE(adam_g): This is currently MySQL specific and needs to be extended
    #               to support Postgres and others.
    conn_err_codes = ('2002', '2003', '2006')
    for err_code in conn_err_codes:
        if args.find(err_code) != -1:
            return True
    return False
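
# For instance (a made-up MySQL disconnect string), the check above keys
# off the error-code substring:
if __name__ == '__main__':
    print(_is_db_connection_error("(2006, 'MySQL server has gone away')"))
    # prints: True
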
def create_engine(sql_connection, sqlite_fk=False):
    """Return a new SQLAlchemy engine."""
    # NOTE(geekinutah): At this point we could be connecting to the normal
    #                   db handle or the slave db handle. Things like
    #                   _wrap_db_error aren't going to work well if their
    #                   backends don't match. Let's check.
    _assert_matching_drivers()
    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
        "pool_recycle": CONF.database.idle_timeout,
        "echo": False,
        'convert_unicode': True,
    }

    # Map our SQL debug level to SQLAlchemy's options
    if CONF.database.connection_debug >= 100:
        engine_args['echo'] = 'debug'
    elif CONF.database.connection_debug >= 50:
        engine_args['echo'] = True

    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

        if CONF.database.connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        if CONF.database.max_pool_size is not None:
            engine_args['pool_size'] = CONF.database.max_pool_size
        if CONF.database.max_overflow is not None:
            engine_args['max_overflow'] = CONF.database.max_overflow
        if CONF.database.pool_timeout is not None:
            engine_args['pool_timeout'] = CONF.database.pool_timeout

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    sqlalchemy.event.listen(engine, 'checkin', _greenthread_yield)

    if 'mysql' in connection_dict.drivername:
        sqlalchemy.event.listen(engine, 'checkout', _ping_listener)
    elif 'sqlite' in connection_dict.drivername:
        if not CONF.sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

    if (CONF.database.connection_trace and
            engine.dialect.dbapi.__name__ == 'MySQLdb'):
        _patch_mysqldb_with_stacktrace_comments()

    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise

        remaining = CONF.database.max_retries
        if remaining == -1:
            remaining = 'infinite'
        while True:
            msg = _('SQL connection failed. %s attempts left.')
            LOG.warn(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(CONF.database.retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine


class Query(sqlalchemy.orm.query.Query):
    """Subclass of sqlalchemy.query with soft_delete() method."""
    def soft_delete(self, synchronize_session='evaluate'):
        return self.update({'deleted': literal_column('id'),
                            'updated_at': literal_column('updated_at'),
                            'deleted_at': timeutils.utcnow()},
                           synchronize_session=synchronize_session)
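
# A runnable sketch of soft_delete() against an in-memory sqlite database;
# the _Thing model is invented for illustration, Query is the class above.
if __name__ == '__main__':
    from sqlalchemy import Column, DateTime, Integer
    from sqlalchemy.ext.declarative import declarative_base

    _Base = declarative_base()

    class _Thing(_Base):
        __tablename__ = 'things'
        id = Column(Integer, primary_key=True)
        deleted = Column(Integer, default=0)
        deleted_at = Column(DateTime)
        updated_at = Column(DateTime)

    _engine = sqlalchemy.create_engine('sqlite://')
    _Base.metadata.create_all(_engine)
    _session = sqlalchemy.orm.sessionmaker(bind=_engine, query_cls=Query)()
    _session.add(_Thing(id=7))
    _session.commit()
    _session.query(_Thing).filter_by(id=7).soft_delete(
        synchronize_session=False)
    _session.expire_all()
    # The row is still there, but marked deleted with its own id.
    print(_session.query(_Thing).get(7).deleted)  # prints: 7
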
class Session(sqlalchemy.orm.session.Session):
    """Custom Session class to avoid SqlAlchemy Session monkey patching."""
    @_wrap_db_error
    def query(self, *args, **kwargs):
        return super(Session, self).query(*args, **kwargs)

    @_wrap_db_error
    def flush(self, *args, **kwargs):
        return super(Session, self).flush(*args, **kwargs)

    @_wrap_db_error
    def execute(self, *args, **kwargs):
        return super(Session, self).execute(*args, **kwargs)


def get_maker(engine, autocommit=True, expire_on_commit=False):
    """Return a SQLAlchemy sessionmaker using the given engine."""
    return sqlalchemy.orm.sessionmaker(bind=engine,
                                       class_=Session,
                                       autocommit=autocommit,
                                       expire_on_commit=expire_on_commit,
                                       query_cls=Query)
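
# Typical wiring of the helpers above (a sketch only, assuming
# CONF.database.connection has been set by the consuming service):
#
#     engine = get_engine()
#     maker = get_maker(engine)
#     session = maker()
#     with session.begin():
#         session.add(some_model_instance)  # invented name
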
def _patch_mysqldb_with_stacktrace_comments():
    """Adds current stack trace as a comment in queries.

    Patches MySQLdb.cursors.BaseCursor._do_query.
    """
    import MySQLdb.cursors
    import traceback

    old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query

    def _do_query(self, q):
        stack = ''
        for file, line, method, function in traceback.extract_stack():
            # exclude various common things from trace
            if file.endswith('session.py') and method == '_do_query':
                continue
            if file.endswith('api.py') and method == 'wrapper':
                continue
            if file.endswith('utils.py') and method == '_inner':
                continue
            if file.endswith('exception.py') and method == '_wrap':
                continue
            # db/api is just a wrapper around db/sqlalchemy/api
            if file.endswith('db/api.py'):
                continue
            # only trace inside oslo
            index = file.rfind('oslo')
            if index == -1:
                continue
            stack += "File:%s:%s Method:%s() Line:%s | " \
                     % (file[index:], line, method, function)

        # strip trailing " | " from stack
        if stack:
            stack = stack[:-3]
            qq = "%s /* %s */" % (q, stack)
        else:
            qq = q
        old_mysql_do_query(self, qq)

    setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)


def _assert_matching_drivers():
    """Make sure slave handle and normal handle have the same driver."""
    # NOTE(geekinutah): There's no use case for writing to one backend and
    #                   reading from another. Who knows what the future holds?
    if CONF.database.slave_connection == '':
        return

    normal = sqlalchemy.engine.url.make_url(CONF.database.connection)
    slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection)
    assert normal.drivername == slave.drivername
@ -1,132 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Implementation of paginate query."""

import sqlalchemy

from openstack.common.gettextutils import _
from openstack.common import log as logging


LOG = logging.getLogger(__name__)


class InvalidSortKey(Exception):
    message = _("Sort key supplied was not valid.")


# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming it is 'id'
        LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))

    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        sort_dir_func = {
            'asc': sqlalchemy.asc,
            'desc': sqlalchemy.desc,
        }[current_sort_dir]

        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(0, len(sort_keys)):
            crit_attrs = []
            for j in range(0, i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            elif sort_dirs[i] == 'asc':
                crit_attrs.append((model_attr > marker_values[i]))
            else:
                raise ValueError(_("Unknown sort direction, "
                                   "must be 'desc' or 'asc'"))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query
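
# A runnable sketch of paginate_query with an invented User model and an
# in-memory sqlite database:
if __name__ == '__main__':
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String(32))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add_all([User(id=i, name='u%d' % i) for i in range(1, 6)])
    session.commit()

    page1 = paginate_query(session.query(User), User,
                           limit=2, sort_keys=['name', 'id']).all()
    marker = page1[-1]  # last row of the previous page
    page2 = paginate_query(session.query(User), User,
                           limit=2, sort_keys=['name', 'id'],
                           marker=marker).all()
    print([u.name for u in page2])  # prints: ['u3', 'u4']
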
@ -1,21 +0,0 @@
# Copyright 2013 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import warnings

msg = ("Modules in this package are deprecated "
       "and will be removed in future releases")

warnings.warn(msg, DeprecationWarning)
@ -1,725 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Utility methods for working with WSGI servers."""

import eventlet
eventlet.patcher.monkey_patch(all=False, socket=True)

import datetime
import errno
import socket
import time

import eventlet.wsgi
from oslo.config import cfg
import routes
import routes.middleware
import six
import webob.dec
import webob.exc
from xml.dom import minidom
from xml.parsers import expat

from openstack.common import exception
from openstack.common.gettextutils import _
from openstack.common import jsonutils
from openstack.common import log as logging
from openstack.common import service
from openstack.common import sslutils
from openstack.common import xmlutils

socket_opts = [
    cfg.IntOpt('backlog',
               default=4096,
               help="Number of backlog requests to configure the socket with"),
    cfg.IntOpt('tcp_keepidle',
               default=600,
               help="Sets the value of TCP_KEEPIDLE in seconds for each "
                    "server socket. Not supported on OS X."),
]

CONF = cfg.CONF
CONF.register_opts(socket_opts)

LOG = logging.getLogger(__name__)


def run_server(application, port, **kwargs):
    """Run a WSGI server with the given application."""
    sock = eventlet.listen(('0.0.0.0', port))
    eventlet.wsgi.server(sock, application, **kwargs)
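
# A quick sanity check of run_server with a trivial WSGI app; the handler
# below is invented for illustration and port 8080 is arbitrary.
def _hello_app(environ, start_response):
    """Minimal WSGI callable: always answer 200 with a fixed body."""
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['Hello, WSGI\n']

# run_server(_hello_app, 8080)  # would block, serving on 0.0.0.0:8080
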
class Service(service.Service):
    """Provides a Service API for wsgi servers.

    This gives us the ability to launch wsgi servers with the
    Launcher classes in service.py.
    """

    def __init__(self, application, port,
                 host='0.0.0.0', backlog=4096, threads=1000):
        self.application = application
        self._port = port
        self._host = host
        self._backlog = backlog if backlog else CONF.backlog
        self._socket = self._get_socket(host, port, self._backlog)
        super(Service, self).__init__(threads)

    def _get_socket(self, host, port, backlog):
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        info = socket.getaddrinfo(host,
                                  port,
                                  socket.AF_UNSPEC,
                                  socket.SOCK_STREAM)[0]
        family = info[0]
        bind_addr = info[-1]

        sock = None
        retry_until = time.time() + 30
        while not sock and time.time() < retry_until:
            try:
                sock = eventlet.listen(bind_addr,
                                       backlog=backlog,
                                       family=family)
                if sslutils.is_enabled():
                    sock = sslutils.wrap(sock)

            except socket.error as err:
                if err.args[0] != errno.EADDRINUSE:
                    raise
                eventlet.sleep(0.1)
        if not sock:
            raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
                                 "after trying for 30 seconds") %
                               {'host': host, 'port': port})
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # sockets can hang around forever without keepalive
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

        # This option isn't available in the OS X version of eventlet
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPIDLE,
                            CONF.tcp_keepidle)

        return sock

    def start(self):
        """Start serving this service using the provided server instance.

        :returns: None

        """
        super(Service, self).start()
        self.tg.add_thread(self._run, self.application, self._socket)

    @property
    def backlog(self):
        return self._backlog

    @property
    def host(self):
        return self._socket.getsockname()[0] if self._socket else self._host

    @property
    def port(self):
        return self._socket.getsockname()[1] if self._socket else self._port

    def stop(self):
        """Stop serving this API.

        :returns: None

        """
        super(Service, self).stop()

    def _run(self, application, socket):
        """Start a WSGI server in a new green thread."""
        logger = logging.getLogger('eventlet.wsgi')
        eventlet.wsgi.server(socket,
                             application,
                             custom_pool=self.tg.pool,
                             log=logging.WritableLogger(logger))


class Router(object):

    """WSGI middleware that maps incoming requests to WSGI apps."""

    def __init__(self, mapper):
        """Create a router for the given routes.Mapper.

        Each route in `mapper` must specify a 'controller', which is a
        WSGI app to call. You'll probably want to specify an 'action' as
        well and have your controller be a wsgi.Controller, which will route
        the request to the action method.

        Examples:
          mapper = routes.Mapper()
          sc = ServerController()

          # Explicit mapping of one route to a controller+action
          mapper.connect(None, "/svrlist", controller=sc, action="list")

          # Actions are all implicitly defined
          mapper.resource("server", "servers", controller=sc)

          # Pointing to an arbitrary WSGI app. You can specify the
          # {path_info:.*} parameter so the target app can be handed just that
          # section of the URL.
          mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
        """
        self.map = mapper
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify
    def __call__(self, req):
        """Route the incoming request to a controller based on self.map.

        If no match, return a 404.
        """
        return self._router

    @staticmethod
    @webob.dec.wsgify
    def _dispatch(req):
        """Gets application from the environment.

        Called by self._router after matching the incoming request to a route
        and putting the information into req.environ. Either returns 404
        or the routed WSGI app's response.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            return webob.exc.HTTPNotFound()
        app = match['controller']
        return app


class Request(webob.Request):
    """Add some OpenStack API-specific logic to the base webob.Request."""

    default_request_content_types = ('application/json', 'application/xml')
    default_accept_types = ('application/json', 'application/xml')
    default_accept_type = 'application/json'

    def best_match_content_type(self, supported_content_types=None):
        """Determine the requested response content-type.

        Based on the query extension then the Accept header.
        Defaults to default_accept_type if we don't find a preference

        """
        supported_content_types = (supported_content_types or
                                   self.default_accept_types)

        parts = self.path.rsplit('.', 1)
        if len(parts) > 1:
            ctype = 'application/{0}'.format(parts[1])
            if ctype in supported_content_types:
                return ctype

        bm = self.accept.best_match(supported_content_types)
        return bm or self.default_accept_type

    def get_content_type(self, allowed_content_types=None):
        """Determine content type of the request body.

        Does not do any body introspection, only checks header

        """
        if "Content-Type" not in self.headers:
            return None

        content_type = self.content_type
        allowed_content_types = (allowed_content_types or
                                 self.default_request_content_types)

        if content_type not in allowed_content_types:
            raise exception.InvalidContentType(content_type=content_type)
        return content_type


class Resource(object):
    """WSGI app that handles (de)serialization and controller dispatch.

    Reads routing information supplied by RoutesMiddleware and calls
    the requested action method upon its deserializer, controller,
    and serializer. Those three objects may implement any of the basic
    controller action methods (create, update, show, index, delete)
    along with any that may be specified in the api router. A 'default'
    method may also be implemented to be used in place of any
    non-implemented actions. Deserializer methods must accept a request
    argument and return a dictionary. Controller methods must accept a
    request argument. Additionally, they must also accept keyword
    arguments that represent the keys returned by the Deserializer. They
    may raise a webob.exc exception or return a dict, which will be
    serialized by requested content type.
    """
    def __init__(self, controller, deserializer=None, serializer=None):
        """Initiates Resource object.

        :param controller: object that implements methods created by routes
                           lib
        :param deserializer: object that supports webob request deserialization
                             through controller-like actions
        :param serializer: object that supports webob response serialization
                           through controller-like actions
        """
        self.controller = controller
        self.serializer = serializer or ResponseSerializer()
        self.deserializer = deserializer or RequestDeserializer()

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""

        try:
            action, action_args, accept = self.deserialize_request(request)
        except exception.InvalidContentType:
            msg = _("Unsupported Content-Type")
            return webob.exc.HTTPUnsupportedMediaType(explanation=msg)
        except exception.MalformedRequestBody:
            msg = _("Malformed request body")
            return webob.exc.HTTPBadRequest(explanation=msg)

        action_result = self.execute_action(action, request, **action_args)
        try:
            return self.serialize_response(action, action_result, accept)
        # return unserializable result (typically a webob exc)
        except Exception:
            return action_result

    def deserialize_request(self, request):
        return self.deserializer.deserialize(request)

    def serialize_response(self, action, action_result, accept):
        return self.serializer.serialize(action_result, accept, action)

    def execute_action(self, action, request, **action_args):
        return self.dispatch(self.controller, action, request, **action_args)

    def dispatch(self, obj, action, *args, **kwargs):
        """Find action-specific method on self and call it."""
        try:
            method = getattr(obj, action)
        except AttributeError:
            method = getattr(obj, 'default')

        return method(*args, **kwargs)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
        except Exception:
            return {}

        try:
            del args['controller']
        except KeyError:
            pass

        try:
            del args['format']
        except KeyError:
            pass

        return args
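
# A hypothetical end-to-end wiring of Router and Resource; the controller
# class is invented, everything else is defined in this module. The call is
# left commented out because it would block serving requests.
class _ItemsController(object):
    def index(self, request):
        return {'items': []}


def _build_example_app():
    mapper = routes.Mapper()
    mapper.connect(None, '/items',
                   controller=Resource(_ItemsController()), action='index')
    return Router(mapper)

# run_server(_build_example_app(), 8080)  # GET /items -> {"items": []}
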
class ActionDispatcher(object):
    """Maps method name to local methods through action name."""

    def dispatch(self, *args, **kwargs):
        """Find and call local method."""
        action = kwargs.pop('action', 'default')
        action_method = getattr(self, str(action), self.default)
        return action_method(*args, **kwargs)

    def default(self, data):
        raise NotImplementedError()


class DictSerializer(ActionDispatcher):
    """Default request body serialization."""

    def serialize(self, data, action='default'):
        return self.dispatch(data, action=action)

    def default(self, data):
        return ""


class JSONDictSerializer(DictSerializer):
    """Default JSON request body serialization."""

    def default(self, data):
        def sanitizer(obj):
            if isinstance(obj, datetime.datetime):
                _dtime = obj - datetime.timedelta(microseconds=obj.microsecond)
                return _dtime.isoformat()
            return six.text_type(obj)
        return jsonutils.dumps(data, default=sanitizer)
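
# The same sanitizer idea, exercised with the stdlib json module (jsonutils
# is a thin wrapper around it); runnable on its own.
if __name__ == '__main__':
    import json

    def _sanitizer(obj):
        # Trim sub-second precision, mirroring the serializer above.
        if isinstance(obj, datetime.datetime):
            return (obj - datetime.timedelta(
                microseconds=obj.microsecond)).isoformat()
        return str(obj)

    print(json.dumps({'at': datetime.datetime(2013, 7, 1, 12, 30, 45, 123456)},
                     default=_sanitizer))
    # prints: {"at": "2013-07-01T12:30:45"}
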
class XMLDictSerializer(DictSerializer):

    def __init__(self, metadata=None, xmlns=None):
        """Initiates XMLDictSerializer object.

        :param metadata: information needed to deserialize xml into
                         a dictionary.
        :param xmlns: XML namespace to include with serialized xml
        """
        super(XMLDictSerializer, self).__init__()
        self.metadata = metadata or {}
        self.xmlns = xmlns

    def default(self, data):
        # We expect data to contain a single key which is the XML root.
        root_key = data.keys()[0]
        doc = minidom.Document()
        node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])

        return self.to_xml_string(node)

    def to_xml_string(self, node, has_atom=False):
        self._add_xmlns(node, has_atom)
        return node.toprettyxml(indent='    ', encoding='UTF-8')

    #NOTE (ameade): the has_atom should be removed after all of the
    # xml serializers and view builders have been updated to the current
    # spec that required all responses include the xmlns:atom, the has_atom
    # flag is to prevent current tests from breaking
    def _add_xmlns(self, node, has_atom=False):
        if self.xmlns is not None:
            node.setAttribute('xmlns', self.xmlns)
        if has_atom:
            node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")

    def _to_xml_node(self, doc, metadata, nodename, data):
        """Recursive method to convert data members to XML nodes."""
        result = doc.createElement(nodename)

        # Set the xml namespace if one is specified
        # TODO(justinsb): We could also use prefixes on the keys
        xmlns = metadata.get('xmlns', None)
        if xmlns:
            result.setAttribute('xmlns', xmlns)

        #TODO(bcwaldon): accomplish this without a type-check
        if type(data) is list:
            collections = metadata.get('list_collections', {})
            if nodename in collections:
                metadata = collections[nodename]
                for item in data:
                    node = doc.createElement(metadata['item_name'])
                    node.setAttribute(metadata['item_key'], str(item))
                    result.appendChild(node)
                return result
            singular = metadata.get('plurals', {}).get(nodename, None)
            if singular is None:
                if nodename.endswith('s'):
                    singular = nodename[:-1]
                else:
                    singular = 'item'
            for item in data:
                node = self._to_xml_node(doc, metadata, singular, item)
                result.appendChild(node)
        #TODO(bcwaldon): accomplish this without a type-check
        elif type(data) is dict:
            collections = metadata.get('dict_collections', {})
            if nodename in collections:
                metadata = collections[nodename]
                for k, v in data.items():
                    node = doc.createElement(metadata['item_name'])
                    node.setAttribute(metadata['item_key'], str(k))
                    text = doc.createTextNode(str(v))
                    node.appendChild(text)
                    result.appendChild(node)
                return result
            attrs = metadata.get('attributes', {}).get(nodename, {})
            for k, v in data.items():
                if k in attrs:
                    result.setAttribute(k, str(v))
                else:
                    node = self._to_xml_node(doc, metadata, k, v)
                    result.appendChild(node)
        else:
            # Type is atom
            node = doc.createTextNode(str(data))
            result.appendChild(node)
        return result

    def _create_link_nodes(self, xml_doc, links):
        link_nodes = []
        for link in links:
            link_node = xml_doc.createElement('atom:link')
            link_node.setAttribute('rel', link['rel'])
            link_node.setAttribute('href', link['href'])
            if 'type' in link:
                link_node.setAttribute('type', link['type'])
            link_nodes.append(link_node)
        return link_nodes


class ResponseHeadersSerializer(ActionDispatcher):
    """Default response headers serialization."""

    def serialize(self, response, data, action):
        self.dispatch(response, data, action=action)

    def default(self, response, data):
        response.status_int = 200


class ResponseSerializer(object):
    """Encode the necessary pieces into a response object."""

    def __init__(self, body_serializers=None, headers_serializer=None):
        self.body_serializers = {
            'application/xml': XMLDictSerializer(),
            'application/json': JSONDictSerializer(),
        }
        self.body_serializers.update(body_serializers or {})

        self.headers_serializer = (headers_serializer or
                                   ResponseHeadersSerializer())

    def serialize(self, response_data, content_type, action='default'):
        """Serialize a dict into a string and wrap in a wsgi.Request object.

        :param response_data: dict produced by the Controller
        :param content_type: expected mimetype of serialized response body

        """
        response = webob.Response()
        self.serialize_headers(response, response_data, action)
        self.serialize_body(response, response_data, content_type, action)
        return response

    def serialize_headers(self, response, data, action):
        self.headers_serializer.serialize(response, data, action)

    def serialize_body(self, response, data, content_type, action):
        response.headers['Content-Type'] = content_type
        if data is not None:
            serializer = self.get_body_serializer(content_type)
            response.body = serializer.serialize(data, action)

    def get_body_serializer(self, content_type):
        try:
            return self.body_serializers[content_type]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)


class RequestHeadersDeserializer(ActionDispatcher):
    """Default request headers deserializer."""

    def deserialize(self, request, action):
        return self.dispatch(request, action=action)

    def default(self, request):
        return {}


class RequestDeserializer(object):
    """Break up a Request object into more useful pieces."""

    def __init__(self, body_deserializers=None, headers_deserializer=None,
                 supported_content_types=None):

        self.supported_content_types = supported_content_types

        self.body_deserializers = {
            'application/xml': XMLDeserializer(),
            'application/json': JSONDeserializer(),
        }
        self.body_deserializers.update(body_deserializers or {})

        self.headers_deserializer = (headers_deserializer or
                                     RequestHeadersDeserializer())

    def deserialize(self, request):
        """Extract necessary pieces of the request.

        :param request: Request object
        :returns: tuple of (expected controller action name, dictionary of
                  keyword arguments to pass to the controller, the expected
                  content type of the response)

        """
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)

        action_args.update(self.deserialize_headers(request, action))
        action_args.update(self.deserialize_body(request, action))

        accept = self.get_expected_content_type(request)

        return (action, action_args, accept)

    def deserialize_headers(self, request, action):
        return self.headers_deserializer.deserialize(request, action)

    def deserialize_body(self, request, action):
        if not request.body:
            LOG.debug(_("Empty body provided in request"))
            return {}

        try:
            content_type = request.get_content_type()
        except exception.InvalidContentType:
            LOG.debug(_("Unrecognized Content-Type provided in request"))
            raise

        if content_type is None:
            LOG.debug(_("No Content-Type provided in request"))
            return {}

        try:
            deserializer = self.get_body_deserializer(content_type)
        except exception.InvalidContentType:
            LOG.debug(_("Unable to deserialize body as provided Content-Type"))
            raise

        return deserializer.deserialize(request.body, action)

    def get_body_deserializer(self, content_type):
        try:
            return self.body_deserializers[content_type]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)

    def get_expected_content_type(self, request):
        return request.best_match_content_type(self.supported_content_types)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
        except Exception:
            return {}

        try:
            del args['controller']
        except KeyError:
            pass

        try:
            del args['format']
        except KeyError:
            pass

        return args


class TextDeserializer(ActionDispatcher):
    """Default request body deserialization."""

    def deserialize(self, datastring, action='default'):
        return self.dispatch(datastring, action=action)

    def default(self, datastring):
        return {}


class JSONDeserializer(TextDeserializer):

    def _from_json(self, datastring):
        try:
            return jsonutils.loads(datastring)
        except ValueError:
            msg = _("cannot understand JSON")
            raise exception.MalformedRequestBody(reason=msg)

    def default(self, datastring):
        return {'body': self._from_json(datastring)}


class XMLDeserializer(TextDeserializer):

    def __init__(self, metadata=None):
        """Initiates XMLDeserializer object.

        :param metadata: information needed to deserialize xml into
                         a dictionary.
        """
        super(XMLDeserializer, self).__init__()
        self.metadata = metadata or {}

    def _from_xml(self, datastring):
        plurals = set(self.metadata.get('plurals', {}))

        try:
            node = xmlutils.safe_minidom_parse_string(datastring).childNodes[0]
            return {node.nodeName: self._from_xml_node(node, plurals)}
        except expat.ExpatError:
            msg = _("cannot understand XML")
            raise exception.MalformedRequestBody(reason=msg)

    def _from_xml_node(self, node, listnames):
        """Convert a minidom node to a simple Python type.

        :param listnames: list of XML node names whose subnodes should
                          be considered list items.

        """

        if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
            return node.childNodes[0].nodeValue
        elif node.nodeName in listnames:
            return [self._from_xml_node(n, listnames) for n in node.childNodes]
        else:
            result = dict()
            for attr in node.attributes.keys():
                result[attr] = node.attributes[attr].nodeValue
            for child in node.childNodes:
                if child.nodeType != node.TEXT_NODE:
                    result[child.nodeName] = self._from_xml_node(child,
                                                                 listnames)
            return result

    def find_first_child_named(self, parent, name):
        """Search a node's children for the first child with a given name."""
        for node in parent.childNodes:
            if node.nodeName == name:
                return node
        return None

    def find_children_named(self, parent, name):
        """Return all of a node's children that have the given name."""
        for node in parent.childNodes:
            if node.nodeName == name:
                yield node

    def extract_text(self, node):
        """Get the text field contained by the given node."""
        if len(node.childNodes) == 1:
            child = node.childNodes[0]
            if child.nodeType == child.TEXT_NODE:
                return child.nodeValue
        return ""

    def default(self, datastring):
        return {'body': self._from_xml(datastring)}
@ -1,89 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import print_function

import gc
import pprint
import sys
import traceback

import eventlet
import eventlet.backdoor
import greenlet
from oslo.config import cfg

eventlet_backdoor_opts = [
    cfg.IntOpt('backdoor_port',
               default=None,
               help='port for eventlet backdoor to listen')
]

CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)


def _dont_use_this():
    print("Don't use this, just disconnect instead")


def _find_objects(t):
    return filter(lambda o: isinstance(o, t), gc.get_objects())


def _print_greenthreads():
    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
        print(i, gt)
        traceback.print_stack(gt.gr_frame)
        print()


def _print_nativethreads():
    for threadId, stack in sys._current_frames().items():
        print(threadId)
        traceback.print_stack(stack)
        print()


def initialize_if_enabled():
    backdoor_locals = {
        'exit': _dont_use_this,      # So we don't exit the entire process
        'quit': _dont_use_this,      # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }

    if CONF.backdoor_port is None:
        return None

    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook

    sock = eventlet.listen(('localhost', CONF.backdoor_port))
    port = sock.getsockname()[1]
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
@ -1,141 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Exceptions common to OpenStack projects
"""

import logging

from openstack.common.gettextutils import _

_FATAL_EXCEPTION_FORMAT_ERRORS = False


class Error(Exception):
    def __init__(self, message=None):
        super(Error, self).__init__(message)


class ApiError(Error):
    def __init__(self, message='Unknown', code='Unknown'):
        self.message = message
        self.code = code
        super(ApiError, self).__init__('%s: %s' % (code, message))


class NotFound(Error):
    pass


class UnknownScheme(Error):

    msg = "Unknown scheme '%s' found in URI"

    def __init__(self, scheme):
        msg = self.__class__.msg % scheme
        super(UnknownScheme, self).__init__(msg)


class BadStoreUri(Error):

    msg = "The Store URI %s was malformed. Reason: %s"

    def __init__(self, uri, reason):
        msg = self.__class__.msg % (uri, reason)
        super(BadStoreUri, self).__init__(msg)


class Duplicate(Error):
    pass


class NotAuthorized(Error):
    pass


class NotEmpty(Error):
    pass


class Invalid(Error):
    pass


class BadInputError(Exception):
    """Error resulting from a client sending bad input to a server"""
    pass


class MissingArgumentError(Error):
    pass


class DatabaseMigrationError(Error):
    pass


class ClientConnectionError(Exception):
    """Error resulting from a client connecting to a server"""
    pass


def wrap_exception(f):
    def _wrap(*args, **kw):
        try:
            return f(*args, **kw)
        except Exception as e:
            if not isinstance(e, Error):
                #exc_type, exc_value, exc_traceback = sys.exc_info()
                logging.exception(_('Uncaught exception'))
                #logging.error(traceback.extract_stack(exc_traceback))
                raise Error(str(e))
            raise
    _wrap.func_name = f.func_name
    return _wrap


class OpenstackException(Exception):
    """Base Exception class.

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = "An unknown exception occurred"

    def __init__(self, **kwargs):
        try:
            self._error_string = self.message % kwargs

        except Exception:
            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                raise
            else:
                # at least get the core message out if something happened
                self._error_string = self.message

    def __str__(self):
        return self._error_string


class MalformedRequestBody(OpenstackException):
    message = "Malformed message body: %(reason)s"


class InvalidContentType(OpenstackException):
    message = "Invalid content type %(content_type)s"
@ -1,82 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Exception related utilities.
"""

import contextlib
import logging
import sys
import time
import traceback

from openstack.common.gettextutils import _


@contextlib.contextmanager
def save_and_reraise_exception():
    """Save current exception, run some code and then re-raise.

    In some cases the exception context can be cleared, resulting in None
    being attempted to be re-raised after an exception handler is run. This
    can happen when eventlet switches greenthreads or when running an
    exception handler, code raises and catches an exception. In both
    cases the exception context will be cleared.

    To work around this, we save the exception state, run handler code, and
    then re-raise the original exception. If another exception occurs, the
    saved exception is logged and the new exception is re-raised.
    """
    type_, value, tb = sys.exc_info()
    try:
        yield
    except Exception:
        logging.error(_('Original exception being dropped: %s'),
                      traceback.format_exception(type_, value, tb))
        raise
    raise type_, value, tb
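
# A runnable sketch of the context manager above: the handler block runs,
# then the original exception is re-raised for the caller.
if __name__ == '__main__':
    try:
        try:
            raise ValueError('original failure')
        except Exception:
            with save_and_reraise_exception():
                pass  # cleanup / rollback code would go here
    except ValueError as e:
        print(e)  # prints: original failure
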
def forever_retry_uncaught_exceptions(infunc):
    def inner_func(*args, **kwargs):
        last_log_time = 0
        last_exc_message = None
        exc_count = 0
        while True:
            try:
                return infunc(*args, **kwargs)
            except Exception as exc:
                if exc.message == last_exc_message:
                    exc_count += 1
                else:
                    exc_count = 1
                # Do not log any more frequently than once a minute unless
                # the exception message changes
                cur_time = int(time.time())
                if (cur_time - last_log_time > 60 or
                        exc.message != last_exc_message):
                    logging.exception(
                        _('Unexpected exception occurred %d time(s)... '
                          'retrying.') % exc_count)
                    last_log_time = cur_time
                    last_exc_message = exc.message
                    exc_count = 0
                # This should be a very rare event. In case it isn't, do
                # a sleep.
                time.sleep(1)
    return inner_func
@ -1,110 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import contextlib
import errno
import os

from openstack.common import excutils
from openstack.common.gettextutils import _
from openstack.common import log as logging

LOG = logging.getLogger(__name__)

_FILE_CACHE = {}


def ensure_tree(path):
    """Create a directory (and any ancestor directories required)

    :param path: Directory to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            if not os.path.isdir(path):
                raise
        else:
            raise


def read_cached_file(filename, force_reload=False):
    """Read from a file if it has been modified.

    :param force_reload: Whether to reload the file.
    :returns: A tuple with a boolean specifying if the data is fresh
              or not.
    """
    global _FILE_CACHE

    if force_reload and filename in _FILE_CACHE:
        del _FILE_CACHE[filename]

    reloaded = False
    mtime = os.path.getmtime(filename)
    cache_info = _FILE_CACHE.setdefault(filename, {})

    if not cache_info or mtime > cache_info.get('mtime', 0):
        LOG.debug(_("Reloading cached file %s") % filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        reloaded = True
    return (reloaded, cache_info['data'])


def delete_if_exists(path):
    """Delete a file, but ignore file not found error.

    :param path: File to delete
    """

    try:
        os.unlink(path)
    except OSError as e:
        if e.errno == errno.ENOENT:
            return
        else:
            raise


@contextlib.contextmanager
def remove_path_on_error(path):
    """Protect code that wants to operate on PATH atomically.
    Any exception will cause PATH to be removed.

    :param path: File to work with
    """
    try:
        yield
    except Exception:
        with excutils.save_and_reraise_exception():
            delete_if_exists(path)
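
# A runnable sketch: a failure while writing removes the half-written file;
# the temporary path and the error are invented for illustration.
if __name__ == '__main__':
    import tempfile
    _path = tempfile.mktemp()
    try:
        with remove_path_on_error(_path):
            open(_path, 'w').write('partial data')
            raise RuntimeError('simulated failure mid-write')
    except RuntimeError:
        print(os.path.exists(_path))  # prints: False
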
def file_open(*args, **kwargs):
    """Open file

    see built-in file() documentation for more details

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)
    """
    return file(*args, **kwargs)
@ -1,51 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
import mock


class PatchObject(fixtures.Fixture):
    """Deal with code around mock."""

    def __init__(self, obj, attr, **kwargs):
        self.obj = obj
        self.attr = attr
        self.kwargs = kwargs

    def setUp(self):
        super(PatchObject, self).setUp()
        _p = mock.patch.object(self.obj, self.attr, **self.kwargs)
        self.mock = _p.start()
        self.addCleanup(_p.stop)


class Patch(fixtures.Fixture):

    """Deal with code around mock.patch."""

    def __init__(self, obj, **kwargs):
        self.obj = obj
        self.kwargs = kwargs

    def setUp(self):
        super(Patch, self).setUp()
        _p = mock.patch(self.obj, **self.kwargs)
        self.mock = _p.start()
        self.addCleanup(_p.stop)
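
# A hypothetical test-case sketch; testtools and a module-under-test with a
# utils.fetch function are assumed, not defined here:
#
#     class ExampleTest(testtools.TestCase):
#         def test_fetch_is_stubbed(self):
#             fixture = self.useFixture(Patch('mymod.utils.fetch'))
#             fixture.mock.return_value = 'stub'
#             self.assertEqual('stub', mymod.utils.fetch())
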
@ -1,37 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
import mox
import stubout


class MoxStubout(fixtures.Fixture):
    """Deal with code around mox and stubout as a fixture."""

    def setUp(self):
        super(MoxStubout, self).setUp()
        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.stubs.UnsetAll)
        self.addCleanup(self.stubs.SmartUnsetAll)
        self.addCleanup(self.mox.VerifyAll)
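For reference, a hedged sketch of the MoxStubout fixture in a test; the import path is an assumption and the mock object is illustrative. The cleanups registered above (VerifyAll, UnsetStubs, ...) run automatically when the fixture is torn down:

    import testtools

    from openstack.common.fixture import moxstubout

    class ServiceTest(testtools.TestCase):
        def test_with_mox(self):
            fix = self.useFixture(moxstubout.MoxStubout())
            mobj = fix.mox.CreateMockAnything()
            mobj.ping().AndReturn('pong')
            fix.mox.ReplayAll()
            self.assertEqual('pong', mobj.ping())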
@ -1,76 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Utility methods for working with functions/decorators."""

import inspect


def get_wrapped_function(function):
    """Get the method at the bottom of a stack of decorators."""

    if not hasattr(function, 'func_closure') or not function.func_closure:
        return function

    def _get_wrapped_function(function):
        if not hasattr(function, 'func_closure') or not function.func_closure:
            return None

        for closure in function.func_closure:
            func = closure.cell_contents

            deeper_func = _get_wrapped_function(func)
            if deeper_func:
                return deeper_func
            elif hasattr(closure.cell_contents, '__call__'):
                return closure.cell_contents

    return _get_wrapped_function(function)


def getcallargs(function, *args, **kwargs):
    """This is a simplified inspect.getcallargs (2.7+).

    It should be replaced when python >= 2.7 is standard.
    """

    keyed_args = {}
    argnames, varargs, keywords, defaults = inspect.getargspec(function)

    keyed_args.update(kwargs)

    # NOTE(alaski) the implicit 'self' or 'cls' argument shows up in
    # argnames but not in args or kwargs. Uses 'in' rather than '==' because
    # some tests use 'self2'.
    if 'self' in argnames[0] or 'cls' == argnames[0]:
        # The function may not actually be a method or have im_self.
        # Typically seen when it's stubbed with mox.
        if inspect.ismethod(function) and hasattr(function, 'im_self'):
            keyed_args[argnames[0]] = function.im_self
        else:
            keyed_args[argnames[0]] = None

    remaining_argnames = filter(lambda x: x not in keyed_args, argnames)
    keyed_args.update(dict(zip(remaining_argnames, args)))

    if defaults:
        num_defaults = len(defaults)
        for argname, value in zip(argnames[-num_defaults:], defaults):
            if argname not in keyed_args:
                keyed_args[argname] = value

    return keyed_args
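For reference, a hedged sketch of these two helpers; it assumes get_wrapped_function() and getcallargs() are in scope (this hunk does not show the module's import path) and uses Python 2 semantics, matching the func_closure attribute above. The noisy decorator and the add function are illustrative:

    def noisy(func):
        def wrapper(*args, **kwargs):
            print('calling %s' % func.__name__)
            return func(*args, **kwargs)
        return wrapper

    @noisy
    def add(a, b=10):
        return a + b

    original = get_wrapped_function(add)
    print(original.__name__)         # 'add', not 'wrapper'
    print(getcallargs(original, 1))  # {'a': 1, 'b': 10}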
@ -1,226 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
gettext for openstack-common modules.

Usual usage in an openstack.common module:

    from openstack.common.gettextutils import _
"""

import copy
import gettext
import logging.handlers
import os
import UserString

_localedir = os.environ.get('oslo'.upper() + '_LOCALEDIR')
_t = gettext.translation('oslo', localedir=_localedir, fallback=True)


def _(msg):
    return _t.ugettext(msg)


def install(domain):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).
    """
    gettext.install(domain,
                    localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
                    unicode=True)


"""
Lazy gettext functionality.

The following is an attempt to introduce a deferred way
to do translations on messages in OpenStack. We attempt to
override the standard _() function and % (format string) operation
to build Message objects that can later be translated when we have
more information. Also included is an example LogHandler that
translates Messages to an associated locale, effectively allowing
many logs, each with their own locale.
"""


def get_lazy_gettext(domain):
    """Assemble and return a lazy gettext function for a given domain.

    Factory method for a project/module to get a lazy gettext function
    for its own translation domain (i.e. nova, glance, cinder, etc.)
    """

    def _lazy_gettext(msg):
        """Create and return a Message object.

        Message encapsulates a string so that we can translate it later when
        needed.
        """
        return Message(msg, domain)

    return _lazy_gettext


class Message(UserString.UserString, object):
    """Class used to encapsulate translatable messages."""
    def __init__(self, msg, domain):
        # _msg is the gettext msgid and should never change
        self._msg = msg
        self._left_extra_msg = ''
        self._right_extra_msg = ''
        self.params = None
        self.locale = None
        self.domain = domain

    @property
    def data(self):
        # NOTE(mrodden): this should always resolve to a unicode string
        # that best represents the state of the message currently

        localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
        if self.locale:
            lang = gettext.translation(self.domain,
                                       localedir=localedir,
                                       languages=[self.locale],
                                       fallback=True)
        else:
            # use system locale for translations
            lang = gettext.translation(self.domain,
                                       localedir=localedir,
                                       fallback=True)

        full_msg = (self._left_extra_msg +
                    lang.ugettext(self._msg) +
                    self._right_extra_msg)

        if self.params is not None:
            full_msg = full_msg % self.params

        return unicode(full_msg)

    def _save_parameters(self, other):
        # we check for None later to see if
        # we actually have parameters to inject,
        # so encapsulate if our parameter is actually None
        if other is None:
            self.params = (other, )
        else:
            self.params = copy.deepcopy(other)

        return self

    # overrides to be more string-like
    def __unicode__(self):
        return self.data

    def __str__(self):
        return self.data.encode('utf-8')

    def __getstate__(self):
        to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
                   'domain', 'params', 'locale']
        new_dict = self.__dict__.fromkeys(to_copy)
        for attr in to_copy:
            new_dict[attr] = copy.deepcopy(self.__dict__[attr])

        return new_dict

    def __setstate__(self, state):
        for (k, v) in state.items():
            setattr(self, k, v)

    # operator overloads
    def __add__(self, other):
        copied = copy.deepcopy(self)
        copied._right_extra_msg += other.__str__()
        return copied

    def __radd__(self, other):
        copied = copy.deepcopy(self)
        copied._left_extra_msg += other.__str__()
        return copied

    def __mod__(self, other):
        # do a format string to catch and raise
        # any possible KeyErrors from missing parameters
        self.data % other
        copied = copy.deepcopy(self)
        return copied._save_parameters(other)

    def __mul__(self, other):
        return self.data * other

    def __rmul__(self, other):
        return other * self.data

    def __getitem__(self, key):
        return self.data[key]

    def __getslice__(self, start, end):
        return self.data.__getslice__(start, end)

    def __getattribute__(self, name):
        # NOTE(mrodden): handle lossy operations that we can't deal with yet
        # These override the UserString implementation, since UserString
        # uses our __class__ attribute to try and build a new message
        # after running the inner data string through the operation.
        # At that point, we have lost the gettext message id and can just
        # safely resolve to a string instead.
        ops = ['capitalize', 'center', 'decode', 'encode',
               'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
               'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
        if name in ops:
            return getattr(self.data, name)
        else:
            return UserString.UserString.__getattribute__(self, name)


class LocaleHandler(logging.Handler):
    """Handler that can have a locale associated to translate Messages.

    A quick example of how to utilize the Message class above.
    LocaleHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating the internal Message.
    """

    def __init__(self, locale, target):
        """Initialize a LocaleHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        logging.Handler.__init__(self)
        self.locale = locale
        self.target = target

    def emit(self, record):
        if isinstance(record.msg, Message):
            # set the locale and resolve to a string
            record.msg.locale = self.locale

        self.target.emit(record)
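For reference, a hedged sketch of the deferred-translation flow described above; the 'myproj' domain is illustrative and no catalogs are needed because gettext falls back to the msgid. Python 2, matching the unicode usage in the module:

    from openstack.common import gettextutils

    _ = gettextutils.get_lazy_gettext('myproj')
    msg = _('Instance %(id)s failed') % {'id': 'abc-123'}

    # msg is a Message, not a str: msgid and parameters are kept so a
    # handler (e.g. LocaleHandler) can pick the locale at emit time.
    msg.locale = 'es'
    print(unicode(msg))  # u'Instance abc-123 failed' without a catalog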
@ -1,68 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Import related utilities and helper functions.
"""

import sys
import traceback


def import_class(import_str):
    """Returns a class from a string including module and class."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    except (ValueError, AttributeError):
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_str,
                           traceback.format_exception(*sys.exc_info())))


def import_object(import_str, *args, **kwargs):
    """Import a class and return an instance of it."""
    return import_class(import_str)(*args, **kwargs)


def import_object_ns(name_space, import_str, *args, **kwargs):
    """Tries to import object from default namespace.

    Imports a class and returns an instance of it, first by trying
    to find the class in a default namespace, then falling back to
    a full path if not found in the default namespace.
    """
    import_value = "%s.%s" % (name_space, import_str)
    try:
        return import_class(import_value)(*args, **kwargs)
    except ImportError:
        return import_class(import_str)(*args, **kwargs)


def import_module(import_str):
    """Import a module."""
    __import__(import_str)
    return sys.modules[import_str]


def try_import(import_str, default=None):
    """Try to import a module and if it fails return default."""
    try:
        return import_module(import_str)
    except ImportError:
        return default
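For reference, a sketch of these helpers using only stdlib targets, so it runs without any OpenStack services installed:

    from openstack.common import importutils

    print(importutils.import_class('collections.OrderedDict'))
    counter = importutils.import_object('collections.Counter', 'banana')
    print(counter.most_common(1))  # [('a', 3)]

    json_mod = importutils.import_module('json')
    fallback = importutils.try_import('no_such_module', default=json_mod)
    print(fallback is json_mod)    # True: the default was returned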
@ -1,169 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

'''
JSON related utilities.

This module provides a few things:

    1) A handy function for getting an object down to something that can be
       JSON serialized. See to_primitive().

    2) Wrappers around loads() and dumps(). The dumps() wrapper will
       automatically use to_primitive() for you if needed.

    3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
       is available.
'''


import datetime
import functools
import inspect
import itertools
import json
import types
import xmlrpclib

import six

from openstack.common import timeutils


_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                     inspect.isfunction, inspect.isgeneratorfunction,
                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                     inspect.isabstract]

_simple_types = (types.NoneType, int, basestring, bool, float, long)


def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    """
    # handle obvious types first - order of basic types determined by running
    # full tests on nova project, resulting in the following counts:
    # 572754 <type 'NoneType'>
    # 460353 <type 'int'>
    # 379632 <type 'unicode'>
    # 274610 <type 'str'>
    # 199918 <type 'dict'>
    # 114200 <type 'datetime.datetime'>
    #  51817 <type 'bool'>
    #  26164 <type 'list'>
    #   6491 <type 'float'>
    #    283 <type 'tuple'>
    #     19 <type 'long'>
    if isinstance(value, _simple_types):
        return value

    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value

    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in value.iteritems())
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        else:
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ that isn't actually callable as list().
        return six.text_type(value)


def dumps(value, default=to_primitive, **kwargs):
    return json.dumps(value, default=default, **kwargs)


def loads(s):
    return json.loads(s)


def load(s):
    return json.load(s)


try:
    import anyjson
except ImportError:
    pass
else:
    anyjson._modules.append((__name__, 'dumps', TypeError,
                             'loads', ValueError, 'load'))
    anyjson.force_implementation(__name__)
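For reference, a hedged sketch that serializes a structure plain json.dumps() would reject (a datetime, an iterator, an instance); the Point class is illustrative:

    import datetime

    from openstack.common import jsonutils

    class Point(object):
        def __init__(self, x, y):
            self.x = x
            self.y = y

    payload = {'when': datetime.datetime(2013, 6, 1, 12, 0, 0),
               'tags': iter(['a', 'b']),
               'point': Point(1, 2)}

    # Flatten explicitly first; convert_instances=True turns Point into
    # its __dict__ (lossy, as the docstring above warns).
    primitive = jsonutils.to_primitive(payload, convert_instances=True)
    print(jsonutils.dumps(primitive))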
@ -1,48 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Greenthread local storage of variables using weak references"""

import weakref

from eventlet import corolocal


class WeakLocal(corolocal.local):
    def __getattribute__(self, attr):
        rval = corolocal.local.__getattribute__(self, attr)
        if rval:
            # NOTE(mikal): this bit is confusing. What is stored is a weak
            # reference, not the value itself. We therefore need to lookup
            # the weak reference and return the inner value here.
            rval = rval()
        return rval

    def __setattr__(self, attr, value):
        value = weakref.ref(value)
        return corolocal.local.__setattr__(self, attr, value)


# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of
# scope when it falls out of scope in the code that uses the thread local
# storage. A "strong" store will hold a reference to the object so that it
# never falls out of scope.
weak_store = WeakLocal()
# Note: corolocal.local must be instantiated here; assigning the class
# itself would share attributes across all greenthreads.
strong_store = corolocal.local()
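For reference, a hedged sketch contrasting the two stores; the Context class is illustrative, and the immediate collection relies on CPython refcounting:

    from openstack.common import local

    class Context(object):
        pass

    a, b = Context(), Context()
    local.weak_store.current = a    # held via weakref.ref
    local.strong_store.current = b  # held directly

    del a, b
    print(local.weak_store.current)    # None: the weakref died with a
    print(local.strong_store.current)  # the Context: still referenced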
@ -1,279 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import errno
import functools
import os
import shutil
import tempfile
import time
import weakref

from eventlet import semaphore
from oslo.config import cfg

from openstack.common import fileutils
from openstack.common.gettextutils import _
from openstack.common import local
from openstack.common import log as logging


LOG = logging.getLogger(__name__)


util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Whether to disable inter-process locks'),
    cfg.StrOpt('lock_path',
               help=('Directory to use for lock files. Defaults to a '
                     'temp directory'))
]


CONF = cfg.CONF
CONF.register_opts(util_opts)


def set_defaults(lock_path):
    cfg.set_defaults(util_opts, lock_path=lock_path)


class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        self.lockfile = None
        self.fname = name

    def __enter__(self):
        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)

    def trylock(self):
        raise NotImplementedError()

    def unlock(self):
        raise NotImplementedError()


class _WindowsLock(_InterProcessLock):
    def trylock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)


class _PosixLock(_InterProcessLock):
    def trylock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)


if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _PosixLock

_semaphores = weakref.WeakValueDictionary()


def synchronized(name, lock_file_prefix, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix. The prefix should end with a
    hyphen ('-') if specified.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
    special location for external lock files to live. If nothing is set, then
    CONF.lock_path is used as a default.
    """

    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn
            #              amically-allocating-and-destroying-mutexes
            sem = _semaphores.get(name, semaphore.Semaphore())
            if name not in _semaphores:
                # this check is not racy - we're already holding ref locally
                # so GC won't remove the item and there was no IO switch
                # (only valid in greenthreads)
                _semaphores[name] = sem

            with sem:
                LOG.debug(_('Got semaphore "%(lock)s" for method '
                            '"%(method)s"...'), {'lock': name,
                                                 'method': f.__name__})

                # NOTE(mikal): I know this looks odd
                if not hasattr(local.strong_store, 'locks_held'):
                    local.strong_store.locks_held = []
                local.strong_store.locks_held.append(name)

                try:
                    if external and not CONF.disable_process_locking:
                        LOG.debug(_('Attempting to grab file lock "%(lock)s" '
                                    'for method "%(method)s"...'),
                                  {'lock': name, 'method': f.__name__})
                        cleanup_dir = False

                        # We need a copy of lock_path because it is non-local
                        local_lock_path = lock_path
                        if not local_lock_path:
                            local_lock_path = CONF.lock_path

                        if not local_lock_path:
                            cleanup_dir = True
                            local_lock_path = tempfile.mkdtemp()

                        if not os.path.exists(local_lock_path):
                            fileutils.ensure_tree(local_lock_path)

                        # NOTE(mikal): the lock name cannot contain directory
                        # separators
                        safe_name = name.replace(os.sep, '_')
                        lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
                        lock_file_path = os.path.join(local_lock_path,
                                                      lock_file_name)

                        try:
                            lock = InterProcessLock(lock_file_path)
                            with lock:
                                LOG.debug(_('Got file lock "%(lock)s" at '
                                            '%(path)s for method '
                                            '"%(method)s"...'),
                                          {'lock': name,
                                           'path': lock_file_path,
                                           'method': f.__name__})
                                retval = f(*args, **kwargs)
                        finally:
                            LOG.debug(_('Released file lock "%(lock)s" at '
                                        '%(path)s for method "%(method)s"...'),
                                      {'lock': name,
                                       'path': lock_file_path,
                                       'method': f.__name__})
                            # NOTE(vish): This removes the tempdir if we needed
                            #             to create one. This is used to
                            #             cleanup the locks left behind by unit
                            #             tests.
                            if cleanup_dir:
                                shutil.rmtree(local_lock_path)
                    else:
                        retval = f(*args, **kwargs)

                finally:
                    local.strong_store.locks_held.remove(name)

                return retval
        return inner
    return wrap


def synchronized_with_prefix(lock_file_prefix):
    """Partial object generator for the synchronization decorator.

    Redefine @synchronized in each project like so::

        (in nova/utils.py)
        from nova.openstack.common import lockutils

        synchronized = lockutils.synchronized_with_prefix('nova-')


        (in nova/foo.py)
        from nova import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
           ...

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix. The prefix should end with a hyphen ('-') if specified.
    """

    return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
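For reference, a hedged sketch of an external (cross-process) lock; the prefix, lock name, and lock_path are illustrative:

    from openstack.common import lockutils

    synchronized = lockutils.synchronized_with_prefix('example-')

    @synchronized('state-file', external=True,
                  lock_path='/tmp/example-locks')
    def update_state():
        # One holder at a time: greenthreads in-process serialize on the
        # semaphore, separate processes on the 'example-state-file' file.
        pass

    update_state()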
@ -1,559 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""OpenStack logging handler.

This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.

It also allows setting of formatting information through conf.

"""

import ConfigParser
import cStringIO
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import sys
import traceback

from oslo.config import cfg

from openstack.common.gettextutils import _
from openstack.common import importutils
from openstack.common import jsonutils
from openstack.common import local


_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]

logging_cli_opts = [
    cfg.StrOpt('log-config',
               metavar='PATH',
               help='If this option is specified, the logging configuration '
                    'file specified is used and overrides any other logging '
                    'options specified. Please see the Python logging module '
                    'documentation for details on logging configuration '
                    'files.'),
    cfg.StrOpt('log-format',
               default=None,
               metavar='FORMAT',
               help='A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated. Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='syslog facility to receive log lines')
]

generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error')
]

log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user)s %(tenant)s] '
                       '%(instance)s%(message)s',
               help='format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqplib=WARN',
                    'sqlalchemy=WARN',
                    'boto=WARN',
                    'suds=INFO',
                    'keystone=INFO',
                    'eventlet.wsgi.server=WARN'
                ],
                help='list of logger=LEVEL pairs'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='publish error events'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='make deprecations fatal'),

    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='If an instance is passed with the log message, format '
                    'it like this'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
                    'format it like this'),
]

CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)

# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
#                module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')


try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            self.lock = None


def _dictify_context(context):
    if context is None:
        return None
    if not isinstance(context, dict) and getattr(context, 'to_dict', None):
        context = context.to_dict()
    return context


def _get_binary_name():
    return os.path.basename(inspect.stack()[-1][1])


def _get_log_file_path(binary=None):
    logfile = CONF.log_file
    logdir = CONF.log_dir

    if logfile and not logdir:
        return logfile

    if logfile and logdir:
        return os.path.join(logdir, logfile)

    if logdir:
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)


class BaseLoggerAdapter(logging.LoggerAdapter):

    def audit(self, msg, *args, **kwargs):
        self.log(logging.AUDIT, msg, *args, **kwargs)


class LazyAdapter(BaseLoggerAdapter):
    def __init__(self, name='unknown', version='unknown'):
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
        return self._logger


class ContextAdapter(BaseLoggerAdapter):
    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string

    @property
    def handlers(self):
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        else:
            self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']

        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        instance = kwargs.pop('instance', None)
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        else:
            instance_uuid = kwargs.pop('instance_uuid', None)
            if instance_uuid:
                instance_extra = (CONF.instance_uuid_format
                                  % {'uuid': instance_uuid})
        extra.update({'instance': instance_extra})

        extra.update({"project": self.project})
        extra.update({"version": self.version})
        extra['extra'] = extra.copy()
        return msg, kwargs


class JSONFormatter(logging.Formatter):
    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            lines = [itertools.ifilter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}

        if hasattr(record, 'extra'):
            message['extra'] = record.extra

        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)

        return jsonutils.dumps(message)


def _create_logging_excepthook(product_name):
    def logging_excepthook(type, value, tb):
        extra = {}
        if CONF.verbose:
            extra['exc_info'] = (type, value, tb)
        getLogger(product_name).critical(str(value), **extra)
    return logging_excepthook


class LogConfigError(Exception):

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % dict(log_config=self.log_config,
                                   err_msg=self.err_msg)


def _load_log_config(log_config):
    try:
        logging.config.fileConfig(log_config)
    except ConfigParser.Error as exc:
        raise LogConfigError(log_config, str(exc))


def setup(product_name):
    """Setup logging."""
    if CONF.log_config:
        _load_log_config(CONF.log_config)
    else:
        _setup_logging_from_conf()
    sys.excepthook = _create_logging_excepthook(product_name)


def set_defaults(logging_context_format_string):
    cfg.set_defaults(log_opts,
                     logging_context_format_string=
                     logging_context_format_string)


def _find_facility_from_conf():
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        valid_facilities = facility_names.keys()
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility


def _setup_logging_from_conf():
    log_root = getLogger(None).logger
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                facility=facility)
        log_root.addHandler(syslog)

    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

    elif not CONF.log_file:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        handler = importutils.import_object(
            "openstack.common.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(datefmt=datefmt))

    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)

_loggers = {}


def getLogger(name='unknown', version='unknown'):
    if name not in _loggers:
        _loggers[name] = ContextAdapter(logging.getLogger(name),
                                        name,
                                        version)
    return _loggers[name]


def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.

    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.
    """
    return LazyAdapter(name, version)


class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        self.logger.log(self.level, msg)


class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter

    """

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formatting params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color'):
            if key not in record.__dict__:
                record.__dict__[key] = ''

        if record.__dict__.get('request_id', None):
            self._fmt = CONF.logging_context_format_string
        else:
            self._fmt = CONF.logging_default_format_string

        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            self._fmt += " " + CONF.logging_debug_format_suffix

        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)

        stringbuffer = cStringIO.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()

        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)

        formatted_lines = []
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)


class ColorHandler(logging.StreamHandler):
    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)


class DeprecatedConfig(Exception):
    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        super(Exception, self).__init__(self.message % dict(msg=msg))
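For reference, a hedged sketch of typical service wiring for this module; the 'myservice' project name is illustrative, and the empty argument list simply skips command-line parsing for the demo:

    from oslo.config import cfg

    from openstack.common import log as logging

    cfg.CONF([], project='myservice')
    logging.setup('myservice')

    LOG = logging.getLogger(__name__)
    LOG.audit('service starting')       # the synthesized AUDIT level
    LOG.info('booted %s', 'node-1',
             instance={'uuid': 'abc-123'})  # rendered via instance_format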
@ -1,31 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging

from openstack.common import notifier

from oslo.config import cfg


class PublishErrorsHandler(logging.Handler):
    def emit(self, record):
        if ('openstack.common.notifier.log_notifier' in
                cfg.CONF.notification_driver):
            return
        notifier.api.notify(None, 'error.publisher',
                            'error_notification',
                            notifier.api.ERROR,
                            dict(error=record.msg))
@ -1,147 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2011 Justin Santa Barbara
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sys
|
||||
|
||||
from eventlet import event
from eventlet import greenthread

from openstack.common.gettextutils import _
from openstack.common import log as logging
from openstack.common import timeutils

LOG = logging.getLogger(__name__)


class LoopingCallDone(Exception):
    """Exception to break out and stop a LoopingCall.

    The poll-function passed to LoopingCall can raise this exception to
    break out of the loop normally. This is somewhat analogous to
    StopIteration.

    An optional return-value can be included as the argument to the exception;
    this return-value will be returned by LoopingCall.wait()

    """

    def __init__(self, retvalue=True):
        """:param retvalue: Value that LoopingCall.wait() should return."""
        self.retvalue = retvalue


class LoopingCallBase(object):
    def __init__(self, f=None, *args, **kw):
        self.args = args
        self.kw = kw
        self.f = f
        self._running = False
        self.done = None

    def stop(self):
        self._running = False

    def wait(self):
        return self.done.wait()


class FixedIntervalLoopingCall(LoopingCallBase):
    """A fixed interval looping call."""

    def start(self, interval, initial_delay=None):
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn_n(_inner)
        return self.done


# TODO(mikal): this class name is deprecated in Havana and should be removed
# in the I release
LoopingCall = FixedIntervalLoopingCall


class DynamicLoopingCall(LoopingCallBase):
    """A looping call which sleeps until the next known event.

    The function called should return how long to sleep for before being
    called again.
    """

    def start(self, initial_delay=None, periodic_interval_max=None):
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug(_('Dynamic looping call sleeping for %.02f '
                                'seconds'), idle)
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn(_inner)
        return self.done
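For context, a minimal usage sketch of the looping-call API removed above (my illustration, not part of the reverted file): FixedIntervalLoopingCall wraps a callable and re-invokes it every `interval` seconds until the callable raises LoopingCallDone.

# Illustrative sketch only, assuming the loopingcall module above is
# importable as openstack.common.loopingcall.
from openstack.common import loopingcall

def _poll():
    # One unit of work per tick; raising LoopingCallDone ends the loop,
    # and its argument becomes the return value of wait().
    raise loopingcall.LoopingCallDone(retvalue='finished')

timer = loopingcall.FixedIntervalLoopingCall(_poll)
timer.start(interval=2.0, initial_delay=1.0)
print(timer.wait())  # -> 'finished'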
@ -1,97 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Super simple fake memcache client."""

from oslo.config import cfg

from openstack.common import timeutils

memcache_opts = [
    cfg.ListOpt('memcached_servers',
                default=None,
                help='Memcached servers or None for in process cache.'),
]

CONF = cfg.CONF
CONF.register_opts(memcache_opts)


def get_client(memcached_servers=None):
    client_cls = Client

    if not memcached_servers:
        memcached_servers = CONF.memcached_servers
    if memcached_servers:
        try:
            import memcache
            client_cls = memcache.Client
        except ImportError:
            pass

    return client_cls(memcached_servers, debug=0)


class Client(object):
    """Replicates a tiny subset of memcached client interface."""

    def __init__(self, *args, **kwargs):
        """Ignores the passed in args."""
        self.cache = {}

    def get(self, key):
        """Retrieves the value for a key or None.

        This expunges expired keys during each get.
        """

        now = timeutils.utcnow_ts()
        for k in self.cache.keys():
            (timeout, _value) = self.cache[k]
            if timeout and now >= timeout:
                del self.cache[k]

        return self.cache.get(key, (0, None))[1]

    def set(self, key, value, time=0, min_compress_len=0):
        """Sets the value for a key."""
        timeout = 0
        if time != 0:
            timeout = timeutils.utcnow_ts() + time
        self.cache[key] = (timeout, value)
        return True

    def add(self, key, value, time=0, min_compress_len=0):
        """Sets the value for a key if it doesn't exist."""
        if self.get(key) is not None:
            return False
        return self.set(key, value, time, min_compress_len)

    def incr(self, key, delta=1):
        """Increments the value for a key."""
        value = self.get(key)
        if value is None:
            return None
        new_value = int(value) + delta
        self.cache[key] = (self.cache[key][0], str(new_value))
        return new_value

    def delete(self, key, time=0):
        """Deletes the value associated with a key."""
        if key in self.cache:
            del self.cache[key]
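A short sketch of how the fake client above behaves (illustrative only, not part of the reverted file): with no memcached_servers configured, get_client() falls back to the in-process Client.

# Illustrative sketch only, assuming the memorycache module above is
# importable as openstack.common.memorycache.
from openstack.common import memorycache

mc = memorycache.get_client()        # in-process fake when no servers set
mc.set('token', 'abc123', time=300)  # entry expires 300 seconds from now
assert mc.get('token') == 'abc123'
assert mc.incr('missing') is None    # incr of an unset key returns None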
@ -1,59 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class(es) for WSGI Middleware."""

import webob.dec


class Middleware(object):
    """Base WSGI middleware wrapper.

    These classes require an application to be initialized that will be called
    next. By default the middleware will simply call its wrapped app, or you
    can override __call__ to customize its behavior.
    """

    @classmethod
    def factory(cls, global_conf, **local_conf):
        """Factory method for paste.deploy."""

        def filter(app):
            return cls(app)

        return filter

    def __init__(self, application):
        self.application = application

    def process_request(self, req):
        """Called on each request.

        If this returns None, the next application down the stack will be
        executed. If it returns a response then that response will be returned
        and execution will stop here.
        """
        return None

    def process_response(self, response):
        """Do whatever you'd like to the response."""
        return response

    @webob.dec.wsgify
    def __call__(self, req):
        response = self.process_request(req)
        if response:
            return response
        response = req.get_response(self.application)
        return self.process_response(response)
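For reference, a hypothetical subclass of the base class above (illustrative only; RequireHeader is my example, not part of the reverted tree): returning a response from process_request short-circuits the pipeline.

# Illustrative sketch only.
import webob.exc

from openstack.common.middleware import base


class RequireHeader(base.Middleware):
    def process_request(self, req):
        # Returning a response here stops the stack; returning None
        # lets the wrapped application handle the request.
        if 'X-Auth-Token' not in req.headers:
            return webob.exc.HTTPUnauthorized()
        return None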
@ -1,61 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Middleware that attaches a context to the WSGI request
"""

from openstack.common import context
from openstack.common import importutils
from openstack.common.middleware import base


class ContextMiddleware(base.Middleware):
    def __init__(self, app, options):
        self.options = options
        super(ContextMiddleware, self).__init__(app)

    def make_context(self, *args, **kwargs):
        """Create a context with the given arguments."""

        # Determine the context class to use
        ctxcls = context.RequestContext
        if 'context_class' in self.options:
            ctxcls = importutils.import_class(self.options['context_class'])

        return ctxcls(*args, **kwargs)

    def process_request(self, req):
        """Process the request.

        Extract any authentication information in the request and
        construct an appropriate context from it.
        """
        # Use the default empty context, with admin turned on for
        # backwards compatibility
        req.context = self.make_context(is_admin=True)


def filter_factory(global_conf, **local_conf):
    """Factory method for paste.deploy."""
    conf = global_conf.copy()
    conf.update(local_conf)

    def filter(app):
        return ContextMiddleware(app, conf)

    return filter
@ -1,29 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Middleware that attaches a correlation id to WSGI request"""

from openstack.common.middleware import base
from openstack.common import uuidutils


class CorrelationIdMiddleware(base.Middleware):

    def process_request(self, req):
        correlation_id = (req.headers.get("X_CORRELATION_ID") or
                          uuidutils.generate_uuid())
        req.headers['X_CORRELATION_ID'] = correlation_id
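A sketch of wiring the middleware above around a plain WSGI app (illustrative only; the `app` callable below is hypothetical):

# Illustrative sketch only.
from openstack.common.middleware import correlation_id

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['ok']

# Every request passing through `wrapped` now carries an
# X_CORRELATION_ID header, either the caller's or a generated UUID.
wrapped = correlation_id.CorrelationIdMiddleware(app)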
@ -1,58 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Debug middleware"""

from __future__ import print_function

import sys

import webob.dec

from openstack.common.middleware import base


class Debug(base.Middleware):
    """Helper class that returns debug information.

    Can be inserted into any WSGI application chain to get information about
    the request and response.
    """

    @webob.dec.wsgify
    def __call__(self, req):
        print(("*" * 40) + " REQUEST ENVIRON")
        for key, value in req.environ.items():
            print(key, "=", value)
        print()
        resp = req.get_response(self.application)

        print(("*" * 40) + " RESPONSE HEADERS")
        for (key, value) in resp.headers.iteritems():
            print(key, "=", value)
        print()

        resp.app_iter = self.print_generator(resp.app_iter)

        return resp

    @staticmethod
    def print_generator(app_iter):
        """Prints the contents of a wrapper string iterator when iterated."""
        print(("*" * 40) + " BODY")
        for part in app_iter:
            sys.stdout.write(part)
            sys.stdout.flush()
            yield part
        print()
@ -1,86 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Request Body limiting middleware.

"""

from oslo.config import cfg
import webob.dec
import webob.exc

from openstack.common.deprecated import wsgi
from openstack.common.gettextutils import _
from openstack.common.middleware import base


#default request size is 112k
max_req_body_size = cfg.IntOpt('max_request_body_size',
                               deprecated_name='osapi_max_request_body_size',
                               default=114688,
                               help='the maximum body size '
                                    'per each request(bytes)')

CONF = cfg.CONF
CONF.register_opt(max_req_body_size)


class LimitingReader(object):
    """Reader to limit the size of an incoming request."""
    def __init__(self, data, limit):
        """Initiates LimitingReader object.

        :param data: Underlying data object
        :param limit: maximum number of bytes the reader should allow
        """
        self.data = data
        self.limit = limit
        self.bytes_read = 0

    def __iter__(self):
        for chunk in self.data:
            self.bytes_read += len(chunk)
            if self.bytes_read > self.limit:
                msg = _("Request is too large.")
                raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
            else:
                yield chunk

    def read(self, i=None):
        result = self.data.read(i)
        self.bytes_read += len(result)
        if self.bytes_read > self.limit:
            msg = _("Request is too large.")
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
        return result


class RequestBodySizeLimiter(base.Middleware):
    """Limit the size of incoming requests."""

    def __init__(self, *args, **kwargs):
        super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        if req.content_length > CONF.max_request_body_size:
            msg = _("Request is too large.")
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
        if req.content_length is None and req.is_body_readable:
            limiter = LimitingReader(req.body_file,
                                     CONF.max_request_body_size)
            req.body_file = limiter
        return self.application
@ -1,81 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Network-related utilities and helper functions.
"""

import urlparse


def parse_host_port(address, default_port=None):
    """Interpret a string as a host:port pair.

    An IPv6 address MUST be escaped if accompanied by a port,
    because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
    means both [2001:db8:85a3::8a2e:370:7334] and
    [2001:db8:85a3::8a2e:370]:7334.

    >>> parse_host_port('server01:80')
    ('server01', 80)
    >>> parse_host_port('server01')
    ('server01', None)
    >>> parse_host_port('server01', default_port=1234)
    ('server01', 1234)
    >>> parse_host_port('[::1]:80')
    ('::1', 80)
    >>> parse_host_port('[::1]')
    ('::1', None)
    >>> parse_host_port('[::1]', default_port=1234)
    ('::1', 1234)
    >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234)
    ('2001:db8:85a3::8a2e:370:7334', 1234)

    """
    if address[0] == '[':
        # Escaped ipv6
        _host, _port = address[1:].split(']')
        host = _host
        if ':' in _port:
            port = _port.split(':')[1]
        else:
            port = default_port
    else:
        if address.count(':') == 1:
            host, port = address.split(':')
        else:
            # 0 means ipv4, >1 means ipv6.
            # We prohibit unescaped ipv6 addresses with port.
            host = address
            port = default_port

    return (host, None if port is None else int(port))


def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL using urlparse.urlsplit(), splitting query and fragments.
    This function papers over Python issue9374 when needed.

    The parameters are the same as urlparse.urlsplit.
    """
    scheme, netloc, path, query, fragment = urlparse.urlsplit(
        url, scheme, allow_fragments)
    if allow_fragments and '#' in path:
        path, fragment = path.split('#', 1)
    if '?' in path:
        path, query = path.split('?', 1)
    return urlparse.SplitResult(scheme, netloc, path, query, fragment)
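The urlsplit() wrapper above exists because of Python issue9374: on affected urlparse.urlsplit() releases, the query and fragment stay attached to .path for unregistered URL schemes. A sketch (illustrative only; the qpid:// URL is just an arbitrary unregistered scheme):

# Illustrative sketch only.
from openstack.common import network_utils

result = network_utils.urlsplit('qpid://broker:5672/path?key=val#frag')
# Even on affected Pythons: result.path == '/path',
# result.query == 'key=val', result.fragment == 'frag'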
@ -1,14 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -1,182 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from oslo.config import cfg

from openstack.common import context
from openstack.common.gettextutils import _
from openstack.common import importutils
from openstack.common import jsonutils
from openstack.common import log as logging
from openstack.common import timeutils


LOG = logging.getLogger(__name__)

notifier_opts = [
    cfg.MultiStrOpt('notification_driver',
                    default=[],
                    help='Driver or drivers to handle sending notifications'),
    cfg.StrOpt('default_notification_level',
               default='INFO',
               help='Default notification level for outgoing notifications'),
    cfg.StrOpt('default_publisher_id',
               default='$host',
               help='Default publisher_id for outgoing notifications'),
]

CONF = cfg.CONF
CONF.register_opts(notifier_opts)

WARN = 'WARN'
INFO = 'INFO'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
DEBUG = 'DEBUG'

log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)


class BadPriorityException(Exception):
    pass


def notify_decorator(name, fn):
    """Decorator for notify which is used from utils.monkey_patch().

    :param name: name of the function
    :param function: - object of the function
    :returns: function -- decorated function

    """
    def wrapped_func(*args, **kwarg):
        body = {}
        body['args'] = []
        body['kwarg'] = {}
        for arg in args:
            body['args'].append(arg)
        for key in kwarg:
            body['kwarg'][key] = kwarg[key]

        ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
        notify(ctxt,
               CONF.default_publisher_id,
               name,
               CONF.default_notification_level,
               body)
        return fn(*args, **kwarg)
    return wrapped_func


def publisher_id(service, host=None):
    if not host:
        host = CONF.host
    return "%s.%s" % (service, host)


def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type: the literal type of event (ex. Instance Creation)
    :param priority: patterned after the enumeration of Python logging
                     levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: A python dictionary of attributes

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id
      a UUID representing the id for this notification

    timestamp
      the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    """
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = dict(message_id=str(uuid.uuid4()),
               publisher_id=publisher_id,
               event_type=event_type,
               priority=priority,
               payload=payload,
               timestamp=str(timeutils.utcnow()))

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(_("Problem '%(e)s' attempting to "
                            "send to notification system. "
                            "Payload=%(payload)s")
                          % dict(e=e, payload=payload))


_drivers = None


def _get_drivers():
    """Instantiate, cache, and return drivers based on the CONF."""
    global _drivers
    if _drivers is None:
        _drivers = {}
        for notification_driver in CONF.notification_driver:
            add_driver(notification_driver)

    return _drivers.values()


def add_driver(notification_driver):
    """Add a notification driver at runtime."""
    # Make sure the driver list is initialized.
    _get_drivers()
    if isinstance(notification_driver, basestring):
        # Load and add
        try:
            driver = importutils.import_module(notification_driver)
            _drivers[notification_driver] = driver
        except ImportError:
            LOG.exception(_("Failed to load notifier %s. "
                            "These notifications will not be sent.") %
                          notification_driver)
    else:
        # Driver is already loaded; just add the object.
        _drivers[notification_driver] = notification_driver


def _reset_drivers():
    """Used by unit tests to reset the drivers."""
    global _drivers
    _drivers = None
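A minimal sketch of emitting a notification through the API above (illustrative only; the service/host/payload values are made up):

# Illustrative sketch only, assuming the notifier API above is importable.
from openstack.common import context
from openstack.common.notifier import api as notifier_api

notifier_api.notify(context.get_admin_context(),
                    notifier_api.publisher_id('compute', host='host1'),
                    'compute.create_instance',
                    notifier_api.INFO,
                    {'instance_id': 12})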
@ -1,37 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from openstack.common import jsonutils
from openstack.common import log as logging


CONF = cfg.CONF


def notify(_context, message):
    """Notifies the recipient of the desired event given the model.

    Log notifications using openstack's default logging system.
    """

    priority = message.get('priority',
                           CONF.default_notification_level)
    priority = priority.lower()
    logger = logging.getLogger(
        'openstack.common.notification.%s' %
        message['event_type'])
    getattr(logger, priority)(jsonutils.dumps(message))
@ -1,19 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


def notify(_context, message):
    """Notifies the recipient of the desired event given the model."""
    pass
@ -1,46 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from openstack.common import context as req_context
from openstack.common.gettextutils import _
from openstack.common import log as logging
from openstack.common import rpc

LOG = logging.getLogger(__name__)

notification_topic_opt = cfg.ListOpt(
    'notification_topics', default=['notifications', ],
    help='AMQP topic used for openstack notifications')

CONF = cfg.CONF
CONF.register_opt(notification_topic_opt)


def notify(context, message):
    """Sends a notification via RPC."""
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority',
                           CONF.default_notification_level)
    priority = priority.lower()
    for topic in CONF.notification_topics:
        topic = '%s.%s' % (topic, priority)
        try:
            rpc.notify(context, topic, message)
        except Exception:
            LOG.exception(_("Could not send notification to %(topic)s. "
                            "Payload=%(message)s"), locals())
@ -1,52 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

'''messaging based notification driver, with message envelopes'''

from oslo.config import cfg

from openstack.common import context as req_context
from openstack.common.gettextutils import _
from openstack.common import log as logging
from openstack.common import rpc

LOG = logging.getLogger(__name__)

notification_topic_opt = cfg.ListOpt(
    'topics', default=['notifications', ],
    help='AMQP topic(s) used for openstack notifications')

opt_group = cfg.OptGroup(name='rpc_notifier2',
                         title='Options for rpc_notifier2')

CONF = cfg.CONF
CONF.register_group(opt_group)
CONF.register_opt(notification_topic_opt, opt_group)


def notify(context, message):
    """Sends a notification via RPC."""
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority',
                           CONF.default_notification_level)
    priority = priority.lower()
    for topic in CONF.rpc_notifier2.topics:
        topic = '%s.%s' % (topic, priority)
        try:
            rpc.notify(context, topic, message, envelope=True)
        except Exception:
            LOG.exception(_("Could not send notification to %(topic)s. "
                            "Payload=%(message)s"), locals())
@ -1,22 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


NOTIFICATIONS = []


def notify(_context, message):
    """Test notifier, stores notifications in memory for unittests."""
    NOTIFICATIONS.append(message)
@ -1,164 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys

from paste import deploy

from openstack.common import local


class BasePasteFactory(object):

    """A base class for paste app and filter factories.

    Sub-classes must override the KEY class attribute and provide
    a __call__ method.
    """

    KEY = None

    def __init__(self, data):
        self.data = data

    def _import_factory(self, local_conf):
        """Import an app/filter class.

        Lookup the KEY from the PasteDeploy local conf and import the
        class named there. This class can then be used as an app or
        filter factory.

        Note we support the <module>:<class> format.

        Note also that if you do e.g.

          key =
              value

        then ConfigParser returns a value with a leading newline, so
        we strip() the value before using it.
        """
        mod_str, _sep, class_str = local_conf[self.KEY].strip().rpartition(':')
        del local_conf[self.KEY]

        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)


class AppFactory(BasePasteFactory):

    """A Generic paste.deploy app factory.

    This requires openstack.app_factory to be set to a callable which returns a
    WSGI app when invoked. The format of the name is <module>:<callable> e.g.

      [app:myfooapp]
      paste.app_factory = openstack.common.pastedeploy:app_factory
      openstack.app_factory = myapp:Foo

    The WSGI app constructor must accept a data object and a local config
    dict as its two arguments.
    """

    KEY = 'openstack.app_factory'

    def __call__(self, global_conf, **local_conf):
        """The actual paste.app_factory protocol method."""
        factory = self._import_factory(local_conf)
        return factory(self.data, **local_conf)


class FilterFactory(AppFactory):

    """A Generic paste.deploy filter factory.

    This requires openstack.filter_factory to be set to a callable which
    returns a WSGI filter when invoked. The format is <module>:<callable> e.g.

      [filter:myfoofilter]
      paste.filter_factory = openstack.common.pastedeploy:filter_factory
      openstack.filter_factory = myfilter:Foo

    The WSGI filter constructor must accept a WSGI app, a data object and
    a local config dict as its three arguments.
    """

    KEY = 'openstack.filter_factory'

    def __call__(self, global_conf, **local_conf):
        """The actual paste.filter_factory protocol method."""
        factory = self._import_factory(local_conf)

        def filter(app):
            return factory(app, self.data, **local_conf)

        return filter


def app_factory(global_conf, **local_conf):
    """A paste app factory used with paste_deploy_app()."""
    return local.store.app_factory(global_conf, **local_conf)


def filter_factory(global_conf, **local_conf):
    """A paste filter factory used with paste_deploy_app()."""
    return local.store.filter_factory(global_conf, **local_conf)


def paste_deploy_app(paste_config_file, app_name, data):
    """Load a WSGI app from a PasteDeploy configuration.

    Use deploy.loadapp() to load the app from the PasteDeploy configuration,
    ensuring that the supplied data object is passed to the app and filter
    factories defined in this module.

    To use these factories and the data object, the configuration should look
    like this:

      [app:myapp]
      paste.app_factory = openstack.common.pastedeploy:app_factory
      openstack.app_factory = myapp:App
      ...
      [filter:myfilter]
      paste.filter_factory = openstack.common.pastedeploy:filter_factory
      openstack.filter_factory = myapp:Filter

    and then:

      myapp.py:

        class App(object):
            def __init__(self, data):
                ...

        class Filter(object):
            def __init__(self, app, data):
                ...

    :param paste_config_file: a PasteDeploy config file
    :param app_name: the name of the app/pipeline to load from the file
    :param data: a data object to supply to the app and its filters
    :returns: the WSGI app
    """
    (af, ff) = (AppFactory(data), FilterFactory(data))

    local.store.app_factory = af
    local.store.filter_factory = ff
    try:
        return deploy.loadapp("config:%s" % paste_config_file, name=app_name)
    finally:
        del local.store.app_factory
        del local.store.filter_factory
@ -1,188 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
import time

from oslo.config import cfg

from openstack.common.gettextutils import _
from openstack.common import log as logging
from openstack.common import timeutils


periodic_opts = [
    cfg.BoolOpt('run_external_periodic_tasks',
                default=True,
                help=('Some periodic tasks can be run in a separate process. '
                      'Should we run them here?')),
]

CONF = cfg.CONF
CONF.register_opts(periodic_opts)

LOG = logging.getLogger(__name__)

DEFAULT_INTERVAL = 60.0


class InvalidPeriodicTaskArg(Exception):
    message = _("Unexpected argument for periodic task creation: %(arg)s.")


def periodic_task(*args, **kwargs):
    """Decorator to indicate that a method is a periodic task.

    This decorator can be used in two ways:

    1. Without arguments '@periodic_task', this will be run on every cycle
       of the periodic scheduler.

    2. With arguments:
       @periodic_task(spacing=N [, run_immediately=[True|False]])
       this will be run on approximately every N seconds. If this number is
       negative the periodic task will be disabled. If the run_immediately
       argument is provided and has a value of 'True', the first run of the
       task will be shortly after task scheduler starts. If
       run_immediately is omitted or set to 'False', the first time the
       task runs will be approximately N seconds after the task scheduler
       starts.
    """
    def decorator(f):
        # Test for old style invocation
        if 'ticks_between_runs' in kwargs:
            raise InvalidPeriodicTaskArg(arg='ticks_between_runs')

        # Control if run at all
        f._periodic_task = True
        f._periodic_external_ok = kwargs.pop('external_process_ok', False)
        if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
            f._periodic_enabled = False
        else:
            f._periodic_enabled = kwargs.pop('enabled', True)

        # Control frequency
        f._periodic_spacing = kwargs.pop('spacing', 0)
        f._periodic_immediate = kwargs.pop('run_immediately', False)
        if f._periodic_immediate:
            f._periodic_last_run = None
        else:
            f._periodic_last_run = timeutils.utcnow()
        return f

    # NOTE(sirp): The `if` is necessary to allow the decorator to be used with
    # and without parens.
    #
    # In the 'with-parens' case (with kwargs present), this function needs to
    # return a decorator function since the interpreter will invoke it like:
    #
    #   periodic_task(*args, **kwargs)(f)
    #
    # In the 'without-parens' case, the original function will be passed
    # in as the first argument, like:
    #
    #   periodic_task(f)
    if kwargs:
        return decorator
    else:
        return decorator(args[0])


class _PeriodicTasksMeta(type):
    def __init__(cls, names, bases, dict_):
        """Metaclass that allows us to collect decorated periodic tasks."""
        super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)

        # NOTE(sirp): if the attribute is not present then we must be the base
        # class, so, go ahead an initialize it. If the attribute is present,
        # then we're a subclass so make a copy of it so we don't step on our
        # parent's toes.
        try:
            cls._periodic_tasks = cls._periodic_tasks[:]
        except AttributeError:
            cls._periodic_tasks = []

        try:
            cls._periodic_last_run = cls._periodic_last_run.copy()
        except AttributeError:
            cls._periodic_last_run = {}

        try:
            cls._periodic_spacing = cls._periodic_spacing.copy()
        except AttributeError:
            cls._periodic_spacing = {}

        for value in cls.__dict__.values():
            if getattr(value, '_periodic_task', False):
                task = value
                name = task.__name__

                if task._periodic_spacing < 0:
                    LOG.info(_('Skipping periodic task %(task)s because '
                               'its interval is negative'),
                             {'task': name})
                    continue
                if not task._periodic_enabled:
                    LOG.info(_('Skipping periodic task %(task)s because '
                               'it is disabled'),
                             {'task': name})
                    continue

                # A periodic spacing of zero indicates that this task should
                # be run every pass
                if task._periodic_spacing == 0:
                    task._periodic_spacing = None

                cls._periodic_tasks.append((name, task))
                cls._periodic_spacing[name] = task._periodic_spacing
                cls._periodic_last_run[name] = task._periodic_last_run


class PeriodicTasks(object):
    __metaclass__ = _PeriodicTasksMeta

    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])

            now = timeutils.utcnow()
            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]

            # If a periodic task is _nearly_ due, then we'll run it early
            if spacing is not None and last_run is not None:
                due = last_run + datetime.timedelta(seconds=spacing)
                if not timeutils.is_soon(due, 0.2):
                    idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                    continue

            if spacing is not None:
                idle_for = min(idle_for, spacing)

            LOG.debug(_("Running periodic task %(full_task_name)s"), locals())
            self._periodic_last_run[task_name] = timeutils.utcnow()

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                              locals())
            time.sleep(0)

        return idle_for
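A minimal sketch of the decorator and mixin above (illustrative only; Manager and its task methods are hypothetical names):

# Illustrative sketch only.
from openstack.common import periodic_task


class Manager(periodic_task.PeriodicTasks):

    @periodic_task.periodic_task
    def runs_every_cycle(self, context):
        pass

    @periodic_task.periodic_task(spacing=10, run_immediately=True)
    def runs_every_ten_seconds(self, context):
        pass

# A service loop would repeatedly call run_periodic_tasks(context) and
# sleep for the number of seconds it returns.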
@ -1,846 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (c) 2012 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Common Policy Engine Implementation
|
||||
|
||||
Policies can be expressed in one of two forms: A list of lists, or a
|
||||
string written in the new policy language.
|
||||
|
||||
In the list-of-lists representation, each check inside the innermost
|
||||
list is combined as with an "and" conjunction--for that check to pass,
|
||||
all the specified checks must pass. These innermost lists are then
|
||||
combined as with an "or" conjunction. This is the original way of
|
||||
expressing policies, but there now exists a new way: the policy
|
||||
language.
|
||||
|
||||
In the policy language, each check is specified the same way as in the
|
||||
list-of-lists representation: a simple "a:b" pair that is matched to
|
||||
the correct code to perform that check. However, conjunction
|
||||
operators are available, allowing for more expressiveness in crafting
|
||||
policies.
|
||||
|
||||
As an example, take the following rule, expressed in the list-of-lists
|
||||
representation::
|
||||
|
||||
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
|
||||
|
||||
In the policy language, this becomes::
|
||||
|
||||
role:admin or (project_id:%(project_id)s and role:projectadmin)
|
||||
|
||||
The policy language also has the "not" operator, allowing a richer
|
||||
policy rule::
|
||||
|
||||
project_id:%(project_id)s and not role:dunce
|
||||
|
||||
Finally, two special policy checks should be mentioned; the policy
|
||||
check "@" will always accept an access, and the policy check "!" will
|
||||
always reject an access. (Note that if a rule is either the empty
|
||||
list ("[]") or the empty string, this is equivalent to the "@" policy
|
||||
check.) Of these, the "!" policy check is probably the most useful,
|
||||
as it allows particular rules to be explicitly disabled.
|
||||
"""
|
||||
|
||||
import abc
|
||||
import re
|
||||
import urllib
|
||||
import urllib2
|
||||
|
||||
from oslo.config import cfg
|
||||
import six
|
||||
|
||||
from openstack.common import fileutils
|
||||
from openstack.common.gettextutils import _
|
||||
from openstack.common import jsonutils
|
||||
from openstack.common import log as logging
|
||||
|
||||
policy_opts = [
|
||||
cfg.StrOpt('policy_file',
|
||||
default='policy.json',
|
||||
help=_('JSON file containing policy')),
|
||||
cfg.StrOpt('policy_default_rule',
|
||||
default='default',
|
||||
help=_('Rule enforced when requested rule is not found')),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(policy_opts)
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
_checks = {}
|
||||
|
||||
|
||||
class PolicyNotAuthorized(Exception):
|
||||
|
||||
def __init__(self, rule):
|
||||
msg = _("Policy doesn't allow %s to be performed.") % rule
|
||||
super(PolicyNotAuthorized, self).__init__(msg)
|
||||
|
||||
|
||||
class Rules(dict):
|
||||
"""A store for rules. Handles the default_rule setting directly."""
|
||||
|
||||
@classmethod
|
||||
def load_json(cls, data, default_rule=None):
|
||||
"""Allow loading of JSON rule data."""
|
||||
|
||||
# Suck in the JSON data and parse the rules
|
||||
rules = dict((k, parse_rule(v)) for k, v in
|
||||
jsonutils.loads(data).items())
|
||||
|
||||
return cls(rules, default_rule)
|
||||
|
||||
def __init__(self, rules=None, default_rule=None):
|
||||
"""Initialize the Rules store."""
|
||||
|
||||
super(Rules, self).__init__(rules or {})
|
||||
self.default_rule = default_rule
|
||||
|
||||
def __missing__(self, key):
|
||||
"""Implements the default rule handling."""
|
||||
|
||||
# If the default rule isn't actually defined, do something
|
||||
# reasonably intelligent
|
||||
if not self.default_rule or self.default_rule not in self:
|
||||
raise KeyError(key)
|
||||
|
||||
return self[self.default_rule]
|
||||
|
||||
def __str__(self):
|
||||
"""Dumps a string representation of the rules."""
|
||||
|
||||
# Start by building the canonical strings for the rules
|
||||
out_rules = {}
|
||||
for key, value in self.items():
|
||||
# Use empty string for singleton TrueCheck instances
|
||||
if isinstance(value, TrueCheck):
|
||||
out_rules[key] = ''
|
||||
else:
|
||||
out_rules[key] = str(value)
|
||||
|
||||
# Dump a pretty-printed JSON representation
|
||||
return jsonutils.dumps(out_rules, indent=4)
|
||||
|
||||
|
||||
class Enforcer(object):
|
||||
"""Responsible for loading and enforcing rules.
|
||||
|
||||
:param policy_file: Custom policy file to use, if none is
|
||||
specified, `CONF.policy_file` will be
|
||||
used.
|
||||
:param rules: Default dictionary / Rules to use. It will be
|
||||
considered just in the first instantiation. If
|
||||
`load_rules(True)`, `clear()` or `set_rules(True)`
|
||||
is called this will be overwritten.
|
||||
:param default_rule: Default rule to use, CONF.default_rule will
|
||||
be used if none is specified.
|
||||
"""
|
||||
|
||||
def __init__(self, policy_file=None, rules=None, default_rule=None):
|
||||
self.rules = Rules(rules)
|
||||
self.default_rule = default_rule or CONF.policy_default_rule
|
||||
|
||||
self.policy_path = None
|
||||
self.policy_file = policy_file or CONF.policy_file
|
||||
|
||||
def set_rules(self, rules, overwrite=True):
|
||||
"""Create a new Rules object based on the provided dict of rules.
|
||||
|
||||
:param rules: New rules to use. It should be an instance of dict.
|
||||
:param overwrite: Whether to overwrite current rules or update them
|
||||
with the new rules.
|
||||
"""
|
||||
|
||||
if not isinstance(rules, dict):
|
||||
raise TypeError(_("Rules must be an instance of dict or Rules, "
|
||||
"got %s instead") % type(rules))
|
||||
|
||||
if overwrite:
|
||||
self.rules = Rules(rules)
|
||||
else:
|
||||
self.update(rules)
|
||||
|
||||
def clear(self):
|
||||
"""Clears Enforcer rules, policy's cache and policy's path."""
|
||||
self.set_rules({})
|
||||
self.policy_path = None
|
||||
|
||||
def load_rules(self, force_reload=False):
|
||||
"""Loads policy_path's rules.
|
||||
|
||||
Policy file is cached and will be reloaded if modified.
|
||||
|
||||
:param force_reload: Whether to overwrite current rules.
|
||||
"""
|
||||
|
||||
if not self.policy_path:
|
||||
self.policy_path = self._get_policy_path()
|
||||
|
||||
reloaded, data = fileutils.read_cached_file(self.policy_path,
|
||||
force_reload=force_reload)
|
||||
|
||||
if reloaded:
|
||||
rules = Rules.load_json(data, self.default_rule)
|
||||
self.set_rules(rules)
|
||||
LOG.debug(_("Rules successfully reloaded"))
|
||||
|
||||
def _get_policy_path(self):
|
||||
"""Locate the policy json data file.
|
||||
|
||||
:param policy_file: Custom policy file to locate.
|
||||
|
||||
:returns: The policy path
|
||||
|
||||
:raises: ConfigFilesNotFoundError if the file couldn't
|
||||
be located.
|
||||
"""
|
||||
policy_file = CONF.find_file(self.policy_file)
|
||||
|
||||
if policy_file:
|
||||
return policy_file
|
||||
|
||||
raise cfg.ConfigFilesNotFoundError(path=CONF.policy_file)
|
||||
|
||||
def enforce(self, rule, target, creds, do_raise=False,
|
||||
exc=None, *args, **kwargs):
|
||||
"""Checks authorization of a rule against the target and credentials.
|
||||
|
||||
:param rule: A string or BaseCheck instance specifying the rule
|
||||
to evaluate.
|
||||
:param target: As much information about the object being operated
|
||||
on as possible, as a dictionary.
|
||||
:param creds: As much information about the user performing the
|
||||
action as possible, as a dictionary.
|
||||
:param do_raise: Whether to raise an exception or not if check
|
||||
fails.
|
||||
:param exc: Class of the exception to raise if the check fails.
|
||||
Any remaining arguments passed to check() (both
|
||||
positional and keyword arguments) will be passed to
|
||||
the exception class. If not specified, PolicyNotAuthorized
|
||||
will be used.
|
||||
|
||||
:return: Returns False if the policy does not allow the action and
|
||||
exc is not provided; otherwise, returns a value that
|
||||
evaluates to True. Note: for rules using the "case"
|
||||
expression, this True value will be the specified string
|
||||
from the expression.
|
||||
"""
|
||||
|
||||
# NOTE(flaper87): Not logging target or creds to avoid
|
||||
# potential security issues.
|
||||
LOG.debug(_("Rule %s will be now enforced") % rule)
|
||||
|
||||
self.load_rules()
|
||||
|
||||
# Allow the rule to be a Check tree
|
||||
if isinstance(rule, BaseCheck):
|
||||
result = rule(target, creds, self)
|
||||
elif not self.rules:
|
||||
# No rules to reference means we're going to fail closed
|
||||
result = False
|
||||
else:
|
||||
try:
|
||||
# Evaluate the rule
|
||||
result = self.rules[rule](target, creds, self)
|
||||
except KeyError:
|
||||
LOG.debug(_("Rule [%s] doesn't exist") % rule)
|
||||
# If the rule doesn't exist, fail closed
|
||||
result = False
|
||||
|
||||
# If it is False, raise the exception if requested
|
||||
if do_raise and not result:
|
||||
if exc:
|
||||
raise exc(*args, **kwargs)
|
||||
|
||||
raise PolicyNotAuthorized(rule)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
class BaseCheck(object):
|
||||
"""Abstract base class for Check classes."""
|
||||
|
||||
__metaclass__ = abc.ABCMeta
|
||||
|
||||
@abc.abstractmethod
|
||||
def __str__(self):
|
||||
"""String representation of the Check tree rooted at this node."""
|
||||
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def __call__(self, target, cred):
|
||||
"""Triggers if instance of the class is called.
|
||||
|
||||
Performs the check. Returns False to reject the access or a
|
||||
true value (not necessary True) to accept the access.
|
||||
"""
|
||||
|
||||
pass
|
||||


class FalseCheck(BaseCheck):
    """A policy check that always returns False (disallow)."""

    def __str__(self):
        """Return a string representation of this check."""

        return "!"

    def __call__(self, target, cred):
        """Check the policy."""

        return False


class TrueCheck(BaseCheck):
    """A policy check that always returns True (allow)."""

    def __str__(self):
        """Return a string representation of this check."""

        return "@"

    def __call__(self, target, cred):
        """Check the policy."""

        return True


class Check(BaseCheck):
    """A base class to allow for user-defined policy checks."""

    def __init__(self, kind, match):
        """Initialize the Check instance.

        :param kind: The kind of the check, i.e., the field before the
                     ':'.
        :param match: The match of the check, i.e., the field after
                      the ':'.
        """

        self.kind = kind
        self.match = match

    def __str__(self):
        """Return a string representation of this check."""

        return "%s:%s" % (self.kind, self.match)


class NotCheck(BaseCheck):
    """Implements the "not" logical operator.

    A policy check that inverts the result of another policy check.
    """

    def __init__(self, rule):
        """Initialize the 'not' check.

        :param rule: The rule to negate.  Must be a Check.
        """

        self.rule = rule

    def __str__(self):
        """Return a string representation of this check."""

        return "not %s" % self.rule

    def __call__(self, target, cred):
        """Check the policy.

        Returns the logical inverse of the wrapped check.
        """

        return not self.rule(target, cred)


class AndCheck(BaseCheck):
    """Implements the "and" logical operator.

    A policy check that requires that a list of other checks all return True.
    """

    def __init__(self, rules):
        """Initialize the 'and' check.

        :param rules: A list of rules that will be tested.
        """

        self.rules = rules

    def __str__(self):
        """Return a string representation of this check."""

        return "(%s)" % ' and '.join(str(r) for r in self.rules)

    def __call__(self, target, cred):
        """Check the policy.

        Requires that all rules accept in order to return True.
        """

        for rule in self.rules:
            if not rule(target, cred):
                return False

        return True

    def add_check(self, rule):
        """Adds rule to be tested.

        Allows addition of another rule to the list of rules that will
        be tested.  Returns the AndCheck object for convenience.
        """

        self.rules.append(rule)
        return self


class OrCheck(BaseCheck):
    """Implements the "or" operator.

    A policy check that requires that at least one of a list of other
    checks returns True.
    """

    def __init__(self, rules):
        """Initialize the 'or' check.

        :param rules: A list of rules that will be tested.
        """

        self.rules = rules

    def __str__(self):
        """Return a string representation of this check."""

        return "(%s)" % ' or '.join(str(r) for r in self.rules)

    def __call__(self, target, cred):
        """Check the policy.

        Requires that at least one rule accept in order to return True.
        """

        for rule in self.rules:
            if rule(target, cred):
                return True

        return False

    def add_check(self, rule):
        """Adds rule to be tested.

        Allows addition of another rule to the list of rules that will
        be tested.  Returns the OrCheck object for convenience.
        """

        self.rules.append(rule)
        return self


def _parse_check(rule):
    """Parse a single base check rule into an appropriate Check object."""

    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_("Failed to understand rule %s") % rule)
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        return _checks[None](kind, match)
    else:
        LOG.error(_("No handler for matches of kind %s") % kind)
        return FalseCheck()


def _parse_list_rule(rule):
    """Translates the old list-of-lists syntax into a tree of Check objects.

    Provided for backwards compatibility.
    """

    # Empty rule defaults to True
    if not rule:
        return TrueCheck()

    # Outer list is joined by "or"; inner list by "and"
    or_list = []
    for inner_rule in rule:
        # Elide empty inner lists
        if not inner_rule:
            continue

        # Handle bare strings
        if isinstance(inner_rule, basestring):
            inner_rule = [inner_rule]

        # Parse the inner rules into Check objects
        and_list = [_parse_check(r) for r in inner_rule]

        # Append the appropriate check to the or_list
        if len(and_list) == 1:
            or_list.append(and_list[0])
        else:
            or_list.append(AndCheck(and_list))

    # If we have only one check, omit the "or"
    if not or_list:
        return FalseCheck()
    elif len(or_list) == 1:
        return or_list[0]

    return OrCheck(or_list)
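

# Illustrative usage sketch (not part of the original module): how the
# legacy list-of-lists syntax above maps onto a Check tree. The rule values
# here are hypothetical.
def _example_parse_list_rule():
    legacy = [["role:admin"], ["tenant:%(tenant_id)s", "role:member"]]
    tree = _parse_list_rule(legacy)
    # The outer list is OR'ed together; each inner list is AND'ed:
    expected = "(role:admin or (tenant:%(tenant_id)s and role:member))"
    assert str(tree) == expected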


# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')


def _parse_tokenize(rule):
    """Tokenizer for the policy language.

    Most of the single-character tokens are specified in the
    _tokenize_re; however, parentheses need to be handled specially,
    because they can appear inside a check string.  Thankfully, those
    parentheses that appear inside a check string can never occur at
    the very beginning or end ("%(variable)s" is the correct syntax).
    """

    for tok in _tokenize_re.split(rule):
        # Skip empty tokens
        if not tok or tok.isspace():
            continue

        # Handle leading parens on the token
        clean = tok.lstrip('(')
        for i in range(len(tok) - len(clean)):
            yield '(', '('

        # If it was only parentheses, continue
        if not clean:
            continue
        else:
            tok = clean

        # Handle trailing parens on the token
        clean = tok.rstrip(')')
        trail = len(tok) - len(clean)

        # Yield the cleaned token
        lowered = clean.lower()
        if lowered in ('and', 'or', 'not'):
            # Special tokens
            yield lowered, clean
        elif clean:
            # Not a special token, but not composed solely of ')'
            if len(tok) >= 2 and ((tok[0], tok[-1]) in
                                  [('"', '"'), ("'", "'")]):
                # It's a quoted string
                yield 'string', tok[1:-1]
            else:
                yield 'check', _parse_check(clean)

        # Yield the trailing parens
        for i in range(trail):
            yield ')', ')'
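

# Illustrative sketch (not part of the original module): the token kinds the
# tokenizer above yields for a small rule string.
def _example_parse_tokenize():
    rule = "role:admin or (tenant:%(tenant_id)s and role:member)"
    kinds = [tok for tok, value in _parse_tokenize(rule)]
    assert kinds == ['check', 'or', '(', 'check', 'and', 'check', ')']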


class ParseStateMeta(type):
    """Metaclass for the ParseState class.

    Facilitates identifying reduction methods.
    """

    def __new__(mcs, name, bases, cls_dict):
        """Create the class.

        Injects the 'reducers' list, a list of tuples matching token sequences
        to the names of the corresponding reduction methods.
        """

        reducers = []

        for key, value in cls_dict.items():
            if not hasattr(value, 'reducers'):
                continue
            for reduction in value.reducers:
                reducers.append((reduction, key))

        cls_dict['reducers'] = reducers

        return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)


def reducer(*tokens):
    """Decorator for reduction methods.

    Arguments are a sequence of tokens, in order, which should trigger running
    this reduction method.
    """

    def decorator(func):
        # Make sure we have a list of reducer sequences
        if not hasattr(func, 'reducers'):
            func.reducers = []

        # Add the tokens to the list of reducer sequences
        func.reducers.append(list(tokens))

        return func

    return decorator


class ParseState(object):
    """Implement the core of parsing the policy language.

    Uses a greedy reduction algorithm to reduce a sequence of tokens into
    a single terminal, the value of which will be the root of the Check tree.

    Note: error reporting is rather lacking.  The best we can get with
    this parser formulation is an overall "parse failed" error.
    Fortunately, the policy language is simple enough that this
    shouldn't be that big a problem.
    """

    __metaclass__ = ParseStateMeta

    def __init__(self):
        """Initialize the ParseState."""

        self.tokens = []
        self.values = []

    def reduce(self):
        """Perform a greedy reduction of the token stream.

        If a reducer method matches, it will be executed, then the
        reduce() method will be called recursively to search for any more
        possible reductions.
        """

        for reduction, methname in self.reducers:
            if (len(self.tokens) >= len(reduction) and
                    self.tokens[-len(reduction):] == reduction):
                # Get the reduction method
                meth = getattr(self, methname)

                # Reduce the token stream
                results = meth(*self.values[-len(reduction):])

                # Update the tokens and values
                self.tokens[-len(reduction):] = [r[0] for r in results]
                self.values[-len(reduction):] = [r[1] for r in results]

                # Check for any more reductions
                return self.reduce()

    def shift(self, tok, value):
        """Adds one more token to the state.  Calls reduce()."""

        self.tokens.append(tok)
        self.values.append(value)

        # Do a greedy reduce...
        self.reduce()

    @property
    def result(self):
        """Obtain the final result of the parse.

        Raises ValueError if the parse failed to reduce to a single result.
        """

        if len(self.values) != 1:
            raise ValueError("Could not parse rule")
        return self.values[0]

    @reducer('(', 'check', ')')
    @reducer('(', 'and_expr', ')')
    @reducer('(', 'or_expr', ')')
    def _wrap_check(self, _p1, check, _p2):
        """Turn parenthesized expressions into a 'check' token."""

        return [('check', check)]

    @reducer('check', 'and', 'check')
    def _make_and_expr(self, check1, _and, check2):
        """Create an 'and_expr'.

        Join two checks by the 'and' operator.
        """

        return [('and_expr', AndCheck([check1, check2]))]

    @reducer('and_expr', 'and', 'check')
    def _extend_and_expr(self, and_expr, _and, check):
        """Extend an 'and_expr' by adding one more check."""

        return [('and_expr', and_expr.add_check(check))]

    @reducer('check', 'or', 'check')
    def _make_or_expr(self, check1, _or, check2):
        """Create an 'or_expr'.

        Join two checks by the 'or' operator.
        """

        return [('or_expr', OrCheck([check1, check2]))]

    @reducer('or_expr', 'or', 'check')
    def _extend_or_expr(self, or_expr, _or, check):
        """Extend an 'or_expr' by adding one more check."""

        return [('or_expr', or_expr.add_check(check))]

    @reducer('not', 'check')
    def _make_not_expr(self, _not, check):
        """Invert the result of another check."""

        return [('check', NotCheck(check))]


def _parse_text_rule(rule):
    """Parse a textual policy rule.

    Translates a policy written in the policy language into a tree of
    Check objects.
    """

    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_("Failed to understand rule %(rule)r") % locals())

        # Fail closed
        return FalseCheck()


def parse_rule(rule):
    """Parses a policy rule into a tree of Check objects."""

    # If the rule is a string, it's in the policy language
    if isinstance(rule, basestring):
        return _parse_text_rule(rule)
    return _parse_list_rule(rule)
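

# Illustrative sketch (not part of the original module). The registered
# check classes defined below take an enforcer argument; passing None is
# enough here since no rule: check is involved.
def _example_parse_rule():
    check = parse_rule("role:admin")
    creds = {'roles': ['Admin', 'Member']}
    # RoleCheck lowercases both sides before comparing.
    assert check({}, creds, None) is True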


def register(name, func=None):
    """Register a function or Check class as a policy check.

    :param name: Gives the name of the check type, e.g., 'rule',
                 'role', etc.  If name is None, a default check type
                 will be registered.
    :param func: If given, provides the function or class to register.
                 If not given, returns a function taking one argument
                 to specify the function or class to register,
                 allowing use as a decorator.
    """

    # Perform the actual decoration by registering the function or
    # class.  Returns the function or class for compliance with the
    # decorator interface.
    def decorator(func):
        _checks[name] = func
        return func

    # If the function or class is given, do the registration
    if func:
        return decorator(func)

    return decorator


@register("rule")
class RuleCheck(Check):
    def __call__(self, target, creds, enforcer):
        """Recursively checks credentials based on the defined rules."""

        try:
            return enforcer.rules[self.match](target, creds, enforcer)
        except KeyError:
            # We don't have any matching rule; fail closed
            return False


@register("role")
class RoleCheck(Check):
    def __call__(self, target, creds, enforcer):
        """Check that there is a matching role in the cred dict."""

        return self.match.lower() in [x.lower() for x in creds['roles']]


@register('http')
class HttpCheck(Check):
    def __call__(self, target, creds, enforcer):
        """Check http: rules by calling to a remote server.

        This example implementation simply verifies that the response
        is exactly 'True'.
        """

        url = ('http:' + self.match) % target
        data = {'target': jsonutils.dumps(target),
                'credentials': jsonutils.dumps(creds)}
        post_data = urllib.urlencode(data)
        f = urllib2.urlopen(url, post_data)
        return f.read() == "True"


@register(None)
class GenericCheck(Check):
    def __call__(self, target, creds, enforcer):
        """Check an individual match.

        Matches look like:

            tenant:%(tenant_id)s
            role:compute:admin
        """

        # TODO(termie): do dict inspection via dot syntax
        match = self.match % target
        if self.kind in creds:
            return match == six.text_type(creds[self.kind])
        return False
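

# Illustrative sketch (not part of the original module): registering a
# custom check kind through register() above. The 'is_admin' kind and its
# semantics are hypothetical.
def _example_register_custom_check():
    @register('is_admin')
    class IsAdminCheck(Check):
        def __call__(self, target, creds, enforcer):
            return creds.get('is_admin', False) == (self.match == 'True')

    assert parse_rule("is_admin:True")({}, {'is_admin': True}, None)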
@ -1,246 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
System-level utilities and helper functions.
"""

import os
import random
import shlex
import signal

from eventlet.green import subprocess
from eventlet import greenthread

from openstack.common.gettextutils import _
from openstack.common import log as logging


LOG = logging.getLogger(__name__)


class InvalidArgumentError(Exception):
    def __init__(self, message=None):
        super(InvalidArgumentError, self).__init__(message)


class UnknownArgumentError(Exception):
    def __init__(self, message=None):
        super(UnknownArgumentError, self).__init__(message)


class ProcessExecutionError(Exception):
    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        self.exit_code = exit_code
        self.stderr = stderr
        self.stdout = stdout
        self.cmd = cmd
        self.description = description

        if description is None:
            description = "Unexpected error while running command."
        if exit_code is None:
            exit_code = '-'
        message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
                   % (description, cmd, exit_code, stdout, stderr))
        super(ProcessExecutionError, self).__init__(message)


class NoRootWrapSpecified(Exception):
    def __init__(self, message=None):
        super(NoRootWrapSpecified, self).__init__(message)


def _subprocess_setup():
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)


def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd: Passed to subprocess.Popen.
    :type cmd: string
    :param process_input: Send to opened process.
    :type process_input: string
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these codes.
    :type check_exit_code: boolean, int, or [int]
    :param delay_on_retry: True | False. Defaults to True. If set to True,
                           wait a short amount of time before retrying.
    :type delay_on_retry: boolean
    :param attempts: How many times to retry cmd.
    :type attempts: int
    :param run_as_root: True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper kwarg.
    :type run_as_root: boolean
    :param root_helper: command to prefix to commands called with
                        run_as_root=True
    :type root_helper: string
    :param shell: whether or not there should be a shell used to
                  execute this command. Defaults to false.
    :type shell: boolean
    :returns: (stdout, stderr) from process execution
    :raises: :class:`UnknownArgumentError` on
             receiving unknown arguments
    :raises: :class:`ProcessExecutionError`
    """

    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)

    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args '
                                     'to utils.execute: %r') % kwargs)

    if run_as_root and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=('Command requested root, but did not specify a root '
                         'helper.'))
        cmd = shlex.split(root_helper) + list(cmd)

    cmd = map(str, cmd)

    while attempts > 0:
        attempts -= 1
        try:
            LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell)
            result = None
            if process_input is not None:
                result = obj.communicate(process_input)
            else:
                result = obj.communicate()
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            if _returncode:
                LOG.debug(_('Result was %s') % _returncode)
                if not ignore_exit_code and _returncode not in check_exit_code:
                    (stdout, stderr) = result
                    raise ProcessExecutionError(exit_code=_returncode,
                                                stdout=stdout,
                                                stderr=stderr,
                                                cmd=' '.join(cmd))
            return result
        except ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.debug(_('%r failed. Retrying.'), cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
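

# Illustrative sketch (not part of the original module): a simple shell-out
# with a retry budget; check_exit_code may also be a list of allowed codes.
def _example_execute():
    out, err = execute('echo', 'hello', attempts=2, check_exit_code=[0])
    assert out == 'hello\n'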


def trycmd(*args, **kwargs):
    """A wrapper around execute() to more easily handle warnings and errors.

    Returns an (out, err) tuple of strings containing the output of
    the command's stdout and stderr.  If 'err' is not empty then the
    command can be considered to have failed.

    :discard_warnings   True | False.  Defaults to False.  If set to True,
                        then for succeeding commands, stderr is cleared

    """
    discard_warnings = kwargs.pop('discard_warnings', False)

    try:
        out, err = execute(*args, **kwargs)
        failed = False
    except ProcessExecutionError as exn:
        out, err = '', str(exn)
        failed = True

    if not failed and discard_warnings and err:
        # Handle commands that output to stderr but otherwise succeed
        err = ''

    return out, err


def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    LOG.debug(_('Running cmd (SSH): %s'), cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug(_('Result was %s') % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)

    return (stdout, stderr)
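

# Illustrative sketch (not part of the original module): trycmd() folds a
# ProcessExecutionError into a non-empty err string instead of raising.
def _example_trycmd():
    out, err = trycmd('false')
    assert err and not out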
@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -1,130 +0,0 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Root wrapper for OpenStack services

   Filters which commands a service is allowed to run as another user.

   To use this with oslo, you should set the following in
   oslo.conf:
   rootwrap_config=/etc/oslo/rootwrap.conf

   You also need to let the oslo user run oslo-rootwrap
   as root in sudoers:
   oslo ALL = (root) NOPASSWD: /usr/bin/oslo-rootwrap
                               /etc/oslo/rootwrap.conf *

   Service packaging should deploy .filters files only on nodes where
   they are needed, to avoid allowing more than is necessary.
"""

from __future__ import print_function

import ConfigParser
import logging
import os
import pwd
import signal
import subprocess
import sys


RC_UNAUTHORIZED = 99
RC_NOCOMMAND = 98
RC_BADCONFIG = 97
RC_NOEXECFOUND = 96


def _subprocess_setup():
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)


def _exit_error(execname, message, errorcode, log=True):
    print("%s: %s" % (execname, message))
    if log:
        logging.error(message)
    sys.exit(errorcode)


def main():
    # Split arguments, require at least a command
    execname = sys.argv.pop(0)
    if len(sys.argv) < 2:
        _exit_error(execname, "No command specified", RC_NOCOMMAND, log=False)

    configfile = sys.argv.pop(0)
    userargs = sys.argv[:]

    # Add ../ to sys.path to allow running from branch
    possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname),
                                                    os.pardir, os.pardir))
    if os.path.exists(os.path.join(possible_topdir, "oslo", "__init__.py")):
        sys.path.insert(0, possible_topdir)

    from openstack.common.rootwrap import wrapper

    # Load configuration
    try:
        rawconfig = ConfigParser.RawConfigParser()
        rawconfig.read(configfile)
        config = wrapper.RootwrapConfig(rawconfig)
    except ValueError as exc:
        msg = "Incorrect value in %s: %s" % (configfile, exc.message)
        _exit_error(execname, msg, RC_BADCONFIG, log=False)
    except ConfigParser.Error:
        _exit_error(execname, "Incorrect configuration file: %s" % configfile,
                    RC_BADCONFIG, log=False)

    if config.use_syslog:
        wrapper.setup_syslog(execname,
                             config.syslog_log_facility,
                             config.syslog_log_level)

    # Execute command if it matches any of the loaded filters
    filters = wrapper.load_filters(config.filters_path)
    try:
        filtermatch = wrapper.match_filter(filters, userargs,
                                           exec_dirs=config.exec_dirs)
        if filtermatch:
            command = filtermatch.get_command(userargs,
                                              exec_dirs=config.exec_dirs)
            if config.use_syslog:
                logging.info("(%s > %s) Executing %s (filter match = %s)" % (
                    os.getlogin(), pwd.getpwuid(os.getuid())[0],
                    command, filtermatch.name))

            obj = subprocess.Popen(command,
                                   stdin=sys.stdin,
                                   stdout=sys.stdout,
                                   stderr=sys.stderr,
                                   preexec_fn=_subprocess_setup,
                                   env=filtermatch.get_environment(userargs))
            obj.wait()
            sys.exit(obj.returncode)

    except wrapper.FilterMatchNotExecutable as exc:
        msg = ("Executable not found: %s (filter match = %s)"
               % (exc.match.exec_path, exc.match.name))
        _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog)

    except wrapper.NoFilterMatched:
        msg = ("Unauthorized command: %s (no filter matched)"
               % ' '.join(userargs))
        _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog)
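

# Illustrative sketch (not part of the original script; paths hypothetical):
# a service invokes the wrapper with the config file first, followed by the
# command to be filtered. A 99/98/97/96 exit status maps to the RC_*
# constants defined above.
def _example_invoke_rootwrap():
    # Equivalent to: sudo oslo-rootwrap /etc/oslo/rootwrap.conf kill -9 1234
    return subprocess.call(['sudo', '/usr/bin/oslo-rootwrap',
                            '/etc/oslo/rootwrap.conf', 'kill', '-9', '1234'])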
@ -1,350 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import re


class CommandFilter(object):
    """Command filter only checking that the 1st argument matches exec_path."""

    def __init__(self, exec_path, run_as, *args):
        self.name = ''
        self.exec_path = exec_path
        self.run_as = run_as
        self.args = args
        self.real_exec = None

    def get_exec(self, exec_dirs=[]):
        """Returns existing executable, or empty string if none found."""
        if self.real_exec is not None:
            return self.real_exec
        self.real_exec = ""
        if os.path.isabs(self.exec_path):
            if os.access(self.exec_path, os.X_OK):
                self.real_exec = self.exec_path
        else:
            for binary_path in exec_dirs:
                expanded_path = os.path.join(binary_path, self.exec_path)
                if os.access(expanded_path, os.X_OK):
                    self.real_exec = expanded_path
                    break
        return self.real_exec

    def match(self, userargs):
        """Only check that the first argument (command) matches exec_path."""
        return os.path.basename(self.exec_path) == userargs[0]

    def get_command(self, userargs, exec_dirs=[]):
        """Returns command to execute (with sudo -u if run_as != root)."""
        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
        if (self.run_as != 'root'):
            # Used to run commands at lesser privileges
            return ['sudo', '-u', self.run_as, to_exec] + userargs[1:]
        return [to_exec] + userargs[1:]

    def get_environment(self, userargs):
        """Returns specific environment to set, None if none."""
        return None


class RegExpFilter(CommandFilter):
    """Command filter doing regexp matching for every argument."""

    def match(self, userargs):
        # Early skip if command or number of args don't match
        if (len(self.args) != len(userargs)):
            # DENY: argument numbers don't match
            return False
        # Compare each arg (anchoring pattern explicitly at end of string)
        for (pattern, arg) in zip(self.args, userargs):
            try:
                if not re.match(pattern + '$', arg):
                    break
            except re.error:
                # DENY: Badly-formed filter
                return False
        else:
            # ALLOW: All arguments matched
            return True

        # DENY: Some arguments did not match
        return False
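

# Illustrative sketch (not part of the original module): a RegExpFilter as
# it would be built from a .filters line such as
#     kpartx: RegExpFilter, /sbin/kpartx, root, kpartx, -[ad], .*
def _example_regexp_filter():
    f = RegExpFilter('/sbin/kpartx', 'root', 'kpartx', '-[ad]', '.*')
    assert f.match(['kpartx', '-a', '/dev/loop0'])
    assert not f.match(['kpartx', '-x', '/dev/loop0'])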


class PathFilter(CommandFilter):
    """Command filter checking that path arguments are within given dirs

    One can specify the following constraints for command arguments:
        1) pass     - pass an argument as is to the resulting command
        2) some_str - check if an argument is equal to the given string
        3) abs path - check if a path argument is within the given base dir

    A typical rootwrap filter entry looks like this:
        # cmdname: filter name, raw command, user, arg_i_constraint [, ...]
        chown: PathFilter, /bin/chown, root, nova, /var/lib/images

    """

    def match(self, userargs):
        command, arguments = userargs[0], userargs[1:]

        equal_args_num = len(self.args) == len(arguments)
        exec_is_valid = super(PathFilter, self).match(userargs)
        args_equal_or_pass = all(
            arg == 'pass' or arg == value
            for arg, value in zip(self.args, arguments)
            if not os.path.isabs(arg)  # arguments not specifying abs paths
        )
        paths_are_within_base_dirs = all(
            os.path.commonprefix([arg, os.path.realpath(value)]) == arg
            for arg, value in zip(self.args, arguments)
            if os.path.isabs(arg)  # arguments specifying abs paths
        )

        return (equal_args_num and
                exec_is_valid and
                args_equal_or_pass and
                paths_are_within_base_dirs)

    def get_command(self, userargs, exec_dirs=[]):
        command, arguments = userargs[0], userargs[1:]

        # convert path values to canonical ones; copy other args as is
        args = [os.path.realpath(value) if os.path.isabs(arg) else value
                for arg, value in zip(self.args, arguments)]

        return super(PathFilter, self).get_command([command] + args,
                                                   exec_dirs)


class DnsmasqFilter(CommandFilter):
    """Specific filter for the dnsmasq call (which includes env)."""

    CONFIG_FILE_ARG = 'CONFIG_FILE'

    def match(self, userargs):
        if (userargs[0] == 'env' and
                userargs[1].startswith(self.CONFIG_FILE_ARG) and
                userargs[2].startswith('NETWORK_ID=') and
                userargs[3] == 'dnsmasq'):
            return True
        return False

    def get_command(self, userargs, exec_dirs=[]):
        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
        dnsmasq_pos = userargs.index('dnsmasq')
        return [to_exec] + userargs[dnsmasq_pos + 1:]

    def get_environment(self, userargs):
        env = os.environ.copy()
        env[self.CONFIG_FILE_ARG] = userargs[1].split('=')[-1]
        env['NETWORK_ID'] = userargs[2].split('=')[-1]
        return env


class DeprecatedDnsmasqFilter(DnsmasqFilter):
    """Variant of dnsmasq filter to support old-style FLAGFILE."""
    CONFIG_FILE_ARG = 'FLAGFILE'


class KillFilter(CommandFilter):
    """Specific filter for the kill calls.

    1st argument is the user to run /bin/kill under
    2nd argument is the location of the affected executable
        if the argument is not absolute, it is checked against $PATH
    Subsequent arguments list the accepted signals (if any)

    This filter relies on /proc to accurately determine affected
    executable, so it will only work on procfs-capable systems (not OSX).
    """

    def __init__(self, *args):
        super(KillFilter, self).__init__("/bin/kill", *args)

    def match(self, userargs):
        if userargs[0] != "kill":
            return False
        args = list(userargs)
        if len(args) == 3:
            # A specific signal is requested
            signal = args.pop(1)
            if signal not in self.args[1:]:
                # Requested signal not in accepted list
                return False
        else:
            if len(args) != 2:
                # Incorrect number of arguments
                return False
            if len(self.args) > 1:
                # No signal requested, but filter requires specific signal
                return False
        try:
            command = os.readlink("/proc/%d/exe" % int(args[1]))
        except (ValueError, OSError):
            # Incorrect PID
            return False

        # NOTE(yufang521247): /proc/PID/exe may have '\0' on the
        # end, because python doesn't stop at '\0' when reading the
        # target path.
        command = command.partition('\0')[0]

        # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on
        # the end if an executable is updated or deleted
        if command.endswith(" (deleted)"):
            command = command[:-len(" (deleted)")]

        kill_command = self.args[0]

        if os.path.isabs(kill_command):
            return kill_command == command

        return (os.path.isabs(command) and
                kill_command == os.path.basename(command) and
                os.path.dirname(command) in os.environ['PATH'].split(':'))


class ReadFileFilter(CommandFilter):
    """Specific filter for the utils.read_file_as_root call."""

    def __init__(self, file_path, *args):
        self.file_path = file_path
        super(ReadFileFilter, self).__init__("/bin/cat", "root", *args)

    def match(self, userargs):
        if userargs[0] != 'cat':
            return False
        if userargs[1] != self.file_path:
            return False
        if len(userargs) != 2:
            return False
        return True


class IpFilter(CommandFilter):
    """Specific filter for the ip utility that does not match exec."""

    def match(self, userargs):
        if userargs[0] == 'ip':
            if userargs[1] == 'netns':
                return (userargs[2] in ('list', 'add', 'delete'))
            else:
                return True


class EnvFilter(CommandFilter):
    """Specific filter for the env utility.

    Behaves like CommandFilter, except that it handles
    leading env A=B.. strings appropriately.
    """

    def _extract_env(self, arglist):
        """Extract all leading NAME=VALUE arguments from arglist."""

        envs = set()
        for arg in arglist:
            if '=' not in arg:
                break
            envs.add(arg.partition('=')[0])
        return envs

    def __init__(self, exec_path, run_as, *args):
        super(EnvFilter, self).__init__(exec_path, run_as, *args)

        env_list = self._extract_env(self.args)
        # Set exec_path to X when args are in the form of
        # env A=a B=b C=c X Y Z
        if "env" in exec_path and len(env_list) < len(self.args):
            self.exec_path = self.args[len(env_list)]

    def match(self, userargs):
        # ignore leading 'env'
        if userargs[0] == 'env':
            userargs.pop(0)

        # require one additional argument after configured ones
        if len(userargs) < len(self.args):
            return False

        # extract all env args
        user_envs = self._extract_env(userargs)
        filter_envs = self._extract_env(self.args)
        user_command = userargs[len(user_envs):len(user_envs) + 1]

        # match first non-env argument with CommandFilter
        return (super(EnvFilter, self).match(user_command)
                and len(filter_envs) and user_envs == filter_envs)

    def exec_args(self, userargs):
        args = userargs[:]

        # ignore leading 'env'
        if args[0] == 'env':
            args.pop(0)

        # Throw away leading NAME=VALUE arguments
        while args and '=' in args[0]:
            args.pop(0)

        return args

    def get_command(self, userargs, exec_dirs=[]):
        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
        return [to_exec] + self.exec_args(userargs)[1:]

    def get_environment(self, userargs):
        env = os.environ.copy()

        # ignore leading 'env'
        if userargs[0] == 'env':
            userargs.pop(0)

        # Handle leading NAME=VALUE pairs
        for a in userargs:
            env_name, equals, env_value = a.partition('=')
            if not equals:
                break
            if env_name and env_value:
                env[env_name] = env_value

        return env


class ChainingFilter(CommandFilter):
    def exec_args(self, userargs):
        return []


class IpNetnsExecFilter(ChainingFilter):
    """Specific filter for the ip utility that does match exec."""

    def match(self, userargs):
        # Network namespaces currently require root
        # require <ns> argument
        if self.run_as != "root" or len(userargs) < 4:
            return False

        return (userargs[:3] == ['ip', 'netns', 'exec'])

    def exec_args(self, userargs):
        args = userargs[4:]
        if args:
            args[0] = os.path.basename(args[0])
        return args
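

# Illustrative sketch (not part of the original module): an EnvFilter as it
# would be built from a .filters line such as
#     dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq
def _example_env_filter():
    f = EnvFilter('env', 'root', 'CONFIG_FILE=', 'NETWORK_ID=', 'dnsmasq')
    # The leading NAME=VALUE names must match the configured ones exactly.
    assert f.match(['env', 'CONFIG_FILE=/tmp/conf', 'NETWORK_ID=42',
                    'dnsmasq'])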
@ -1,161 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import ConfigParser
import logging
import logging.handlers
import os
import string

from openstack.common.rootwrap import filters


class NoFilterMatched(Exception):
    """This exception is raised when no filter matched."""
    pass


class FilterMatchNotExecutable(Exception):
    """Raised when a filter matched but no executable was found."""
    def __init__(self, match=None, **kwargs):
        self.match = match


class RootwrapConfig(object):

    def __init__(self, config):
        # filters_path
        self.filters_path = config.get("DEFAULT", "filters_path").split(",")

        # exec_dirs
        if config.has_option("DEFAULT", "exec_dirs"):
            self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",")
        else:
            # Use system PATH if exec_dirs is not specified
            self.exec_dirs = os.environ["PATH"].split(':')

        # syslog_log_facility
        if config.has_option("DEFAULT", "syslog_log_facility"):
            v = config.get("DEFAULT", "syslog_log_facility")
            facility_names = logging.handlers.SysLogHandler.facility_names
            self.syslog_log_facility = getattr(logging.handlers.SysLogHandler,
                                               v, None)
            if self.syslog_log_facility is None and v in facility_names:
                self.syslog_log_facility = facility_names.get(v)
            if self.syslog_log_facility is None:
                raise ValueError('Unexpected syslog_log_facility: %s' % v)
        else:
            default_facility = logging.handlers.SysLogHandler.LOG_SYSLOG
            self.syslog_log_facility = default_facility

        # syslog_log_level
        if config.has_option("DEFAULT", "syslog_log_level"):
            v = config.get("DEFAULT", "syslog_log_level")
            self.syslog_log_level = logging.getLevelName(v.upper())
            if (self.syslog_log_level == "Level %s" % v.upper()):
                raise ValueError('Unexpected syslog_log_level: %s' % v)
        else:
            self.syslog_log_level = logging.ERROR

        # use_syslog
        if config.has_option("DEFAULT", "use_syslog"):
            self.use_syslog = config.getboolean("DEFAULT", "use_syslog")
        else:
            self.use_syslog = False


def setup_syslog(execname, facility, level):
    rootwrap_logger = logging.getLogger()
    rootwrap_logger.setLevel(level)
    handler = logging.handlers.SysLogHandler(address='/dev/log',
                                             facility=facility)
    handler.setFormatter(logging.Formatter(
        os.path.basename(execname) + ': %(message)s'))
    rootwrap_logger.addHandler(handler)


def build_filter(class_name, *args):
    """Returns a filter object of class class_name."""
    if not hasattr(filters, class_name):
        logging.warning("Skipping unknown filter class (%s) specified "
                        "in filter definitions" % class_name)
        return None
    filterclass = getattr(filters, class_name)
    return filterclass(*args)


def load_filters(filters_path):
    """Load filters from a list of directories."""
    filterlist = []
    for filterdir in filters_path:
        if not os.path.isdir(filterdir):
            continue
        for filterfile in os.listdir(filterdir):
            filterconfig = ConfigParser.RawConfigParser()
            filterconfig.read(os.path.join(filterdir, filterfile))
            for (name, value) in filterconfig.items("Filters"):
                filterdefinition = [string.strip(s) for s in value.split(',')]
                newfilter = build_filter(*filterdefinition)
                if newfilter is None:
                    continue
                newfilter.name = name
                filterlist.append(newfilter)
    return filterlist
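

# Illustrative sketch (not part of the original module; the directory is
# hypothetical): given a compute.filters file containing
#     [Filters]
#     cat_shadow: ReadFileFilter, /etc/shadow
# load_filters() returns the corresponding filter objects.
def _example_load_filters():
    for f in load_filters(['/etc/oslo/rootwrap.d']):
        print("%s -> %s" % (f.name, f.exec_path))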


def match_filter(filter_list, userargs, exec_dirs=[]):
    """Checks user command and arguments through command filters.

    Returns the first matching filter.

    Raises NoFilterMatched if no filter matched.
    Raises FilterMatchNotExecutable if no executable was found for the
    best filter match.
    """
    first_not_executable_filter = None

    for f in filter_list:
        if f.match(userargs):
            if isinstance(f, filters.ChainingFilter):
                # This command calls exec; verify that the remaining args
                # match another filter.
                def non_chain_filter(fltr):
                    return (fltr.run_as == f.run_as
                            and not isinstance(fltr, filters.ChainingFilter))

                leaf_filters = [fltr for fltr in filter_list
                                if non_chain_filter(fltr)]
                args = f.exec_args(userargs)
                if (not args or not match_filter(leaf_filters,
                                                 args, exec_dirs=exec_dirs)):
                    continue

            # Try other filters if executable is absent
            if not f.get_exec(exec_dirs=exec_dirs):
                if not first_not_executable_filter:
                    first_not_executable_filter = f
                continue
            # Otherwise return matching filter for execution
            return f

    if first_not_executable_filter:
        # A filter matched, but no executable was found for it
        raise FilterMatchNotExecutable(match=first_not_executable_filter)

    # No filter matched
    raise NoFilterMatched()
@ -1,307 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A remote procedure call (rpc) abstraction.

For some wrappers that add message versioning to rpc, see:
    rpc.dispatcher
    rpc.proxy
"""

import inspect

from oslo.config import cfg

from openstack.common.gettextutils import _
from openstack.common import importutils
from openstack.common import local
from openstack.common import log as logging


LOG = logging.getLogger(__name__)


rpc_opts = [
    cfg.StrOpt('rpc_backend',
               default='%s.impl_kombu' % __package__,
               help="The messaging module to use, defaults to kombu."),
    cfg.IntOpt('rpc_thread_pool_size',
               default=64,
               help='Size of RPC thread pool'),
    cfg.IntOpt('rpc_conn_pool_size',
               default=30,
               help='Size of RPC connection pool'),
    cfg.IntOpt('rpc_response_timeout',
               default=60,
               help='Seconds to wait for a response from call or multicall'),
    cfg.IntOpt('rpc_cast_timeout',
               default=30,
               help='Seconds to wait before a cast expires (TTL). '
                    'Only supported by impl_zmq.'),
    cfg.ListOpt('allowed_rpc_exception_modules',
                default=['openstack.common.exception',
                         'nova.exception',
                         'cinder.exception',
                         'exceptions',
                         ],
                help='Modules of exceptions that are permitted to be '
                     'recreated upon receiving exception data from an rpc '
                     'call.'),
    cfg.BoolOpt('fake_rabbit',
                default=False,
                help='If passed, use a fake RabbitMQ provider'),
    cfg.StrOpt('control_exchange',
               default='openstack',
               help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]

CONF = cfg.CONF
CONF.register_opts(rpc_opts)


def set_defaults(control_exchange):
    cfg.set_defaults(rpc_opts,
                     control_exchange=control_exchange)


def create_connection(new=True):
    """Create a connection to the message bus used for rpc.

    For some example usage of creating a connection and some consumers on that
    connection, see nova.service.

    :param new: Whether or not to create a new connection.  A new connection
                will be created by default.  If new is False, the
                implementation is free to return an existing connection from a
                pool.

    :returns: An instance of openstack.common.rpc.common.Connection
    """
    return _get_impl().create_connection(CONF, new=new)


def _check_for_lock():
    if not CONF.debug:
        return None

    if (hasattr(local.strong_store, 'locks_held')
            and local.strong_store.locks_held):
        stack = ' :: '.join([frame[3] for frame in inspect.stack()])
        LOG.warn(_('An RPC call is being made while holding a lock. The '
                   'locks currently held are %(locks)s. This is probably a '
                   'bug. Please report it. Include the following: '
                   '[%(stack)s].'),
                 {'locks': local.strong_store.locks_held,
                  'stack': stack})
        return True

    return False


def call(context, topic, msg, timeout=None, check_for_lock=False):
    """Invoke a remote method that returns something.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to
                  the topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }
    :param timeout: int, number of seconds to use for a response timeout.
                    If set, this overrides the rpc_response_timeout option.
    :param check_for_lock: if True, a warning is emitted if an RPC call is
                           made with a lock held.

    :returns: A dict from the remote method.

    :raises: openstack.common.rpc.common.Timeout if a complete response
             is not received before the timeout is reached.
    """
    if check_for_lock:
        _check_for_lock()
    return _get_impl().call(CONF, context, topic, msg, timeout)


def cast(context, topic, msg):
    """Invoke a remote method that does not return anything.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to
                  the topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().cast(CONF, context, topic, msg)


def fanout_cast(context, topic, msg):
    """Broadcast a remote method invocation with no return.

    This method will get invoked on all consumers that were set up with this
    topic name and fanout=True.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to
                  the topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=True.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().fanout_cast(CONF, context, topic, msg)


def multicall(context, topic, msg, timeout=None, check_for_lock=False):
    """Invoke a remote method and get back an iterator.

    In this case, the remote method will be returning multiple values in
    separate messages, so the return values can be processed as they come in
    via an iterator.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to
                  the topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }
    :param timeout: int, number of seconds to use for a response timeout.
                    If set, this overrides the rpc_response_timeout option.
    :param check_for_lock: if True, a warning is emitted if an RPC call is
                           made with a lock held.

    :returns: An iterator.  The iterator will yield a tuple (N, X) where N is
              an index that starts at 0 and increases by one for each value
              returned and X is the Nth value that was returned by the remote
              method.

    :raises: openstack.common.rpc.common.Timeout if a complete response
             is not received before the timeout is reached.
    """
    if check_for_lock:
        _check_for_lock()
    return _get_impl().multicall(CONF, context, topic, msg, timeout)


def notify(context, topic, msg, envelope=False):
    """Send notification event.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the notification to.
    :param msg: This is a dict of content of event.
    :param envelope: Set to True to enable message envelope for notifications.

    :returns: None
    """
    return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)


def cleanup():
    """Clean up resources in use by the implementation.

    Clean up any resources that have been allocated by the RPC implementation.
    This is typically open connections to a messaging service.  This function
    would get called before an application using this API exits to allow
    connections to get torn down cleanly.

    :returns: None
    """
    return _get_impl().cleanup()


def cast_to_server(context, server_params, topic, msg):
    """Invoke a remote method that does not return anything.

    :param context: Information that identifies the user that has made this
                    request.
    :param server_params: Connection information
    :param topic: The topic to send the notification to.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().cast_to_server(CONF, context, server_params, topic,
                                      msg)


def fanout_cast_to_server(context, server_params, topic, msg):
    """Broadcast a remote method invocation with no return.

    :param context: Information that identifies the user that has made this
                    request.
    :param server_params: Connection information
    :param topic: The topic to send the notification to.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().fanout_cast_to_server(CONF, context, server_params,
                                             topic, msg)


def queue_get_for(context, topic, host):
    """Get a queue name for a given topic + host.

    This function only works if this naming convention is followed on the
    consumer side, as well.  For example, in nova, every instance of the
    nova-foo service calls create_consumer() for two topics:

        foo
        foo.<host>

    Messages sent to the 'foo' topic are distributed to exactly one instance
    of the nova-foo service.  The services are chosen in a round-robin
    fashion.  Messages sent to the 'foo.<host>' topic are sent to the
    nova-foo service on <host>.
    """
    return '%s.%s' % (topic, host) if host else topic
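

# Illustrative sketch (not part of the original module): the queue naming
# convention implemented above.
def _example_queue_get_for():
    assert queue_get_for(None, 'compute', 'host1') == 'compute.host1'
    assert queue_get_for(None, 'compute', None) == 'compute'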


_RPCIMPL = None


def _get_impl():
    """Delay import of rpc_backend until configuration is loaded."""
    global _RPCIMPL
    if _RPCIMPL is None:
        try:
            _RPCIMPL = importutils.import_module(CONF.rpc_backend)
        except ImportError:
            # For backwards compatibility with older nova config.
            impl = CONF.rpc_backend.replace('nova.rpc',
                                            'nova.openstack.common.rpc')
            _RPCIMPL = importutils.import_module(impl)
    return _RPCIMPL
@ -1,596 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
# Copyright 2011 - 2012, Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Shared code between AMQP based openstack.common.rpc implementations.
|
||||
|
||||
The code in this module is shared between the rpc implemenations based on AMQP.
|
||||
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
|
||||
AMQP, but is deprecated and predates this code.
|
||||
"""
|
||||
|
||||
import collections
|
||||
import inspect
|
||||
import sys
|
||||
import uuid
|
||||
|
||||
from eventlet import greenpool
|
||||
from eventlet import pools
|
||||
from eventlet import queue
|
||||
from eventlet import semaphore
|
||||
|
||||
from openstack.common import excutils
|
||||
from openstack.common.gettextutils import _
|
||||
from openstack.common import local
|
||||
from openstack.common import log as logging
|
||||
from openstack.common.rpc import common as rpc_common
|
||||
|
||||
|
||||
UNIQUE_ID = '_unique_id'
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Pool(pools.Pool):
|
||||
"""Class that implements a Pool of Connections."""
|
||||
def __init__(self, conf, connection_cls, *args, **kwargs):
|
||||
self.connection_cls = connection_cls
|
||||
self.conf = conf
|
||||
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
|
||||
kwargs.setdefault("order_as_stack", True)
|
||||
super(Pool, self).__init__(*args, **kwargs)
|
||||
self.reply_proxy = None
|
||||
|
||||
# TODO(comstud): Timeout connections not used in a while
|
||||
def create(self):
|
||||
LOG.debug(_('Pool creating new connection'))
|
||||
return self.connection_cls(self.conf)
|
||||
|
||||
def empty(self):
|
||||
while self.free_items:
|
||||
self.get().close()
|
||||
# Force a new connection pool to be created.
|
||||
# Note that this was added due to failing unit test cases. The issue
|
||||
# is the above "while loop" gets all the cached connections from the
|
||||
# pool and closes them, but never returns them to the pool, a pool
|
||||
# leak. The unit tests hang waiting for an item to be returned to the
|
||||
# pool. The unit tests get here via the tearDown() method. In the run
|
||||
# time code, it gets here via cleanup() and only appears in service.py
|
||||
# just before doing a sys.exit(), so cleanup() only happens once and
|
||||
# the leakage is not a problem.
|
||||
self.connection_cls.pool = None
|
||||
|
||||
|
||||
_pool_create_sem = semaphore.Semaphore()
|
||||
|
||||
|
||||
def get_connection_pool(conf, connection_cls):
|
||||
with _pool_create_sem:
|
||||
# Make sure only one thread tries to create the connection pool.
|
||||
if not connection_cls.pool:
|
||||
connection_cls.pool = Pool(conf, connection_cls)
|
||||
return connection_cls.pool
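
# As a rough sketch of how pooling is wired up: get_connection_pool()
# caches the Pool on the connection class itself, so every caller passing
# the same class shares one pool. The stub class below is hypothetical,
# only to make the flow concrete.
#
#     class _StubConnection(object):
#         pool = None  # get_connection_pool() stores the shared Pool here
#
#         def __init__(self, conf, server_params=None):
#             self.conf = conf
#
#         def reset(self):
#             pass
#
#         def close(self):
#             pass
#
#     pool_a = get_connection_pool(conf, _StubConnection)
#     pool_b = get_connection_pool(conf, _StubConnection)
#     assert pool_a is pool_b  # one shared pool per connection class
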
class ConnectionContext(rpc_common.Connection):
    """The class that is actually returned to the create_connection() caller.

    This is essentially a wrapper around Connection that supports 'with'.
    It can also return a new Connection, or one from a pool.

    The function will also catch when an instance of this class is to be
    deleted. With that we can return Connections to the pool on exceptions
    and so forth without making the caller be responsible for catching them.
    If possible the function makes sure to return a connection to the pool.
    """

    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
        """Create a new connection, or get one from the pool."""
        self.connection = None
        self.conf = conf
        self.connection_pool = connection_pool
        if pooled:
            self.connection = connection_pool.get()
        else:
            self.connection = connection_pool.connection_cls(
                conf,
                server_params=server_params)
        self.pooled = pooled

    def __enter__(self):
        """When with ConnectionContext() is used, return self."""
        return self

    def _done(self):
        """If the connection came from a pool, clean it up and put it back.
        If it did not come from a pool, close it.
        """
        if self.connection:
            if self.pooled:
                # Reset the connection so it's ready for the next caller
                # to grab from the pool
                self.connection.reset()
                self.connection_pool.put(self.connection)
            else:
                try:
                    self.connection.close()
                except Exception:
                    pass
            self.connection = None

    def __exit__(self, exc_type, exc_value, tb):
        """End of 'with' statement. We're done here."""
        self._done()

    def __del__(self):
        """Caller is done with this connection. Make sure we cleaned up."""
        self._done()

    def close(self):
        """Caller is done with this connection."""
        self._done()

    def create_consumer(self, topic, proxy, fanout=False):
        self.connection.create_consumer(topic, proxy, fanout)

    def create_worker(self, topic, proxy, pool_name):
        self.connection.create_worker(topic, proxy, pool_name)

    def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
                           ack_on_error=True):
        self.connection.join_consumer_pool(callback,
                                           pool_name,
                                           topic,
                                           exchange_name,
                                           ack_on_error)

    def consume_in_thread(self):
        self.connection.consume_in_thread()

    def __getattr__(self, key):
        """Proxy all other calls to the Connection instance."""
        if self.connection:
            return getattr(self.connection, key)
        else:
            raise rpc_common.InvalidRPCConnectionReuse()


class ReplyProxy(ConnectionContext):
    """Connection class for RPC replies / callbacks."""
    def __init__(self, conf, connection_pool):
        self._call_waiters = {}
        self._num_call_waiters = 0
        self._num_call_waiters_wrn_threshhold = 10
        self._reply_q = 'reply_' + uuid.uuid4().hex
        super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
        self.declare_direct_consumer(self._reply_q, self._process_data)
        self.consume_in_thread()

    def _process_data(self, message_data):
        msg_id = message_data.pop('_msg_id', None)
        waiter = self._call_waiters.get(msg_id)
        if not waiter:
            LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
                       ', message : %(data)s'), {'msg_id': msg_id,
                                                 'data': message_data})
            LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
        else:
            waiter.put(message_data)

    def add_call_waiter(self, waiter, msg_id):
        self._num_call_waiters += 1
        if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
            LOG.warn(_('Number of call waiters is greater than warning '
                       'threshold: %d. There could be a MulticallProxyWaiter '
                       'leak.') % self._num_call_waiters_wrn_threshhold)
            self._num_call_waiters_wrn_threshhold *= 2
        self._call_waiters[msg_id] = waiter

    def del_call_waiter(self, msg_id):
        self._num_call_waiters -= 1
        del self._call_waiters[msg_id]

    def get_reply_q(self):
        return self._reply_q


def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
              failure=None, ending=False, log_failure=True):
    """Sends a reply or an error on the channel signified by msg_id.

    Failure should be a sys.exc_info() tuple.

    """
    with ConnectionContext(conf, connection_pool) as conn:
        if failure:
            failure = rpc_common.serialize_remote_exception(failure,
                                                            log_failure)

        msg = {'result': reply, 'failure': failure}
        if ending:
            msg['ending'] = True
        _add_unique_id(msg)
        # If a reply_q exists, add the msg_id to the reply and pass the
        # reply_q to direct_send() to use it as the response queue.
        # Otherwise use the msg_id for backward compatibility.
        if reply_q:
            msg['_msg_id'] = msg_id
            conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
        else:
            conn.direct_send(msg_id, rpc_common.serialize_msg(msg))


class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call."""
    def __init__(self, **kwargs):
        self.msg_id = kwargs.pop('msg_id', None)
        self.reply_q = kwargs.pop('reply_q', None)
        self.conf = kwargs.pop('conf')
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        values = self.to_dict()
        values['conf'] = self.conf
        values['msg_id'] = self.msg_id
        values['reply_q'] = self.reply_q
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False,
              connection_pool=None, log_failure=True):
        if self.msg_id:
            msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
                      reply, failure, ending, log_failure)
            if ending:
                self.msg_id = None


def unpack_context(conf, msg):
    """Unpack context from msg."""
    context_dict = {}
    for key in list(msg.keys()):
        # NOTE(vish): Some versions of python don't like unicode keys
        #             in kwargs.
        key = str(key)
        if key.startswith('_context_'):
            value = msg.pop(key)
            context_dict[key[9:]] = value
    context_dict['msg_id'] = msg.pop('_msg_id', None)
    context_dict['reply_q'] = msg.pop('_reply_q', None)
    context_dict['conf'] = conf
    ctx = RpcContext.from_dict(context_dict)
    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
    return ctx


def pack_context(msg, context):
    """Pack context into msg.

    Values for message keys need to be less than 255 chars, so we pull
    context out into a bunch of separate keys. If we want to support
    more arguments in rabbit messages, we may want to do the same
    for args at some point.

    """
    context_d = dict([('_context_%s' % key, value)
                      for (key, value) in context.to_dict().iteritems()])
    msg.update(context_d)
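
# pack_context() and unpack_context() are inverses; a minimal sketch of
# the round trip, with made-up context values ('conf' stands in for the
# real cfg.CONF object):
#
#     ctx = rpc_common.CommonRpcContext(user='alice', project='demo')
#     msg = {'method': 'echo', 'args': {'value': 42}}
#     pack_context(msg, ctx)
#     # msg now also carries '_context_user' and '_context_project'.
#     # The consumer side calls unpack_context(conf, msg), which pops
#     # those keys back off and rebuilds an RpcContext (plus the
#     # msg_id/reply_q values, when present).
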
class _MsgIdCache(object):
    """This class checks for duplicate messages."""

    # NOTE: This value could be made a configuration item, but it rarely
    #       needs changing, so it is left static for now.
    DUP_MSG_CHECK_SIZE = 16

    def __init__(self, **kwargs):
        self.prev_msgids = collections.deque([],
                                             maxlen=self.DUP_MSG_CHECK_SIZE)

    def check_duplicate_message(self, message_data):
        """AMQP consumers may read the same message twice when exceptions
        occur before the ack is returned. This method prevents the message
        from being processed twice.
        """
        if UNIQUE_ID in message_data:
            msg_id = message_data[UNIQUE_ID]
            if msg_id not in self.prev_msgids:
                self.prev_msgids.append(msg_id)
            else:
                raise rpc_common.DuplicateMessageError(msg_id=msg_id)


def _add_unique_id(msg):
    """Add unique_id for checking duplicate messages."""
    unique_id = uuid.uuid4().hex
    msg.update({UNIQUE_ID: unique_id})
    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
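
# Together, _add_unique_id() on the send side and _MsgIdCache on the
# consume side detect redelivered duplicates; a minimal sketch:
#
#     cache = _MsgIdCache()
#     msg = {'method': 'echo', 'args': {}}
#     _add_unique_id(msg)                  # stamps msg['_unique_id']
#     cache.check_duplicate_message(msg)   # first delivery passes
#     try:
#         cache.check_duplicate_message(msg)   # redelivery raises
#     except rpc_common.DuplicateMessageError:
#         pass  # the consumer skips the duplicate
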
class _ThreadPoolWithWait(object):
    """Base class for a delayed invocation manager.

    Used by the Connection class to start up green threads
    to handle incoming messages.
    """

    def __init__(self, conf, connection_pool):
        self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
        self.connection_pool = connection_pool
        self.conf = conf

    def wait(self):
        """Wait for all callback threads to exit."""
        self.pool.waitall()


class CallbackWrapper(_ThreadPoolWithWait):
    """Wraps a straight callback.

    Allows it to be invoked in a green thread.
    """

    def __init__(self, conf, callback, connection_pool):
        """Initialize a CallbackWrapper object.

        :param conf: cfg.CONF instance
        :param callback: a callable (probably a function)
        :param connection_pool: connection pool as returned by
                                get_connection_pool()
        """
        super(CallbackWrapper, self).__init__(
            conf=conf,
            connection_pool=connection_pool,
        )
        self.callback = callback

    def __call__(self, message_data):
        self.pool.spawn_n(self.callback, message_data)


class ProxyCallback(_ThreadPoolWithWait):
    """Calls methods on a proxy object based on method and args."""

    def __init__(self, conf, proxy, connection_pool):
        super(ProxyCallback, self).__init__(
            conf=conf,
            connection_pool=connection_pool,
        )
        self.proxy = proxy
        self.msg_id_cache = _MsgIdCache()

    def __call__(self, message_data):
        """Consumer callback to call a method on a proxy object.

        Parses the message for validity and fires off a thread to call the
        proxy object method.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}

        """
        # It is important to clear the context here, because at this point
        # the previous context is stored in local.store.context
        if hasattr(local.store, 'context'):
            del local.store.context
        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
        self.msg_id_cache.check_duplicate_message(message_data)
        ctxt = unpack_context(self.conf, message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        version = message_data.get('version')
        namespace = message_data.get('namespace')
        if not method:
            LOG.warn(_('no method for message: %s') % message_data)
            ctxt.reply(_('No method for message: %s') % message_data,
                       connection_pool=self.connection_pool)
            return
        self.pool.spawn_n(self._process_data, ctxt, version, method,
                          namespace, args)

    def _process_data(self, ctxt, version, method, namespace, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate. If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, namespace,
                                       **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except rpc_common.ClientException as e:
            LOG.debug(_('Expected exception during message handling (%s)') %
                      e._exc_info[1])
            ctxt.reply(None, e._exc_info,
                       connection_pool=self.connection_pool,
                       log_failure=False)
        except Exception:
            # sys.exc_info() is deleted by LOG.exception().
            exc_info = sys.exc_info()
            LOG.error(_('Exception during message handling'),
                      exc_info=exc_info)
            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)


class MulticallProxyWaiter(object):
    def __init__(self, conf, msg_id, timeout, connection_pool):
        self._msg_id = msg_id
        self._timeout = timeout or conf.rpc_response_timeout
        self._reply_proxy = connection_pool.reply_proxy
        self._done = False
        self._got_ending = False
        self._conf = conf
        self._dataqueue = queue.LightQueue()
        # Add this caller to the reply proxy's call_waiters
        self._reply_proxy.add_call_waiter(self, self._msg_id)
        self.msg_id_cache = _MsgIdCache()

    def put(self, data):
        self._dataqueue.put(data)

    def done(self):
        if self._done:
            return
        self._done = True
        # Remove this caller from reply proxy's call_waiters
        self._reply_proxy.del_call_waiter(self._msg_id)

    def _process_data(self, data):
        result = None
        self.msg_id_cache.check_duplicate_message(data)
        if data['failure']:
            failure = data['failure']
            result = rpc_common.deserialize_remote_exception(self._conf,
                                                             failure)
        elif data.get('ending', False):
            self._got_ending = True
        else:
            result = data['result']
        return result

    def __iter__(self):
        """Return a result until we get a reply with an 'ending' flag."""
        if self._done:
            raise StopIteration
        while True:
            try:
                data = self._dataqueue.get(timeout=self._timeout)
                result = self._process_data(data)
            except queue.Empty:
                self.done()
                raise rpc_common.Timeout()
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.done()
            if self._got_ending:
                self.done()
                raise StopIteration
            if isinstance(result, Exception):
                self.done()
                raise result
            yield result


def create_connection(conf, new, connection_pool):
    """Create a connection."""
    return ConnectionContext(conf, connection_pool, pooled=not new)


_reply_proxy_create_sem = semaphore.Semaphore()


def multicall(conf, context, topic, msg, timeout, connection_pool):
    """Make a call that returns multiple times."""
    LOG.debug(_('Making synchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    LOG.debug(_('MSG_ID is %s') % (msg_id))
    _add_unique_id(msg)
    pack_context(msg, context)

    with _reply_proxy_create_sem:
        if not connection_pool.reply_proxy:
            connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
    msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
    wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    return wait_msg


def call(conf, context, topic, msg, timeout, connection_pool):
    """Sends a message on a topic and wait for a response."""
    rv = multicall(conf, context, topic, msg, timeout, connection_pool)
    # NOTE(vish): return the last result from the multicall
    rv = list(rv)
    if not rv:
        return
    return rv[-1]


def cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a topic without waiting for a response."""
    LOG.debug(_('Making asynchronous cast on %s...'), topic)
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg))


def fanout_cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a fanout exchange without waiting for a response."""
    LOG.debug(_('Making asynchronous fanout cast...'))
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.fanout_send(topic, rpc_common.serialize_msg(msg))


def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
    """Sends a message on a topic to a specific server."""
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg))


def fanout_cast_to_server(conf, context, server_params, topic, msg,
                          connection_pool):
    """Sends a message on a fanout exchange to a specific server."""
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as conn:
        conn.fanout_send(topic, rpc_common.serialize_msg(msg))


def notify(conf, context, topic, msg, connection_pool, envelope):
    """Sends a notification event on a topic."""
    LOG.debug(_('Sending %(event_type)s on %(topic)s'),
              dict(event_type=msg.get('event_type'),
                   topic=topic))
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        if envelope:
            msg = rpc_common.serialize_msg(msg)
        conn.notify_send(topic, msg)


def cleanup(connection_pool):
    if connection_pool:
        connection_pool.empty()


def get_control_exchange(conf):
    return conf.control_exchange
@ -1,530 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import sys
import traceback

from oslo.config import cfg
import six

from openstack.common.gettextutils import _
from openstack.common import importutils
from openstack.common import jsonutils
from openstack.common import local
from openstack.common import log as logging


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


'''RPC Envelope Version.

This version number applies to the top level structure of messages sent out.
It does *not* apply to the message payload, which must be versioned
independently. For example, when using rpc APIs, a version number is applied
for changes to the API being exposed over rpc. This version number is handled
in the rpc proxy and dispatcher modules.

This version number applies to the message envelope that is used in the
serialization done inside the rpc layer. See serialize_msg() and
deserialize_msg().

The current message format (version 2.0) is very simple. It is:

    {
        'oslo.version': <RPC Envelope Version as a String>,
        'oslo.message': <Application Message Payload, JSON encoded>
    }

Message format version '1.0' is just considered to be the messages we sent
without a message envelope.

So, the current message envelope just includes the envelope version. It may
eventually contain additional information, such as a signature for the message
payload.

We will JSON encode the application message payload. The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
_RPC_ENVELOPE_VERSION = '2.0'

_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'

_REMOTE_POSTFIX = '_Remote'


class RPCException(Exception):
    message = _("An unknown RPC related exception occurred.")

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        if not message:
            try:
                message = self.message % kwargs

            except Exception:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
                for name, value in kwargs.iteritems():
                    LOG.error("%s: %s" % (name, value))
                # at least get the core message out if something happened
                message = self.message

        super(RPCException, self).__init__(message)


class RemoteError(RPCException):
    """Signifies that a remote class has raised an exception.

    Contains a string representation of the type of the original exception,
    the value of the original exception, and the traceback. These are
    sent to the parent as a joined string so printing the exception
    contains all of the relevant info.

    """
    message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")

    def __init__(self, exc_type=None, value=None, traceback=None):
        self.exc_type = exc_type
        self.value = value
        self.traceback = traceback
        super(RemoteError, self).__init__(exc_type=exc_type,
                                          value=value,
                                          traceback=traceback)


class Timeout(RPCException):
    """Signifies that a timeout has occurred.

    This exception is raised if the rpc_response_timeout is reached while
    waiting for a response from the remote side.
    """
    message = _('Timeout while waiting on RPC response - '
                'topic: "%(topic)s", RPC method: "%(method)s" '
                'info: "%(info)s"')

    def __init__(self, info=None, topic=None, method=None):
        """Initialize a Timeout object.

        :param info: Extra info to convey to the user
        :param topic: The topic that the rpc call was sent to
        :param method: The name of the rpc method being called
        """
        self.info = info
        self.topic = topic
        self.method = method
        super(Timeout, self).__init__(
            None,
            info=info or _('<unknown>'),
            topic=topic or _('<unknown>'),
            method=method or _('<unknown>'))


class DuplicateMessageError(RPCException):
    message = _("Found duplicate message(%(msg_id)s). Skipping it.")


class InvalidRPCConnectionReuse(RPCException):
    message = _("Invalid reuse of an RPC connection.")


class UnsupportedRpcVersion(RPCException):
    message = _("Specified RPC version, %(version)s, not supported by "
                "this endpoint.")


class UnsupportedRpcEnvelopeVersion(RPCException):
    message = _("Specified RPC envelope version, %(version)s, "
                "not supported by this endpoint.")


class RpcVersionCapError(RPCException):
    message = _("Specified RPC version cap, %(version_cap)s, is too low")


class Connection(object):
    """A connection, returned by rpc.create_connection().

    This class represents a connection to the message bus used for rpc.
    An instance of this class should never be created by users of the rpc API.
    Use rpc.create_connection() instead.
    """
    def close(self):
        """Close the connection.

        This method must be called when the connection will no longer be used.
        It will ensure that any resources associated with the connection, such
        as a network connection, are cleaned up.
        """
        raise NotImplementedError()

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer on this connection.

        A consumer is associated with a message queue on the backend message
        bus. The consumer will read messages from the queue, unpack them, and
        dispatch them to the proxy object. The contents of the message pulled
        off of the queue will determine which method gets called on the proxy
        object.

        :param topic: This is a name associated with what to consume from.
                      Multiple instances of a service may consume from the
                      same topic. For example, all instances of nova-compute
                      consume from a queue called "compute". In that case, the
                      messages will get distributed amongst the consumers in a
                      round-robin fashion if fanout=False. If fanout=True,
                      every consumer associated with this topic will get a
                      copy of every message.
        :param proxy: The object that will handle all incoming messages.
        :param fanout: Whether or not this is a fanout topic. See the
                       documentation for the topic parameter for some
                       additional comments on this.
        """
        raise NotImplementedError()

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker on this connection.

        A worker is like a regular consumer of messages directed to a
        topic, except that it is part of a set of such consumers (the
        "pool") which may run in parallel. Every pool of workers will
        receive a given message, but only one worker in the pool will
        be asked to process it. Load is distributed across the members
        of the pool in round-robin fashion.

        :param topic: This is a name associated with what to consume from.
                      Multiple instances of a service may consume from the
                      same topic.
        :param proxy: The object that will handle all incoming messages.
        :param pool_name: String containing the name of the pool of workers
        """
        raise NotImplementedError()

    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
        """Register as a member of a group of consumers.

        Uses given topic from the specified exchange.
        Exactly one member of a given pool will receive each message.

        A message will be delivered to multiple pools, if more than
        one is created.

        :param callback: Callable to be invoked for each message.
        :type callback: callable accepting one argument
        :param pool_name: The name of the consumer pool.
        :type pool_name: str
        :param topic: The routing topic for desired messages.
        :type topic: str
        :param exchange_name: The name of the message exchange where
                              the client should attach. Defaults to
                              the configured exchange.
        :type exchange_name: str
        """
        raise NotImplementedError()

    def consume_in_thread(self):
        """Spawn a thread to handle incoming messages.

        Spawn a thread that will be responsible for handling all incoming
        messages for consumers that were set up on this connection.

        Message dispatching inside of this is expected to be implemented in a
        non-blocking manner. An example implementation would be having this
        thread pull messages in for all of the consumers, but utilize a thread
        pool for dispatching the messages to the proxy objects.
        """
        raise NotImplementedError()


def _safe_log(log_func, msg, msg_data):
    """Sanitizes the msg_data field before logging."""
    SANITIZE = {'set_admin_password': [('args', 'new_pass')],
                'run_instance': [('args', 'admin_password')],
                'route_message': [('args', 'message', 'args', 'method_info',
                                   'method_kwargs', 'password'),
                                  ('args', 'message', 'args', 'method_info',
                                   'method_kwargs', 'admin_password')]}

    has_method = 'method' in msg_data and msg_data['method'] in SANITIZE
    has_context_token = '_context_auth_token' in msg_data
    has_token = 'auth_token' in msg_data

    if not any([has_method, has_context_token, has_token]):
        return log_func(msg, msg_data)

    msg_data = copy.deepcopy(msg_data)

    if has_method:
        for arg in SANITIZE.get(msg_data['method'], []):
            try:
                d = msg_data
                for elem in arg[:-1]:
                    d = d[elem]
                d[arg[-1]] = '<SANITIZED>'
            except KeyError as e:
                LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'),
                         {'item': arg,
                          'err': e})

    if has_context_token:
        msg_data['_context_auth_token'] = '<SANITIZED>'

    if has_token:
        msg_data['auth_token'] = '<SANITIZED>'

    return log_func(msg, msg_data)


def serialize_remote_exception(failure_info, log_failure=True):
    """Prepares exception data to be sent over rpc.

    Failure_info should be a sys.exc_info() tuple.

    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    if log_failure:
        LOG.error(_("Returning exception %s to caller"),
                  six.text_type(failure))
        LOG.error(tb)

    kwargs = {}
    if hasattr(failure, 'kwargs'):
        kwargs = failure.kwargs

    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Lets turn it back into the original exception type.
    cls_name = str(failure.__class__.__name__)
    mod_name = str(failure.__class__.__module__)
    if (cls_name.endswith(_REMOTE_POSTFIX) and
            mod_name.endswith(_REMOTE_POSTFIX)):
        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]

    data = {
        'class': cls_name,
        'module': mod_name,
        'message': six.text_type(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs
    }

    json_data = jsonutils.dumps(data)

    return json_data


def deserialize_remote_exception(conf, data):
    failure = jsonutils.loads(str(data))

    trace = failure.get('tb', [])
    message = failure.get('message', "") + "\n" + "\n".join(trace)
    name = failure.get('class')
    module = failure.get('module')

    # NOTE(ameade): We DO NOT want to allow just any module to be imported, in
    # order to prevent arbitrary code execution.
    if module not in conf.allowed_rpc_exception_modules:
        return RemoteError(name, failure.get('message'), trace)

    try:
        mod = importutils.import_module(module)
        klass = getattr(mod, name)
        if not issubclass(klass, Exception):
            raise TypeError("Can only deserialize Exceptions")

        failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
    except (AttributeError, TypeError, ImportError):
        return RemoteError(name, failure.get('message'), trace)

    ex_type = type(failure)
    str_override = lambda self: message
    new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
                       {'__str__': str_override, '__unicode__': str_override})
    new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
    try:
        # NOTE(ameade): Dynamically create a new exception type and swap it in
        # as the new type for the exception. This only works on user defined
        # Exceptions and not core python exceptions. This is important because
        # we cannot necessarily change an exception message so we must override
        # the __str__ method.
        failure.__class__ = new_ex_type
    except TypeError:
        # NOTE(ameade): If a core exception then just add the traceback to the
        # first exception argument.
        failure.args = (message,) + failure.args[1:]
    return failure


class CommonRpcContext(object):
    def __init__(self, **kwargs):
        self.values = kwargs

    def __getattr__(self, key):
        try:
            return self.values[key]
        except KeyError:
            raise AttributeError(key)

    def to_dict(self):
        return copy.deepcopy(self.values)

    @classmethod
    def from_dict(cls, values):
        return cls(**values)

    def deepcopy(self):
        return self.from_dict(self.to_dict())

    def update_store(self):
        local.store.context = self

    def elevated(self, read_deleted=None, overwrite=False):
        """Return a version of this context with admin flag set."""
        # TODO(russellb) This method is a bit of a nova-ism. It makes
        # some assumptions about the data in the request context sent
        # across rpc, while the rest of this class does not. We could get
        # rid of this if we changed the nova code that uses this to
        # convert the RpcContext back to its native RequestContext doing
        # something like nova.context.RequestContext.from_dict(ctxt.to_dict())

        context = self.deepcopy()
        context.values['is_admin'] = True

        context.values.setdefault('roles', [])

        if 'admin' not in context.values['roles']:
            context.values['roles'].append('admin')

        if read_deleted is not None:
            context.values['read_deleted'] = read_deleted

        return context


class ClientException(Exception):
    """Encapsulates actual exception expected to be hit by a RPC proxy object.

    Merely instantiating it records the current exception information, which
    will be passed back to the RPC client without exceptional logging.
    """
    def __init__(self):
        self._exc_info = sys.exc_info()


def catch_client_exception(exceptions, func, *args, **kwargs):
    try:
        return func(*args, **kwargs)
    except Exception as e:
        if type(e) in exceptions:
            raise ClientException()
        else:
            raise


def client_exceptions(*exceptions):
    """Decorator for manager methods that raise expected exceptions.

    Marking a Manager method with this decorator allows the declaration
    of expected exceptions that the RPC layer should not consider fatal,
    and not log as if they were generated in a real error scenario. Note
    that this will cause listed exceptions to be wrapped in a
    ClientException, which is used internally by the RPC layer.
    """
    def outer(func):
        def inner(*args, **kwargs):
            return catch_client_exception(exceptions, func, *args, **kwargs)
        return inner
    return outer


def version_is_compatible(imp_version, version):
    """Determine whether versions are compatible.

    :param imp_version: The version implemented
    :param version: The version requested by an incoming message.
    """
    version_parts = version.split('.')
    imp_version_parts = imp_version.split('.')
    if int(version_parts[0]) != int(imp_version_parts[0]):  # Major
        return False
    if int(version_parts[1]) > int(imp_version_parts[1]):  # Minor
        return False
    return True
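
# For example, under these rules:
#
#     version_is_compatible('1.2', '1.0')  ->  True  (older minor requested)
#     version_is_compatible('1.2', '1.3')  ->  False (requested minor too new)
#     version_is_compatible('2.0', '1.2')  ->  False (major versions differ)
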
def serialize_msg(raw_msg):
    # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
    # information about this format.
    msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
           _MESSAGE_KEY: jsonutils.dumps(raw_msg)}

    return msg


def deserialize_msg(msg):
    # NOTE(russellb): Hang on to your hats, this road is about to
    # get a little bumpy.
    #
    # Robustness Principle:
    #    "Be strict in what you send, liberal in what you accept."
    #
    # At this point we have to do a bit of guessing about what it
    # is we just received. Here is the set of possibilities:
    #
    # 1) We received a dict. This could be 2 things:
    #
    #   a) Inspect it to see if it looks like a standard message envelope.
    #      If so, great!
    #
    #   b) If it doesn't look like a standard message envelope, it could
    #      either be a notification, or a message from before we added a
    #      message envelope (referred to as version 1.0).
    #      Just return the message as-is.
    #
    # 2) It's any other non-dict type. Just return it and hope for the best.
    #    This case covers return values from rpc.call() from before message
    #    envelopes were used. (messages to call a method were always a dict)

    if not isinstance(msg, dict):
        # See #2 above.
        return msg

    base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
    if not all(map(lambda key: key in msg, base_envelope_keys)):
        # See #1.b above.
        return msg

    # At this point we think we have the message envelope
    # format we were expecting. (#1.a above)

    if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
        raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])

    raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])

    return raw_msg
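
# A quick round trip through the envelope, as a sketch:
#
#     payload = {'method': 'echo', 'args': {'value': 42}}
#     envelope = serialize_msg(payload)
#     # envelope == {'oslo.version': '2.0', 'oslo.message': '{...}'}
#     assert deserialize_msg(envelope) == payload
#     # A dict without the envelope keys passes through unchanged (v1.0):
#     assert deserialize_msg(payload) == payload
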
@ -1,178 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Code for rpc message dispatching.

Messages that come in have a version number associated with them. RPC API
version numbers are in the form:

    Major.Minor

For a given message with version X.Y, the receiver must be marked as able to
handle messages of version A.B, where:

    A = X

    B >= Y

The Major version number would be incremented for an almost completely new
API. The Minor version number would be incremented for backwards compatible
changes to an existing API. A backwards compatible change could be something
like adding a new method, adding an argument to an existing method (but not
requiring it), or changing the type for an existing argument (but still
handling the old type as well).

The conversion over to a versioned API must be done on both the client side
and server side of the API at the same time. However, as the code stands
today, there can be both versioned and unversioned APIs implemented in the
same code base.

EXAMPLES
========

Nova was the first project to use versioned rpc APIs. Consider the compute
rpc API as an example. The client side is in nova/compute/rpcapi.py and the
server side is in nova/compute/manager.py.


Example 1) Adding a new method.
-------------------------------

Adding a new method is a backwards compatible change. It should be added to
nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to
X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should
have a specific version specified to indicate the minimum API version that
must be implemented for the method to be supported. For example::

    def get_host_uptime(self, ctxt, host):
        topic = _compute_topic(self.topic, ctxt, host, None)
        return self.call(ctxt, self.make_msg('get_host_uptime'), topic,
                version='1.1')

In this case, version '1.1' is the first version that supported the
get_host_uptime() method.


Example 2) Adding a new parameter.
----------------------------------

Adding a new parameter to an rpc method can be made backwards compatible.
The RPC_API_VERSION on the server side (nova/compute/manager.py) should be
bumped. The implementation of the method must not expect the parameter to
be present::

    def some_remote_method(self, arg1, arg2, newarg=None):
        # The code needs to deal with newarg=None for cases
        # where an older client sends a message without it.
        pass

On the client side, the same changes should be made as in example 1. The
minimum version that supports the new parameter should be specified.
"""

from openstack.common.rpc import common as rpc_common
from openstack.common.rpc import serializer as rpc_serializer


class RpcDispatcher(object):
    """Dispatch rpc messages according to the requested API version.

    This class can be used as the top level 'manager' for a service. It
    contains a list of underlying managers that have an RPC_API_VERSION
    attribute.
    """

    def __init__(self, callbacks, serializer=None):
        """Initialize the rpc dispatcher.

        :param callbacks: List of proxy objects that are an instance
                          of a class with rpc methods exposed. Each proxy
                          object should have an RPC_API_VERSION attribute.
        :param serializer: The Serializer object that will be used to
                           deserialize arguments before the method call and
                           to serialize the result after it returns.
        """
        self.callbacks = callbacks
        if serializer is None:
            serializer = rpc_serializer.NoOpSerializer()
        self.serializer = serializer
        super(RpcDispatcher, self).__init__()

    def _deserialize_args(self, context, kwargs):
        """Helper method called to deserialize args before dispatch.

        This calls our serializer on each argument, returning a new set of
        args that have been deserialized.

        :param context: The request context
        :param kwargs: The arguments to be deserialized
        :returns: A new set of deserialized args
        """
        new_kwargs = dict()
        for argname, arg in kwargs.iteritems():
            new_kwargs[argname] = self.serializer.deserialize_entity(context,
                                                                     arg)
        return new_kwargs

    def dispatch(self, ctxt, version, method, namespace, **kwargs):
        """Dispatch a message based on a requested version.

        :param ctxt: The request context
        :param version: The requested API version from the incoming message
        :param method: The method requested to be called by the incoming
                       message.
        :param namespace: The namespace for the requested method. If None,
                          the dispatcher will look for a method on a callback
                          object with no namespace set.
        :param kwargs: A dict of keyword arguments to be passed to the method.

        :returns: Whatever is returned by the underlying method that gets
                  called.
        """
        if not version:
            version = '1.0'

        had_compatible = False
        for proxyobj in self.callbacks:
            # Check for namespace compatibility
            try:
                cb_namespace = proxyobj.RPC_API_NAMESPACE
            except AttributeError:
                cb_namespace = None

            if namespace != cb_namespace:
                continue

            # Check for version compatibility
            try:
                rpc_api_version = proxyobj.RPC_API_VERSION
            except AttributeError:
                rpc_api_version = '1.0'

            is_compatible = rpc_common.version_is_compatible(rpc_api_version,
                                                             version)
            had_compatible = had_compatible or is_compatible

            if not hasattr(proxyobj, method):
                continue
            if is_compatible:
                kwargs = self._deserialize_args(ctxt, kwargs)
                result = getattr(proxyobj, method)(ctxt, **kwargs)
                return self.serializer.serialize_entity(ctxt, result)

        if had_compatible:
            raise AttributeError("No such RPC function '%s'" % method)
        else:
            raise rpc_common.UnsupportedRpcVersion(version=version)
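
# A minimal sketch of wiring a manager into the dispatcher; the manager
# class and its method are hypothetical:
#
#     class ExampleManager(object):
#         RPC_API_VERSION = '1.1'
#
#         def echo(self, ctxt, value):
#             return value
#
#     dispatcher = RpcDispatcher([ExampleManager()])
#     dispatcher.dispatch(ctxt, '1.0', 'echo', None, value=42)  # -> 42
#     # while an incompatible version, e.g. '2.0', raises
#     # rpc_common.UnsupportedRpcVersion.
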
@ -1,195 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake RPC implementation which calls proxy methods directly with no
queues. Casts will block, but this is very useful for tests.
"""

import inspect
# NOTE(russellb): We specifically want to use json, not our own jsonutils.
# jsonutils has some extra logic to automatically convert objects to primitive
# types so that they can be serialized. We want to catch all cases where
# non-primitive types make it into this code and treat it as an error.
import json
import time

import eventlet

from openstack.common.rpc import common as rpc_common

CONSUMERS = {}


class RpcContext(rpc_common.CommonRpcContext):
    def __init__(self, **kwargs):
        super(RpcContext, self).__init__(**kwargs)
        self._response = []
        self._done = False

    def deepcopy(self):
        values = self.to_dict()
        new_inst = self.__class__(**values)
        new_inst._response = self._response
        new_inst._done = self._done
        return new_inst

    def reply(self, reply=None, failure=None, ending=False):
        if ending:
            self._done = True
        if not self._done:
            self._response.append((reply, failure))


class Consumer(object):
    def __init__(self, topic, proxy):
        self.topic = topic
        self.proxy = proxy

    def call(self, context, version, method, namespace, args, timeout):
        done = eventlet.event.Event()

        def _inner():
            ctxt = RpcContext.from_dict(context.to_dict())
            try:
                rval = self.proxy.dispatch(context, version, method,
                                           namespace, **args)
                res = []
                # Caller might have called ctxt.reply() manually
                for (reply, failure) in ctxt._response:
                    if failure:
                        raise failure[0], failure[1], failure[2]
                    res.append(reply)
                # if ending not 'sent'...we might have more data to
                # return from the function itself
                if not ctxt._done:
                    if inspect.isgenerator(rval):
                        for val in rval:
                            res.append(val)
                    else:
                        res.append(rval)
                done.send(res)
            except rpc_common.ClientException as e:
                done.send_exception(e._exc_info[1])
            except Exception as e:
                done.send_exception(e)

        thread = eventlet.greenthread.spawn(_inner)

        if timeout:
            start_time = time.time()
            while not done.ready():
                eventlet.greenthread.sleep(1)
                cur_time = time.time()
                if (cur_time - start_time) > timeout:
                    thread.kill()
                    raise rpc_common.Timeout()

        return done.wait()


class Connection(object):
    """Connection object."""

    def __init__(self):
        self.consumers = []

    def create_consumer(self, topic, proxy, fanout=False):
        consumer = Consumer(topic, proxy)
        self.consumers.append(consumer)
        if topic not in CONSUMERS:
            CONSUMERS[topic] = []
        CONSUMERS[topic].append(consumer)

    def close(self):
        for consumer in self.consumers:
            CONSUMERS[consumer.topic].remove(consumer)
        self.consumers = []

    def consume_in_thread(self):
        pass


def create_connection(conf, new=True):
    """Create a connection."""
    return Connection()


def check_serialize(msg):
    """Make sure a message intended for rpc can be serialized."""
    json.dumps(msg)


def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""

    check_serialize(msg)

    method = msg.get('method')
    if not method:
        return
    args = msg.get('args', {})
    version = msg.get('version', None)
    namespace = msg.get('namespace', None)

    try:
        consumer = CONSUMERS[topic][0]
    except (KeyError, IndexError):
        return iter([None])
    else:
        return consumer.call(context, version, method, namespace, args,
                             timeout)


def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    rv = multicall(conf, context, topic, msg, timeout)
    # NOTE(vish): return the last result from the multicall
    rv = list(rv)
    if not rv:
        return
    return rv[-1]


def cast(conf, context, topic, msg):
    check_serialize(msg)
    try:
        call(conf, context, topic, msg)
    except Exception:
        pass


def notify(conf, context, topic, msg, envelope):
    check_serialize(msg)


def cleanup():
    pass


def fanout_cast(conf, context, topic, msg):
    """Cast to all consumers of a topic."""
    check_serialize(msg)
    method = msg.get('method')
    if not method:
        return
    args = msg.get('args', {})
    version = msg.get('version', None)
    namespace = msg.get('namespace', None)

    for consumer in CONSUMERS.get(topic, []):
        try:
            consumer.call(context, version, method, namespace, args, None)
        except Exception:
            pass
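
# A sketch of how a test might exercise this driver end to end, assuming
# the RpcDispatcher shown earlier; the manager class is hypothetical:
#
#     from openstack.common.rpc import dispatcher as rpc_dispatcher
#
#     class EchoManager(object):
#         def echo(self, ctxt, value):
#             return value
#
#     conn = create_connection(None)
#     conn.create_consumer('test_topic',
#                          rpc_dispatcher.RpcDispatcher([EchoManager()]))
#     ctxt = rpc_common.CommonRpcContext(user='tester')
#     result = call(None, ctxt, 'test_topic',
#                   {'method': 'echo', 'args': {'value': 42}})
#     # result == 42; everything ran in-process, no broker involved.
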
|
@ -1,866 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import itertools
import socket
import ssl
import sys
import time
import uuid

import eventlet
import greenlet
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from oslo.config import cfg

from openstack.common import excutils
from openstack.common.gettextutils import _
from openstack.common import network_utils
from openstack.common.rpc import amqp as rpc_amqp
from openstack.common.rpc import common as rpc_common

kombu_opts = [
    cfg.StrOpt('kombu_ssl_version',
               default='',
               help='SSL version to use (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_keyfile',
               default='',
               help='SSL key file (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_certfile',
               default='',
               help='SSL cert file (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_ca_certs',
               default='',
               help=('SSL certification authority file '
                     '(valid only if SSL enabled)')),
    cfg.StrOpt('rabbit_host',
               default='localhost',
               help='The RabbitMQ broker address where a single node is used'),
    cfg.IntOpt('rabbit_port',
               default=5672,
               help='The RabbitMQ broker port where a single node is used'),
    cfg.ListOpt('rabbit_hosts',
                default=['$rabbit_host:$rabbit_port'],
                help='RabbitMQ HA cluster host:port pairs'),
    cfg.BoolOpt('rabbit_use_ssl',
                default=False,
                help='connect over SSL for RabbitMQ'),
    cfg.StrOpt('rabbit_userid',
               default='guest',
               help='the RabbitMQ userid'),
    cfg.StrOpt('rabbit_password',
               default='guest',
               help='the RabbitMQ password',
               secret=True),
    cfg.StrOpt('rabbit_virtual_host',
               default='/',
               help='the RabbitMQ virtual host'),
    cfg.IntOpt('rabbit_retry_interval',
               default=1,
               help='how frequently to retry connecting with RabbitMQ'),
    cfg.IntOpt('rabbit_retry_backoff',
               default=2,
               help='how long to backoff for between retries when connecting '
                    'to RabbitMQ'),
    cfg.IntOpt('rabbit_max_retries',
               default=0,
               help='maximum retries with trying to connect to RabbitMQ '
                    '(the default of 0 implies an infinite retry count)'),
    cfg.BoolOpt('rabbit_durable_queues',
                default=False,
                help='use durable queues in RabbitMQ'),
    cfg.BoolOpt('rabbit_ha_queues',
                default=False,
                help='use H/A queues in RabbitMQ (x-ha-policy: all).'
                     'You need to wipe RabbitMQ database when '
                     'changing this option.'),

]

cfg.CONF.register_opts(kombu_opts)

LOG = rpc_common.LOG


def _get_queue_arguments(conf):
    """Construct the arguments for declaring a queue.

    If the rabbit_ha_queues option is set, we declare a mirrored queue
    as described here:

      http://www.rabbitmq.com/ha.html

    Setting x-ha-policy to all means that the queue will be mirrored
    to all nodes in the cluster.
    """
    return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
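
# --- Editor's illustrative sketch; not part of the reverted file. ---
# With rabbit_ha_queues enabled, every queue declared by this driver carries
# the x-ha-policy argument, so RabbitMQ mirrors it to all cluster nodes.
# queue_arguments() below is a hypothetical stand-in minus the conf object.
def queue_arguments(ha_queues):
    # Mirrors _get_queue_arguments() above.
    return {'x-ha-policy': 'all'} if ha_queues else {}


assert queue_arguments(True) == {'x-ha-policy': 'all'}
assert queue_arguments(False) == {}
# --- End editor's sketch. ---
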
class ConsumerBase(object):
    """Consumer base class."""

    def __init__(self, channel, callback, tag, **kwargs):
        """Declare a queue on an amqp channel.

        'channel' is the amqp channel to use
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        queue name, exchange name, and other kombu options are
        passed in here as a dictionary.
        """
        self.callback = callback
        self.tag = str(tag)
        self.kwargs = kwargs
        self.queue = None
        self.ack_on_error = kwargs.get('ack_on_error', True)
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-declare the queue after a rabbit reconnect."""
        self.channel = channel
        self.kwargs['channel'] = channel
        self.queue = kombu.entity.Queue(**self.kwargs)
        self.queue.declare()

    def _callback_handler(self, message, callback):
        """Call callback with deserialized message.

        Messages that are processed without exception are ack'ed.

        If the message processing generates an exception, it will be
        ack'ed if ack_on_error=True. Otherwise it will be .reject()'ed.
        Rejection is better than waiting for the message to timeout.
        Rejected messages are immediately requeued.
        """

        ack_msg = False
        try:
            msg = rpc_common.deserialize_msg(message.payload)
            callback(msg)
            ack_msg = True
        except Exception:
            if self.ack_on_error:
                ack_msg = True
                LOG.exception(_("Failed to process message"
                                " ... skipping it."))
            else:
                LOG.exception(_("Failed to process message"
                                " ... will requeue."))
        finally:
            if ack_msg:
                message.ack()
            else:
                message.reject()

    def consume(self, *args, **kwargs):
        """Actually declare the consumer on the amqp channel.  This will
        start the flow of messages from the queue.  Using the
        Connection.iterconsume() iterator will process the messages,
        calling the appropriate callback.

        If a callback is specified in kwargs, use that.  Otherwise,
        use the callback passed during __init__()

        If kwargs['nowait'] is True, then this call will block until
        a message is read.

        """

        options = {'consumer_tag': self.tag}
        options['nowait'] = kwargs.get('nowait', False)
        callback = kwargs.get('callback', self.callback)
        if not callback:
            raise ValueError("No callback defined")

        def _callback(raw_message):
            message = self.channel.message_to_python(raw_message)
            self._callback_handler(message, callback)

        self.queue.consume(*args, callback=_callback, **options)

    def cancel(self):
        """Cancel the consuming from the queue, if it has started."""
        try:
            self.queue.cancel(self.tag)
        except KeyError as e:
            # NOTE(comstud): Kludge to get around an amqplib bug
            if str(e) != "u'%s'" % self.tag:
                raise
        self.queue = None


class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'."""

    def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
        """Init a 'direct' queue.

        'channel' is the amqp channel to use
        'msg_id' is the msg_id to listen on
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        Other kombu options may be passed
        """
        # Default options
        options = {'durable': False,
                   'queue_arguments': _get_queue_arguments(conf),
                   'auto_delete': True,
                   'exclusive': False}
        options.update(kwargs)
        exchange = kombu.entity.Exchange(name=msg_id,
                                         type='direct',
                                         durable=options['durable'],
                                         auto_delete=options['auto_delete'])
        super(DirectConsumer, self).__init__(channel,
                                             callback,
                                             tag,
                                             name=msg_id,
                                             exchange=exchange,
                                             routing_key=msg_id,
                                             **options)


class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'."""

    def __init__(self, conf, channel, topic, callback, tag, name=None,
                 exchange_name=None, **kwargs):
        """Init a 'topic' queue.

        :param channel: the amqp channel to use
        :param topic: the topic to listen on
        :paramtype topic: str
        :param callback: the callback to call when messages are received
        :param tag: a unique ID for the consumer on the channel
        :param name: optional queue name, defaults to topic
        :paramtype name: str

        Other kombu options may be passed as keyword arguments
        """
        # Default options
        options = {'durable': conf.rabbit_durable_queues,
                   'queue_arguments': _get_queue_arguments(conf),
                   'auto_delete': False,
                   'exclusive': False}
        options.update(kwargs)
        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
        exchange = kombu.entity.Exchange(name=exchange_name,
                                         type='topic',
                                         durable=options['durable'],
                                         auto_delete=options['auto_delete'])
        super(TopicConsumer, self).__init__(channel,
                                            callback,
                                            tag,
                                            name=name or topic,
                                            exchange=exchange,
                                            routing_key=topic,
                                            **options)


class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'."""

    def __init__(self, conf, channel, topic, callback, tag, **kwargs):
        """Init a 'fanout' queue.

        'channel' is the amqp channel to use
        'topic' is the topic to listen on
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        Other kombu options may be passed
        """
        unique = uuid.uuid4().hex
        exchange_name = '%s_fanout' % topic
        queue_name = '%s_fanout_%s' % (topic, unique)

        # Default options
        options = {'durable': False,
                   'queue_arguments': _get_queue_arguments(conf),
                   'auto_delete': True,
                   'exclusive': False}
        options.update(kwargs)
        exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
                                         durable=options['durable'],
                                         auto_delete=options['auto_delete'])
        super(FanoutConsumer, self).__init__(channel, callback, tag,
                                             name=queue_name,
                                             exchange=exchange,
                                             routing_key=topic,
                                             **options)


class Publisher(object):
    """Base Publisher class."""

    def __init__(self, channel, exchange_name, routing_key, **kwargs):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.exchange_name = exchange_name
        self.routing_key = routing_key
        self.kwargs = kwargs
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-establish the Producer after a rabbit reconnection."""
        self.exchange = kombu.entity.Exchange(name=self.exchange_name,
                                              **self.kwargs)
        self.producer = kombu.messaging.Producer(exchange=self.exchange,
                                                 channel=channel,
                                                 routing_key=self.routing_key)

    def send(self, msg, timeout=None):
        """Send a message."""
        if timeout:
            #
            # AMQP TTL is in milliseconds when set in the header.
            #
            self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
        else:
            self.producer.publish(msg)
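
# --- Editor's illustrative sketch; not part of the reverted file. ---
# Publisher.send() above converts the rpc timeout (seconds) into an AMQP
# per-message TTL header, which AMQP expresses in milliseconds.
# ttl_headers() is a hypothetical helper showing just that conversion.
def ttl_headers(timeout_seconds):
    return {'ttl': timeout_seconds * 1000} if timeout_seconds else {}


assert ttl_headers(30) == {'ttl': 30000}
assert ttl_headers(None) == {}
# --- End editor's sketch. ---
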
class DirectPublisher(Publisher):
    """Publisher class for 'direct'."""
    def __init__(self, conf, channel, msg_id, **kwargs):
        """init a 'direct' publisher.

        Kombu options may be passed as keyword args to override defaults
        """

        options = {'durable': False,
                   'auto_delete': True,
                   'exclusive': False}
        options.update(kwargs)
        super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
                                              type='direct', **options)


class TopicPublisher(Publisher):
    """Publisher class for 'topic'."""
    def __init__(self, conf, channel, topic, **kwargs):
        """init a 'topic' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        options = {'durable': conf.rabbit_durable_queues,
                   'auto_delete': False,
                   'exclusive': False}
        options.update(kwargs)
        exchange_name = rpc_amqp.get_control_exchange(conf)
        super(TopicPublisher, self).__init__(channel,
                                             exchange_name,
                                             topic,
                                             type='topic',
                                             **options)


class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'."""
    def __init__(self, conf, channel, topic, **kwargs):
        """init a 'fanout' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        options = {'durable': False,
                   'auto_delete': True,
                   'exclusive': False}
        options.update(kwargs)
        super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
                                              None, type='fanout', **options)


class NotifyPublisher(TopicPublisher):
    """Publisher class for 'notify'."""

    def __init__(self, conf, channel, topic, **kwargs):
        self.durable = kwargs.pop('durable', conf.rabbit_durable_queues)
        self.queue_arguments = _get_queue_arguments(conf)
        super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)

    def reconnect(self, channel):
        super(NotifyPublisher, self).reconnect(channel)

        # NOTE(jerdfelt): Normally the consumer would create the queue, but
        # we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        queue = kombu.entity.Queue(channel=channel,
                                   exchange=self.exchange,
                                   durable=self.durable,
                                   name=self.routing_key,
                                   routing_key=self.routing_key,
                                   queue_arguments=self.queue_arguments)
        queue.declare()


class Connection(object):
    """Connection object."""

    pool = None

    def __init__(self, conf, server_params=None):
        self.consumers = []
        self.consumer_thread = None
        self.proxy_callbacks = []
        self.conf = conf
        self.max_retries = self.conf.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None
        self.interval_start = self.conf.rabbit_retry_interval
        self.interval_stepping = self.conf.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30
        self.memory_transport = False

        if server_params is None:
            server_params = {}
        # Keys to translate from server_params to kombu params
        server_params_to_kombu_params = {'username': 'userid'}

        ssl_params = self._fetch_ssl_params()
        params_list = []
        for adr in self.conf.rabbit_hosts:
            hostname, port = network_utils.parse_host_port(
                adr, default_port=self.conf.rabbit_port)

            params = {
                'hostname': hostname,
                'port': port,
                'userid': self.conf.rabbit_userid,
                'password': self.conf.rabbit_password,
                'virtual_host': self.conf.rabbit_virtual_host,
            }

            for sp_key, value in server_params.iteritems():
                p_key = server_params_to_kombu_params.get(sp_key, sp_key)
                params[p_key] = value

            if self.conf.fake_rabbit:
                params['transport'] = 'memory'
            if self.conf.rabbit_use_ssl:
                params['ssl'] = ssl_params

            params_list.append(params)

        self.params_list = params_list

        self.memory_transport = self.conf.fake_rabbit

        self.connection = None
        self.reconnect()

    def _fetch_ssl_params(self):
        """Handles fetching what ssl params should be used for the connection
        (if any).
        """
        ssl_params = dict()

        # http://docs.python.org/library/ssl.html - ssl.wrap_socket
        if self.conf.kombu_ssl_version:
            ssl_params['ssl_version'] = self.conf.kombu_ssl_version
        if self.conf.kombu_ssl_keyfile:
            ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
        if self.conf.kombu_ssl_certfile:
            ssl_params['certfile'] = self.conf.kombu_ssl_certfile
        if self.conf.kombu_ssl_ca_certs:
            ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
            # We might want to allow variations in the
            # future with this?
            ssl_params['cert_reqs'] = ssl.CERT_REQUIRED

        if not ssl_params:
            # Just have the default behavior
            return True
        else:
            # Return the extended behavior
            return ssl_params

    def _connect(self, params):
        """Connect to rabbit.  Re-establish any queues that may have
        been declared before if we are reconnecting.  Exceptions should
        be handled by the caller.
        """
        if self.connection:
            LOG.info(_("Reconnecting to AMQP server on "
                     "%(hostname)s:%(port)d") % params)
            try:
                self.connection.release()
            except self.connection_errors:
                pass
            # Setting this in case the next statement fails, though
            # it shouldn't be doing any network operations, yet.
            self.connection = None
        self.connection = kombu.connection.BrokerConnection(**params)
        self.connection_errors = self.connection.connection_errors
        if self.memory_transport:
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
        self.consumer_num = itertools.count(1)
        self.connection.connect()
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        for consumer in self.consumers:
            consumer.reconnect(self.channel)
        LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
                 params)

    def reconnect(self):
        """Handles reconnecting and re-establishing queues.
        Will retry up to self.max_retries number of times.
        self.max_retries = 0 means to retry forever.
        Sleep between tries, starting at self.interval_start
        seconds, backing off self.interval_stepping number of seconds
        each attempt.
        """

        attempt = 0
        while True:
            params = self.params_list[attempt % len(self.params_list)]
            attempt += 1
            try:
                self._connect(params)
                return
            except (IOError, self.connection_errors) as e:
                pass
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise

            log_info = {}
            log_info['err_str'] = str(e)
            log_info['max_retries'] = self.max_retries
            log_info.update(params)

            if self.max_retries and attempt == self.max_retries:
                LOG.error(_('Unable to connect to AMQP server on '
                            '%(hostname)s:%(port)d after %(max_retries)d '
                            'tries: %(err_str)s') % log_info)
                # NOTE(comstud): Copied from original code.  There's
                # really no better recourse because if this was a queue we
                # need to consume on, we have no way to consume anymore.
                sys.exit(1)

            if attempt == 1:
                sleep_time = self.interval_start or 1
            elif attempt > 1:
                sleep_time += self.interval_stepping
            if self.interval_max:
                sleep_time = min(sleep_time, self.interval_max)

            log_info['sleep_time'] = sleep_time
            LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
                        'unreachable: %(err_str)s. Trying again in '
                        '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)

    def ensure(self, error_callback, method, *args, **kwargs):
        while True:
            try:
                return method(*args, **kwargs)
            except (self.connection_errors, socket.timeout, IOError) as e:
                if error_callback:
                    error_callback(e)
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise
                if error_callback:
                    error_callback(e)
            self.reconnect()

    def get_channel(self):
        """Convenience call for bin/clear_rabbit_queues."""
        return self.channel

    def close(self):
        """Close/release this connection."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.connection.release()
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.channel.close()
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        self.consumers = []

    def declare_consumer(self, consumer_cls, topic, callback):
        """Create a Consumer using the class that was passed in and
        add it to our list of consumers
        """

        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
                      "%(err_str)s") % log_info)

        def _declare_consumer():
            consumer = consumer_cls(self.conf, self.channel, topic, callback,
                                    self.consumer_num.next())
            self.consumers.append(consumer)
            return consumer

        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers."""

        info = {'do_consume': True}

        def _error_callback(exc):
            if isinstance(exc, socket.timeout):
                LOG.debug(_('Timed out waiting for RPC response: %s') %
                          str(exc))
                raise rpc_common.Timeout()
            else:
                LOG.exception(_('Failed to consume message from queue: %s') %
                              str(exc))
                info['do_consume'] = True

        def _consume():
            if info['do_consume']:
                queues_head = self.consumers[:-1]  # not fanout.
                queues_tail = self.consumers[-1]  # fanout
                for queue in queues_head:
                    queue.consume(nowait=True)
                queues_tail.consume(nowait=False)
                info['do_consume'] = False
            return self.connection.drain_events(timeout=timeout)

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                raise StopIteration
            yield self.ensure(_error_callback, _consume)

    def cancel_consumer_thread(self):
        """Cancel a consumer thread."""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
                self.consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self.consumer_thread = None

    def wait_on_proxy_callbacks(self):
        """Wait for all proxy callback threads to exit."""
        for proxy_cb in self.proxy_callbacks:
            proxy_cb.wait()

    def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
        """Send to a publisher based on the publisher class."""

        def _error_callback(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.exception(_("Failed to publish message to topic "
                          "'%(topic)s': %(err_str)s") % log_info)

        def _publish():
            publisher = cls(self.conf, self.channel, topic, **kwargs)
            publisher.send(msg, timeout)

        self.ensure(_error_callback, _publish)

    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
                               exchange_name=None, ack_on_error=True):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ack_on_error=ack_on_error,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg, timeout=None):
        """Send a 'topic' message."""
        self.publisher_send(TopicPublisher, topic, msg, timeout)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)

    def consume(self, limit=None):
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit)
        while True:
            try:
                it.next()
            except StopIteration:
                return

    def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread."""
        @excutils.forever_retry_uncaught_exceptions
        def _consumer_thread():
            try:
                self.consume()
            except greenlet.GreenletExit:
                return
        if self.consumer_thread is None:
            self.consumer_thread = eventlet.spawn(_consumer_thread)
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        if fanout:
            self.declare_fanout_consumer(topic, proxy_cb)
        else:
            self.declare_topic_consumer(topic, proxy_cb)

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)
        self.declare_topic_consumer(topic, proxy_cb, pool_name)

    def join_consumer_pool(self, callback, pool_name, topic,
                           exchange_name=None, ack_on_error=True):
        """Register as a member of a group of consumers for a given topic from
        the specified exchange.

        Exactly one member of a given pool will receive each message.

        A message will be delivered to multiple pools, if more than
        one is created.
        """
        callback_wrapper = rpc_amqp.CallbackWrapper(
            conf=self.conf,
            callback=callback,
            connection_pool=rpc_amqp.get_connection_pool(self.conf,
                                                         Connection),
        )
        self.proxy_callbacks.append(callback_wrapper)
        self.declare_topic_consumer(
            queue_name=pool_name,
            topic=topic,
            exchange_name=exchange_name,
            callback=callback_wrapper,
            ack_on_error=ack_on_error,
        )


def create_connection(conf, new=True):
    """Create a connection."""
    return rpc_amqp.create_connection(
        conf, new,
        rpc_amqp.get_connection_pool(conf, Connection))


def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    return rpc_amqp.multicall(
        conf, context, topic, msg, timeout,
        rpc_amqp.get_connection_pool(conf, Connection))


def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    return rpc_amqp.call(
        conf, context, topic, msg, timeout,
        rpc_amqp.get_connection_pool(conf, Connection))


def cast(conf, context, topic, msg):
    """Sends a message on a topic without waiting for a response."""
    return rpc_amqp.cast(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def fanout_cast(conf, context, topic, msg):
    """Sends a message on a fanout exchange without waiting for a response."""
    return rpc_amqp.fanout_cast(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a topic to a specific server."""
    return rpc_amqp.cast_to_server(
        conf, context, server_params, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def fanout_cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a fanout exchange to a specific server."""
    return rpc_amqp.fanout_cast_to_server(
        conf, context, server_params, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def notify(conf, context, topic, msg, envelope):
    """Sends a notification event on a topic."""
    return rpc_amqp.notify(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection),
        envelope)


def cleanup():
    return rpc_amqp.cleanup(Connection.pool)
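
# --- Editor's illustrative sketch; not part of the reverted file. ---
# Connection.reconnect() above sleeps rabbit_retry_interval seconds after
# the first failed attempt, adds rabbit_retry_backoff for each further
# attempt, and caps the sleep at interval_max (30s), while cycling through
# rabbit_hosts via params_list[attempt % len(params_list)]. The hypothetical
# helper below reproduces just the sleep-time sequence:
def backoff_schedule(attempts, start=1, step=2, cap=30):
    sleep_time = 0
    for attempt in range(1, attempts + 1):
        if attempt == 1:
            sleep_time = start or 1
        else:
            sleep_time += step
        sleep_time = min(sleep_time, cap)
        yield sleep_time


assert list(backoff_schedule(8)) == [1, 3, 5, 7, 9, 11, 13, 15]
# --- End editor's sketch. ---
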
@ -1,729 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import itertools
import time
import uuid

import eventlet
import greenlet
from oslo.config import cfg

from openstack.common import excutils
from openstack.common.gettextutils import _
from openstack.common import importutils
from openstack.common import jsonutils
from openstack.common import log as logging
from openstack.common.rpc import amqp as rpc_amqp
from openstack.common.rpc import common as rpc_common

qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")

LOG = logging.getLogger(__name__)

qpid_opts = [
    cfg.StrOpt('qpid_hostname',
               default='localhost',
               help='Qpid broker hostname'),
    cfg.IntOpt('qpid_port',
               default=5672,
               help='Qpid broker port'),
    cfg.ListOpt('qpid_hosts',
                default=['$qpid_hostname:$qpid_port'],
                help='Qpid HA cluster host:port pairs'),
    cfg.StrOpt('qpid_username',
               default='',
               help='Username for qpid connection'),
    cfg.StrOpt('qpid_password',
               default='',
               help='Password for qpid connection',
               secret=True),
    cfg.StrOpt('qpid_sasl_mechanisms',
               default='',
               help='Space separated list of SASL mechanisms to use for auth'),
    cfg.IntOpt('qpid_heartbeat',
               default=60,
               help='Seconds between connection keepalive heartbeats'),
    cfg.StrOpt('qpid_protocol',
               default='tcp',
               help="Transport to use, either 'tcp' or 'ssl'"),
    cfg.BoolOpt('qpid_tcp_nodelay',
                default=True,
                help='Disable Nagle algorithm'),
]

cfg.CONF.register_opts(qpid_opts)

JSON_CONTENT_TYPE = 'application/json; charset=utf8'


class ConsumerBase(object):
    """Consumer base class."""

    def __init__(self, session, callback, node_name, node_opts,
                 link_name, link_opts):
        """Declare a queue on an amqp session.

        'session' is the amqp session to use
        'callback' is the callback to call when messages are received
        'node_name' is the first part of the Qpid address string, before ';'
        'node_opts' will be applied to the "x-declare" section of "node"
                    in the address string.
        'link_name' goes into the "name" field of the "link" in the address
                    string
        'link_opts' will be applied to the "x-declare" section of "link"
                    in the address string.
        """
        self.callback = callback
        self.receiver = None
        self.session = None

        addr_opts = {
            "create": "always",
            "node": {
                "type": "topic",
                "x-declare": {
                    "durable": True,
                    "auto-delete": True,
                },
            },
            "link": {
                "name": link_name,
                "durable": True,
                "x-declare": {
                    "durable": False,
                    "auto-delete": True,
                    "exclusive": False,
                },
            },
        }
        addr_opts["node"]["x-declare"].update(node_opts)
        addr_opts["link"]["x-declare"].update(link_opts)

        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))

        self.connect(session)

    def connect(self, session):
"""Declare the reciever on connect."""
        self._declare_receiver(session)

    def reconnect(self, session):
        """Re-declare the receiver after a qpid reconnect."""
        self._declare_receiver(session)

    def _declare_receiver(self, session):
        self.session = session
        self.receiver = session.receiver(self.address)
        self.receiver.capacity = 1

    def _unpack_json_msg(self, msg):
        """Load the JSON data in msg if msg.content_type indicates that it
        is necessary.  Put the loaded data back into msg.content and
        update msg.content_type appropriately.

        A Qpid Message containing a dict will have a content_type of
        'amqp/map', whereas one containing a string that needs to be converted
        back from JSON will have a content_type of JSON_CONTENT_TYPE.

        :param msg: a Qpid Message object
        :returns: None
        """
        if msg.content_type == JSON_CONTENT_TYPE:
            msg.content = jsonutils.loads(msg.content)
            msg.content_type = 'amqp/map'

    def consume(self):
        """Fetch the message and pass it to the callback object."""
        message = self.receiver.fetch()
        try:
            self._unpack_json_msg(message)
            msg = rpc_common.deserialize_msg(message.content)
            self.callback(msg)
        except Exception:
            LOG.exception(_("Failed to process message... skipping it."))
        finally:
            # TODO(sandy): Need support for optional ack_on_error.
            self.session.acknowledge(message)

    def get_receiver(self):
        return self.receiver

    def get_node_name(self):
        return self.address.split(';')[0]
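
# --- Editor's illustrative sketch; not part of the reverted file. ---
# ConsumerBase above builds a Qpid address string: a node name, a ';', and a
# JSON blob of node/link declare options. The same shape, with the stdlib
# json standing in for jsonutils (qpid_address is a hypothetical name):
import json


def qpid_address(node_name, node_opts, link_name, link_opts):
    addr_opts = {
        "create": "always",
        "node": {"type": "topic",
                 "x-declare": {"durable": True, "auto-delete": True}},
        "link": {"name": link_name, "durable": True,
                 "x-declare": {"durable": False, "auto-delete": True,
                               "exclusive": False}},
    }
    addr_opts["node"]["x-declare"].update(node_opts)
    addr_opts["link"]["x-declare"].update(link_opts)
    return "%s ; %s" % (node_name, json.dumps(addr_opts))


# e.g. the address DirectConsumer below produces for msg_id 'abc':
# qpid_address("abc/abc", {"type": "direct"}, "abc", {"exclusive": True})
# --- End editor's sketch. ---
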
class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'."""

    def __init__(self, conf, session, msg_id, callback):
        """Init a 'direct' queue.

        'session' is the amqp session to use
        'msg_id' is the msg_id to listen on
        'callback' is the callback to call when messages are received
        """

        super(DirectConsumer, self).__init__(session, callback,
                                             "%s/%s" % (msg_id, msg_id),
                                             {"type": "direct"},
                                             msg_id,
                                             {"exclusive": True})


class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'."""

    def __init__(self, conf, session, topic, callback, name=None,
                 exchange_name=None):
        """Init a 'topic' queue.

        :param session: the amqp session to use
        :param topic: is the topic to listen on
        :paramtype topic: str
        :param callback: the callback to call when messages are received
        :param name: optional queue name, defaults to topic
        """

        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
        super(TopicConsumer, self).__init__(session, callback,
                                            "%s/%s" % (exchange_name, topic),
                                            {}, name or topic, {})


class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'."""

    def __init__(self, conf, session, topic, callback):
        """Init a 'fanout' queue.

        'session' is the amqp session to use
        'topic' is the topic to listen on
        'callback' is the callback to call when messages are received
        """
        self.conf = conf

        super(FanoutConsumer, self).__init__(
            session, callback,
            "%s_fanout" % topic,
            {"durable": False, "type": "fanout"},
            "%s_fanout_%s" % (topic, uuid.uuid4().hex),
            {"exclusive": True})

    def reconnect(self, session):
        topic = self.get_node_name()
        params = {
            'session': session,
            'topic': topic,
            'callback': self.callback,
        }

        self.__init__(conf=self.conf, **params)

        super(FanoutConsumer, self).reconnect(session)


class Publisher(object):
    """Base Publisher class."""

    def __init__(self, session, node_name, node_opts=None):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.sender = None
        self.session = session

        addr_opts = {
            "create": "always",
            "node": {
                "type": "topic",
                "x-declare": {
                    "durable": False,
                    # auto-delete isn't implemented for exchanges in qpid,
                    # but put in here anyway
                    "auto-delete": True,
                },
            },
        }
        if node_opts:
            addr_opts["node"]["x-declare"].update(node_opts)

        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))

        self.reconnect(session)

    def reconnect(self, session):
        """Re-establish the Sender after a reconnection."""
        self.sender = session.sender(self.address)

    def _pack_json_msg(self, msg):
        """Qpid cannot serialize dicts containing strings longer than 65535
        characters.  This function dumps the message content to a JSON
        string, which Qpid is able to handle.

        :param msg: May be either a Qpid Message object or a bare dict.
        :returns: A Qpid Message with its content field JSON encoded.
        """
        try:
            msg.content = jsonutils.dumps(msg.content)
        except AttributeError:
            # Need to have a Qpid message so we can set the content_type.
            msg = qpid_messaging.Message(jsonutils.dumps(msg))
        msg.content_type = JSON_CONTENT_TYPE
        return msg

    def send(self, msg):
        """Send a message."""
        try:
            # Check if Qpid can encode the message
            check_msg = msg
            if not hasattr(check_msg, 'content_type'):
                check_msg = qpid_messaging.Message(msg)
            content_type = check_msg.content_type
            enc, dec = qpid_messaging.message.get_codec(content_type)
            enc(check_msg.content)
        except qpid_codec.CodecException:
            # This means the message couldn't be serialized as a dict.
            msg = self._pack_json_msg(msg)
        self.sender.send(msg)


class DirectPublisher(Publisher):
    """Publisher class for 'direct'."""
    def __init__(self, conf, session, msg_id):
        """Init a 'direct' publisher."""
        super(DirectPublisher, self).__init__(session, msg_id,
                                              {"type": "Direct"})


class TopicPublisher(Publisher):
    """Publisher class for 'topic'."""
    def __init__(self, conf, session, topic):
        """init a 'topic' publisher.
        """
        exchange_name = rpc_amqp.get_control_exchange(conf)
        super(TopicPublisher, self).__init__(session,
                                             "%s/%s" % (exchange_name, topic))


class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'."""
    def __init__(self, conf, session, topic):
        """init a 'fanout' publisher.
        """
        super(FanoutPublisher, self).__init__(
            session,
            "%s_fanout" % topic, {"type": "fanout"})


class NotifyPublisher(Publisher):
    """Publisher class for notifications."""
    def __init__(self, conf, session, topic):
        """init a 'topic' publisher.
        """
        exchange_name = rpc_amqp.get_control_exchange(conf)
        super(NotifyPublisher, self).__init__(session,
                                              "%s/%s" % (exchange_name, topic),
                                              {"durable": True})


class Connection(object):
    """Connection object."""

    pool = None

    def __init__(self, conf, server_params=None):
        if not qpid_messaging:
            raise ImportError("Failed to import qpid.messaging")

        self.session = None
        self.consumers = {}
        self.consumer_thread = None
        self.proxy_callbacks = []
        self.conf = conf

        if server_params and 'hostname' in server_params:
            # NOTE(russellb) This enables support for cast_to_server.
            server_params['qpid_hosts'] = [
                '%s:%d' % (server_params['hostname'],
                           server_params.get('port', 5672))
            ]

        params = {
            'qpid_hosts': self.conf.qpid_hosts,
            'username': self.conf.qpid_username,
            'password': self.conf.qpid_password,
        }
        params.update(server_params or {})

        self.brokers = params['qpid_hosts']
        self.username = params['username']
        self.password = params['password']
        self.connection_create(self.brokers[0])
        self.reconnect()

    def connection_create(self, broker):
        # Create the connection - this does not open the connection
        self.connection = qpid_messaging.Connection(broker)

        # Check if flags are set and if so set them for the connection
        # before we call open
        self.connection.username = self.username
        self.connection.password = self.password

        self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
        # Reconnection is done by self.reconnect()
        self.connection.reconnect = False
        self.connection.heartbeat = self.conf.qpid_heartbeat
        self.connection.transport = self.conf.qpid_protocol
        self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay

    def _register_consumer(self, consumer):
        self.consumers[str(consumer.get_receiver())] = consumer

    def _lookup_consumer(self, receiver):
        return self.consumers[str(receiver)]

    def reconnect(self):
        """Handles reconnecting and re-establishing sessions and queues."""
        attempt = 0
        delay = 1
        while True:
            # Close the session if necessary
            if self.connection.opened():
                try:
                    self.connection.close()
                except qpid_exceptions.ConnectionError:
                    pass

            broker = self.brokers[attempt % len(self.brokers)]
            attempt += 1

            try:
                self.connection_create(broker)
                self.connection.open()
            except qpid_exceptions.ConnectionError as e:
                msg_dict = dict(e=e, delay=delay)
                msg = _("Unable to connect to AMQP server: %(e)s. "
                        "Sleeping %(delay)s seconds") % msg_dict
                LOG.error(msg)
                time.sleep(delay)
                delay = min(2 * delay, 60)
            else:
                LOG.info(_('Connected to AMQP server on %s'), broker)
                break

        self.session = self.connection.session()

        if self.consumers:
            consumers = self.consumers
            self.consumers = {}

            for consumer in consumers.itervalues():
                consumer.reconnect(self.session)
                self._register_consumer(consumer)

            LOG.debug(_("Re-established AMQP queues"))

    def ensure(self, error_callback, method, *args, **kwargs):
        while True:
            try:
                return method(*args, **kwargs)
            except (qpid_exceptions.Empty,
                    qpid_exceptions.ConnectionError) as e:
                if error_callback:
                    error_callback(e)
                self.reconnect()

    def close(self):
        """Close/release this connection."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        try:
            self.connection.close()
        except Exception:
            # NOTE(dripton) Logging exceptions that happen during cleanup just
            # causes confusion; there's really nothing useful we can do with
            # them.
            pass
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.session.close()
        self.session = self.connection.session()
        self.consumers = {}

    def declare_consumer(self, consumer_cls, topic, callback):
        """Create a Consumer using the class that was passed in and
        add it to our list of consumers
        """
        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
                      "%(err_str)s") % log_info)

        def _declare_consumer():
            consumer = consumer_cls(self.conf, self.session, topic, callback)
            self._register_consumer(consumer)
            return consumer

        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers."""

        def _error_callback(exc):
            if isinstance(exc, qpid_exceptions.Empty):
                LOG.debug(_('Timed out waiting for RPC response: %s') %
                          str(exc))
                raise rpc_common.Timeout()
            else:
                LOG.exception(_('Failed to consume message from queue: %s') %
                              str(exc))

        def _consume():
            nxt_receiver = self.session.next_receiver(timeout=timeout)
            try:
                self._lookup_consumer(nxt_receiver).consume()
            except Exception:
                LOG.exception(_("Error processing message.  Skipping it."))

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                raise StopIteration
            yield self.ensure(_error_callback, _consume)

    def cancel_consumer_thread(self):
        """Cancel a consumer thread."""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
                self.consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self.consumer_thread = None

    def wait_on_proxy_callbacks(self):
        """Wait for all proxy callback threads to exit."""
        for proxy_cb in self.proxy_callbacks:
            proxy_cb.wait()

    def publisher_send(self, cls, topic, msg):
        """Send to a publisher based on the publisher class."""

        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.exception(_("Failed to publish message to topic "
                          "'%(topic)s': %(err_str)s") % log_info)

        def _publisher_send():
            publisher = cls(self.conf, self.session, topic)
            publisher.send(msg)

        return self.ensure(_connect_error, _publisher_send)

    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
                               exchange_name=None):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg, timeout=None):
        """Send a 'topic' message."""
        #
        # We want to create a message with attributes, e.g. a TTL. We
        # don't really need to keep 'msg' in its JSON format any longer
        # so let's create an actual qpid message here and get some
        # value-add on the go.
        #
        # WARNING: Request timeout happens to be in the same units as
        # qpid's TTL (seconds). If this changes in the future, then this
        # will need to be altered accordingly.
        #
        qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
        self.publisher_send(TopicPublisher, topic, qpid_message)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic, msg)

    def consume(self, limit=None):
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit)
        while True:
            try:
                it.next()
            except StopIteration:
                return

    def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread."""
        @excutils.forever_retry_uncaught_exceptions
        def _consumer_thread():
            try:
                self.consume()
            except greenlet.GreenletExit:
                return
        if self.consumer_thread is None:
            self.consumer_thread = eventlet.spawn(_consumer_thread)
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        if fanout:
            consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
        else:
            consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)

        self._register_consumer(consumer)

        return consumer

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
                                 name=pool_name)

        self._register_consumer(consumer)

        return consumer

    def join_consumer_pool(self, callback, pool_name, topic,
                           exchange_name=None, ack_on_error=True):
        """Register as a member of a group of consumers for a given topic from
        the specified exchange.

        Exactly one member of a given pool will receive each message.

        A message will be delivered to multiple pools, if more than
        one is created.
        """
        callback_wrapper = rpc_amqp.CallbackWrapper(
            conf=self.conf,
            callback=callback,
            connection_pool=rpc_amqp.get_connection_pool(self.conf,
                                                         Connection),
        )
        self.proxy_callbacks.append(callback_wrapper)

        consumer = TopicConsumer(conf=self.conf,
                                 session=self.session,
                                 topic=topic,
                                 callback=callback_wrapper,
                                 name=pool_name,
                                 exchange_name=exchange_name)

        self._register_consumer(consumer)
        return consumer


def create_connection(conf, new=True):
    """Create a connection."""
    return rpc_amqp.create_connection(
        conf, new,
        rpc_amqp.get_connection_pool(conf, Connection))


def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    return rpc_amqp.multicall(
        conf, context, topic, msg, timeout,
        rpc_amqp.get_connection_pool(conf, Connection))


def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    return rpc_amqp.call(
        conf, context, topic, msg, timeout,
        rpc_amqp.get_connection_pool(conf, Connection))


def cast(conf, context, topic, msg):
    """Sends a message on a topic without waiting for a response."""
    return rpc_amqp.cast(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def fanout_cast(conf, context, topic, msg):
    """Sends a message on a fanout exchange without waiting for a response."""
    return rpc_amqp.fanout_cast(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a topic to a specific server."""
    return rpc_amqp.cast_to_server(
        conf, context, server_params, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def fanout_cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a fanout exchange to a specific server."""
    return rpc_amqp.fanout_cast_to_server(
        conf, context, server_params, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))


def notify(conf, context, topic, msg, envelope):
    """Sends a notification event on a topic."""
    return rpc_amqp.notify(conf, context, topic, msg,
                           rpc_amqp.get_connection_pool(conf, Connection),
                           envelope)


def cleanup():
    return rpc_amqp.cleanup(Connection.pool)
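
# --- Editor's illustrative sketch; not part of the reverted file. ---
# The qpid driver above works around Qpid's 65535-character limit on map
# values: oversized payloads are sent as JSON strings (content type
# 'application/json; charset=utf8') and _unpack_json_msg() decodes them back
# to an 'amqp/map' dict on receipt. The round trip in plain Python, with
# unpack() as a hypothetical stand-in:
import json

JSON_CONTENT_TYPE = 'application/json; charset=utf8'


def unpack(content, content_type):
    # Mirrors _unpack_json_msg(), returning the new (content, content_type).
    if content_type == JSON_CONTENT_TYPE:
        return json.loads(content), 'amqp/map'
    return content, content_type


assert unpack('{"method": "ping"}', JSON_CONTENT_TYPE) == \
    ({'method': 'ping'}, 'amqp/map')
# --- End editor's sketch. ---
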
@ -1,817 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import pprint
import re
import socket
import sys
import types
import uuid

import eventlet
import greenlet
from oslo.config import cfg

from openstack.common import excutils
from openstack.common.gettextutils import _
from openstack.common import importutils
from openstack.common import jsonutils
from openstack.common.rpc import common as rpc_common

zmq = importutils.try_import('eventlet.green.zmq')

# Module-level aliases, kept for convenience; they are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException

zmq_opts = [
    cfg.StrOpt('rpc_zmq_bind_address', default='*',
               help='ZeroMQ bind address. Should be a wildcard (*), '
                    'an ethernet interface, or IP. '
                    'The "host" option should point or resolve to this '
                    'address.'),

    # The module.Class to use for matchmaking.
    cfg.StrOpt(
        'rpc_zmq_matchmaker',
        default=('openstack.common.rpc.'
                 'matchmaker.MatchMakerLocalhost'),
        help='MatchMaker driver',
    ),

    # The following port is unassigned by IANA as of 2012-05-21
    cfg.IntOpt('rpc_zmq_port', default=9501,
               help='ZeroMQ receiver listening port'),

    cfg.IntOpt('rpc_zmq_contexts', default=1,
               help='Number of ZeroMQ contexts, defaults to 1'),

    cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
               help='Maximum number of ingress messages to locally buffer '
                    'per topic. Default is unlimited.'),

    cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
               help='Directory for holding IPC sockets'),

    cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
               help='Name of this node. Must be a valid hostname, FQDN, or '
                    'IP address. Must match "host" option, if running Nova.')
]


CONF = cfg.CONF
CONF.register_opts(zmq_opts)

ZMQ_CTX = None  # ZeroMQ Context, must be global.
matchmaker = None  # memoized matchmaker object


def _serialize(data):
    """Serialization wrapper.

    We prefer using JSON, but it cannot encode all types.
    Error if a developer passes us bad data.
    """
    try:
        return jsonutils.dumps(data, ensure_ascii=True)
    except TypeError:
        with excutils.save_and_reraise_exception():
            LOG.error(_("JSON serialization failed."))


def _deserialize(data):
    """Deserialization wrapper."""
    LOG.debug(_("Deserializing: %s"), data)
    return jsonutils.loads(data)
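

def _example_serialize_round_trip():
    # Illustrative sketch (added for clarity, not part of the original
    # module): the two wrappers above are plain JSON round-trips, so any
    # JSON-encodable payload survives unchanged.
    payload = {'method': 'ping', 'args': {'count': 1}}
    wire = _serialize(payload)
    assert _deserialize(wire) == payload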


class ZmqSocket(object):
    """A tiny wrapper around ZeroMQ.

    Simplifies the send/recv protocol and connection management.
    Can be used as a Context (supports the 'with' statement).
    """

    def __init__(self, addr, zmq_type, bind=True, subscribe=None):
        self.sock = _get_ctxt().socket(zmq_type)
        self.addr = addr
        self.type = zmq_type
        self.subscriptions = []

        # Support failures on sending/receiving on wrong socket type.
        self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
        self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
        self.can_sub = zmq_type in (zmq.SUB, )

        # Support list, str, & None for subscribe arg (cast to list)
        do_sub = {
            list: subscribe,
            str: [subscribe],
            type(None): []
        }[type(subscribe)]

        for f in do_sub:
            self.subscribe(f)

        str_data = {'addr': addr, 'type': self.socket_s(),
                    'subscribe': subscribe, 'bind': bind}

        LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
        LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
        LOG.debug(_("-> bind: %(bind)s"), str_data)

        try:
            if bind:
                self.sock.bind(addr)
            else:
                self.sock.connect(addr)
        except Exception:
            raise RPCException(_("Could not open socket."))

    def socket_s(self):
        """Get socket type as string."""
        t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
                  'DEALER')
        return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]

    def subscribe(self, msg_filter):
        """Subscribe."""
        if not self.can_sub:
            raise RPCException("Cannot subscribe on this socket.")
        LOG.debug(_("Subscribing to %s"), msg_filter)

        try:
            self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
        except Exception:
            return

        self.subscriptions.append(msg_filter)

    def unsubscribe(self, msg_filter):
        """Unsubscribe."""
        if msg_filter not in self.subscriptions:
            return
        self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
        self.subscriptions.remove(msg_filter)

    def close(self):
        if self.sock is None or self.sock.closed:
            return

        # We must unsubscribe, or we'll leak descriptors.
        if self.subscriptions:
            for f in self.subscriptions:
                try:
                    self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
                except Exception:
                    pass
            self.subscriptions = []

        try:
            # Default is to linger
            self.sock.close()
        except Exception:
            # While this is a bad thing to happen,
            # it would be much worse if some of the code calling this
            # were to fail. For now, lets log, and later evaluate
            # if we can safely raise here.
            LOG.error("ZeroMQ socket could not be closed.")
        self.sock = None

    def recv(self, **kwargs):
        if not self.can_recv:
            raise RPCException(_("You cannot recv on this socket."))
        return self.sock.recv_multipart(**kwargs)

    def send(self, data, **kwargs):
        if not self.can_send:
            raise RPCException(_("You cannot send on this socket."))
        self.sock.send_multipart(data, **kwargs)
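

def _example_zmq_socket_pair(addr='ipc:///tmp/zmq_example'):
    # Hypothetical usage sketch (not part of the original module; the
    # address is made up): a PULL socket binds, a PUSH socket connects,
    # and send()/recv() exchange multipart frames.
    puller = ZmqSocket(addr, zmq.PULL, bind=True)
    pusher = ZmqSocket(addr, zmq.PUSH, bind=False)
    try:
        pusher.send(['topic', 'payload'])
        return puller.recv()  # ['topic', 'payload']
    finally:
        pusher.close()
        puller.close()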


class ZmqClient(object):
    """Client for ZMQ sockets."""

    def __init__(self, addr):
        self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)

    def cast(self, msg_id, topic, data, envelope):
        msg_id = msg_id or 0

        if not envelope:
            self.outq.send(map(bytes,
                           (msg_id, topic, 'cast', _serialize(data))))
            return

        rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
        zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
        self.outq.send(map(bytes,
                       (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))

    def close(self):
        self.outq.close()


class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call."""
    def __init__(self, **kwargs):
        self.replies = []
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        values = self.to_dict()
        values['replies'] = self.replies
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False):
        if ending:
            return
        self.replies.append(reply)

    @classmethod
    def marshal(self, ctx):
        ctx_data = ctx.to_dict()
        return _serialize(ctx_data)

    @classmethod
    def unmarshal(self, data):
        return RpcContext.from_dict(_deserialize(data))


class InternalContext(object):
    """Used by ConsumerBase as a private context for '-' methods."""

    def __init__(self, proxy):
        self.proxy = proxy
        self.msg_waiter = None

    def _get_response(self, ctx, proxy, topic, data):
        """Process a curried message and cast the result to topic."""
        LOG.debug(_("Running func with context: %s"), ctx.to_dict())
        data.setdefault('version', None)
        data.setdefault('args', {})

        try:
            result = proxy.dispatch(
                ctx, data['version'], data['method'],
                data.get('namespace'), **data['args'])
            return ConsumerBase.normalize_reply(result, ctx.replies)
        except greenlet.GreenletExit:
            # ignore these since they are just from shutdowns
            pass
        except rpc_common.ClientException as e:
            LOG.debug(_("Expected exception during message handling (%s)") %
                      e._exc_info[1])
            return {'exc':
                    rpc_common.serialize_remote_exception(e._exc_info,
                                                          log_failure=False)}
        except Exception:
            LOG.error(_("Exception during message handling"))
            return {'exc':
                    rpc_common.serialize_remote_exception(sys.exc_info())}

    def reply(self, ctx, proxy,
              msg_id=None, context=None, topic=None, msg=None):
        """Reply to a casted call."""
        # NOTE(ewindisch): context kwarg exists for Grizzly compat.
        #                  this may be able to be removed earlier than
        #                  'I' if ConsumerBase.process were refactored.
        if type(msg) is list:
            payload = msg[-1]
        else:
            payload = msg

        response = ConsumerBase.normalize_reply(
            self._get_response(ctx, proxy, topic, payload),
            ctx.replies)

        LOG.debug(_("Sending reply"))
        _multi_send(_cast, ctx, topic, {
            'method': '-process_reply',
            'args': {
                'msg_id': msg_id,  # Include for Folsom compat.
                'response': response
            }
        }, _msg_id=msg_id)


class ConsumerBase(object):
    """Base Consumer."""

    def __init__(self):
        self.private_ctx = InternalContext(None)

    @classmethod
    def normalize_reply(self, result, replies):
        #TODO(ewindisch): re-evaluate and document this method.
        if isinstance(result, types.GeneratorType):
            return list(result)
        elif replies:
            return replies
        else:
            return [result]

    def process(self, proxy, ctx, data):
        data.setdefault('version', None)
        data.setdefault('args', {})

        # Methods whose names start with '-' are processed internally
        # ('-' is not valid in a user-facing method name).
        method = data.get('method')
        if not method:
            LOG.error(_("RPC message did not include method."))
            return

        # Internal method
        # uses internal context for safety.
        if method == '-reply':
            self.private_ctx.reply(ctx, proxy, **data['args'])
            return

        proxy.dispatch(ctx, data['version'],
                       data['method'], data.get('namespace'), **data['args'])
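

def _example_normalize_reply():
    # Illustrative sketch, not part of the original module: the three
    # branches of ConsumerBase.normalize_reply above.
    gen = (i for i in (1, 2))
    assert ConsumerBase.normalize_reply(gen, []) == [1, 2]
    assert ConsumerBase.normalize_reply('ignored', ['a']) == ['a']
    assert ConsumerBase.normalize_reply('single', []) == ['single']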


class ZmqBaseReactor(ConsumerBase):
    """A consumer class implementing a centralized casting broker (PULL-PUSH).

    Used for RoundRobin requests.
    """

    def __init__(self, conf):
        super(ZmqBaseReactor, self).__init__()

        self.proxies = {}
        self.threads = []
        self.sockets = []
        self.subscribe = {}

        self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)

    def register(self, proxy, in_addr, zmq_type_in,
                 in_bind=True, subscribe=None):

        LOG.info(_("Registering reactor"))

        if zmq_type_in not in (zmq.PULL, zmq.SUB):
            raise RPCException("Bad input socktype")

        # Items push in.
        inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                        subscribe=subscribe)

        self.proxies[inq] = proxy
        self.sockets.append(inq)

        LOG.info(_("In reactor registered"))

    def consume_in_thread(self):
        def _consume(sock):
            LOG.info(_("Consuming socket"))
            while True:
                self.consume(sock)

        for k in self.proxies.keys():
            self.threads.append(
                self.pool.spawn(_consume, k)
            )

    def wait(self):
        for t in self.threads:
            t.wait()

    def close(self):
        for s in self.sockets:
            s.close()

        for t in self.threads:
            t.kill()


class ZmqProxy(ZmqBaseReactor):
    """A consumer class implementing a topic-based proxy.

    Forwards to IPC sockets.
    """

    def __init__(self, conf):
        super(ZmqProxy, self).__init__(conf)
        pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
        self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))

        self.topic_proxy = {}

    def consume(self, sock):
        ipc_dir = CONF.rpc_zmq_ipc_dir

        data = sock.recv(copy=False)
        topic = data[1].bytes

        if topic.startswith('fanout~'):
            sock_type = zmq.PUB
            topic = topic.split('.', 1)[0]
        elif topic.startswith('zmq_replies'):
            sock_type = zmq.PUB
        else:
            sock_type = zmq.PUSH

        if topic not in self.topic_proxy:
            def publisher(waiter):
                LOG.info(_("Creating proxy for topic: %s"), topic)

                try:
                    # The topic is received over the network,
                    # don't trust this input.
                    if self.badchars.search(topic) is not None:
                        emsg = _("Topic contained dangerous characters.")
                        LOG.warn(emsg)
                        raise RPCException(emsg)

                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type, bind=True)
                except RPCException:
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open,
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                waiter.send(True)

                while(True):
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data, copy=False)

            wait_sock_creation = eventlet.event.Event()
            eventlet.spawn(publisher, wait_sock_creation)

            try:
                wait_sock_creation.wait()
            except RPCException:
                LOG.error(_("Topic socket file creation failed."))
                return

        try:
            self.topic_proxy[topic].put_nowait(data)
        except eventlet.queue.Full:
            LOG.error(_("Local per-topic backlog buffer full for topic "
                        "%(topic)s. Dropping message.") % {'topic': topic})

    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Required IPC directory does not exist at"
                                " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Permission denied to IPC directory at"
                                " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()


def unflatten_envelope(packenv):
    """Unflattens the RPC envelope.

    Takes a list and returns a dictionary.
    i.e. [1,2,3,4] => {1: 2, 3: 4}
    """
    i = iter(packenv)
    h = {}
    try:
        while True:
            k = i.next()
            h[k] = i.next()
    except StopIteration:
        return h
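

def _example_unflatten_envelope():
    # Illustrative sketch, not part of the original module: the list to
    # dict round trip used by the impl_zmq_v2 wire format above.
    flat = ['oslo.version', '2.0', 'oslo.message', '{"method": "ping"}']
    assert unflatten_envelope(flat) == {'oslo.version': '2.0',
                                        'oslo.message': '{"method": "ping"}'}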


class ZmqReactor(ZmqBaseReactor):
    """A consumer class implementing a consumer for messages.

    Can also be used as a 1:1 proxy
    """

    def __init__(self, conf):
        super(ZmqReactor, self).__init__(conf)

    def consume(self, sock):
        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)

        proxy = self.proxies[sock]

        if data[2] == 'cast':  # Legacy protocol
            packenv = data[3]

            ctx, msg = _deserialize(packenv)
            request = rpc_common.deserialize_msg(msg)
            ctx = RpcContext.unmarshal(ctx)
        elif data[2] == 'impl_zmq_v2':
            packenv = data[4:]

            msg = unflatten_envelope(packenv)
            request = rpc_common.deserialize_msg(msg)

            # Unmarshal only after verifying the message.
            ctx = RpcContext.unmarshal(data[3])
        else:
            LOG.error(_("ZMQ Envelope version unsupported or unknown."))
            return

        self.pool.spawn_n(self.process, proxy, ctx, request)


class Connection(rpc_common.Connection):
    """Manages connections and threads."""

    def __init__(self, conf):
        self.topics = []
        self.reactor = ZmqReactor(conf)

    def create_consumer(self, topic, proxy, fanout=False):
        # Register with matchmaker.
        _get_matchmaker().register(topic, CONF.rpc_zmq_host)

        # Subscription scenarios
        if fanout:
            sock_type = zmq.SUB
            subscribe = ('', fanout)[type(fanout) == str]
            topic = 'fanout~' + topic.split('.', 1)[0]
        else:
            sock_type = zmq.PULL
            subscribe = None
            topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))

        if topic in self.topics:
            LOG.info(_("Skipping topic registration. Already registered."))
            return

        # Receive messages from (local) proxy
        inaddr = "ipc://%s/zmq_topic_%s" % \
            (CONF.rpc_zmq_ipc_dir, topic)

        LOG.debug(_("Consumer is a zmq.%s"),
                  ['PULL', 'SUB'][sock_type == zmq.SUB])

        self.reactor.register(proxy, inaddr, sock_type,
                              subscribe=subscribe, in_bind=False)
        self.topics.append(topic)

    def close(self):
        _get_matchmaker().stop_heartbeat()
        for topic in self.topics:
            _get_matchmaker().unregister(topic, CONF.rpc_zmq_host)

        self.reactor.close()
        self.topics = []

    def wait(self):
        self.reactor.wait()

    def consume_in_thread(self):
        _get_matchmaker().start_heartbeat()
        self.reactor.consume_in_thread()


def _cast(addr, context, topic, msg, timeout=None, envelope=False,
          _msg_id=None):
    timeout_cast = timeout or CONF.rpc_cast_timeout
    payload = [RpcContext.marshal(context), msg]

    with Timeout(timeout_cast, exception=rpc_common.Timeout):
        try:
            conn = ZmqClient(addr)

            # assumes cast can't return an exception
            conn.cast(_msg_id, topic, payload, envelope)
        except zmq.ZMQError:
            raise RPCException("Cast failed. ZMQ Socket Exception")
        finally:
            if 'conn' in vars():
                conn.close()


def _call(addr, context, topic, msg, timeout=None,
          envelope=False):
    # timeout_response is how long we wait for a response
    timeout = timeout or CONF.rpc_response_timeout

    # The msg_id is used to track replies.
    msg_id = uuid.uuid4().hex

    # Replies always come into the reply service.
    reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host

    LOG.debug(_("Creating payload"))
    # Curry the original request into a reply method.
    mcontext = RpcContext.marshal(context)
    payload = {
        'method': '-reply',
        'args': {
            'msg_id': msg_id,
            'topic': reply_topic,
            # TODO(ewindisch): safe to remove mcontext in I.
            'msg': [mcontext, msg]
        }
    }

    LOG.debug(_("Creating queue socket for reply waiter"))

    # Messages arriving async.
    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
    with Timeout(timeout, exception=rpc_common.Timeout):
        try:
            msg_waiter = ZmqSocket(
                "ipc://%s/zmq_topic_zmq_replies.%s" %
                (CONF.rpc_zmq_ipc_dir,
                 CONF.rpc_zmq_host),
                zmq.SUB, subscribe=msg_id, bind=False
            )

            LOG.debug(_("Sending cast"))
            _cast(addr, context, topic, payload, envelope)

            LOG.debug(_("Cast sent; Waiting reply"))
            # Blocks until receives reply
            msg = msg_waiter.recv()
            LOG.debug(_("Received message: %s"), msg)
            LOG.debug(_("Unpacking response"))

            if msg[2] == 'cast':  # Legacy version
                raw_msg = _deserialize(msg[-1])[-1]
            elif msg[2] == 'impl_zmq_v2':
                rpc_envelope = unflatten_envelope(msg[4:])
                raw_msg = rpc_common.deserialize_msg(rpc_envelope)
            else:
                raise rpc_common.UnsupportedRpcEnvelopeVersion(
                    _("Unsupported or unknown ZMQ envelope returned."))

            responses = raw_msg['args']['response']
        # ZMQError trumps the Timeout error.
        except zmq.ZMQError:
            raise RPCException("ZMQ Socket Error")
        except (IndexError, KeyError):
            raise RPCException(_("RPC Message Invalid."))
        finally:
            if 'msg_waiter' in vars():
                msg_waiter.close()

    # It seems we don't need to do all of the following,
    # but perhaps it would be useful for multicall?
    # One effect of this is that we're checking all
    # responses for Exceptions.
    for resp in responses:
        if isinstance(resp, types.DictType) and 'exc' in resp:
            raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])

    return responses[-1]


def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """Wraps the sending of messages.

    Dispatches to the matchmaker and sends message to all relevant hosts.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = _get_matchmaker().queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if not queues:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope,
                             _msg_id)
            return
        return method(_addr, context, _topic, msg, timeout,
                      envelope)
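

def _example_call_usage(context):
    # Illustrative sketch, not part of the original module (topic and
    # message contents are made up): a blocking call routed through
    # _multi_send/_call above, using the public call() defined below.
    msg = {'method': 'ping', 'args': {}, 'version': '1.0'}
    return call(CONF, context, 'compute.node1', msg, timeout=5)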


def create_connection(conf, new=True):
    return Connection(conf)


def multicall(conf, *args, **kwargs):
    """Multiple calls."""
    return _multi_send(_call, *args, **kwargs)


def call(conf, *args, **kwargs):
    """Send a message, expect a response."""
    data = _multi_send(_call, *args, **kwargs)
    return data[-1]


def cast(conf, *args, **kwargs):
    """Send a message expecting no reply."""
    _multi_send(_cast, *args, **kwargs)


def fanout_cast(conf, context, topic, msg, **kwargs):
    """Send a message to all listening and expect no reply."""
    # NOTE(ewindisch): fanout~ is used because it avoids splitting on .
    # and acts as a non-subtle hint to the matchmaker and ZmqProxy.
    _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)


def notify(conf, context, topic, msg, envelope):
    """Send notification event.

    Notifications are sent to topic-priority.
    This differs from the AMQP drivers which send to topic.priority.
    """
    # NOTE(ewindisch): dot-priority in rpc notifier does not
    # work with our assumptions.
    topic = topic.replace('.', '-')
    cast(conf, context, topic, msg, envelope=envelope)


def cleanup():
    """Clean up resources in use by implementation."""
    global ZMQ_CTX
    if ZMQ_CTX:
        ZMQ_CTX.term()
    ZMQ_CTX = None

    global matchmaker
    matchmaker = None


def _get_ctxt():
    if not zmq:
        raise ImportError("Failed to import eventlet.green.zmq")

    global ZMQ_CTX
    if not ZMQ_CTX:
        ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
    return ZMQ_CTX


def _get_matchmaker(*args, **kwargs):
    global matchmaker
    if not matchmaker:
        mm = CONF.rpc_zmq_matchmaker
        if mm.endswith('matchmaker.MatchMakerRing'):
            mm = mm.replace('matchmaker', 'matchmaker_ring')
            LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
                       ' %(new)s instead') % dict(
                     orig=CONF.rpc_zmq_matchmaker, new=mm))
        matchmaker = importutils.import_object(mm, *args, **kwargs)
    return matchmaker
@ -1,330 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""

import contextlib

import eventlet
from oslo.config import cfg

from openstack.common.gettextutils import _
from openstack.common import log as logging


matchmaker_opts = [
    cfg.IntOpt('matchmaker_heartbeat_freq',
               default=300,
               help='Heartbeat frequency'),
    cfg.IntOpt('matchmaker_heartbeat_ttl',
               default=600,
               help='Heartbeat time-to-live.'),
]

CONF = cfg.CONF
CONF.register_opts(matchmaker_opts)
LOG = logging.getLogger(__name__)
contextmanager = contextlib.contextmanager


class MatchMakerException(Exception):
    """Signifies a match could not be found."""
    message = _("Match not found by MatchMaker.")


class Exchange(object):
    """Implements lookups.

    Subclass this to support hashtables, dns, etc.
    """
    def __init__(self):
        pass

    def run(self, key):
        raise NotImplementedError()


class Binding(object):
    """A binding on which to perform a lookup."""
    def __init__(self):
        pass

    def test(self, key):
        raise NotImplementedError()


class MatchMakerBase(object):
    """Match Maker Base Class.

    Build off HeartbeatMatchMakerBase if building a heartbeat-capable
    MatchMaker.
    """
    def __init__(self):
        # Array of tuples. Index [2] toggles negation, [3] is last-if-true
        self.bindings = []

        self.no_heartbeat_msg = _('Matchmaker does not implement '
                                  'registration or heartbeat.')

    def register(self, key, host):
        """Register a host on a backend.

        Heartbeats, if applicable, may keepalive registration.
        """
        pass

    def ack_alive(self, key, host):
        """Acknowledge that a key.host is alive.

        Used internally for updating heartbeats, but may also be used
        publicly to acknowledge a system is alive (i.e. rpc message
        successfully sent to host)
        """
        pass

    def is_alive(self, topic, host):
        """Checks if a host is alive."""
        pass

    def expire(self, topic, host):
        """Explicitly expire a host's registration."""
        pass

    def send_heartbeats(self):
        """Send all heartbeats.

        Use start_heartbeat to spawn a heartbeat greenthread,
        which loops this method.
        """
        pass

    def unregister(self, key, host):
        """Unregister a topic."""
        pass

    def start_heartbeat(self):
        """Spawn heartbeat greenthread."""
        pass

    def stop_heartbeat(self):
        """Destroys the heartbeat greenthread."""
        pass

    def add_binding(self, binding, rule, last=True):
        self.bindings.append((binding, rule, False, last))

    #NOTE(ewindisch): kept the following method in case we implement the
    #                 underlying support.
    #def add_negate_binding(self, binding, rule, last=True):
    #    self.bindings.append((binding, rule, True, last))

    def queues(self, key):
        workers = []

        # bit is for negate bindings - if we choose to implement it.
        # last stops processing rules if this matches.
        for (binding, exchange, bit, last) in self.bindings:
            if binding.test(key):
                workers.extend(exchange.run(key))

                # Support last.
                if last:
                    return workers
        return workers
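

def _example_queues_lookup():
    # Illustrative sketch, not part of the original module: with the
    # MatchMakerLocalhost defined further below, a bare topic and a
    # dotted topic both resolve to a single "topic.host" queue.
    mm = MatchMakerLocalhost(host='node1')
    assert mm.queues('compute') == [('compute.node1', 'node1')]
    assert mm.queues('compute.node1') == [('compute.node1', 'node1')]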


class HeartbeatMatchMakerBase(MatchMakerBase):
    """Base for a heart-beat capable MatchMaker.

    Provides common methods for registering, unregistering, and maintaining
    heartbeats.
    """
    def __init__(self):
        self.hosts = set()
        self._heart = None
        self.host_topic = {}

        super(HeartbeatMatchMakerBase, self).__init__()

    def send_heartbeats(self):
        """Send all heartbeats.

        Use start_heartbeat to spawn a heartbeat greenthread,
        which loops this method.
        """
        for key, host in self.host_topic:
            self.ack_alive(key, host)

    def ack_alive(self, key, host):
        """Acknowledge that a host.topic is alive.

        Used internally for updating heartbeats, but may also be used
        publicly to acknowledge a system is alive (i.e. rpc message
        successfully sent to host)
        """
        raise NotImplementedError("Must implement ack_alive")

    def backend_register(self, key, host):
        """Implements registration logic.

        Called by register(self,key,host)
        """
        raise NotImplementedError("Must implement backend_register")

    def backend_unregister(self, key, key_host):
        """Implements de-registration logic.

        Called by unregister(self,key,host)
        """
        raise NotImplementedError("Must implement backend_unregister")

    def register(self, key, host):
        """Register a host on a backend.

        Heartbeats, if applicable, may keepalive registration.
        """
        self.hosts.add(host)
        self.host_topic[(key, host)] = host
        key_host = '.'.join((key, host))

        self.backend_register(key, key_host)

        self.ack_alive(key, host)

    def unregister(self, key, host):
        """Unregister a topic."""
        if (key, host) in self.host_topic:
            del self.host_topic[(key, host)]

        self.hosts.discard(host)
        self.backend_unregister(key, '.'.join((key, host)))

        LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
                 {'key': key, 'host': host})

    def start_heartbeat(self):
        """Implementation of MatchMakerBase.start_heartbeat.

        Launches greenthread looping send_heartbeats(),
        yielding for CONF.matchmaker_heartbeat_freq seconds
        between iterations.
        """
        if not self.hosts:
            raise MatchMakerException(
                _("Register before starting heartbeat."))

        def do_heartbeat():
            while True:
                self.send_heartbeats()
                eventlet.sleep(CONF.matchmaker_heartbeat_freq)

        self._heart = eventlet.spawn(do_heartbeat)

    def stop_heartbeat(self):
        """Destroys the heartbeat greenthread."""
        if self._heart:
            self._heart.kill()


class DirectBinding(Binding):
    """Specifies a host in the key via a '.' character.

    Although dots are used in the key, the behavior here is
    that it maps directly to a host, thus direct.
    """
    def test(self, key):
        if '.' in key:
            return True
        return False


class TopicBinding(Binding):
    """Matches a 'bare' key without dots.

    AMQP generally considers topic exchanges to be those *with* dots,
    but we deviate here in terminology as the behavior here matches
    that of a topic exchange (whereas where there are dots, behavior
    matches that of a direct exchange).
    """
    def test(self, key):
        if '.' not in key:
            return True
        return False


class FanoutBinding(Binding):
    """Match on fanout keys, where key starts with the 'fanout~' string."""
    def test(self, key):
        if key.startswith('fanout~'):
            return True
        return False


class StubExchange(Exchange):
    """Exchange that does nothing."""
    def run(self, key):
        return [(key, None)]


class LocalhostExchange(Exchange):
    """Exchange where all direct topics are local."""
    def __init__(self, host='localhost'):
        self.host = host
        super(Exchange, self).__init__()

    def run(self, key):
        return [('.'.join((key.split('.')[0], self.host)), self.host)]


class DirectExchange(Exchange):
    """Exchange where all topic keys are split, sending to second half.

    i.e. "compute.host" sends a message to "compute.host" running on "host"
    """
    def __init__(self):
        super(Exchange, self).__init__()

    def run(self, key):
        e = key.split('.', 1)[1]
        return [(key, e)]


class MatchMakerLocalhost(MatchMakerBase):
    """Match Maker where all bare topics resolve to localhost.

    Useful for testing.
    """
    def __init__(self, host='localhost'):
        super(MatchMakerLocalhost, self).__init__()
        self.add_binding(FanoutBinding(), LocalhostExchange(host))
        self.add_binding(DirectBinding(), DirectExchange())
        self.add_binding(TopicBinding(), LocalhostExchange(host))


class MatchMakerStub(MatchMakerBase):
    """Match Maker where topics are untouched.

    Useful for testing, or for AMQP/brokered queues.
    Will not work where knowledge of hosts is required (i.e. zeromq)
    """
    def __init__(self):
        super(MatchMakerStub, self).__init__()

        self.add_binding(FanoutBinding(), StubExchange())
        self.add_binding(DirectBinding(), StubExchange())
        self.add_binding(TopicBinding(), StubExchange())
@ -1,145 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""

from oslo.config import cfg

from openstack.common import importutils
from openstack.common import log as logging
from openstack.common.rpc import matchmaker as mm_common

redis = importutils.try_import('redis')


matchmaker_redis_opts = [
    cfg.StrOpt('host',
               default='127.0.0.1',
               help='Host to locate redis'),
    cfg.IntOpt('port',
               default=6379,
               help='Use this port to connect to redis host.'),
    cfg.StrOpt('password',
               default=None,
               help='Password for Redis server. (optional)'),
]

CONF = cfg.CONF
opt_group = cfg.OptGroup(name='matchmaker_redis',
                         title='Options for Redis-based MatchMaker')
CONF.register_group(opt_group)
CONF.register_opts(matchmaker_redis_opts, opt_group)
LOG = logging.getLogger(__name__)


class RedisExchange(mm_common.Exchange):
    def __init__(self, matchmaker):
        self.matchmaker = matchmaker
        self.redis = matchmaker.redis
        super(RedisExchange, self).__init__()


class RedisTopicExchange(RedisExchange):
    """Exchange where all topic keys are split, sending to second half.

    i.e. "compute.host" sends a message to "compute" running on "host"
    """
    def run(self, topic):
        while True:
            member_name = self.redis.srandmember(topic)

            if not member_name:
                # If this happens, there are no
                # longer any members.
                break

            if not self.matchmaker.is_alive(topic, member_name):
                continue

            host = member_name.split('.', 1)[1]
            return [(member_name, host)]
        return []


class RedisFanoutExchange(RedisExchange):
    """Return a list of all hosts."""
    def run(self, topic):
        topic = topic.split('~', 1)[1]
        hosts = self.redis.smembers(topic)
        good_hosts = filter(
            lambda host: self.matchmaker.is_alive(topic, host), hosts)

        return [(x, x.split('.', 1)[1]) for x in good_hosts]


class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
    """MatchMaker registering and looking-up hosts with a Redis server."""
    def __init__(self):
        super(MatchMakerRedis, self).__init__()

        if not redis:
            raise ImportError("Failed to import module redis.")

        self.redis = redis.StrictRedis(
            host=CONF.matchmaker_redis.host,
            port=CONF.matchmaker_redis.port,
            password=CONF.matchmaker_redis.password)

        self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self))
        self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange())
        self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self))

    def ack_alive(self, key, host):
        topic = "%s.%s" % (key, host)
        if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl):
            # If we could not update the expiration, the key
            # might have been pruned. Re-register, creating a new
            # key in Redis.
            self.register(key, host)

    def is_alive(self, topic, host):
        if self.redis.ttl(host) == -1:
            self.expire(topic, host)
            return False
        return True

    def expire(self, topic, host):
        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.delete(host)
            pipe.srem(topic, host)
            pipe.execute()

    def backend_register(self, key, key_host):
        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.sadd(key, key_host)

            # No value is needed, we just
            # care if it exists. Sets aren't viable
            # because only keys can expire.
            pipe.set(key_host, '')

            pipe.execute()

    def backend_unregister(self, key, key_host):
        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.srem(key, key_host)
            pipe.delete(key_host)
            pipe.execute()
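

def _example_redis_key_layout(matchmaker):
    # Illustrative sketch, not part of the original module (requires a
    # reachable Redis server; topic and host names are made up): the
    # methods above keep one set per topic holding "topic.host" members,
    # plus one expiring string key per member that acts as the heartbeat.
    matchmaker.register('compute', 'node1')
    assert matchmaker.redis.smembers('compute') == set(['compute.node1'])
    assert matchmaker.redis.ttl('compute.node1') > 0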
@ -1,110 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011-2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""

import itertools
import json

from oslo.config import cfg

from openstack.common.gettextutils import _
from openstack.common import log as logging
from openstack.common.rpc import matchmaker as mm


matchmaker_opts = [
    # Matchmaker ring file
    cfg.StrOpt('ringfile',
               deprecated_name='matchmaker_ringfile',
               deprecated_group='DEFAULT',
               default='/etc/oslo/matchmaker_ring.json',
               help='Matchmaker ring file (JSON)'),
]

CONF = cfg.CONF
CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
LOG = logging.getLogger(__name__)


class RingExchange(mm.Exchange):
    """Match Maker where hosts are loaded from a static JSON formatted file.

    __init__ takes optional ring dictionary argument, otherwise
    loads the ringfile from CONF.matchmaker_ring.ringfile.
    """
    def __init__(self, ring=None):
        super(RingExchange, self).__init__()

        if ring:
            self.ring = ring
        else:
            fh = open(CONF.matchmaker_ring.ringfile, 'r')
            self.ring = json.load(fh)
            fh.close()

        self.ring0 = {}
        for k in self.ring.keys():
            self.ring0[k] = itertools.cycle(self.ring[k])

    def _ring_has(self, key):
        if key in self.ring0:
            return True
        return False
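

def _example_ring():
    # Illustrative sketch, not part of the original module: a hypothetical
    # ring dictionary as it would appear in the JSON ringfile.
    # RoundRobinRingExchange (below) cycles through a topic's hosts;
    # FanoutRingExchange returns all of them.
    ring = {'compute': ['node1', 'node2']}
    rr = RoundRobinRingExchange(ring)
    assert rr.run('compute') == [('compute.node1', 'node1')]
    assert rr.run('compute') == [('compute.node2', 'node2')]
    fan = FanoutRingExchange(ring)
    assert fan.run('fanout~compute') == [('fanout~compute.node1', 'node1'),
                                         ('fanout~compute.node2', 'node2')]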


class RoundRobinRingExchange(RingExchange):
    """A Topic Exchange based on a hashmap."""
    def __init__(self, ring=None):
        super(RoundRobinRingExchange, self).__init__(ring)

    def run(self, key):
        if not self._ring_has(key):
            LOG.warn(
                _("No key defining hosts for topic '%s', "
                  "see ringfile") % (key, )
            )
            return []
        host = next(self.ring0[key])
        return [(key + '.' + host, host)]


class FanoutRingExchange(RingExchange):
    """Fanout Exchange based on a hashmap."""
    def __init__(self, ring=None):
        super(FanoutRingExchange, self).__init__(ring)

    def run(self, key):
        # Assume starts with "fanout~", strip it for lookup.
        nkey = key.split('fanout~')[1:][0]
        if not self._ring_has(nkey):
            LOG.warn(
                _("No key defining hosts for topic '%s', "
                  "see ringfile") % (nkey, )
            )
            return []
        return map(lambda x: (key + '.' + x, x), self.ring[nkey])


class MatchMakerRing(mm.MatchMakerBase):
    """Match Maker where hosts are loaded from a static hashmap."""
    def __init__(self, ring=None):
        super(MatchMakerRing, self).__init__()
        self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring))
        self.add_binding(mm.DirectBinding(), mm.DirectExchange())
        self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring))
@ -1,226 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A helper class for proxy objects to remote APIs.

For more information about rpc API version numbers, see:
    rpc/dispatcher.py
"""


from openstack.common import rpc
from openstack.common.rpc import common as rpc_common
from openstack.common.rpc import serializer as rpc_serializer


class RpcProxy(object):
    """A helper class for rpc clients.

    This class is a wrapper around the RPC client API. It allows you to
    specify the topic and API version in a single place. This is intended to
    be used as a base class for a class that implements the client side of an
    rpc API.
    """

    # The default namespace, which can be overridden in a subclass.
    RPC_API_NAMESPACE = None

    def __init__(self, topic, default_version, version_cap=None,
                 serializer=None):
        """Initialize an RpcProxy.

        :param topic: The topic to use for all messages.
        :param default_version: The default API version to request in all
               outgoing messages. This can be overridden on a per-message
               basis.
        :param version_cap: Optionally cap the maximum version used for sent
               messages.
        :param serializer: Optionally (de-)serialize entities with a
               provided helper.
        """
        self.topic = topic
        self.default_version = default_version
        self.version_cap = version_cap
        if serializer is None:
            serializer = rpc_serializer.NoOpSerializer()
        self.serializer = serializer
        super(RpcProxy, self).__init__()

    def _set_version(self, msg, vers):
        """Helper method to set the version in a message.

        :param msg: The message having a version added to it.
        :param vers: The version number to add to the message.
        """
        v = vers if vers else self.default_version
        if (self.version_cap and not
                rpc_common.version_is_compatible(self.version_cap, v)):
            raise rpc_common.RpcVersionCapError(version=self.version_cap)
        msg['version'] = v

    def _get_topic(self, topic):
        """Return the topic to use for a message."""
        return topic if topic else self.topic

    def can_send_version(self, version):
        """Check to see if a version is compatible with the version cap."""
        return (not self.version_cap or
                rpc_common.version_is_compatible(self.version_cap, version))

    @staticmethod
    def make_namespaced_msg(method, namespace, **kwargs):
        return {'method': method, 'namespace': namespace, 'args': kwargs}

    def make_msg(self, method, **kwargs):
        return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
                                        **kwargs)

    def _serialize_msg_args(self, context, kwargs):
        """Helper method called to serialize message arguments.

        This calls our serializer on each argument, returning a new
        set of args that have been serialized.

        :param context: The request context
        :param kwargs: The arguments to serialize
        :returns: A new set of serialized arguments
        """
        new_kwargs = dict()
        for argname, arg in kwargs.iteritems():
            new_kwargs[argname] = self.serializer.serialize_entity(context,
                                                                   arg)
        return new_kwargs

    def call(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.call() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response. If no timeout is specified, a default timeout will be
               used that is usually sufficient.

        :returns: The return value from the remote method.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        real_topic = self._get_topic(topic)
        try:
            result = rpc.call(context, real_topic, msg, timeout)
            return self.serializer.deserialize_entity(context, result)
        except rpc.common.Timeout as exc:
            raise rpc.common.Timeout(
                exc.info, real_topic, msg.get('method'))

    def multicall(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.multicall() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response. If no timeout is specified, a default timeout will be
               used that is usually sufficient.

        :returns: An iterator that lets you process each of the returned values
                  from the remote method as they arrive.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        real_topic = self._get_topic(topic)
        try:
            result = rpc.multicall(context, real_topic, msg, timeout)
            return self.serializer.deserialize_entity(context, result)
        except rpc.common.Timeout as exc:
            raise rpc.common.Timeout(
                exc.info, real_topic, msg.get('method'))

    def cast(self, context, msg, topic=None, version=None):
        """rpc.cast() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None. rpc.cast() does not wait on any return value from the
                  remote method.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.cast(context, self._get_topic(topic), msg)

    def fanout_cast(self, context, msg, topic=None, version=None):
        """rpc.fanout_cast() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None. rpc.fanout_cast() does not wait on any return value
                  from the remote method.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.fanout_cast(context, self._get_topic(topic), msg)

    def cast_to_server(self, context, server_params, msg, topic=None,
                       version=None):
        """rpc.cast_to_server() a remote method.

        :param context: The request context
        :param server_params: Server parameters. See rpc.cast_to_server() for
               details.
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None. rpc.cast_to_server() does not wait on any
                  return values.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)

    def fanout_cast_to_server(self, context, server_params, msg, topic=None,
                              version=None):
        """rpc.fanout_cast_to_server() a remote method.

        :param context: The request context
        :param server_params: Server parameters. See rpc.cast_to_server() for
               details.
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None. rpc.fanout_cast_to_server() does not wait on any
                  return values.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.fanout_cast_to_server(context, server_params,
                                  self._get_topic(topic), msg)
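

class _ExampleAPI(RpcProxy):
    # Illustrative sketch, not part of the original module: a minimal
    # client-side API built on RpcProxy. The topic, version, and method
    # names are made up.
    BASE_RPC_API_VERSION = '1.0'

    def __init__(self):
        super(_ExampleAPI, self).__init__(
            topic='example', default_version=self.BASE_RPC_API_VERSION)

    def ping(self, context, arg):
        # Blocks on the remote 'ping' method and returns its result.
        return self.call(context, self.make_msg('ping', arg=arg))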
@ -1,52 +0,0 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Provides the definition of an RPC serialization handler"""

import abc


class Serializer(object):
    """Generic (de-)serialization definition base class."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def serialize_entity(self, context, entity):
        """Serialize something to primitive form.

        :param context: Security context
        :param entity: Entity to be serialized
        :returns: Serialized form of entity
        """
        pass

    @abc.abstractmethod
    def deserialize_entity(self, context, entity):
        """Deserialize something from primitive form.

        :param context: Security context
        :param entity: Primitive to be deserialized
        :returns: Deserialized form of entity
        """
        pass


class NoOpSerializer(Serializer):
    """A serializer that does nothing."""

    def serialize_entity(self, context, entity):
        return entity

    def deserialize_entity(self, context, entity):
        return entity
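

class _ExampleTaggingSerializer(Serializer):
    # Illustrative sketch, not part of the original module: a trivial
    # Serializer implementation showing the contract defined above;
    # entities are wrapped in a primitive dict on the way out and
    # unwrapped on the way in.
    def serialize_entity(self, context, entity):
        return {'payload': entity}

    def deserialize_entity(self, context, entity):
        return entity['payload']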
@ -1,76 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from openstack.common.gettextutils import _
from openstack.common import log as logging
from openstack.common import rpc
from openstack.common.rpc import dispatcher as rpc_dispatcher
from openstack.common import service


LOG = logging.getLogger(__name__)


class Service(service.Service):
    """Service object for binaries running on hosts.

    A service enables rpc by listening to queues based on topic and host.
    """
    def __init__(self, host, topic, manager=None):
        super(Service, self).__init__()
        self.host = host
        self.topic = topic
        if manager is None:
            self.manager = self
        else:
            self.manager = manager

    def start(self):
        super(Service, self).start()

        self.conn = rpc.create_connection(new=True)
        LOG.debug(_("Creating Consumer connection for Service %s") %
                  self.topic)

        dispatcher = rpc_dispatcher.RpcDispatcher([self.manager])

        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, dispatcher, fanout=False)

        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, dispatcher, fanout=False)

        self.conn.create_consumer(self.topic, dispatcher, fanout=True)

        # Hook to allow the manager to do other initializations after
        # the rpc connection is created.
        if callable(getattr(self.manager, 'initialize_service_hook', None)):
            self.manager.initialize_service_hook(self)

        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

    def stop(self):
        # Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them, as we're shutting down anyway
        try:
            self.conn.close()
        except Exception:
            pass
        super(Service, self).stop()
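

class _ExampleManager(object):
    # Illustrative sketch, not part of the original module: a manager
    # whose public methods become RPC endpoints through the RpcDispatcher
    # that Service.start() creates above, e.g.
    # Service('host1', 'example', _ExampleManager()).
    def initialize_service_hook(self, service):
        # Called once the rpc connection exists; extra consumers could
        # be created on service.conn here.
        pass

    def ping(self, context):
        return 'pong'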
@ -1,41 +0,0 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import eventlet
eventlet.monkey_patch()

import contextlib
import sys

from oslo.config import cfg

from openstack.common import log as logging
from openstack.common import rpc
from openstack.common.rpc import impl_zmq

CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)


def main():
    CONF(sys.argv[1:], project='oslo')
    logging.setup("oslo")

    with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
        reactor.consume_in_thread()
        reactor.wait()
|
@@ -1,53 +0,0 @@
# Copyright (c) 2011-2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Filter support
"""

from openstack.common.scheduler import base_handler


class BaseFilter(object):
    """Base class for all filter classes."""
    def _filter_one(self, obj, filter_properties):
        """Return True if it passes the filter, False otherwise.
        Override this in a subclass.
        """
        return True

    def filter_all(self, filter_obj_list, filter_properties):
        """Yield objects that pass the filter.

        Can be overridden in a subclass, if you need to base filtering
        decisions on all objects. Otherwise, one can just override
        _filter_one() to filter a single object.
        """
        for obj in filter_obj_list:
            if self._filter_one(obj, filter_properties):
                yield obj


class BaseFilterHandler(base_handler.BaseHandler):
    """Base class to handle loading filter classes.

    This class should be subclassed where one needs to use filters.
    """

    def get_filtered_objects(self, filter_classes, objs,
                             filter_properties):
        for filter_cls in filter_classes:
            objs = filter_cls().filter_all(objs, filter_properties)
        return list(objs)
@@ -1,45 +0,0 @@
# Copyright (c) 2011-2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A common base for handling extension classes.

Used by BaseFilterHandler and BaseWeightHandler
"""

import inspect

from stevedore import extension


class BaseHandler(object):
    """Base class to handle loading filter and weight classes."""
    def __init__(self, modifier_class_type, modifier_namespace):
        self.namespace = modifier_namespace
        self.modifier_class_type = modifier_class_type
        self.extension_manager = extension.ExtensionManager(modifier_namespace)

    def _is_correct_class(self, cls):
        """Return whether an object is a class of the correct type and
        is not prefixed with an underscore.
        """
        return (inspect.isclass(cls) and
                not cls.__name__.startswith('_') and
                issubclass(cls, self.modifier_class_type))

    def get_all_classes(self):
        # We use a set, as some classes may have an entrypoint of their own,
        # and also be returned by a function such as 'all_filters' for example
        return [ext.plugin for ext in self.extension_manager if
                self._is_correct_class(ext.plugin)]
@@ -1,72 +0,0 @@
# Copyright (c) 2011-2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Pluggable Weighing support
"""

from openstack.common.scheduler import base_handler


class WeighedObject(object):
    """Object with weight information."""
    def __init__(self, obj, weight):
        self.obj = obj
        self.weight = weight

    def __repr__(self):
        return "<WeighedObject '%s': %s>" % (self.obj, self.weight)


class BaseWeigher(object):
    """Base class for pluggable weighers."""
    def _weight_multiplier(self):
        """How weighted this weigher should be. Normally this would
        be overridden in a subclass based on a config value.
        """
        return 1.0

    def _weigh_object(self, obj, weight_properties):
        """Override in a subclass to specify a weight for a specific
        object.
        """
        return 0.0

    def weigh_objects(self, weighed_obj_list, weight_properties):
        """Weigh multiple objects. Override in a subclass if you need
        access to all objects in order to manipulate weights.
        """
        constant = self._weight_multiplier()
        for obj in weighed_obj_list:
            obj.weight += (constant *
                           self._weigh_object(obj.obj, weight_properties))


class BaseWeightHandler(base_handler.BaseHandler):
    object_class = WeighedObject

    def get_weighed_objects(self, weigher_classes, obj_list,
                            weighing_properties):
        """Return a sorted (highest score first) list of WeighedObjects."""

        if not obj_list:
            return []

        weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list]
        for weigher_cls in weigher_classes:
            weigher = weigher_cls()
            weigher.weigh_objects(weighed_objs, weighing_properties)

        return sorted(weighed_objs, key=lambda x: x.weight, reverse=True)
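For orientation, a minimal sketch of how the weighing pieces above compose; SizeWeigher and the plain-number objects are hypothetical stand-ins, not part of the removed module:

# Hypothetical weigher built on the BaseWeigher/WeighedObject API above.
class SizeWeigher(BaseWeigher):
    def _weigh_object(self, obj, weight_properties):
        # Score each object by its numeric value.
        return float(obj)

weighed = [WeighedObject(n, 0.0) for n in (3, 1, 2)]
SizeWeigher().weigh_objects(weighed, {})
ranked = sorted(weighed, key=lambda w: w.weight, reverse=True)
print([w.obj for w in ranked])  # [3, 2, 1]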
@@ -1,41 +0,0 @@
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Scheduler host filters
"""

from openstack.common import log as logging
from openstack.common.scheduler import base_filter

LOG = logging.getLogger(__name__)


class BaseHostFilter(base_filter.BaseFilter):
    """Base class for host filters."""
    def _filter_one(self, obj, filter_properties):
        """Return True if the object passes the filter, otherwise False."""
        return self.host_passes(obj, filter_properties)

    def host_passes(self, host_state, filter_properties):
        """Return True if the HostState passes the filter, otherwise False.
        Override this in a subclass.
        """
        raise NotImplementedError()


class HostFilterHandler(base_filter.BaseFilterHandler):
    def __init__(self, namespace):
        super(HostFilterHandler, self).__init__(BaseHostFilter, namespace)
@@ -1,30 +0,0 @@
# Copyright (c) 2011-2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from openstack.common.scheduler import filters


class AvailabilityZoneFilter(filters.BaseHostFilter):
    """Filters Hosts by availability zone."""

    def host_passes(self, host_state, filter_properties):
        spec = filter_properties.get('request_spec', {})
        # NOTE: the default must be a dict, not a list, since .get() is
        # called on it below.
        props = spec.get('resource_properties', {})
        availability_zone = props.get('availability_zone')

        if availability_zone:
            return availability_zone == host_state.service['availability_zone']
        return True
@@ -1,64 +0,0 @@
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from openstack.common import log as logging
from openstack.common.scheduler import filters
from openstack.common.scheduler.filters import extra_specs_ops


LOG = logging.getLogger(__name__)


class CapabilitiesFilter(filters.BaseHostFilter):
    """HostFilter to work with resource (instance & volume) type records."""

    def _satisfies_extra_specs(self, capabilities, resource_type):
        """Check that the capabilities provided by the services satisfy
        the extra specs associated with the resource type.
        """
        extra_specs = resource_type.get('extra_specs', [])
        if not extra_specs:
            return True

        for key, req in extra_specs.iteritems():
            # Either not scope format, or in capabilities scope
            scope = key.split(':')
            if len(scope) > 1 and scope[0] != "capabilities":
                continue
            elif scope[0] == "capabilities":
                del scope[0]

            cap = capabilities
            for index in range(0, len(scope)):
                try:
                    cap = cap.get(scope[index], None)
                except AttributeError:
                    return False
                if cap is None:
                    return False
            if not extra_specs_ops.match(cap, req):
                return False
        return True

    def host_passes(self, host_state, filter_properties):
        """Return True if the host's capabilities satisfy the extra specs
        of the requested resource type.
        """
        # Note(zhiteng) Currently only Cinder and Nova are using
        # this filter, so the resource type is either instance or
        # volume.
        resource_type = filter_properties.get('resource_type')
        if not self._satisfies_extra_specs(host_state.capabilities,
                                           resource_type):
            return False
        return True
@@ -1,72 +0,0 @@
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import operator

from openstack.common import strutils

# 1. The following operations are supported:
#    =, s==, s!=, s>=, s>, s<=, s<, <in>, <is>, <or>, ==, !=, >=, <=
# 2. Note that <or> is handled in a different way below.
# 3. If the first word in the extra_specs is not one of the operators,
#    it is ignored.
_op_methods = {'=': lambda x, y: float(x) >= float(y),
               '<in>': lambda x, y: y in x,
               '<is>': lambda x, y: (strutils.bool_from_string(x) is
                                     strutils.bool_from_string(y)),
               '==': lambda x, y: float(x) == float(y),
               '!=': lambda x, y: float(x) != float(y),
               '>=': lambda x, y: float(x) >= float(y),
               '<=': lambda x, y: float(x) <= float(y),
               's==': operator.eq,
               's!=': operator.ne,
               's<': operator.lt,
               's<=': operator.le,
               's>': operator.gt,
               's>=': operator.ge}


def match(value, req):
    words = req.split()

    op = method = None
    if words:
        op = words.pop(0)
        method = _op_methods.get(op)

    if op != '<or>' and not method:
        return value == req

    if value is None:
        return False

    if op == '<or>':  # Ex: <or> v1 <or> v2 <or> v3
        while True:
            if words.pop(0) == value:
                return True
            if not words:
                break
            op = words.pop(0)  # remove a keyword <or>
            if not words:
                break
        return False

    try:
        if words and method(value, words[0]):
            return True
    except ValueError:
        pass

    return False
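A short sketch of the operator grammar match() implements; the capability and requirement strings are illustrative:

print(match('600', '>= 500'))       # True - numeric comparison
print(match('fast', 's== fast'))    # True - string equality
print(match('2', '<or> 1 <or> 2'))  # True - one of the alternatives
print(match('abc', 'abc'))          # True - no operator: plain equality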
@@ -1,150 +0,0 @@
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import operator

from openstack.common import jsonutils
from openstack.common.scheduler import filters


class JsonFilter(filters.BaseHostFilter):
    """Host Filter to allow simple JSON-based grammar for
    selecting hosts.
    """
    def _op_compare(self, args, op):
        """Returns True if the specified operator can successfully
        compare the first item in the args with all the rest. Will
        return False if only one item is in the list.
        """
        if len(args) < 2:
            return False
        if op is operator.contains:
            bad = args[0] not in args[1:]
        else:
            bad = [arg for arg in args[1:]
                   if not op(args[0], arg)]
        return not bool(bad)

    def _equals(self, args):
        """First term is == all the other terms."""
        return self._op_compare(args, operator.eq)

    def _less_than(self, args):
        """First term is < all the other terms."""
        return self._op_compare(args, operator.lt)

    def _greater_than(self, args):
        """First term is > all the other terms."""
        return self._op_compare(args, operator.gt)

    def _in(self, args):
        """First term is in set of remaining terms."""
        return self._op_compare(args, operator.contains)

    def _less_than_equal(self, args):
        """First term is <= all the other terms."""
        return self._op_compare(args, operator.le)

    def _greater_than_equal(self, args):
        """First term is >= all the other terms."""
        return self._op_compare(args, operator.ge)

    def _not(self, args):
        """Flip each of the arguments."""
        return [not arg for arg in args]

    def _or(self, args):
        """True if any arg is True."""
        return any(args)

    def _and(self, args):
        """True if all args are True."""
        return all(args)

    commands = {
        '=': _equals,
        '<': _less_than,
        '>': _greater_than,
        'in': _in,
        '<=': _less_than_equal,
        '>=': _greater_than_equal,
        'not': _not,
        'or': _or,
        'and': _and,
    }

    def _parse_string(self, string, host_state):
        """Strings prefixed with $ are capability lookups in the
        form '$variable' where 'variable' is an attribute in the
        HostState class. If $variable is a dictionary, you may
        use: $variable.dictkey
        """
        if not string:
            return None
        if not string.startswith("$"):
            return string

        path = string[1:].split(".")
        obj = getattr(host_state, path[0], None)
        if obj is None:
            return None
        for item in path[1:]:
            obj = obj.get(item, None)
            if obj is None:
                return None
        return obj

    def _process_filter(self, query, host_state):
        """Recursively parse the query structure."""
        if not query:
            return True
        cmd = query[0]
        method = self.commands[cmd]
        cooked_args = []
        for arg in query[1:]:
            if isinstance(arg, list):
                arg = self._process_filter(arg, host_state)
            elif isinstance(arg, basestring):
                arg = self._parse_string(arg, host_state)
            if arg is not None:
                cooked_args.append(arg)
        result = method(self, cooked_args)
        return result

    def host_passes(self, host_state, filter_properties):
        """Return True if the host can fulfill the requirements
        specified in the query.
        """
        # TODO(zhiteng) Add description for filter_properties structure
        # and scheduler_hints.
        try:
            query = filter_properties['scheduler_hints']['query']
        except KeyError:
            query = None
        if not query:
            return True

        # NOTE(comstud): Not checking capabilities or service for
        # enabled/disabled so that a provided json filter can decide

        result = self._process_filter(jsonutils.loads(query), host_state)
        if isinstance(result, list):
            # If any succeeded, include the host
            result = any(result)
        if result:
            # The query matched; the host passes the filter.
            return True
        return False
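A hedged sketch of the JSON grammar host_passes() consumes; the $free_ram_mb and $free_disk_mb attributes follow the '$variable' convention from _parse_string() and are illustrative, not guaranteed HostState fields:

import json

# Keep hosts with at least 1 GB of free RAM and 200 GB of free disk.
query = json.dumps(['and',
                    ['>=', '$free_ram_mb', 1024],
                    ['>=', '$free_disk_mb', 200 * 1024]])
filter_properties = {'scheduler_hints': {'query': query}}
# JsonFilter().host_passes(host_state, filter_properties) evaluates the
# query tree recursively via _process_filter().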
@@ -1,45 +0,0 @@
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Scheduler host weights
"""


from openstack.common.scheduler import base_weight


class WeighedHost(base_weight.WeighedObject):
    def to_dict(self):
        return {
            'weight': self.weight,
            'host': self.obj.host,
        }

    def __repr__(self):
        return ("WeighedHost [host: %s, weight: %s]" %
                (self.obj.host, self.weight))


class BaseHostWeigher(base_weight.BaseWeigher):
    """Base class for host weights."""
    pass


class HostWeightHandler(base_weight.BaseWeightHandler):
    object_class = WeighedHost

    def __init__(self, namespace):
        super(HostWeightHandler, self).__init__(BaseHostWeigher, namespace)
@@ -1,333 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Generic Node base class for all workers that run on hosts."""

import errno
import os
import random
import signal
import sys
import time

import eventlet
import logging as std_logging
from oslo.config import cfg

from openstack.common import eventlet_backdoor
from openstack.common.gettextutils import _
from openstack.common import importutils
from openstack.common import log as logging
from openstack.common import threadgroup


rpc = importutils.try_import('openstack.common.rpc')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)


class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Initialize the service launcher.

        :returns: None

        """
        self._services = threadgroup.ThreadGroup()
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

    @staticmethod
    def run_service(service):
        """Start and wait for a service to finish.

        :param service: service to run and wait for.
        :returns: None

        """
        service.start()
        service.wait()

    def launch_service(self, service):
        """Load and start the given service.

        :param service: The service you would like to start.
        :returns: None

        """
        service.backdoor_port = self.backdoor_port
        self._services.add_thread(self.run_service, service)

    def stop(self):
        """Stop all services which are currently running.

        :returns: None

        """
        self._services.stop()

    def wait(self):
        """Waits until all services have been stopped, and then returns.

        :returns: None

        """
        self._services.wait()


class SignalExit(SystemExit):
    def __init__(self, signo, exccode=1):
        super(SignalExit, self).__init__(exccode)
        self.signo = signo


class ServiceLauncher(Launcher):
    def _handle_signal(self, signo, frame):
        # Allow the process to be killed again and die from natural causes
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

        raise SignalExit(signo)

    def wait(self):
        signal.signal(signal.SIGTERM, self._handle_signal)
        signal.signal(signal.SIGINT, self._handle_signal)

        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        status = None
        try:
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[exc.signo]
            LOG.info(_('Caught %s, exiting'), signame)
            status = exc.code
        except SystemExit as exc:
            status = exc.code
        finally:
            if rpc:
                rpc.cleanup()
            self.stop()
        return status


class ServiceWrapper(object):
    def __init__(self, service, workers):
        self.service = service
        self.workers = workers
        self.children = set()
        self.forktimes = []


class ProcessLauncher(object):
    def __init__(self):
        self.children = {}
        self.sigcaught = None
        self.running = True
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')

        signal.signal(signal.SIGTERM, self._handle_signal)
        signal.signal(signal.SIGINT, self._handle_signal)

    def _handle_signal(self, signo, frame):
        self.sigcaught = signo
        self.running = False

        # Allow the process to be killed again and die from natural causes
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    def _pipe_watcher(self):
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()

        LOG.info(_('Parent process has died unexpectedly, exiting'))

        sys.exit(1)

    def _child_process(self, service):
        # Setup child signal handlers differently
        def _sigterm(*args):
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)

        signal.signal(signal.SIGTERM, _sigterm)
        # Block SIGINT and let the parent send us a SIGTERM
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.run_service(service)

    def _start_child(self, wrap):
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # NOTE(johannes): All exceptions are caught to ensure this
            # doesn't fall back into the loop spawning children. It would
            # be bad for a child to spawn more children.
            status = 0
            try:
                self._child_process(wrap.service)
            except SignalExit as exc:
                signame = {signal.SIGTERM: 'SIGTERM',
                           signal.SIGINT: 'SIGINT'}[exc.signo]
                LOG.info(_('Caught %s, exiting'), signame)
                status = exc.code
            except SystemExit as exc:
                status = exc.code
            except BaseException:
                LOG.exception(_('Unhandled exception'))
                status = 2
            finally:
                wrap.service.stop()

            os._exit(status)

        LOG.info(_('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_service(self, service, workers=1):
        wrap = ServiceWrapper(service, workers)

        LOG.info(_('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(.01)
                continue

            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

        if self.sigcaught:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[self.sigcaught]
            LOG.info(_('Caught %s, stopping children'), signame)

        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()


class Service(object):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        self.tg = threadgroup.ThreadGroup(threads)

    def start(self):
        pass

    def stop(self):
        self.tg.stop()

    def wait(self):
        self.tg.wait()


def launch(service, workers=None):
    if workers:
        launcher = ProcessLauncher()
        launcher.launch_service(service, workers=workers)
    else:
        launcher = ServiceLauncher()
        launcher.launch_service(service)
    return launcher
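A minimal sketch of driving launch(); MyService is a hypothetical subclass, and the workers=None path selects ServiceLauncher exactly as in the code above:

class MyService(Service):
    """Hypothetical service that runs its work in the thread group."""
    def start(self):
        self.tg.add_thread(self._run)

    def _run(self):
        pass  # real work would go here

launcher = launch(MyService(), workers=None)  # single-process path
launcher.wait()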
@@ -1,80 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import ssl

from oslo.config import cfg

from openstack.common.gettextutils import _


ssl_opts = [
    cfg.StrOpt('ca_file',
               default=None,
               help="CA certificate file to use to verify "
                    "connecting clients"),
    cfg.StrOpt('cert_file',
               default=None,
               help="Certificate file to use when starting "
                    "the server securely"),
    cfg.StrOpt('key_file',
               default=None,
               help="Private key file to use when starting "
                    "the server securely"),
]


CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")


def is_enabled():
    cert_file = CONF.ssl.cert_file
    key_file = CONF.ssl.key_file
    ca_file = CONF.ssl.ca_file
    use_ssl = cert_file or key_file

    if cert_file and not os.path.exists(cert_file):
        raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)

    if ca_file and not os.path.exists(ca_file):
        raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)

    if key_file and not os.path.exists(key_file):
        raise RuntimeError(_("Unable to find key_file : %s") % key_file)

    if use_ssl and (not cert_file or not key_file):
        raise RuntimeError(_("When running server in SSL mode, you must "
                             "specify both a cert_file and key_file "
                             "option value in your configuration file"))

    return use_ssl


def wrap(sock):
    ssl_kwargs = {
        'server_side': True,
        'certfile': CONF.ssl.cert_file,
        'keyfile': CONF.ssl.key_file,
        'cert_reqs': ssl.CERT_NONE,
    }

    if CONF.ssl.ca_file:
        ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
        ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

    return ssl.wrap_socket(sock, **ssl_kwargs)
@@ -1,218 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
System-level utilities and helper functions.
"""

import re
import sys
import unicodedata

import six

from openstack.common.gettextutils import _


# Used for looking up extensions of text
# to their 'multiplied' byte amount
BYTE_MULTIPLIERS = {
    '': 1,
    't': 1024 ** 4,
    'g': 1024 ** 3,
    'm': 1024 ** 2,
    'k': 1024,
}
BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)')

TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')

SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")


def int_from_bool_as_string(subject):
    """Interpret a string as a boolean and return either 1 or 0.

    Any string value in:

        ('True', 'true', 'On', 'on', '1')

    is interpreted as a boolean True.

    Useful for JSON-decoded stuff and config file parsing
    """
    return bool_from_string(subject) and 1 or 0


def bool_from_string(subject, strict=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else is considered False.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    if not isinstance(subject, six.string_types):
        subject = str(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = _("Unrecognized value '%(val)s', acceptable values are:"
                " %(acceptable)s") % {'val': subject,
                                      'acceptable': acceptable}
        raise ValueError(msg)
    else:
        return False


def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming str using `incoming` if they're not already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, six.string_types):
        raise TypeError("%s can't be decoded" % type(text))

    if isinstance(text, six.text_type):
        return text

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    try:
        return text.decode(incoming, errors)
    except UnicodeDecodeError:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode('utf-8', errors)


def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """Encodes incoming str/unicode using `encoding`.

    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, six.string_types):
        raise TypeError("%s can't be encoded" % type(text))

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    if isinstance(text, six.text_type):
        return text.encode(encoding, errors)
    elif text and encoding != incoming:
        # Decode text before encoding it with `encoding`
        text = safe_decode(text, incoming, errors)
        return text.encode(encoding, errors)

    return text


def to_bytes(text, default=0):
    """Converts a string into an integer of bytes.

    Looks at the last characters of the text to determine
    what conversion is needed to turn the input text into a byte number.
    Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)

    :param text: String input for bytes size conversion.
    :param default: Default return value when text is blank.

    """
    match = BYTE_REGEX.search(text)
    if match:
        magnitude = int(match.group(1))
        mult_key_org = match.group(2)
        if not mult_key_org:
            return magnitude
    elif text:
        msg = _('Invalid string format: %s') % text
        raise TypeError(msg)
    else:
        return default
    mult_key = mult_key_org.lower().replace('b', '', 1)
    multiplier = BYTE_MULTIPLIERS.get(mult_key)
    if multiplier is None:
        msg = _('Unknown byte multiplier: %s') % mult_key_org
        raise TypeError(msg)
    return magnitude * multiplier


def to_slug(value, incoming=None, errors="strict"):
    """Normalize string.

    Convert to lowercase, remove non-word characters, and convert spaces
    to hyphens.

    Inspired by Django's `slugify` filter.

    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: slugified unicode representation of `value`
    :raises TypeError: If text is not an instance of str
    """
    value = safe_decode(value, incoming, errors)
    # NOTE(aababilov): no need to use safe_(encode|decode) here:
    # encodings are always "ascii", error handling is always "ignore"
    # and types are always known (first: unicode; second: str)
    value = unicodedata.normalize("NFKD", value).encode(
        "ascii", "ignore").decode("ascii")
    value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", value)
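A quick sketch of the removed string helpers; the outputs shown as comments follow from the code above:

print(bool_from_string('YES'))    # True  ('yes' is in TRUE_STRINGS)
print(bool_from_string('maybe'))  # False (lenient mode, strict=False)
print(to_bytes('1024'))           # 1024  (no suffix: raw byte count)
print(to_bytes('2KB'))            # 2048  (case-insensitive 'k' multiplier)
print(to_slug(u'Hello World!'))   # hello-world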
@@ -1,121 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from eventlet import greenlet
from eventlet import greenpool
from eventlet import greenthread

from openstack.common import log as logging
from openstack.common import loopingcall


LOG = logging.getLogger(__name__)


def _thread_done(gt, *args, **kwargs):
    """Callback function to be passed to GreenThread.link() when we spawn()
    Calls the :class:`ThreadGroup` to notify it.

    """
    kwargs['group'].thread_done(kwargs['thread'])


class Thread(object):
    """Wrapper around a greenthread, that holds a reference to the
    :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
    it has finished so it can be removed from the threads list.
    """
    def __init__(self, thread, group):
        self.thread = thread
        self.thread.link(_thread_done, group=group, thread=self)

    def stop(self):
        self.thread.kill()

    def wait(self):
        return self.thread.wait()


class ThreadGroup(object):
    """The point of the ThreadGroup class is to:

    * keep track of timers and greenthreads (making it easier to stop them
      when need be).
    * provide an easy API to add timers.
    """
    def __init__(self, thread_pool_size=10):
        self.pool = greenpool.GreenPool(thread_pool_size)
        self.threads = []
        self.timers = []

    def add_dynamic_timer(self, callback, initial_delay=None,
                          periodic_interval_max=None, *args, **kwargs):
        timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
        timer.start(initial_delay=initial_delay,
                    periodic_interval_max=periodic_interval_max)
        self.timers.append(timer)

    def add_timer(self, interval, callback, initial_delay=None,
                  *args, **kwargs):
        pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
        pulse.start(interval=interval,
                    initial_delay=initial_delay)
        self.timers.append(pulse)

    def add_thread(self, callback, *args, **kwargs):
        gt = self.pool.spawn(callback, *args, **kwargs)
        th = Thread(gt, self)
        self.threads.append(th)

    def thread_done(self, thread):
        self.threads.remove(thread)

    def stop(self):
        current = greenthread.getcurrent()
        for x in self.threads:
            if x is current:
                # don't kill the current thread.
                continue
            try:
                x.stop()
            except Exception as ex:
                LOG.exception(ex)

        for x in self.timers:
            try:
                x.stop()
            except Exception as ex:
                LOG.exception(ex)
        self.timers = []

    def wait(self):
        for x in self.timers:
            try:
                x.wait()
            except greenlet.GreenletExit:
                pass
            except Exception as ex:
                LOG.exception(ex)
        current = greenthread.getcurrent()
        for x in self.threads:
            if x is current:
                continue
            try:
                x.wait()
            except greenlet.GreenletExit:
                pass
            except Exception as ex:
                LOG.exception(ex)
@@ -1,187 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Time related utilities and helper functions.
"""

import calendar
import datetime

import iso8601


# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND


def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format."""
    if not at:
        at = utcnow()
    st = at.strftime(_ISO8601_TIME_FORMAT
                     if not subsecond
                     else _ISO8601_TIME_FORMAT_SUBSECOND)
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    st += ('Z' if tz == 'UTC' else tz)
    return st


def parse_isotime(timestr):
    """Parse time from ISO 8601 format."""
    try:
        return iso8601.parse_date(timestr)
    except iso8601.ParseError as e:
        raise ValueError(e.message)
    except TypeError as e:
        raise ValueError(e.message)


def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Returns formatted utcnow."""
    if not at:
        at = utcnow()
    return at.strftime(fmt)


def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Turn a formatted time back into a datetime."""
    return datetime.datetime.strptime(timestr, fmt)


def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC naive object."""
    offset = timestamp.utcoffset()
    if offset is None:
        return timestamp
    return timestamp.replace(tzinfo=None) - offset


def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    if isinstance(before, basestring):
        before = parse_strtime(before).replace(tzinfo=None)
    return utcnow() - before > datetime.timedelta(seconds=seconds)


def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    if isinstance(after, basestring):
        after = parse_strtime(after).replace(tzinfo=None)
    return after - utcnow() > datetime.timedelta(seconds=seconds)


def utcnow_ts():
    """Timestamp version of our utcnow function."""
    return calendar.timegm(utcnow().timetuple())


def utcnow():
    """Overridable version of utils.utcnow."""
    if utcnow.override_time:
        try:
            return utcnow.override_time.pop(0)
        except AttributeError:
            return utcnow.override_time
    return datetime.datetime.utcnow()


def iso8601_from_timestamp(timestamp):
    """Returns an ISO 8601 formatted date from a timestamp."""
    return isotime(datetime.datetime.utcfromtimestamp(timestamp))


utcnow.override_time = None


def set_time_override(override_time=datetime.datetime.utcnow()):
    """Overrides utils.utcnow.

    Make it return a constant time or a list thereof, one at a time.
    """
    utcnow.override_time = override_time


def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta."""
    assert utcnow.override_time is not None
    try:
        for dt in utcnow.override_time:
            dt += timedelta
    except TypeError:
        utcnow.override_time += timedelta


def advance_time_seconds(seconds):
    """Advance overridden time by seconds."""
    advance_time_delta(datetime.timedelta(0, seconds))


def clear_time_override():
    """Remove the overridden time."""
    utcnow.override_time = None


def marshall_now(now=None):
    """Make an rpc-safe datetime with microseconds.

    Note: tzinfo is stripped, but not required for relative times.
    """
    if not now:
        now = utcnow()
    return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
                minute=now.minute, second=now.second,
                microsecond=now.microsecond)


def unmarshall_time(tyme):
    """Unmarshall a datetime dict."""
    return datetime.datetime(day=tyme['day'],
                             month=tyme['month'],
                             year=tyme['year'],
                             hour=tyme['hour'],
                             minute=tyme['minute'],
                             second=tyme['second'],
                             microsecond=tyme['microsecond'])


def delta_seconds(before, after):
    """Return the difference between two timing objects.

    Compute the difference in seconds between two date, time, or
    datetime objects (as a float, to microsecond resolution).
    """
    delta = after - before
    try:
        return delta.total_seconds()
    except AttributeError:
        return ((delta.days * 24 * 3600) + delta.seconds +
                float(delta.microseconds) / (10 ** 6))


def is_soon(dt, window):
    """Determines if time is going to happen in the next window seconds.

    :params dt: the time
    :params window: minimum seconds to remain to consider the time not soon

    :return: True if expiration is within the given duration
    """
    soon = (utcnow() + datetime.timedelta(seconds=window))
    return normalize_time(dt) <= soon
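A sketch of the rpc-safe round trip that marshall_now() and unmarshall_time() provide:

now = utcnow()                   # naive UTC datetime
packed = marshall_now(now)       # plain dict of ints, safe to serialize
restored = unmarshall_time(packed)
assert restored == now           # microsecond-exact round trip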
@@ -1,39 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
UUID related utilities and helper functions.
"""

import uuid


def generate_uuid():
    return str(uuid.uuid4())


def is_uuid_like(val):
    """Returns validation of a value as a UUID.

    For our purposes, a UUID is a canonical form string:
    aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa

    """
    try:
        return str(uuid.UUID(val)) == val
    except (TypeError, ValueError, AttributeError):
        return False
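A sketch of the UUID helpers; note that is_uuid_like() only accepts the canonical lowercase form:

val = generate_uuid()
print(is_uuid_like(val))           # True
print(is_uuid_like('not-a-uuid'))  # False
print(is_uuid_like(val.upper()))   # False - str(uuid.UUID(...)) is lowercase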
@@ -1,74 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from xml.dom import minidom
from xml.parsers import expat
from xml import sax
from xml.sax import expatreader


class ProtectedExpatParser(expatreader.ExpatParser):
    """An expat parser which disables DTDs and entities by default."""

    def __init__(self, forbid_dtd=True, forbid_entities=True,
                 *args, **kwargs):
        # Python 2.x old style class
        expatreader.ExpatParser.__init__(self, *args, **kwargs)
        self.forbid_dtd = forbid_dtd
        self.forbid_entities = forbid_entities

    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        raise ValueError("Inline DTD forbidden")

    def entity_decl(self, entityName, is_parameter_entity, value, base,
                    systemId, publicId, notationName):
        raise ValueError("<!ENTITY> entity declaration forbidden")

    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        # expat 1.2
        raise ValueError("<!ENTITY> unparsed entity forbidden")

    def external_entity_ref(self, context, base, systemId, publicId):
        raise ValueError("<!ENTITY> external entity forbidden")

    def notation_decl(self, name, base, sysid, pubid):
        raise ValueError("<!ENTITY> notation forbidden")

    def reset(self):
        expatreader.ExpatParser.reset(self)
        if self.forbid_dtd:
            self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
            self._parser.EndDoctypeDeclHandler = None
        if self.forbid_entities:
            self._parser.EntityDeclHandler = self.entity_decl
            self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
            self._parser.ExternalEntityRefHandler = self.external_entity_ref
            self._parser.NotationDeclHandler = self.notation_decl
            try:
                self._parser.SkippedEntityHandler = None
            except AttributeError:
                # some pyexpat versions do not support SkippedEntity
                pass


def safe_minidom_parse_string(xml_string):
    """Parse an XML string using minidom safely."""
    try:
        return minidom.parseString(xml_string, parser=ProtectedExpatParser())
    except sax.SAXParseException:
        raise expat.ExpatError()
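A sketch of the hardened parser refusing an inline DTD; which exception surfaces (ValueError from the handler or ExpatError from the wrapper) is an assumption here, so both are caught:

safe_minidom_parse_string('<root>ok</root>')  # parses normally

evil = ('<!DOCTYPE lolz [<!ENTITY lol "lol">]>'
        '<root>&lol;</root>')
try:
    safe_minidom_parse_string(evil)
except (ValueError, expat.ExpatError):
    print('inline DTD refused')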