Update Oslo

New versions of openstack.common files.

Change-Id: I70fc4846252d1a0612007e7140ba2c21ca891bb2
François Rossigneux 2013-07-04 17:37:05 +02:00
parent ceae8641a1
commit 83a76bff67
36 changed files with 2393 additions and 659 deletions

climate/openstack/common/context.py

@ -0,0 +1,83 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple class that stores security context information in the web request.
Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""
import itertools
from climate.openstack.common import uuidutils
def generate_request_id():
return 'req-%s' % uuidutils.generate_uuid()
class RequestContext(object):
"""Helper class to represent useful information about a request context.
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
read_only=False, show_deleted=False, request_id=None):
self.auth_token = auth_token
self.user = user
self.tenant = tenant
self.is_admin = is_admin
self.read_only = read_only
self.show_deleted = show_deleted
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
def to_dict(self):
return {'user': self.user,
'tenant': self.tenant,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'auth_token': self.auth_token,
'request_id': self.request_id}
def get_admin_context(show_deleted=False):
context = RequestContext(None,
tenant=None,
is_admin=True,
show_deleted=show_deleted)
return context
def get_context_from_function_and_args(function, args, kwargs):
"""Find an arg of type RequestContext and return it.
This is useful in a couple of decorators where we don't
know much about the function we're wrapping.
"""
for arg in itertools.chain(kwargs.values(), args):
if isinstance(arg, RequestContext):
return arg
return None
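
A minimal sketch of how these helpers fit together (hypothetical caller code, assuming the module is importable as climate.openstack.common.context):

# Hypothetical usage sketch; not part of the commit.
from climate.openstack.common import context

ctx = context.RequestContext(auth_token='token', user='alice',
                             tenant='demo')
print(ctx.request_id)          # e.g. 'req-<uuid>'
print(ctx.to_dict()['user'])   # 'alice'

def has_context(*args, **kwargs):
    found = context.get_context_from_function_and_args(None, args, kwargs)
    return found is not None

print(has_context(1, 2, ctx=ctx))  # True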

climate/openstack/common/eventlet_backdoor.py

@ -16,8 +16,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import print_function
+import errno
import gc
+import os
import pprint
+import socket
import sys
import traceback
@ -26,18 +31,38 @@ import eventlet.backdoor
import greenlet
from oslo.config import cfg
+from climate.openstack.common.gettextutils import _
+from climate.openstack.common import log as logging
+help_for_backdoor_port = 'Acceptable ' + \
+    'values are 0, <port> and <start>:<end>, where 0 results in ' + \
+    'listening on a random tcp port number, <port> results in ' + \
+    'listening on the specified port number and not enabling backdoor ' + \
+    'if it is in use and <start>:<end> results in listening on the ' + \
+    'smallest unused port number within the specified range of port ' + \
+    'numbers. The chosen port is displayed in the service\'s log file.'
eventlet_backdoor_opts = [
-    cfg.IntOpt('backdoor_port',
+    cfg.StrOpt('backdoor_port',
               default=None,
-               help='port for eventlet backdoor to listen')
+               help='Enable eventlet backdoor. %s' % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
+LOG = logging.getLogger(__name__)
+class EventletBackdoorConfigValueError(Exception):
+    def __init__(self, port_range, help_msg, ex):
+        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
+               '%(help)s' %
+               {'range': port_range, 'ex': ex, 'help': help_msg})
+        super(EventletBackdoorConfigValueError, self).__init__(msg)
+        self.port_range = port_range
def _dont_use_this():
-    print "Don't use this, just disconnect instead"
+    print("Don't use this, just disconnect instead")
def _find_objects(t):
@ -46,16 +71,43 @@ def _find_objects(t):
def _print_greenthreads():
    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
-        print i, gt
+        print(i, gt)
        traceback.print_stack(gt.gr_frame)
-        print
+        print()
def _print_nativethreads():
    for threadId, stack in sys._current_frames().items():
-        print threadId
+        print(threadId)
        traceback.print_stack(stack)
-        print
+        print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
raise EventletBackdoorConfigValueError(port_range, ex,
help_for_backdoor_port)
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
@ -70,6 +122,8 @@ def initialize_if_enabled():
    if CONF.backdoor_port is None:
        return None
    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
@ -80,8 +134,13 @@ def initialize_if_enabled():
        pprint.pprint(val)
    sys.displayhook = displayhook
-    sock = eventlet.listen(('localhost', CONF.backdoor_port))
+    sock = _listen('localhost', start_port, end_port, eventlet.listen)
+    # In the case of backdoor port being zero, a port number is assigned by
+    # listen(). In any case, pull the port number out here.
    port = sock.getsockname()[1]
+    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
+             {'port': port, 'pid': os.getpid()})
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
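
The new option accepts '0', a single port, or a 'start:end' range. A minimal sketch of the parsing semantics (hypothetical caller code; assumes eventlet and oslo.config are installed so the module imports cleanly):

# Hypothetical usage sketch; not part of the commit.
from climate.openstack.common import eventlet_backdoor

print(eventlet_backdoor._parse_port_range('8000'))       # (8000, 8000)
print(eventlet_backdoor._parse_port_range('8000:8010'))  # (8000, 8010)
try:
    eventlet_backdoor._parse_port_range('8010:8000')     # end < start
except eventlet_backdoor.EventletBackdoorConfigValueError as exc:
    print(exc)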

climate/openstack/common/excutils.py

@ -19,16 +19,15 @@
Exception related utilities.
"""
-import contextlib
import logging
import sys
+import time
import traceback
from climate.openstack.common.gettextutils import _
-@contextlib.contextmanager
-def save_and_reraise_exception():
+class save_and_reraise_exception(object):
    """Save current exception, run some code and then re-raise.
    In some cases the exception context can be cleared, resulting in None
@ -40,12 +39,60 @@ def save_and_reraise_exception():
    To work around this, we save the exception state, run handler code, and
    then re-raise the original exception. If another exception occurs, the
    saved exception is logged and the new exception is re-raised.
+    In some cases the caller may not want to re-raise the exception, and
+    for those circumstances this context provides a reraise flag that
+    can be used to suppress the exception. For example:
+    except Exception:
+        with save_and_reraise_exception() as ctxt:
+            decide_if_need_reraise()
+            if not should_be_reraised:
+                ctxt.reraise = False
    """
-    type_, value, tb = sys.exc_info()
-    try:
-        yield
-    except Exception:
-        logging.error(_('Original exception being dropped: %s'),
-                      traceback.format_exception(type_, value, tb))
-        raise
-    raise type_, value, tb
+    def __init__(self):
+        self.reraise = True
+    def __enter__(self):
+        self.type_, self.value, self.tb, = sys.exc_info()
+        return self
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if exc_type is not None:
+            logging.error(_('Original exception being dropped: %s'),
+                          traceback.format_exception(self.type_,
+                                                     self.value,
+                                                     self.tb))
+            return False
+        if self.reraise:
+            raise self.type_, self.value, self.tb
def forever_retry_uncaught_exceptions(infunc):
def inner_func(*args, **kwargs):
last_log_time = 0
last_exc_message = None
exc_count = 0
while True:
try:
return infunc(*args, **kwargs)
except Exception as exc:
if exc.message == last_exc_message:
exc_count += 1
else:
exc_count = 1
# Do not log any more frequently than once a minute unless
# the exception message changes
cur_time = int(time.time())
if (cur_time - last_log_time > 60 or
exc.message != last_exc_message):
logging.exception(
_('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
last_log_time = cur_time
last_exc_message = exc.message
exc_count = 0
# This should be a very rare event. In case it isn't, do
# a sleep.
time.sleep(1)
return inner_func
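
A minimal sketch of the new reraise flag in action (hypothetical caller code; cleanup() stands in for real handler logic):

# Hypothetical usage sketch; not part of the commit.
from climate.openstack.common import excutils

def cleanup():
    print('cleaning up')

try:
    try:
        raise ValueError('boom')
    except Exception:
        with excutils.save_and_reraise_exception() as ctxt:
            cleanup()
            ctxt.reraise = False   # suppress instead of re-raising
except ValueError:
    print('not reached: reraise was disabled')
print('execution continues')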

climate/openstack/common/fileutils.py

@ -0,0 +1,110 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import os
from climate.openstack.common import excutils
from climate.openstack.common.gettextutils import _
from climate.openstack.common import log as logging
LOG = logging.getLogger(__name__)
_FILE_CACHE = {}
def ensure_tree(path):
"""Create a directory (and any ancestor directories required)
:param path: Directory to create
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
def read_cached_file(filename, force_reload=False):
"""Read from a file if it has been modified.
:param force_reload: Whether to reload the file.
:returns: A tuple with a boolean specifying if the data is fresh
or not.
"""
global _FILE_CACHE
if force_reload and filename in _FILE_CACHE:
del _FILE_CACHE[filename]
reloaded = False
mtime = os.path.getmtime(filename)
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug(_("Reloading cached file %s") % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
reloaded = True
return (reloaded, cache_info['data'])
def delete_if_exists(path):
"""Delete a file, but ignore file not found error.
:param path: File to delete
"""
try:
os.unlink(path)
except OSError as e:
if e.errno == errno.ENOENT:
return
else:
raise
@contextlib.contextmanager
def remove_path_on_error(path):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
:param path: File to work with
"""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
delete_if_exists(path)
def file_open(*args, **kwargs):
"""Open file
see built-in file() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return file(*args, **kwargs)
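
A minimal sketch of the helpers above (hypothetical caller code; the /tmp paths are placeholders):

# Hypothetical usage sketch; not part of the commit.
import os
from climate.openstack.common import fileutils

path = '/tmp/climate-demo/nested/dir'
fileutils.ensure_tree(path)               # mkdir -p behaviour
target = os.path.join(path, 'data.txt')
with fileutils.remove_path_on_error(target):
    with open(target, 'w') as f:
        f.write('hello')                  # on failure, target is deleted
print(fileutils.read_cached_file(target))   # (True, 'hello') first read
print(fileutils.read_cached_file(target))   # (False, 'hello') from cache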

climate/openstack/common/gettextutils.py

@ -2,6 +2,7 @@
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
+# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -23,11 +24,203 @@ Usual usage in an openstack.common module:
    from climate.openstack.common.gettextutils import _
"""
+import copy
import gettext
+import logging.handlers
+import os
+import UserString
+_localedir = os.environ.get('climate'.upper() + '_LOCALEDIR')
-t = gettext.translation('openstack-common', 'locale', fallback=True)
+_t = gettext.translation('climate', localedir=_localedir, fallback=True)
def _(msg):
-    return t.ugettext(msg)
+    return _t.ugettext(msg)
def install(domain):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
"""
gettext.install(domain,
localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
unicode=True)
"""
Lazy gettext functionality.
The following is an attempt to introduce a deferred way
to do translations on messages in OpenStack. We attempt to
override the standard _() function and % (format string) operation
to build Message objects that can later be translated when we have
more information. Also included is an example LogHandler that
translates Messages to an associated locale, effectively allowing
many logs, each with their own locale.
"""
def get_lazy_gettext(domain):
"""Assemble and return a lazy gettext function for a given domain.
Factory method for a project/module to get a lazy gettext function
for its own translation domain (i.e. nova, glance, cinder, etc.)
"""
def _lazy_gettext(msg):
"""Create and return a Message object.
Message encapsulates a string so that we can translate it later when
needed.
"""
return Message(msg, domain)
return _lazy_gettext
class Message(UserString.UserString, object):
"""Class used to encapsulate translatable messages."""
def __init__(self, msg, domain):
# _msg is the gettext msgid and should never change
self._msg = msg
self._left_extra_msg = ''
self._right_extra_msg = ''
self.params = None
self.locale = None
self.domain = domain
@property
def data(self):
# NOTE(mrodden): this should always resolve to a unicode string
# that best represents the state of the message currently
localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
if self.locale:
lang = gettext.translation(self.domain,
localedir=localedir,
languages=[self.locale],
fallback=True)
else:
# use system locale for translations
lang = gettext.translation(self.domain,
localedir=localedir,
fallback=True)
full_msg = (self._left_extra_msg +
lang.ugettext(self._msg) +
self._right_extra_msg)
if self.params is not None:
full_msg = full_msg % self.params
return unicode(full_msg)
def _save_parameters(self, other):
# we check for None later to see if
# we actually have parameters to inject,
# so encapsulate if our parameter is actually None
if other is None:
self.params = (other, )
else:
self.params = copy.deepcopy(other)
return self
# overrides to be more string-like
def __unicode__(self):
return self.data
def __str__(self):
return self.data.encode('utf-8')
def __getstate__(self):
to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
'domain', 'params', 'locale']
new_dict = self.__dict__.fromkeys(to_copy)
for attr in to_copy:
new_dict[attr] = copy.deepcopy(self.__dict__[attr])
return new_dict
def __setstate__(self, state):
for (k, v) in state.items():
setattr(self, k, v)
# operator overloads
def __add__(self, other):
copied = copy.deepcopy(self)
copied._right_extra_msg += other.__str__()
return copied
def __radd__(self, other):
copied = copy.deepcopy(self)
copied._left_extra_msg += other.__str__()
return copied
def __mod__(self, other):
# do a format string to catch and raise
# any possible KeyErrors from missing parameters
self.data % other
copied = copy.deepcopy(self)
return copied._save_parameters(other)
def __mul__(self, other):
return self.data * other
def __rmul__(self, other):
return other * self.data
def __getitem__(self, key):
return self.data[key]
def __getslice__(self, start, end):
return self.data.__getslice__(start, end)
def __getattribute__(self, name):
# NOTE(mrodden): handle lossy operations that we can't deal with yet
# These override the UserString implementation, since UserString
# uses our __class__ attribute to try and build a new message
# after running the inner data string through the operation.
# At that point, we have lost the gettext message id and can just
# safely resolve to a string instead.
ops = ['capitalize', 'center', 'decode', 'encode',
'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
if name in ops:
return getattr(self.data, name)
else:
return UserString.UserString.__getattribute__(self, name)
class LocaleHandler(logging.Handler):
"""Handler that can have a locale associated to translate Messages.
A quick example of how to utilize the Message class above.
LocaleHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating the internal Message.
"""
def __init__(self, locale, target):
"""Initialize a LocaleHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
logging.Handler.__init__(self)
self.locale = locale
self.target = target
def emit(self, record):
if isinstance(record.msg, Message):
# set the locale and resolve to a string
record.msg.locale = self.locale
self.target.emit(record)
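
A minimal sketch of the lazy-translation flow (hypothetical caller code; without an installed message catalog the text falls back to the original English):

# Hypothetical usage sketch; not part of the commit.
from climate.openstack.common import gettextutils

_ = gettextutils.get_lazy_gettext('climate')
msg = _('Hello %(name)s') % {'name': 'world'}   # still a Message object
print(isinstance(msg, gettextutils.Message))    # True
msg.locale = 'es'     # translation happens at output time, if a catalog exists
print(unicode(msg))   # falls back to u'Hello world' without a catalog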

climate/openstack/common/importutils.py

@ -24,7 +24,7 @@ import traceback
def import_class(import_str):
-    """Returns a class from a string including module and class"""
+    """Returns a class from a string including module and class."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)
@ -41,8 +41,9 @@ def import_object(import_str, *args, **kwargs):
def import_object_ns(name_space, import_str, *args, **kwargs):
-    """
-    Import a class and return an instance of it, first by trying
+    """Tries to import object from default namespace.
+
+    Imports a class and return an instance of it, first by trying
    to find the class in a default namespace, then failing back to
    a full path if not found in the default namespace.
    """

climate/openstack/common/jsonutils.py

@ -38,11 +38,24 @@ import functools
import inspect
import itertools
import json
+import types
import xmlrpclib
+import netaddr
+import six
from climate.openstack.common import timeutils
+_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
+                     inspect.isfunction, inspect.isgeneratorfunction,
+                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
+                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
+                     inspect.isabstract]
+_simple_types = (types.NoneType, int, basestring, bool, float, long)
def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.
@ -58,19 +71,32 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
    Therefore, convert_instances=True is lossy ... be aware.
    """
-    nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
-             inspect.isfunction, inspect.isgeneratorfunction,
-             inspect.isgenerator, inspect.istraceback, inspect.isframe,
-             inspect.iscode, inspect.isbuiltin, inspect.isroutine,
-             inspect.isabstract]
-    for test in nasty:
-        if test(value):
-            return unicode(value)
+    # handle obvious types first - order of basic types determined by running
+    # full tests on nova project, resulting in the following counts:
+    # 572754 <type 'NoneType'>
+    # 460353 <type 'int'>
+    # 379632 <type 'unicode'>
+    # 274610 <type 'str'>
+    # 199918 <type 'dict'>
+    # 114200 <type 'datetime.datetime'>
+    #  51817 <type 'bool'>
+    #  26164 <type 'list'>
+    #   6491 <type 'float'>
+    #    283 <type 'tuple'>
+    #     19 <type 'long'>
+    if isinstance(value, _simple_types):
+        return value
+    if isinstance(value, datetime.datetime):
+        if convert_datetime:
+            return timeutils.strtime(value)
+        else:
+            return value
-    # value of itertools.count doesn't get caught by inspects
-    # above and results in infinite loop when list(value) is called.
+    # value of itertools.count doesn't get caught by nasty_type_tests
+    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
-        return unicode(value)
+        return six.text_type(value)
    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
@ -91,17 +117,18 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
                              convert_datetime=convert_datetime,
                              level=level,
                              max_depth=max_depth)
+        if isinstance(value, dict):
+            return dict((k, recursive(v)) for k, v in value.iteritems())
+        elif isinstance(value, (list, tuple)):
+            return [recursive(lv) for lv in value]
        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])
-        if isinstance(value, (list, tuple)):
-            return [recursive(v) for v in value]
-        elif isinstance(value, dict):
-            return dict((k, recursive(v)) for k, v in value.iteritems())
-        elif convert_datetime and isinstance(value, datetime.datetime):
+        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
@ -111,12 +138,16 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
+        elif isinstance(value, netaddr.IPAddress):
+            return six.text_type(value)
        else:
+            if any(test(value) for test in _nasty_type_tests):
+                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
-        return unicode(value)
+        return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
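
A minimal sketch of to_primitive() via dumps() (hypothetical caller code):

# Hypothetical usage sketch; not part of the commit.
import datetime
from climate.openstack.common import jsonutils

record = {'when': datetime.datetime(2013, 7, 4, 17, 37),
          'count': 3, 'tags': ('a', 'b')}
# datetimes become strings, tuples become lists, and with the new
# fast path simple types pass through untouched
print(jsonutils.dumps(record))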

climate/openstack/common/log.py

@ -29,6 +29,7 @@ It also allows setting of formatting information through conf.
""" """
import ConfigParser
import cStringIO import cStringIO
import inspect import inspect
import itertools import itertools
@ -36,19 +37,17 @@ import logging
import logging.config
import logging.handlers
import os
-import stat
import sys
import traceback
from oslo.config import cfg
from climate.openstack.common.gettextutils import _
+from climate.openstack.common import importutils
from climate.openstack.common import jsonutils
from climate.openstack.common import local
-from climate.openstack.common import notifier
-_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
@ -73,11 +72,13 @@ logging_cli_opts = [
               'documentation for details on logging configuration '
               'files.'),
    cfg.StrOpt('log-format',
-               default=_DEFAULT_LOG_FORMAT,
+               default=None,
               metavar='FORMAT',
               help='A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
-                    'Default: %(default)s'),
+                    'This option is deprecated. Please use '
+                    'logging_context_format_string and '
+                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
@ -103,17 +104,14 @@ logging_cli_opts = [
generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
-                help='Log output to standard error'),
+                help='Log output to standard error')
-    cfg.StrOpt('logfile_mode',
-               default='0644',
-               help='Default file mode used when creating log files'),
]
log_opts = [
    cfg.StrOpt('logging_context_format_string',
-               default='%(asctime)s.%(msecs)03d %(levelname)s %(name)s '
-                       '[%(request_id)s %(user)s %(tenant)s] %(instance)s'
-                       '%(message)s',
+               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
+                       '%(name)s [%(request_id)s %(user)s %(tenant)s] '
+                       '%(instance)s%(message)s',
               help='format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
@ -210,7 +208,27 @@ def _get_log_file_path(binary=None):
    return '%s.log' % (os.path.join(logdir, binary),)
-class ContextAdapter(logging.LoggerAdapter):
+class BaseLoggerAdapter(logging.LoggerAdapter):
+    def audit(self, msg, *args, **kwargs):
+        self.log(logging.AUDIT, msg, *args, **kwargs)
+class LazyAdapter(BaseLoggerAdapter):
+    def __init__(self, name='unknown', version='unknown'):
+        self._logger = None
+        self.extra = {}
+        self.name = name
+        self.version = version
+    @property
+    def logger(self):
+        if not self._logger:
+            self._logger = getLogger(self.name, self.version)
+        return self._logger
+class ContextAdapter(BaseLoggerAdapter):
    warn = logging.LoggerAdapter.warning
    def __init__(self, logger, project_name, version_string):
@ -218,8 +236,9 @@ class ContextAdapter(logging.LoggerAdapter):
        self.project = project_name
        self.version = version_string
-    def audit(self, msg, *args, **kwargs):
-        self.log(logging.AUDIT, msg, *args, **kwargs)
+    @property
+    def handlers(self):
+        return self.logger.handlers
    def deprecated(self, msg, *args, **kwargs):
        stdmsg = _("Deprecated: %s") % msg
@ -303,17 +322,6 @@ class JSONFormatter(logging.Formatter):
        return jsonutils.dumps(message)
-class PublishErrorsHandler(logging.Handler):
-    def emit(self, record):
-        if ('climate.openstack.common.notifier.log_notifier' in
-                CONF.notification_driver):
-            return
-        notifier.api.notify(None, 'error.publisher',
-                            'error_notification',
-                            notifier.api.ERROR,
-                            dict(error=record.msg))
def _create_logging_excepthook(product_name):
    def logging_excepthook(type, value, tb):
        extra = {}
@ -323,10 +331,30 @@ def _create_logging_excepthook(product_name):
    return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config):
try:
logging.config.fileConfig(log_config)
except ConfigParser.Error as exc:
raise LogConfigError(log_config, str(exc))
def setup(product_name):
    """Setup logging."""
    if CONF.log_config:
-        logging.config.fileConfig(CONF.log_config)
+        _load_log_config(CONF.log_config)
    else:
        _setup_logging_from_conf()
    sys.excepthook = _create_logging_excepthook(product_name)
@ -378,11 +406,6 @@ def _setup_logging_from_conf():
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)
-        mode = int(CONF.logfile_mode, 8)
-        st = os.stat(logpath)
-        if st.st_mode != (stat.S_IFREG | mode):
-            os.chmod(logpath, mode)
    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)
@ -394,15 +417,22 @@ def _setup_logging_from_conf():
        log_root.addHandler(streamlog)
    if CONF.publish_errors:
-        log_root.addHandler(PublishErrorsHandler(logging.ERROR))
+        handler = importutils.import_object(
+            "climate.openstack.common.log_handler.PublishErrorsHandler",
+            logging.ERROR)
+        log_root.addHandler(handler)
+    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
-        datefmt = CONF.log_date_format
+        # NOTE(alaski): CONF.log_format overrides everything currently. This
+        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
+            log_root.info('Deprecated: log_format is now deprecated and will '
+                          'be removed in the next release')
        else:
-            handler.setFormatter(LegacyFormatter(datefmt=datefmt))
+            handler.setFormatter(ContextFormatter(datefmt=datefmt))
    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
@ -411,14 +441,11 @@ def _setup_logging_from_conf():
    else:
        log_root.setLevel(logging.WARNING)
-    level = logging.NOTSET
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)
-        for handler in log_root.handlers:
-            logger.addHandler(handler)
_loggers = {}
@ -431,6 +458,16 @@ def getLogger(name='unknown', version='unknown'):
    return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""
@ -442,7 +479,7 @@ class WritableLogger(object):
        self.logger.log(self.level, msg)
-class LegacyFormatter(logging.Formatter):
+class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.
    The flags used to set format strings are: logging_context_format_string
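
A minimal sketch contrasting the eager and lazy loggers introduced above (hypothetical caller code; setup() must still be called for output to appear):

# Hypothetical usage sketch; not part of the commit.
from climate.openstack.common import log as logging

LOG = logging.getLogger(__name__)        # eager ContextAdapter
LAZY = logging.getLazyLogger(__name__)   # real logger built on first use

def do_work():
    LAZY.info('work started')            # logger created here, not at import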

climate/openstack/common/loopingcall.py

@ -46,12 +46,23 @@ class LoopingCallDone(Exception):
        self.retvalue = retvalue
-class LoopingCall(object):
+class LoopingCallBase(object):
    def __init__(self, f=None, *args, **kw):
        self.args = args
        self.kw = kw
        self.f = f
        self._running = False
+        self.done = None
+    def stop(self):
+        self._running = False
+    def wait(self):
+        return self.done.wait()
+class FixedIntervalLoopingCall(LoopingCallBase):
+    """A fixed interval looping call."""
    def start(self, interval, initial_delay=None):
        self._running = True
@ -73,11 +84,11 @@ class LoopingCall(object):
                    LOG.warn(_('task run outlasted interval by %s sec') %
                             -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
-            except LoopingCallDone, e:
+            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
-                LOG.exception(_('in looping call'))
+                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
@ -88,8 +99,49 @@ class LoopingCall(object):
        greenthread.spawn_n(_inner)
        return self.done
-    def stop(self):
-        self._running = False
-    def wait(self):
-        return self.done.wait()
+# TODO(mikal): this class name is deprecated in Havana and should be removed
+# in the I release
+LoopingCall = FixedIntervalLoopingCall
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
The function called should return how long to sleep for before being
called again.
"""
def start(self, initial_delay=None, periodic_interval_max=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
idle = self.f(*self.args, **self.kw)
if not self._running:
break
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug(_('Dynamic looping call sleeping for %.02f '
'seconds'), idle)
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in dynamic looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
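
A minimal sketch of the fixed and dynamic looping calls (hypothetical caller code; both spawn eventlet green threads, so eventlet must be installed):

# Hypothetical usage sketch; not part of the commit.
from climate.openstack.common import loopingcall

def tick():
    print('tick')

timer = loopingcall.FixedIntervalLoopingCall(tick)
timer.start(interval=1.0)        # fires every second
timer.stop()

def backoff():
    print('dynamic tick')
    return 2.0                   # sleep this many seconds before the next call

dyn = loopingcall.DynamicLoopingCall(backoff)
dyn.start(periodic_interval_max=10.0)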

climate/openstack/common/network_utils.py

@ -19,14 +19,12 @@
Network-related utilities and helper functions.
"""
-import logging
+import urlparse
-LOG = logging.getLogger(__name__)
def parse_host_port(address, default_port=None):
-    """
-    Interpret a string as a host:port pair.
+    """Interpret a string as a host:port pair.
    An IPv6 address MUST be escaped if accompanied by a port,
    because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
    means both [2001:db8:85a3::8a2e:370:7334] and
@ -66,3 +64,18 @@ def parse_host_port(address, default_port=None):
        port = default_port
    return (host, None if port is None else int(port))
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL using urlparse.urlsplit(), splitting query and fragments.
This function papers over Python issue9374 when needed.
The parameters are the same as urlparse.urlsplit.
"""
scheme, netloc, path, query, fragment = urlparse.urlsplit(
url, scheme, allow_fragments)
if allow_fragments and '#' in path:
path, fragment = path.split('#', 1)
if '?' in path:
path, query = path.split('?', 1)
return urlparse.SplitResult(scheme, netloc, path, query, fragment)
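
A minimal sketch of the helpers (hypothetical caller code; exact SplitResult rendering depends on the Python 2 version in use):

# Hypothetical usage sketch; not part of the commit.
from climate.openstack.common import network_utils

print(network_utils.parse_host_port('example.com:8080'))
# ('example.com', 8080)
print(network_utils.parse_host_port('[2001:db8::1]:443'))
# ('2001:db8::1', 443)
print(network_utils.urlsplit('tcp://host:5672/vhost?x=1#frag'))
# SplitResult(scheme='tcp', netloc='host:5672', path='/vhost',
#             query='x=1', fragment='frag')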

climate/openstack/common/notifier/api.py

@ -30,7 +30,6 @@ LOG = logging.getLogger(__name__)
notifier_opts = [
    cfg.MultiStrOpt('notification_driver',
                    default=[],
-                    deprecated_name='list_notifier_drivers',
                    help='Driver or drivers to handle sending notifications'),
    cfg.StrOpt('default_notification_level',
               default='INFO',
@ -57,7 +56,7 @@ class BadPriorityException(Exception):
def notify_decorator(name, fn):
-    """ decorator for notify which is used from utils.monkey_patch()
+    """Decorator for notify which is used from utils.monkey_patch().
    :param name: name of the function
    :param function: - object of the function

climate/openstack/common/notifier/log_notifier.py

@ -24,7 +24,9 @@ CONF = cfg.CONF
def notify(_context, message):
    """Notifies the recipient of the desired event given the model.
-    Log notifications using openstack's default logging system"""
+    Log notifications using openstack's default logging system.
+    """
    priority = message.get('priority',
                           CONF.default_notification_level)

climate/openstack/common/notifier/no_op_notifier.py

@ -15,5 +15,5 @@
def notify(_context, message):
-    """Notifies the recipient of the desired event given the model"""
+    """Notifies the recipient of the desired event given the model."""
    pass

climate/openstack/common/notifier/rpc_notifier.py

@ -31,7 +31,7 @@ CONF.register_opt(notification_topic_opt)
def notify(context, message):
-    """Sends a notification via RPC"""
+    """Sends a notification via RPC."""
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority',

climate/openstack/common/notifier/rpc_notifier2.py

@ -37,7 +37,7 @@ CONF.register_opt(notification_topic_opt, opt_group)
def notify(context, message):
-    """Sends a notification via RPC"""
+    """Sends a notification via RPC."""
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority',

climate/openstack/common/policy.py

@ -0,0 +1,846 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import re
import urllib
import urllib2
from oslo.config import cfg
import six
from climate.openstack.common import fileutils
from climate.openstack.common.gettextutils import _
from climate.openstack.common import jsonutils
from climate.openstack.common import log as logging
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('JSON file containing policy')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Rule enforced when requested rule is not found')),
]
CONF = cfg.CONF
CONF.register_opts(policy_opts)
LOG = logging.getLogger(__name__)
_checks = {}
class PolicyNotAuthorized(Exception):
def __init__(self, rule):
msg = _("Policy doesn't allow %s to be performed.") % rule
super(PolicyNotAuthorized, self).__init__(msg)
class Rules(dict):
"""A store for rules. Handles the default_rule setting directly."""
@classmethod
def load_json(cls, data, default_rule=None):
"""Allow loading of JSON rule data."""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule or self.default_rule not in self:
raise KeyError(key)
return self[self.default_rule]
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
class Enforcer(object):
"""Responsible for loading and enforcing rules.
:param policy_file: Custom policy file to use, if none is
specified, `CONF.policy_file` will be
used.
:param rules: Default dictionary / Rules to use. It will be
considered just in the first instantiation. If
`load_rules(True)`, `clear()` or `set_rules(True)`
is called this will be overwritten.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
"""
def __init__(self, policy_file=None, rules=None, default_rule=None):
self.rules = Rules(rules)
self.default_rule = default_rule or CONF.policy_default_rule
self.policy_path = None
self.policy_file = policy_file or CONF.policy_file
def set_rules(self, rules, overwrite=True):
"""Create a new Rules object based on the provided dict of rules.
:param rules: New rules to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
"""
if not isinstance(rules, dict):
raise TypeError(_("Rules must be an instance of dict or Rules, "
"got %s instead") % type(rules))
if overwrite:
self.rules = Rules(rules)
else:
self.update(rules)
def clear(self):
"""Clears Enforcer rules, policy's cache and policy's path."""
self.set_rules({})
self.policy_path = None
def load_rules(self, force_reload=False):
"""Loads policy_path's rules.
Policy file is cached and will be reloaded if modified.
:param force_reload: Whether to overwrite current rules.
"""
if not self.policy_path:
self.policy_path = self._get_policy_path()
reloaded, data = fileutils.read_cached_file(self.policy_path,
force_reload=force_reload)
if reloaded:
rules = Rules.load_json(data, self.default_rule)
self.set_rules(rules)
LOG.debug(_("Rules successfully reloaded"))
def _get_policy_path(self):
"""Locate the policy json data file.
:param policy_file: Custom policy file to locate.
:returns: The policy path
:raises: ConfigFilesNotFoundError if the file couldn't
be located.
"""
policy_file = CONF.find_file(self.policy_file)
if policy_file:
return policy_file
raise cfg.ConfigFilesNotFoundError(path=CONF.policy_file)
def enforce(self, rule, target, creds, do_raise=False,
exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials.
:param rule: A string or BaseCheck instance specifying the rule
to evaluate.
:param target: As much information about the object being operated
on as possible, as a dictionary.
:param creds: As much information about the user performing the
action as possible, as a dictionary.
:param do_raise: Whether to raise an exception or not if check
fails.
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to check() (both
positional and keyword arguments) will be passed to
the exception class. If not specified, PolicyNotAuthorized
will be used.
:return: Returns False if the policy does not allow the action and
exc is not provided; otherwise, returns a value that
evaluates to True. Note: for rules using the "case"
expression, this True value will be the specified string
from the expression.
"""
# NOTE(flaper87): Not logging target or creds to avoid
# potential security issues.
LOG.debug(_("Rule %s will be now enforced") % rule)
self.load_rules()
# Allow the rule to be a Check tree
if isinstance(rule, BaseCheck):
result = rule(target, creds, self)
elif not self.rules:
# No rules to reference means we're going to fail closed
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self)
except KeyError:
LOG.debug(_("Rule [%s] doesn't exist") % rule)
# If the rule doesn't exist, fail closed
result = False
# If it is False, raise the exception if requested
if do_raise and not result:
if exc:
raise exc(*args, **kwargs)
raise PolicyNotAuthorized(rule)
return result
class BaseCheck(object):
"""Abstract base class for Check classes."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __str__(self):
"""String representation of the Check tree rooted at this node."""
pass
@abc.abstractmethod
def __call__(self, target, cred):
"""Triggers if instance of the class is called.
Performs the check. Returns False to reject the access or a
true value (not necessary True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""A policy check that always returns False (disallow)."""
def __str__(self):
"""Return a string representation of this check."""
return "!"
def __call__(self, target, cred):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""A policy check that always returns True (allow)."""
def __str__(self):
"""Return a string representation of this check."""
return "@"
def __call__(self, target, cred):
"""Check the policy."""
return True
class Check(BaseCheck):
"""A base class to allow for user-defined policy checks."""
def __init__(self, kind, match):
"""Initiates Check instance.
:param kind: The kind of the check, i.e., the field before the
':'.
:param match: The match of the check, i.e., the field after
the ':'.
"""
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return "%s:%s" % (self.kind, self.match)
class NotCheck(BaseCheck):
"""Implements the "not" logical operator.
A policy check that inverts the result of another policy check.
"""
def __init__(self, rule):
"""Initialize the 'not' check.
:param rule: The rule to negate. Must be a Check.
"""
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return "not %s" % self.rule
def __call__(self, target, cred):
"""Check the policy.
Returns the logical inverse of the wrapped check.
"""
return not self.rule(target, cred)
class AndCheck(BaseCheck):
"""Implements the "and" logical operator.
A policy check that requires that a list of other checks all return True.
"""
def __init__(self, rules):
"""Initialize the 'and' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' and '.join(str(r) for r in self.rules)
def __call__(self, target, cred):
"""Check the policy.
Requires that all rules accept in order to return True.
"""
for rule in self.rules:
if not rule(target, cred):
return False
return True
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the AndCheck object for convenience.
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""Implements the "or" operator.
A policy check that requires that at least one of a list of other
checks returns True.
"""
def __init__(self, rules):
"""Initialize the 'or' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' or '.join(str(r) for r in self.rules)
def __call__(self, target, cred):
"""Check the policy.
Requires that at least one rule accept in order to return True.
"""
for rule in self.rules:
if rule(target, cred):
return True
return False
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def _parse_check(rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special checks
if rule == '!':
return FalseCheck()
elif rule == '@':
return TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_("Failed to understand rule %s") % rule)
# If the rule is invalid, we'll fail closed
return FalseCheck()
# Find what implements the check
if kind in _checks:
return _checks[kind](kind, match)
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_("No handler for matches of kind %s") % kind)
return FalseCheck()
def _parse_list_rule(rule):
"""Translates the old list-of-lists syntax into a tree of Check objects.
Provided for backwards compatibility.
"""
# Empty rule defaults to True
if not rule:
return TrueCheck()
# Outer list is joined by "or"; inner list by "and"
or_list = []
for inner_rule in rule:
# Elide empty inner lists
if not inner_rule:
continue
# Handle bare strings
if isinstance(inner_rule, basestring):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
and_list = [_parse_check(r) for r in inner_rule]
# Append the appropriate check to the or_list
if len(and_list) == 1:
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
# If we have only one check, omit the "or"
if not or_list:
return FalseCheck()
elif len(or_list) == 1:
return or_list[0]
return OrCheck(or_list)
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'


class ParseStateMeta(type):
    """Metaclass for the ParseState class.

    Facilitates identifying reduction methods.
    """

    def __new__(mcs, name, bases, cls_dict):
        """Create the class.

        Injects the 'reducers' list, a list of tuples matching token
        sequences to the names of the corresponding reduction methods.
        """

        reducers = []

        for key, value in cls_dict.items():
            if not hasattr(value, 'reducers'):
                continue
            for reduction in value.reducers:
                reducers.append((reduction, key))

        cls_dict['reducers'] = reducers

        return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)


def reducer(*tokens):
    """Decorator for reduction methods.

    Arguments are a sequence of tokens, in order, which should trigger running
    this reduction method.
    """

    def decorator(func):
        # Make sure we have a list of reducer sequences
        if not hasattr(func, 'reducers'):
            func.reducers = []

        # Add the tokens to the list of reducer sequences
        func.reducers.append(list(tokens))

        return func

    return decorator


class ParseState(object):
    """Implement the core of parsing the policy language.

    Uses a greedy reduction algorithm to reduce a sequence of tokens into
    a single terminal, the value of which will be the root of the Check tree.

    Note: error reporting is rather lacking.  The best we can get with
    this parser formulation is an overall "parse failed" error.
    Fortunately, the policy language is simple enough that this
    shouldn't be that big a problem.
    """

    __metaclass__ = ParseStateMeta

    def __init__(self):
        """Initialize the ParseState."""

        self.tokens = []
        self.values = []

    def reduce(self):
        """Perform a greedy reduction of the token stream.

        If a reducer method matches, it will be executed, then the
        reduce() method will be called recursively to search for any more
        possible reductions.
        """

        for reduction, methname in self.reducers:
            if (len(self.tokens) >= len(reduction) and
                    self.tokens[-len(reduction):] == reduction):
                # Get the reduction method
                meth = getattr(self, methname)

                # Reduce the token stream
                results = meth(*self.values[-len(reduction):])

                # Update the tokens and values
                self.tokens[-len(reduction):] = [r[0] for r in results]
                self.values[-len(reduction):] = [r[1] for r in results]

                # Check for any more reductions
                return self.reduce()

    def shift(self, tok, value):
        """Adds one more token to the state.  Calls reduce()."""

        self.tokens.append(tok)
        self.values.append(value)

        # Do a greedy reduce...
        self.reduce()

    @property
    def result(self):
        """Obtain the final result of the parse.

        Raises ValueError if the parse failed to reduce to a single result.
        """

        if len(self.values) != 1:
            raise ValueError("Could not parse rule")
        return self.values[0]

    @reducer('(', 'check', ')')
    @reducer('(', 'and_expr', ')')
    @reducer('(', 'or_expr', ')')
    def _wrap_check(self, _p1, check, _p2):
        """Turn parenthesized expressions into a 'check' token."""

        return [('check', check)]

    @reducer('check', 'and', 'check')
    def _make_and_expr(self, check1, _and, check2):
        """Create an 'and_expr'.

        Join two checks by the 'and' operator.
        """

        return [('and_expr', AndCheck([check1, check2]))]

    @reducer('and_expr', 'and', 'check')
    def _extend_and_expr(self, and_expr, _and, check):
        """Extend an 'and_expr' by adding one more check."""

        return [('and_expr', and_expr.add_check(check))]

    @reducer('check', 'or', 'check')
    def _make_or_expr(self, check1, _or, check2):
        """Create an 'or_expr'.

        Join two checks by the 'or' operator.
        """

        return [('or_expr', OrCheck([check1, check2]))]

    @reducer('or_expr', 'or', 'check')
    def _extend_or_expr(self, or_expr, _or, check):
        """Extend an 'or_expr' by adding one more check."""

        return [('or_expr', or_expr.add_check(check))]

    @reducer('not', 'check')
    def _make_not_expr(self, _not, check):
        """Invert the result of another check."""

        return [('check', NotCheck(check))]
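

# Illustration (hypothetical, not part of the module): shifting the three
# tokens of "check and check" through a ParseState drives the greedy
# reducers above:
#
#     state = ParseState()
#     state.shift('check', RoleCheck('role', 'admin'))
#     state.shift('and', 'and')      # no reducer sequence matches yet
#     state.shift('check', RoleCheck('role', 'ops'))
#     # _make_and_expr fires, leaving one 'and_expr' token whose value
#     # is an AndCheck; state.result returns it.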


def _parse_text_rule(rule):
    """Parses policy to the tree.

    Translates a policy written in the policy language into a tree of
    Check objects.
    """

    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_("Failed to understand rule %(rule)r") % locals())

        # Fail closed
        return FalseCheck()


def parse_rule(rule):
    """Parses a policy rule into a tree of Check objects."""

    # If the rule is a string, it's in the policy language
    if isinstance(rule, basestring):
        return _parse_text_rule(rule)
    return _parse_list_rule(rule)
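

# Usage sketch (hypothetical, not part of the module): parse_rule() turns
# a policy-language string into a callable Check tree:
#
#     check = parse_rule('role:admin or is_admin:True')
#     check({'project_id': 'p1'},                     # target
#           {'roles': ['admin'], 'is_admin': True},   # creds
#           None)                                     # enforcer -> True
#
# An unparseable string logs an error and yields FalseCheck(), so a bad
# rule fails closed rather than open.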


def register(name, func=None):
    """Register a function or Check class as a policy check.

    :param name: Gives the name of the check type, e.g., 'rule',
                 'role', etc.  If name is None, a default check type
                 will be registered.
    :param func: If given, provides the function or class to register.
                 If not given, returns a function taking one argument
                 to specify the function or class to register,
                 allowing use as a decorator.
    """

    # Perform the actual decoration by registering the function or
    # class.  Returns the function or class for compliance with the
    # decorator interface.
    def decorator(func):
        _checks[name] = func
        return func

    # If the function or class is given, do the registration
    if func:
        return decorator(func)

    return decorator
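

# Usage sketch (hypothetical, not part of the module): register() is
# normally applied as a decorator, as the classes below do; a custom
# check type could be added the same way:
#
#     @register('weekday')
#     class WeekdayCheck(Check):
#         def __call__(self, target, creds, enforcer):
#             # Match rules like "weekday:mon"
#             return time.strftime('%a').lower() == self.match.lower()
#
# After registration, _parse_check() resolves 'weekday:mon' to a
# WeekdayCheck instance.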
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Recursively checks credentials based on the defined rules."""
try:
return enforcer.rules[self.match](target, creds, enforcer)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check http: rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly 'True'.
"""
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urllib.urlencode(data)
f = urllib2.urlopen(url, post_data)
return f.read() == "True"
@register(None)
class GenericCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
"""
# TODO(termie): do dict inspection via dot syntax
match = self.match % target
if self.kind in creds:
return match == six.text_type(creds[self.kind])
return False
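

# Illustration (hypothetical, not part of the module): for the rule
# 'tenant:%(tenant_id)s', GenericCheck is built with kind='tenant' and
# match='%(tenant_id)s'.  Against target={'tenant_id': 't1'} and
# creds={'tenant': 't1'}, the substituted match is 't1', which equals
# six.text_type(creds['tenant']), so the check passes.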

View File

@@ -26,13 +26,13 @@ For some wrappers that add message versioning to rpc, see:
 """

 import inspect
-import logging

 from oslo.config import cfg

 from climate.openstack.common.gettextutils import _
 from climate.openstack.common import importutils
 from climate.openstack.common import local
+from climate.openstack.common import log as logging


 LOG = logging.getLogger(__name__)

View File

@@ -34,10 +34,6 @@ from eventlet import greenpool
 from eventlet import pools
 from eventlet import queue
 from eventlet import semaphore
-# TODO(pekowsk): Remove import cfg and below comment in Havana.
-# This import should no longer be needed when the amqp_rpc_single_reply_queue
-# option is removed.
-from oslo.config import cfg

 from climate.openstack.common import excutils
 from climate.openstack.common.gettextutils import _
@@ -46,16 +42,6 @@ from climate.openstack.common import log as logging
 from climate.openstack.common.rpc import common as rpc_common

-# TODO(pekowski): Remove this option in Havana.
-amqp_opts = [
-    cfg.BoolOpt('amqp_rpc_single_reply_queue',
-                default=False,
-                help='Enable a fast single reply queue if using AMQP based '
-                'RPC like RabbitMQ or Qpid.'),
-]
-
-cfg.CONF.register_opts(amqp_opts)
-
 UNIQUE_ID = '_unique_id'
 LOG = logging.getLogger(__name__)
@@ -83,7 +69,7 @@ class Pool(pools.Pool):
         # is the above "while loop" gets all the cached connections from the
         # pool and closes them, but never returns them to the pool, a pool
         # leak.  The unit tests hang waiting for an item to be returned to the
-        # pool.  The unit tests get here via the teatDown() method.  In the run
+        # pool.  The unit tests get here via the tearDown() method.  In the run
         # time code, it gets here via cleanup() and only appears in service.py
         # just before doing a sys.exit(), so cleanup() only happens once and
         # the leakage is not a problem.
@@ -102,19 +88,19 @@ def get_connection_pool(conf, connection_cls):

 class ConnectionContext(rpc_common.Connection):
-    """The class that is actually returned to the caller of
-    create_connection().  This is essentially a wrapper around
-    Connection that supports 'with'.  It can also return a new
-    Connection, or one from a pool.  The function will also catch
-    when an instance of this class is to be deleted.  With that
-    we can return Connections to the pool on exceptions and so
-    forth without making the caller be responsible for catching
-    them.  If possible the function makes sure to return a
-    connection to the pool.
+    """The class that is actually returned to the create_connection() caller.
+
+    This is essentially a wrapper around Connection that supports 'with'.
+    It can also return a new Connection, or one from a pool.
+
+    The function will also catch when an instance of this class is to be
+    deleted.  With that we can return Connections to the pool on exceptions
+    and so forth without making the caller be responsible for catching them.
+    If possible the function makes sure to return a connection to the pool.
     """

     def __init__(self, conf, connection_pool, pooled=True, server_params=None):
-        """Create a new connection, or get one from the pool"""
+        """Create a new connection, or get one from the pool."""
         self.connection = None
         self.conf = conf
         self.connection_pool = connection_pool
@@ -127,7 +113,7 @@ class ConnectionContext(rpc_common.Connection):
         self.pooled = pooled

     def __enter__(self):
-        """When with ConnectionContext() is used, return self"""
+        """When with ConnectionContext() is used, return self."""
         return self

     def _done(self):
@@ -165,17 +151,19 @@ class ConnectionContext(rpc_common.Connection):
     def create_worker(self, topic, proxy, pool_name):
         self.connection.create_worker(topic, proxy, pool_name)

-    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
+    def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
+                           ack_on_error=True):
         self.connection.join_consumer_pool(callback,
                                            pool_name,
                                            topic,
-                                           exchange_name)
+                                           exchange_name,
+                                           ack_on_error)

     def consume_in_thread(self):
         self.connection.consume_in_thread()

     def __getattr__(self, key):
-        """Proxy all other calls to the Connection instance"""
+        """Proxy all other calls to the Connection instance."""
         if self.connection:
             return getattr(self.connection, key)
         else:
@@ -183,7 +171,7 @@ class ConnectionContext(rpc_common.Connection):

 class ReplyProxy(ConnectionContext):
-    """ Connection class for RPC replies / callbacks """
+    """Connection class for RPC replies / callbacks."""
     def __init__(self, conf, connection_pool):
         self._call_waiters = {}
         self._num_call_waiters = 0
@@ -197,8 +185,10 @@ class ReplyProxy(ConnectionContext):
         msg_id = message_data.pop('_msg_id', None)
         waiter = self._call_waiters.get(msg_id)
         if not waiter:
-            LOG.warn(_('no calling threads waiting for msg_id : %s'
-                       ', message : %s') % (msg_id, message_data))
+            LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
+                       ', message : %(data)s'), {'msg_id': msg_id,
+                                                 'data': message_data})
+            LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
         else:
             waiter.put(message_data)
@@ -231,12 +221,7 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
         failure = rpc_common.serialize_remote_exception(failure,
                                                         log_failure)

-    try:
-        msg = {'result': reply, 'failure': failure}
-    except TypeError:
-        msg = {'result': dict((k, repr(v))
-               for k, v in reply.__dict__.iteritems()),
-               'failure': failure}
+    msg = {'result': reply, 'failure': failure}
     if ending:
         msg['ending'] = True
     _add_unique_id(msg)
@@ -251,7 +236,7 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,

 class RpcContext(rpc_common.CommonRpcContext):
-    """Context that supports replying to a rpc.call"""
+    """Context that supports replying to a rpc.call."""
     def __init__(self, **kwargs):
         self.msg_id = kwargs.pop('msg_id', None)
         self.reply_q = kwargs.pop('reply_q', None)
@@ -338,8 +323,9 @@ def _add_unique_id(msg):

 class _ThreadPoolWithWait(object):
-    """Base class for a delayed invocation manager used by
-    the Connection class to start up green threads
+    """Base class for a delayed invocation manager.
+
+    Used by the Connection class to start up green threads
     to handle incoming messages.
     """
@@ -354,12 +340,14 @@ class _ThreadPoolWithWait(object):

 class CallbackWrapper(_ThreadPoolWithWait):
-    """Wraps a straight callback to allow it to be invoked in a green
-    thread.
+    """Wraps a straight callback.
+
+    Allows it to be invoked in a green thread.
     """
+
     def __init__(self, conf, callback, connection_pool):
-        """
+        """Initiates CallbackWrapper object.
+
         :param conf: cfg.CONF instance
         :param callback: a callable (probably a function)
         :param connection_pool: connection pool as returned by
@@ -408,15 +396,17 @@ class ProxyCallback(_ThreadPoolWithWait):
         ctxt = unpack_context(self.conf, message_data)
         method = message_data.get('method')
         args = message_data.get('args', {})
-        version = message_data.get('version', None)
+        version = message_data.get('version')
+        namespace = message_data.get('namespace')
         if not method:
             LOG.warn(_('no method for message: %s') % message_data)
             ctxt.reply(_('No method for message: %s') % message_data,
                        connection_pool=self.connection_pool)
             return
-        self.pool.spawn_n(self._process_data, ctxt, version, method, args)
+        self.pool.spawn_n(self._process_data, ctxt, version, method,
+                          namespace, args)

-    def _process_data(self, ctxt, version, method, args):
+    def _process_data(self, ctxt, version, method, namespace, args):
         """Process a message in a new thread.

         If the proxy object we have has a dispatch method
@@ -427,7 +417,8 @@ class ProxyCallback(_ThreadPoolWithWait):
         """
         ctxt.update_store()
         try:
-            rval = self.proxy.dispatch(ctxt, version, method, **args)
+            rval = self.proxy.dispatch(ctxt, version, method, namespace,
+                                       **args)
             # Check if the result was a generator
             if inspect.isgenerator(rval):
                 for x in rval:
@@ -487,7 +478,7 @@ class MulticallProxyWaiter(object):
         return result

     def __iter__(self):
-        """Return a result until we get a reply with an 'ending" flag"""
+        """Return a result until we get a reply with an 'ending' flag."""
         if self._done:
             raise StopIteration
         while True:
@@ -495,7 +486,6 @@ class MulticallProxyWaiter(object):
                 data = self._dataqueue.get(timeout=self._timeout)
                 result = self._process_data(data)
             except queue.Empty:
-                LOG.exception(_('Timed out waiting for RPC response.'))
                 self.done()
                 raise rpc_common.Timeout()
             except Exception:
@@ -510,61 +500,8 @@ class MulticallProxyWaiter(object):
             yield result

-#TODO(pekowski): Remove MulticallWaiter() in Havana.
-class MulticallWaiter(object):
-    def __init__(self, conf, connection, timeout):
-        self._connection = connection
-        self._iterator = connection.iterconsume(timeout=timeout or
-                                                conf.rpc_response_timeout)
-        self._result = None
-        self._done = False
-        self._got_ending = False
-        self._conf = conf
-        self.msg_id_cache = _MsgIdCache()
-
-    def done(self):
-        if self._done:
-            return
-        self._done = True
-        self._iterator.close()
-        self._iterator = None
-        self._connection.close()
-
-    def __call__(self, data):
-        """The consume() callback will call this.  Store the result."""
-        self.msg_id_cache.check_duplicate_message(data)
-        if data['failure']:
-            failure = data['failure']
-            self._result = rpc_common.deserialize_remote_exception(self._conf,
-                                                                   failure)
-        elif data.get('ending', False):
-            self._got_ending = True
-        else:
-            self._result = data['result']
-
-    def __iter__(self):
-        """Return a result until we get a 'None' response from consumer"""
-        if self._done:
-            raise StopIteration
-        while True:
-            try:
-                self._iterator.next()
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    self.done()
-            if self._got_ending:
-                self.done()
-                raise StopIteration
-            result = self._result
-            if isinstance(result, Exception):
-                self.done()
-                raise result
-            yield result
-
-
 def create_connection(conf, new, connection_pool):
-    """Create a connection"""
+    """Create a connection."""
     return ConnectionContext(conf, connection_pool, pooled=not new)
@@ -573,14 +510,6 @@ _reply_proxy_create_sem = semaphore.Semaphore()

 def multicall(conf, context, topic, msg, timeout, connection_pool):
     """Make a call that returns multiple times."""
-    # TODO(pekowski): Remove all these comments in Havana.
-    # For amqp_rpc_single_reply_queue = False,
-    # Can't use 'with' for multicall, as it returns an iterator
-    # that will continue to use the connection.  When it's done,
-    # connection.close() will get called which will put it back into
-    # the pool
-    # For amqp_rpc_single_reply_queue = True,
-    # The 'with' statement is mandatory for closing the connection
     LOG.debug(_('Making synchronous call on %s ...'), topic)
     msg_id = uuid.uuid4().hex
     msg.update({'_msg_id': msg_id})
@@ -588,21 +517,13 @@ def multicall(conf, context, topic, msg, timeout, connection_pool):
     _add_unique_id(msg)
     pack_context(msg, context)

-    # TODO(pekowski): Remove this flag and the code under the if clause
-    # in Havana.
-    if not conf.amqp_rpc_single_reply_queue:
-        conn = ConnectionContext(conf, connection_pool)
-        wait_msg = MulticallWaiter(conf, conn, timeout)
-        conn.declare_direct_consumer(msg_id, wait_msg)
-        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
-    else:
-        with _reply_proxy_create_sem:
-            if not connection_pool.reply_proxy:
-                connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
-        msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
-        wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
-        with ConnectionContext(conf, connection_pool) as conn:
-            conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
+    with _reply_proxy_create_sem:
+        if not connection_pool.reply_proxy:
+            connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
+    msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
+    wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
+    with ConnectionContext(conf, connection_pool) as conn:
+        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
     return wait_msg
@@ -662,7 +583,7 @@ def notify(conf, context, topic, msg, connection_pool, envelope):
     pack_context(msg, context)
     with ConnectionContext(conf, connection_pool) as conn:
         if envelope:
-            msg = rpc_common.serialize_msg(msg, force_envelope=True)
+            msg = rpc_common.serialize_msg(msg)
         conn.notify_send(topic, msg)
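
With the amqp_rpc_single_reply_queue option gone, every call now goes
through the shared ReplyProxy path unconditionally. A minimal sketch of the
resulting flow (hypothetical names; 'pool' stands for a configured
connection pool):

    msg = {'method': 'ping', 'args': {}}
    waiter = multicall(cfg.CONF, context, 'topic.host', msg,
                       timeout=10, connection_pool=pool)
    for reply in waiter:
        # MulticallProxyWaiter yields results until the 'ending' reply
        print(reply)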

View File

@@ -22,6 +22,7 @@ import sys
 import traceback

 from oslo.config import cfg
+import six

 from climate.openstack.common.gettextutils import _
 from climate.openstack.common import importutils
@@ -69,9 +70,7 @@ _RPC_ENVELOPE_VERSION = '2.0'
 _VERSION_KEY = 'oslo.version'
 _MESSAGE_KEY = 'oslo.message'

-
-# TODO(russellb) Turn this on after Grizzly.
-_SEND_RPC_ENVELOPE = False
+_REMOTE_POSTFIX = '_Remote'


 class RPCException(Exception):
@@ -122,7 +121,26 @@ class Timeout(RPCException):
     This exception is raised if the rpc_response_timeout is reached while
     waiting for a response from the remote side.
     """
-    message = _("Timeout while waiting on RPC response.")
+    message = _('Timeout while waiting on RPC response - '
+                'topic: "%(topic)s", RPC method: "%(method)s" '
+                'info: "%(info)s"')
+
+    def __init__(self, info=None, topic=None, method=None):
+        """Initiates Timeout object.
+
+        :param info: Extra info to convey to the user
+        :param topic: The topic that the rpc call was sent to
+        :param rpc_method_name: The name of the rpc method being
+                                called
+        """
+        self.info = info
+        self.topic = topic
+        self.method = method
+        super(Timeout, self).__init__(
+            None,
+            info=info or _('<unknown>'),
+            topic=topic or _('<unknown>'),
+            method=method or _('<unknown>'))


 class DuplicateMessageError(RPCException):
@@ -143,6 +161,10 @@ class UnsupportedRpcEnvelopeVersion(RPCException):
                 "not supported by this endpoint.")


+class RpcVersionCapError(RPCException):
+    message = _("Specified RPC version cap, %(version_cap)s, is too low")
+
+
 class Connection(object):
     """A connection, returned by rpc.create_connection().
@@ -202,9 +224,9 @@ class Connection(object):
         raise NotImplementedError()

     def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
-        """Register as a member of a group of consumers for a given topic from
-        the specified exchange.
+        """Register as a member of a group of consumers.
+
+        Uses given topic from the specified exchange.
         Exactly one member of a given pool will receive each message.

         A message will be delivered to multiple pools, if more than
@@ -262,7 +284,7 @@ def _safe_log(log_func, msg, msg_data):
                 for elem in arg[:-1]:
                     d = d[elem]
                 d[arg[-1]] = '<SANITIZED>'
-        except KeyError, e:
+        except KeyError as e:
             LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'),
                      {'item': arg,
                       'err': e})
@@ -285,17 +307,27 @@ def serialize_remote_exception(failure_info, log_failure=True):
     tb = traceback.format_exception(*failure_info)
     failure = failure_info[1]
     if log_failure:
-        LOG.error(_("Returning exception %s to caller"), unicode(failure))
+        LOG.error(_("Returning exception %s to caller"),
+                  six.text_type(failure))
         LOG.error(tb)

     kwargs = {}
     if hasattr(failure, 'kwargs'):
         kwargs = failure.kwargs

+    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
+    # exceptions. Lets turn it back into the original exception type.
+    cls_name = str(failure.__class__.__name__)
+    mod_name = str(failure.__class__.__module__)
+    if (cls_name.endswith(_REMOTE_POSTFIX) and
+            mod_name.endswith(_REMOTE_POSTFIX)):
+        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
+        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
+
     data = {
-        'class': str(failure.__class__.__name__),
-        'module': str(failure.__class__.__module__),
-        'message': unicode(failure),
+        'class': cls_name,
+        'module': mod_name,
+        'message': six.text_type(failure),
         'tb': tb,
         'args': failure.args,
         'kwargs': kwargs
@@ -325,14 +357,15 @@ def deserialize_remote_exception(conf, data):
         if not issubclass(klass, Exception):
             raise TypeError("Can only deserialize Exceptions")

-        failure = klass(**failure.get('kwargs', {}))
+        failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
     except (AttributeError, TypeError, ImportError):
         return RemoteError(name, failure.get('message'), trace)

     ex_type = type(failure)
     str_override = lambda self: message
-    new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
+    new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
                        {'__str__': str_override, '__unicode__': str_override})
+    new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
     try:
         # NOTE(ameade): Dynamically create a new exception type and swap it in
         # as the new type for the exception. This only works on user defined
@@ -394,10 +427,11 @@ class CommonRpcContext(object):

 class ClientException(Exception):
-    """This encapsulates some actual exception that is expected to be
-    hit by an RPC proxy object. Merely instantiating it records the
-    current exception information, which will be passed back to the
-    RPC client without exceptional logging."""
+    """Encapsulates actual exception expected to be hit by a RPC proxy object.
+
+    Merely instantiating it records the current exception information, which
+    will be passed back to the RPC client without exceptional logging.
+    """
+
     def __init__(self):
         self._exc_info = sys.exc_info()
@@ -405,7 +439,7 @@ class ClientException(Exception):
 def catch_client_exception(exceptions, func, *args, **kwargs):
     try:
         return func(*args, **kwargs)
-    except Exception, e:
+    except Exception as e:
         if type(e) in exceptions:
             raise ClientException()
         else:
@@ -414,11 +448,13 @@ def catch_client_exception(exceptions, func, *args, **kwargs):

 def client_exceptions(*exceptions):
     """Decorator for manager methods that raise expected exceptions.
+
     Marking a Manager method with this decorator allows the declaration
     of expected exceptions that the RPC layer should not consider fatal,
     and not log as if they were generated in a real error scenario. Note
     that this will cause listed exceptions to be wrapped in a
-    ClientException, which is used internally by the RPC layer."""
+    ClientException, which is used internally by the RPC layer.
+    """
     def outer(func):
         def inner(*args, **kwargs):
             return catch_client_exception(exceptions, func, *args, **kwargs)
@@ -441,10 +477,7 @@ def version_is_compatible(imp_version, version):
     return True


-def serialize_msg(raw_msg, force_envelope=False):
-    if not _SEND_RPC_ENVELOPE and not force_envelope:
-        return raw_msg
-
+def serialize_msg(raw_msg):
     # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
     # information about this format.
     msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
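
Taken together, the _REMOTE_POSTFIX changes make remote exceptions
round-trippable: deserialize_remote_exception() now rebuilds the original
argument list and renames both the class and module with a '_Remote'
suffix, and serialize_remote_exception() strips that suffix again when such
an exception is re-raised across another hop (the cells case noted above).
A sketch of the client-side effect (hypothetical values):

    failure = deserialize_remote_exception(conf, data)
    type(failure).__name__        # e.g. 'ValueError_Remote'
    type(failure).__module__      # e.g. 'exceptions_Remote'
    str(failure)                  # original message plus remote traceback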

View File

@@ -84,6 +84,7 @@ minimum version that supports the new parameter should be specified.
 """

 from climate.openstack.common.rpc import common as rpc_common
+from climate.openstack.common.rpc import serializer as rpc_serializer


 class RpcDispatcher(object):
@@ -93,23 +94,48 @@ class RpcDispatcher(object):
     contains a list of underlying managers that have an API_VERSION attribute.
     """

-    def __init__(self, callbacks):
+    def __init__(self, callbacks, serializer=None):
         """Initialize the rpc dispatcher.

         :param callbacks: List of proxy objects that are an instance
                           of a class with rpc methods exposed.  Each proxy
                           object should have an RPC_API_VERSION attribute.
+        :param serializer: The Serializer object that will be used to
+                           deserialize arguments before the method call and
+                           to serialize the result after it returns.
         """
         self.callbacks = callbacks
+        if serializer is None:
+            serializer = rpc_serializer.NoOpSerializer()
+        self.serializer = serializer
         super(RpcDispatcher, self).__init__()

-    def dispatch(self, ctxt, version, method, **kwargs):
+    def _deserialize_args(self, context, kwargs):
+        """Helper method called to deserialize args before dispatch.
+
+        This calls our serializer on each argument, returning a new set of
+        args that have been deserialized.
+
+        :param context: The request context
+        :param kwargs: The arguments to be deserialized
+
+        :returns: A new set of deserialized args
+        """
+        new_kwargs = dict()
+        for argname, arg in kwargs.iteritems():
+            new_kwargs[argname] = self.serializer.deserialize_entity(context,
+                                                                     arg)
+        return new_kwargs
+
+    def dispatch(self, ctxt, version, method, namespace, **kwargs):
         """Dispatch a message based on a requested version.

         :param ctxt: The request context
         :param version: The requested API version from the incoming message
         :param method: The method requested to be called by the incoming
                        message.
+        :param namespace: The namespace for the requested method.  If None,
+                          the dispatcher will look for a method on a callback
+                          object with no namespace set.
         :param kwargs: A dict of keyword arguments to be passed to the method.

         :returns: Whatever is returned by the underlying method that gets
@@ -120,17 +146,31 @@
         had_compatible = False
         for proxyobj in self.callbacks:
-            if hasattr(proxyobj, 'RPC_API_VERSION'):
+            # Check for namespace compatibility
+            try:
+                cb_namespace = proxyobj.RPC_API_NAMESPACE
+            except AttributeError:
+                cb_namespace = None
+
+            if namespace != cb_namespace:
+                continue
+
+            # Check for version compatibility
+            try:
                 rpc_api_version = proxyobj.RPC_API_VERSION
-            else:
+            except AttributeError:
                 rpc_api_version = '1.0'
+
             is_compatible = rpc_common.version_is_compatible(rpc_api_version,
                                                              version)
             had_compatible = had_compatible or is_compatible
+
             if not hasattr(proxyobj, method):
                 continue
             if is_compatible:
-                return getattr(proxyobj, method)(ctxt, **kwargs)
+                kwargs = self._deserialize_args(ctxt, kwargs)
+                result = getattr(proxyobj, method)(ctxt, **kwargs)
+                return self.serializer.serialize_entity(ctxt, result)
+
         if had_compatible:
             raise AttributeError("No such RPC function '%s'" % method)
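
The namespace check means a callback now receives a message only when its
RPC_API_NAMESPACE matches the message's namespace. A small sketch
(hypothetical manager classes):

    class BaseAPI(object):
        RPC_API_VERSION = '1.0'        # no namespace attribute -> None

        def ping(self, ctxt):
            return 'pong'

    class AdminAPI(object):
        RPC_API_NAMESPACE = 'admin'
        RPC_API_VERSION = '1.0'

        def ping(self, ctxt):
            return 'admin-pong'

    dispatcher = RpcDispatcher([BaseAPI(), AdminAPI()])
    dispatcher.dispatch(None, '1.0', 'ping', None)     # -> 'pong'
    dispatcher.dispatch(None, '1.0', 'ping', 'admin')  # -> 'admin-pong'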

View File

@@ -57,13 +57,14 @@ class Consumer(object):
         self.topic = topic
         self.proxy = proxy

-    def call(self, context, version, method, args, timeout):
+    def call(self, context, version, method, namespace, args, timeout):
         done = eventlet.event.Event()

         def _inner():
             ctxt = RpcContext.from_dict(context.to_dict())
             try:
-                rval = self.proxy.dispatch(context, version, method, **args)
+                rval = self.proxy.dispatch(context, version, method,
+                                           namespace, **args)
                 res = []
                 # Caller might have called ctxt.reply() manually
                 for (reply, failure) in ctxt._response:
@@ -121,7 +122,7 @@ class Connection(object):


 def create_connection(conf, new=True):
-    """Create a connection"""
+    """Create a connection."""
     return Connection()
@@ -140,13 +141,15 @@ def multicall(conf, context, topic, msg, timeout=None):
         return
     args = msg.get('args', {})
     version = msg.get('version', None)
+    namespace = msg.get('namespace', None)

     try:
         consumer = CONSUMERS[topic][0]
     except (KeyError, IndexError):
         return iter([None])
     else:
-        return consumer.call(context, version, method, args, timeout)
+        return consumer.call(context, version, method, namespace, args,
+                             timeout)
@@ -176,16 +179,17 @@ def cleanup():

 def fanout_cast(conf, context, topic, msg):
-    """Cast to all consumers of a topic"""
+    """Cast to all consumers of a topic."""
     check_serialize(msg)
     method = msg.get('method')
     if not method:
         return
     args = msg.get('args', {})
     version = msg.get('version', None)
+    namespace = msg.get('namespace', None)

     for consumer in CONSUMERS.get(topic, []):
         try:
-            consumer.call(context, version, method, args, None)
+            consumer.call(context, version, method, namespace, args, None)
         except Exception:
             pass

View File

@@ -18,7 +18,6 @@ import functools
 import itertools
 import socket
 import ssl
-import sys
 import time
 import uuid
@@ -30,6 +29,7 @@ import kombu.entity
 import kombu.messaging
 from oslo.config import cfg

+from climate.openstack.common import excutils
 from climate.openstack.common.gettextutils import _
 from climate.openstack.common import network_utils
 from climate.openstack.common.rpc import amqp as rpc_amqp
@@ -129,15 +129,46 @@ class ConsumerBase(object):
         self.tag = str(tag)
         self.kwargs = kwargs
         self.queue = None
+        self.ack_on_error = kwargs.get('ack_on_error', True)
         self.reconnect(channel)

     def reconnect(self, channel):
-        """Re-declare the queue after a rabbit reconnect"""
+        """Re-declare the queue after a rabbit reconnect."""
         self.channel = channel
         self.kwargs['channel'] = channel
         self.queue = kombu.entity.Queue(**self.kwargs)
         self.queue.declare()

+    def _callback_handler(self, message, callback):
+        """Call callback with deserialized message.
+
+        Messages that are processed without exception are ack'ed.
+
+        If the message processing generates an exception, it will be
+        ack'ed if ack_on_error=True. Otherwise it will be .reject()'ed.
+        Rejection is better than waiting for the message to timeout.
+        Rejected messages are immediately requeued.
+        """
+
+        ack_msg = False
+        try:
+            msg = rpc_common.deserialize_msg(message.payload)
+            callback(msg)
+            ack_msg = True
+        except Exception:
+            if self.ack_on_error:
+                ack_msg = True
+                LOG.exception(_("Failed to process message"
+                                " ... skipping it."))
+            else:
+                LOG.exception(_("Failed to process message"
+                                " ... will requeue."))
+        finally:
+            if ack_msg:
+                message.ack()
+            else:
+                message.reject()
+
     def consume(self, *args, **kwargs):
         """Actually declare the consumer on the amqp channel.  This will
         start the flow of messages from the queue.  Using the
@@ -150,8 +181,6 @@ class ConsumerBase(object):
         If kwargs['nowait'] is True, then this call will block until
         a message is read.

-        Messages will automatically be acked if the callback doesn't
-        raise an exception
         """

         options = {'consumer_tag': self.tag}
@@ -162,21 +191,15 @@ class ConsumerBase(object):

         def _callback(raw_message):
             message = self.channel.message_to_python(raw_message)
-            try:
-                msg = rpc_common.deserialize_msg(message.payload)
-                callback(msg)
-            except Exception:
-                LOG.exception(_("Failed to process message... skipping it."))
-            finally:
-                message.ack()
+            self._callback_handler(message, callback)

         self.queue.consume(*args, callback=_callback, **options)

     def cancel(self):
-        """Cancel the consuming from the queue, if it has started"""
+        """Cancel the consuming from the queue, if it has started."""
         try:
             self.queue.cancel(self.tag)
-        except KeyError, e:
+        except KeyError as e:
             # NOTE(comstud): Kludge to get around a amqplib bug
             if str(e) != "u'%s'" % self.tag:
                 raise
@@ -184,7 +207,7 @@ class ConsumerBase(object):

 class DirectConsumer(ConsumerBase):
-    """Queue/consumer class for 'direct'"""
+    """Queue/consumer class for 'direct'."""

     def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
         """Init a 'direct' queue.
@@ -216,7 +239,7 @@ class DirectConsumer(ConsumerBase):

 class TopicConsumer(ConsumerBase):
-    """Consumer class for 'topic'"""
+    """Consumer class for 'topic'."""

     def __init__(self, conf, channel, topic, callback, tag, name=None,
                  exchange_name=None, **kwargs):
@@ -253,7 +276,7 @@ class TopicConsumer(ConsumerBase):

 class FanoutConsumer(ConsumerBase):
-    """Consumer class for 'fanout'"""
+    """Consumer class for 'fanout'."""

     def __init__(self, conf, channel, topic, callback, tag, **kwargs):
         """Init a 'fanout' queue.
@@ -286,7 +309,7 @@ class FanoutConsumer(ConsumerBase):

 class Publisher(object):
-    """Base Publisher class"""
+    """Base Publisher class."""

     def __init__(self, channel, exchange_name, routing_key, **kwargs):
         """Init the Publisher class with the exchange_name, routing_key,
@@ -298,7 +321,7 @@ class Publisher(object):
         self.reconnect(channel)

     def reconnect(self, channel):
-        """Re-establish the Producer after a rabbit reconnection"""
+        """Re-establish the Producer after a rabbit reconnection."""
         self.exchange = kombu.entity.Exchange(name=self.exchange_name,
                                               **self.kwargs)
         self.producer = kombu.messaging.Producer(exchange=self.exchange,
@@ -306,7 +329,7 @@ class Publisher(object):
                                                  routing_key=self.routing_key)

     def send(self, msg, timeout=None):
-        """Send a message"""
+        """Send a message."""
         if timeout:
             #
             # AMQP TTL is in milliseconds when set in the header.
@@ -317,7 +340,7 @@ class Publisher(object):

 class DirectPublisher(Publisher):
-    """Publisher class for 'direct'"""
+    """Publisher class for 'direct'."""

     def __init__(self, conf, channel, msg_id, **kwargs):
         """init a 'direct' publisher.
@@ -333,7 +356,7 @@ class DirectPublisher(Publisher):

 class TopicPublisher(Publisher):
-    """Publisher class for 'topic'"""
+    """Publisher class for 'topic'."""

     def __init__(self, conf, channel, topic, **kwargs):
         """init a 'topic' publisher.
@@ -352,7 +375,7 @@ class TopicPublisher(Publisher):

 class FanoutPublisher(Publisher):
-    """Publisher class for 'fanout'"""
+    """Publisher class for 'fanout'."""

     def __init__(self, conf, channel, topic, **kwargs):
         """init a 'fanout' publisher.
@@ -367,7 +390,7 @@ class FanoutPublisher(Publisher):

 class NotifyPublisher(TopicPublisher):
-    """Publisher class for 'notify'"""
+    """Publisher class for 'notify'."""

     def __init__(self, conf, channel, topic, **kwargs):
         self.durable = kwargs.pop('durable', conf.rabbit_durable_queues)
@@ -447,8 +470,9 @@ class Connection(object):
         self.reconnect()

     def _fetch_ssl_params(self):
-        """Handles fetching what ssl params
-        should be used for the connection (if any)"""
+        """Handles fetching what ssl params should be used for the connection
+        (if any).
+        """
         ssl_params = dict()

         # http://docs.python.org/library/ssl.html - ssl.wrap_socket
@@ -520,7 +544,7 @@ class Connection(object):
                 return
             except (IOError, self.connection_errors) as e:
                 pass
-            except Exception, e:
+            except Exception as e:
                 # NOTE(comstud): Unfortunately it's possible for amqplib
                 # to return an error not covered by its transport
                 # connection_errors in the case of a timeout waiting for
@@ -536,13 +560,11 @@ class Connection(object):
             log_info.update(params)

             if self.max_retries and attempt == self.max_retries:
-                LOG.error(_('Unable to connect to AMQP server on '
-                            '%(hostname)s:%(port)d after %(max_retries)d '
-                            'tries: %(err_str)s') % log_info)
-                # NOTE(comstud): Copied from original code.  There's
-                # really no better recourse because if this was a queue we
-                # need to consume on, we have no way to consume anymore.
-                sys.exit(1)
+                msg = _('Unable to connect to AMQP server on '
+                        '%(hostname)s:%(port)d after %(max_retries)d '
+                        'tries: %(err_str)s') % log_info
+                LOG.error(msg)
+                raise rpc_common.RPCException(msg)

             if attempt == 1:
                 sleep_time = self.interval_start or 1
@@ -561,10 +583,10 @@ class Connection(object):
         while True:
             try:
                 return method(*args, **kwargs)
-            except (self.connection_errors, socket.timeout, IOError), e:
+            except (self.connection_errors, socket.timeout, IOError) as e:
                 if error_callback:
                     error_callback(e)
-            except Exception, e:
+            except Exception as e:
                 # NOTE(comstud): Unfortunately it's possible for amqplib
                 # to return an error not covered by its transport
                 # connection_errors in the case of a timeout waiting for
@@ -578,18 +600,18 @@ class Connection(object):
             self.reconnect()

     def get_channel(self):
-        """Convenience call for bin/clear_rabbit_queues"""
+        """Convenience call for bin/clear_rabbit_queues."""
         return self.channel

     def close(self):
-        """Close/release this connection"""
+        """Close/release this connection."""
         self.cancel_consumer_thread()
         self.wait_on_proxy_callbacks()
         self.connection.release()
         self.connection = None

     def reset(self):
-        """Reset a connection so it can be used again"""
+        """Reset a connection so it can be used again."""
         self.cancel_consumer_thread()
         self.wait_on_proxy_callbacks()
         self.channel.close()
@@ -618,7 +640,7 @@ class Connection(object):
         return self.ensure(_connect_error, _declare_consumer)

     def iterconsume(self, limit=None, timeout=None):
-        """Return an iterator that will consume from all queues/consumers"""
+        """Return an iterator that will consume from all queues/consumers."""

         info = {'do_consume': True}
@@ -634,8 +656,8 @@ class Connection(object):
         def _consume():
             if info['do_consume']:
-                queues_head = self.consumers[:-1]
-                queues_tail = self.consumers[-1]
+                queues_head = self.consumers[:-1]  # not fanout.
+                queues_tail = self.consumers[-1]  # fanout
                 for queue in queues_head:
                     queue.consume(nowait=True)
                 queues_tail.consume(nowait=False)
@@ -648,7 +670,7 @@ class Connection(object):
             yield self.ensure(_error_callback, _consume)

     def cancel_consumer_thread(self):
-        """Cancel a consumer thread"""
+        """Cancel a consumer thread."""
         if self.consumer_thread is not None:
             self.consumer_thread.kill()
             try:
@@ -663,7 +685,7 @@ class Connection(object):
             proxy_cb.wait()

     def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
-        """Send to a publisher based on the publisher class"""
+        """Send to a publisher based on the publisher class."""

         def _error_callback(exc):
             log_info = {'topic': topic, 'err_str': str(exc)}
@@ -684,36 +706,37 @@ class Connection(object):
         self.declare_consumer(DirectConsumer, topic, callback)

     def declare_topic_consumer(self, topic, callback=None, queue_name=None,
-                               exchange_name=None):
+                               exchange_name=None, ack_on_error=True):
         """Create a 'topic' consumer."""
         self.declare_consumer(functools.partial(TopicConsumer,
                                                 name=queue_name,
                                                 exchange_name=exchange_name,
+                                                ack_on_error=ack_on_error,
                                                 ),
                               topic, callback)

     def declare_fanout_consumer(self, topic, callback):
-        """Create a 'fanout' consumer"""
+        """Create a 'fanout' consumer."""
         self.declare_consumer(FanoutConsumer, topic, callback)

     def direct_send(self, msg_id, msg):
-        """Send a 'direct' message"""
+        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)

     def topic_send(self, topic, msg, timeout=None):
-        """Send a 'topic' message"""
+        """Send a 'topic' message."""
         self.publisher_send(TopicPublisher, topic, msg, timeout)

     def fanout_send(self, topic, msg):
-        """Send a 'fanout' message"""
+        """Send a 'fanout' message."""
         self.publisher_send(FanoutPublisher, topic, msg)

     def notify_send(self, topic, msg, **kwargs):
-        """Send a notify message on a topic"""
+        """Send a notify message on a topic."""
         self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)

     def consume(self, limit=None):
-        """Consume from all queues/consumers"""
+        """Consume from all queues/consumers."""
         it = self.iterconsume(limit=limit)
         while True:
             try:
@@ -722,7 +745,8 @@ class Connection(object):
                 return

     def consume_in_thread(self):
-        """Consumer from all queues/consumers in a greenthread"""
+        """Consumer from all queues/consumers in a greenthread."""
+        @excutils.forever_retry_uncaught_exceptions
         def _consumer_thread():
             try:
                 self.consume()
@@ -733,7 +757,7 @@ class Connection(object):
         return self.consumer_thread

     def create_consumer(self, topic, proxy, fanout=False):
-        """Create a consumer that calls a method in a proxy object"""
+        """Create a consumer that calls a method in a proxy object."""
         proxy_cb = rpc_amqp.ProxyCallback(
             self.conf, proxy,
             rpc_amqp.get_connection_pool(self.conf, Connection))
@@ -745,7 +769,7 @@ class Connection(object):
         self.declare_topic_consumer(topic, proxy_cb)

     def create_worker(self, topic, proxy, pool_name):
-        """Create a worker that calls a method in a proxy object"""
+        """Create a worker that calls a method in a proxy object."""
         proxy_cb = rpc_amqp.ProxyCallback(
             self.conf, proxy,
             rpc_amqp.get_connection_pool(self.conf, Connection))
@@ -753,7 +777,7 @@ class Connection(object):
         self.declare_topic_consumer(topic, proxy_cb, pool_name)

     def join_consumer_pool(self, callback, pool_name, topic,
-                           exchange_name=None):
+                           exchange_name=None, ack_on_error=True):
         """Register as a member of a group of consumers for a given topic from
         the specified exchange.
@@ -774,11 +798,12 @@ class Connection(object):
             topic=topic,
             exchange_name=exchange_name,
             callback=callback_wrapper,
+            ack_on_error=ack_on_error,
         )


 def create_connection(conf, new=True):
-    """Create a connection"""
+    """Create a connection."""
     return rpc_amqp.create_connection(
         conf, new,
         rpc_amqp.get_connection_pool(conf, Connection))
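
With ack_on_error plumbed from join_consumer_pool() down to ConsumerBase, a
pool consumer can ask for failing messages to be rejected and requeued
instead of silently acknowledged. A sketch (hypothetical callback, 'conn'
being an existing Connection):

    def on_event(message):
        process(message)    # raising here now triggers message.reject()

    conn.join_consumer_pool(on_event,
                            pool_name='climate.notifications',
                            topic='notifications.info',
                            ack_on_error=False)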

View File

@ -24,6 +24,7 @@ import eventlet
import greenlet import greenlet
from oslo.config import cfg from oslo.config import cfg
from climate.openstack.common import excutils
from climate.openstack.common.gettextutils import _ from climate.openstack.common.gettextutils import _
from climate.openstack.common import importutils from climate.openstack.common import importutils
from climate.openstack.common import jsonutils from climate.openstack.common import jsonutils
@ -31,6 +32,7 @@ from climate.openstack.common import log as logging
from climate.openstack.common.rpc import amqp as rpc_amqp from climate.openstack.common.rpc import amqp as rpc_amqp
from climate.openstack.common.rpc import common as rpc_common from climate.openstack.common.rpc import common as rpc_common
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging") qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions") qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
@ -40,8 +42,8 @@ qpid_opts = [
cfg.StrOpt('qpid_hostname', cfg.StrOpt('qpid_hostname',
default='localhost', default='localhost',
help='Qpid broker hostname'), help='Qpid broker hostname'),
cfg.StrOpt('qpid_port', cfg.IntOpt('qpid_port',
default='5672', default=5672,
help='Qpid broker port'), help='Qpid broker port'),
cfg.ListOpt('qpid_hosts', cfg.ListOpt('qpid_hosts',
default=['$qpid_hostname:$qpid_port'], default=['$qpid_hostname:$qpid_port'],
@ -69,6 +71,8 @@ qpid_opts = [
cfg.CONF.register_opts(qpid_opts) cfg.CONF.register_opts(qpid_opts)
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
class ConsumerBase(object): class ConsumerBase(object):
"""Consumer base class.""" """Consumer base class."""
@ -115,31 +119,59 @@ class ConsumerBase(object):
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.reconnect(session) self.connect(session)
def connect(self, session):
"""Declare the reciever on connect."""
self._declare_receiver(session)
def reconnect(self, session): def reconnect(self, session):
"""Re-declare the receiver after a qpid reconnect""" """Re-declare the receiver after a qpid reconnect."""
self._declare_receiver(session)
def _declare_receiver(self, session):
self.session = session self.session = session
self.receiver = session.receiver(self.address) self.receiver = session.receiver(self.address)
self.receiver.capacity = 1 self.receiver.capacity = 1
def _unpack_json_msg(self, msg):
"""Load the JSON data in msg if msg.content_type indicates that it
is necessary. Put the loaded data back into msg.content and
update msg.content_type appropriately.
A Qpid Message containing a dict will have a content_type of
'amqp/map', whereas one containing a string that needs to be converted
back from JSON will have a content_type of JSON_CONTENT_TYPE.
:param msg: a Qpid Message object
:returns: None
"""
if msg.content_type == JSON_CONTENT_TYPE:
msg.content = jsonutils.loads(msg.content)
msg.content_type = 'amqp/map'
     def consume(self):
-        """Fetch the message and pass it to the callback object"""
+        """Fetch the message and pass it to the callback object."""
         message = self.receiver.fetch()
         try:
+            self._unpack_json_msg(message)
             msg = rpc_common.deserialize_msg(message.content)
             self.callback(msg)
         except Exception:
             LOG.exception(_("Failed to process message... skipping it."))
         finally:
+            # TODO(sandy): Need support for optional ack_on_error.
             self.session.acknowledge(message)

     def get_receiver(self):
         return self.receiver

+    def get_node_name(self):
+        return self.address.split(';')[0]
+

 class DirectConsumer(ConsumerBase):
-    """Queue/consumer class for 'direct'"""
+    """Queue/consumer class for 'direct'."""

     def __init__(self, conf, session, msg_id, callback):
         """Init a 'direct' queue.

@@ -157,7 +189,7 @@ class DirectConsumer(ConsumerBase):

 class TopicConsumer(ConsumerBase):
-    """Consumer class for 'topic'"""
+    """Consumer class for 'topic'."""

     def __init__(self, conf, session, topic, callback, name=None,
                  exchange_name=None):

@@ -177,7 +209,7 @@ class TopicConsumer(ConsumerBase):

 class FanoutConsumer(ConsumerBase):
-    """Consumer class for 'fanout'"""
+    """Consumer class for 'fanout'."""

     def __init__(self, conf, session, topic, callback):
         """Init a 'fanout' queue.

@@ -186,6 +218,7 @@ class FanoutConsumer(ConsumerBase):
         'topic' is the topic to listen on
         'callback' is the callback to call when messages are received
         """
+        self.conf = conf

         super(FanoutConsumer, self).__init__(
             session, callback,

@@ -194,9 +227,21 @@ class FanoutConsumer(ConsumerBase):
             "%s_fanout_%s" % (topic, uuid.uuid4().hex),
             {"exclusive": True})

+    def reconnect(self, session):
+        topic = self.get_node_name()
+        params = {
+            'session': session,
+            'topic': topic,
+            'callback': self.callback,
+        }
+
+        self.__init__(conf=self.conf, **params)
+        super(FanoutConsumer, self).reconnect(session)
+

 class Publisher(object):
-    """Base Publisher class"""
+    """Base Publisher class."""

     def __init__(self, session, node_name, node_opts=None):
         """Init the Publisher class with the exchange_name, routing_key,

@@ -225,16 +270,43 @@ class Publisher(object):
         self.reconnect(session)

     def reconnect(self, session):
-        """Re-establish the Sender after a reconnection"""
+        """Re-establish the Sender after a reconnection."""
         self.sender = session.sender(self.address)

+    def _pack_json_msg(self, msg):
+        """Qpid cannot serialize dicts containing strings longer than 65535
+        characters. This function dumps the message content to a JSON
+        string, which Qpid is able to handle.
+
+        :param msg: May be either a Qpid Message object or a bare dict.
+        :returns: A Qpid Message with its content field JSON encoded.
+        """
+        try:
+            msg.content = jsonutils.dumps(msg.content)
+        except AttributeError:
+            # Need to have a Qpid message so we can set the content_type.
+            msg = qpid_messaging.Message(jsonutils.dumps(msg))
+        msg.content_type = JSON_CONTENT_TYPE
+        return msg
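A short sketch of why the fallback exists; the 70000-character value is illustrative, anything over Qpid's 65535 map-string limit takes the same path:

    from climate.openstack.common import jsonutils

    big = {'stdout': 'x' * 70000}       # too long for the 'amqp/map' codec
    packed = jsonutils.dumps(big)       # what _pack_json_msg() sends instead
    assert len(packed) > 65535          # travels as a plain JSON string
    assert jsonutils.loads(packed) == big   # _unpack_json_msg() restores it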
     def send(self, msg):
-        """Send a message"""
+        """Send a message."""
+        try:
+            # Check if Qpid can encode the message
+            check_msg = msg
+            if not hasattr(check_msg, 'content_type'):
+                check_msg = qpid_messaging.Message(msg)
+            content_type = check_msg.content_type
+            enc, dec = qpid_messaging.message.get_codec(content_type)
+            enc(check_msg.content)
+        except qpid_codec.CodecException:
+            # This means the message couldn't be serialized as a dict.
+            msg = self._pack_json_msg(msg)
         self.sender.send(msg)


 class DirectPublisher(Publisher):
-    """Publisher class for 'direct'"""
+    """Publisher class for 'direct'."""

     def __init__(self, conf, session, msg_id):
         """Init a 'direct' publisher."""
         super(DirectPublisher, self).__init__(session, msg_id,

@@ -242,7 +314,7 @@ class DirectPublisher(Publisher):

 class TopicPublisher(Publisher):
-    """Publisher class for 'topic'"""
+    """Publisher class for 'topic'."""

     def __init__(self, conf, session, topic):
         """init a 'topic' publisher.
         """

@@ -252,7 +324,7 @@ class TopicPublisher(Publisher):

 class FanoutPublisher(Publisher):
-    """Publisher class for 'fanout'"""
+    """Publisher class for 'fanout'."""

     def __init__(self, conf, session, topic):
         """init a 'fanout' publisher.
         """

@@ -262,7 +334,7 @@ class FanoutPublisher(Publisher):

 class NotifyPublisher(Publisher):
-    """Publisher class for notifications"""
+    """Publisher class for notifications."""

     def __init__(self, conf, session, topic):
         """init a 'topic' publisher.
         """

@@ -320,7 +392,7 @@ class Connection(object):
         # Reconnection is done by self.reconnect()
         self.connection.reconnect = False
         self.connection.heartbeat = self.conf.qpid_heartbeat
-        self.connection.protocol = self.conf.qpid_protocol
+        self.connection.transport = self.conf.qpid_protocol
         self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay

     def _register_consumer(self, consumer):

@@ -330,23 +402,24 @@ class Connection(object):
         return self.consumers[str(receiver)]

     def reconnect(self):
-        """Handles reconnecting and re-establishing sessions and queues"""
-        if self.connection.opened():
-            try:
-                self.connection.close()
-            except qpid_exceptions.ConnectionError:
-                pass
-
+        """Handles reconnecting and re-establishing sessions and queues."""
         attempt = 0
         delay = 1
         while True:
+            # Close the session if necessary
+            if self.connection.opened():
+                try:
+                    self.connection.close()
+                except qpid_exceptions.ConnectionError:
+                    pass
+
             broker = self.brokers[attempt % len(self.brokers)]
             attempt += 1

             try:
                 self.connection_create(broker)
                 self.connection.open()
-            except qpid_exceptions.ConnectionError, e:
+            except qpid_exceptions.ConnectionError as e:
                 msg_dict = dict(e=e, delay=delay)
                 msg = _("Unable to connect to AMQP server: %(e)s. "
                         "Sleeping %(delay)s seconds") % msg_dict

@@ -374,20 +447,26 @@ class Connection(object):
         try:
             return method(*args, **kwargs)
         except (qpid_exceptions.Empty,
-                qpid_exceptions.ConnectionError), e:
+                qpid_exceptions.ConnectionError) as e:
             if error_callback:
                 error_callback(e)
             self.reconnect()

     def close(self):
-        """Close/release this connection"""
+        """Close/release this connection."""
         self.cancel_consumer_thread()
         self.wait_on_proxy_callbacks()
-        self.connection.close()
+        try:
+            self.connection.close()
+        except Exception:
+            # NOTE(dripton) Logging exceptions that happen during cleanup just
+            # causes confusion; there's really nothing useful we can do with
+            # them.
+            pass
         self.connection = None

     def reset(self):
-        """Reset a connection so it can be used again"""
+        """Reset a connection so it can be used again."""
         self.cancel_consumer_thread()
         self.wait_on_proxy_callbacks()
         self.session.close()

@@ -411,7 +490,7 @@ class Connection(object):
         return self.ensure(_connect_error, _declare_consumer)

     def iterconsume(self, limit=None, timeout=None):
-        """Return an iterator that will consume from all queues/consumers"""
+        """Return an iterator that will consume from all queues/consumers."""

         def _error_callback(exc):
             if isinstance(exc, qpid_exceptions.Empty):

@@ -435,7 +514,7 @@ class Connection(object):
             yield self.ensure(_error_callback, _consume)

     def cancel_consumer_thread(self):
-        """Cancel a consumer thread"""
+        """Cancel a consumer thread."""
         if self.consumer_thread is not None:
             self.consumer_thread.kill()
             try:

@@ -450,7 +529,7 @@ class Connection(object):
             proxy_cb.wait()

     def publisher_send(self, cls, topic, msg):
-        """Send to a publisher based on the publisher class"""
+        """Send to a publisher based on the publisher class."""

         def _connect_error(exc):
             log_info = {'topic': topic, 'err_str': str(exc)}

@@ -480,15 +559,15 @@ class Connection(object):
                               topic, callback)

     def declare_fanout_consumer(self, topic, callback):
-        """Create a 'fanout' consumer"""
+        """Create a 'fanout' consumer."""
         self.declare_consumer(FanoutConsumer, topic, callback)

     def direct_send(self, msg_id, msg):
-        """Send a 'direct' message"""
+        """Send a 'direct' message."""
         self.publisher_send(DirectPublisher, msg_id, msg)

     def topic_send(self, topic, msg, timeout=None):
-        """Send a 'topic' message"""
+        """Send a 'topic' message."""
         #
         # We want to create a message with attributes, e.g. a TTL. We
         # don't really need to keep 'msg' in its JSON format any longer

@@ -503,15 +582,15 @@ class Connection(object):
         self.publisher_send(TopicPublisher, topic, qpid_message)

     def fanout_send(self, topic, msg):
-        """Send a 'fanout' message"""
+        """Send a 'fanout' message."""
         self.publisher_send(FanoutPublisher, topic, msg)

     def notify_send(self, topic, msg, **kwargs):
-        """Send a notify message on a topic"""
+        """Send a notify message on a topic."""
         self.publisher_send(NotifyPublisher, topic, msg)

     def consume(self, limit=None):
-        """Consume from all queues/consumers"""
+        """Consume from all queues/consumers."""
         it = self.iterconsume(limit=limit)
         while True:
             try:

@@ -520,7 +599,8 @@ class Connection(object):
                 return

     def consume_in_thread(self):
-        """Consumer from all queues/consumers in a greenthread"""
+        """Consume from all queues/consumers in a greenthread."""
+        @excutils.forever_retry_uncaught_exceptions
         def _consumer_thread():
             try:
                 self.consume()

@@ -531,7 +611,7 @@ class Connection(object):
         return self.consumer_thread

     def create_consumer(self, topic, proxy, fanout=False):
-        """Create a consumer that calls a method in a proxy object"""
+        """Create a consumer that calls a method in a proxy object."""
         proxy_cb = rpc_amqp.ProxyCallback(
             self.conf, proxy,
             rpc_amqp.get_connection_pool(self.conf, Connection))

@@ -547,7 +627,7 @@ class Connection(object):
         return consumer

     def create_worker(self, topic, proxy, pool_name):
-        """Create a worker that calls a method in a proxy object"""
+        """Create a worker that calls a method in a proxy object."""
         proxy_cb = rpc_amqp.ProxyCallback(
             self.conf, proxy,
             rpc_amqp.get_connection_pool(self.conf, Connection))

@@ -561,7 +641,7 @@ class Connection(object):
         return consumer

     def join_consumer_pool(self, callback, pool_name, topic,
-                           exchange_name=None):
+                           exchange_name=None, ack_on_error=True):
         """Register as a member of a group of consumers for a given topic from
         the specified exchange.

@@ -590,7 +670,7 @@ class Connection(object):

 def create_connection(conf, new=True):
-    """Create a connection"""
+    """Create a connection."""
     return rpc_amqp.create_connection(
         conf, new,
         rpc_amqp.get_connection_pool(conf, Connection))
View File
@@ -30,7 +30,6 @@ from climate.openstack.common import excutils
 from climate.openstack.common.gettextutils import _
 from climate.openstack.common import importutils
 from climate.openstack.common import jsonutils
-from climate.openstack.common import processutils as utils
 from climate.openstack.common.rpc import common as rpc_common

 zmq = importutils.try_import('eventlet.green.zmq')

@@ -85,8 +84,8 @@ matchmaker = None  # memoized matchmaker object

 def _serialize(data):
-    """
-    Serialization wrapper
+    """Serialization wrapper.
+
     We prefer using JSON, but it cannot encode all types.
     Error if a developer passes us bad data.
     """

@@ -98,18 +97,15 @@ def _serialize(data):

 def _deserialize(data):
-    """
-    Deserialization wrapper
-    """
+    """Deserialization wrapper."""
     LOG.debug(_("Deserializing: %s"), data)
     return jsonutils.loads(data)


 class ZmqSocket(object):
-    """
-    A tiny wrapper around ZeroMQ to simplify the send/recv protocol
-    and connection management.
+    """A tiny wrapper around ZeroMQ.
+
+    Simplifies the send/recv protocol and connection management.

     Can be used as a Context (supports the 'with' statement).
     """

@@ -180,7 +176,7 @@ class ZmqSocket(object):
             return

         # We must unsubscribe, or we'll leak descriptors.
-        if len(self.subscriptions) > 0:
+        if self.subscriptions:
             for f in self.subscriptions:
                 try:
                     self.sock.setsockopt(zmq.UNSUBSCRIBE, f)

@@ -199,29 +195,27 @@ class ZmqSocket(object):
             LOG.error("ZeroMQ socket could not be closed.")
         self.sock = None

-    def recv(self):
+    def recv(self, **kwargs):
         if not self.can_recv:
             raise RPCException(_("You cannot recv on this socket."))
-        return self.sock.recv_multipart()
+        return self.sock.recv_multipart(**kwargs)

-    def send(self, data):
+    def send(self, data, **kwargs):
         if not self.can_send:
             raise RPCException(_("You cannot send on this socket."))
-        self.sock.send_multipart(data)
+        self.sock.send_multipart(data, **kwargs)


 class ZmqClient(object):
     """Client for ZMQ sockets."""

-    def __init__(self, addr, socket_type=None, bind=False):
-        if socket_type is None:
-            socket_type = zmq.PUSH
-        self.outq = ZmqSocket(addr, socket_type, bind=bind)
+    def __init__(self, addr):
+        self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)

-    def cast(self, msg_id, topic, data, envelope=False):
+    def cast(self, msg_id, topic, data, envelope):
         msg_id = msg_id or 0
-        if not (envelope or rpc_common._SEND_RPC_ENVELOPE):
+        if not envelope:
             self.outq.send(map(bytes,
                            (msg_id, topic, 'cast', _serialize(data))))
             return
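A hedged usage sketch of the simplified client; the address, topic, and payload are illustrative, and something (e.g. the ZmqProxy receiver) must be listening on a PULL socket at the other end:

    client = ZmqClient("tcp://127.0.0.1:9501")
    client.cast(None, "topic.host1",
                {"method": "ping", "args": {}}, envelope=False)
    client.close()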
@@ -276,12 +270,13 @@ class InternalContext(object):
         try:
             result = proxy.dispatch(
-                ctx, data['version'], data['method'], **data['args'])
+                ctx, data['version'], data['method'],
+                data.get('namespace'), **data['args'])
             return ConsumerBase.normalize_reply(result, ctx.replies)
         except greenlet.GreenletExit:
             # ignore these since they are just from shutdowns
             pass
-        except rpc_common.ClientException, e:
+        except rpc_common.ClientException as e:
             LOG.debug(_("Expected exception during message handling (%s)") %
                       e._exc_info[1])
             return {'exc':

@@ -295,11 +290,16 @@ class InternalContext(object):
     def reply(self, ctx, proxy,
               msg_id=None, context=None, topic=None, msg=None):
         """Reply to a casted call."""
-        # Our real method is curried into msg['args']
-
-        child_ctx = RpcContext.unmarshal(msg[0])
+        # NOTE(ewindisch): context kwarg exists for Grizzly compat.
+        #                  this may be able to be removed earlier than
+        #                  'I' if ConsumerBase.process were refactored.
+        if type(msg) is list:
+            payload = msg[-1]
+        else:
+            payload = msg
+
         response = ConsumerBase.normalize_reply(
-            self._get_response(child_ctx, proxy, topic, msg[1]),
+            self._get_response(ctx, proxy, topic, payload),
             ctx.replies)

         LOG.debug(_("Sending reply"))

@@ -346,20 +346,18 @@ class ConsumerBase(object):
             return

         proxy.dispatch(ctx, data['version'],
-                       data['method'], **data['args'])
+                       data['method'], data.get('namespace'), **data['args'])


 class ZmqBaseReactor(ConsumerBase):
-    """
-    A consumer class implementing a
-    centralized casting broker (PULL-PUSH)
-    for RoundRobin requests.
+    """A consumer class implementing a centralized casting broker (PULL-PUSH).
+
+    Used for RoundRobin requests.
     """

     def __init__(self, conf):
         super(ZmqBaseReactor, self).__init__()

-        self.mapping = {}
         self.proxies = {}
         self.threads = []
         self.sockets = []

@@ -367,9 +365,8 @@ class ZmqBaseReactor(ConsumerBase):
         self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)

-    def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
-                 zmq_type_out=None, in_bind=True, out_bind=True,
-                 subscribe=None):
+    def register(self, proxy, in_addr, zmq_type_in,
+                 in_bind=True, subscribe=None):

         LOG.info(_("Registering reactor"))

@@ -385,21 +382,6 @@ class ZmqBaseReactor(ConsumerBase):

         LOG.info(_("In reactor registered"))

-        if not out_addr:
-            return
-
-        if zmq_type_out not in (zmq.PUSH, zmq.PUB):
-            raise RPCException("Bad output socktype")
-
-        # Items push out.
-        outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)
-
-        self.mapping[inq] = outq
-        self.mapping[outq] = inq
-        self.sockets.append(outq)
-
-        LOG.info(_("Out reactor registered"))
-
     def consume_in_thread(self):
         def _consume(sock):
             LOG.info(_("Consuming socket"))

@@ -424,10 +406,9 @@ class ZmqBaseReactor(ConsumerBase):

 class ZmqProxy(ZmqBaseReactor):
-    """
-    A consumer class implementing a
-    topic-based proxy, forwarding to
-    IPC sockets.
+    """A consumer class implementing a topic-based proxy.
+
+    Forwards to IPC sockets.
     """

     def __init__(self, conf):

@@ -440,11 +421,8 @@ class ZmqProxy(ZmqBaseReactor):
     def consume(self, sock):
         ipc_dir = CONF.rpc_zmq_ipc_dir

-        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
-        data = sock.recv()
-        topic = data[1]
-
-        LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
+        data = sock.recv(copy=False)
+        topic = data[1].bytes

         if topic.startswith('fanout~'):
             sock_type = zmq.PUB

@@ -486,9 +464,7 @@ class ZmqProxy(ZmqBaseReactor):
                 while(True):
                     data = self.topic_proxy[topic].get()
-                    out_sock.send(data)
-                    LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
-                              {'data': data})
+                    out_sock.send(data, copy=False)

             wait_sock_creation = eventlet.event.Event()
             eventlet.spawn(publisher, wait_sock_creation)

@@ -501,37 +477,34 @@ class ZmqProxy(ZmqBaseReactor):
         try:
             self.topic_proxy[topic].put_nowait(data)
-            LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
-                      {'data': data})
         except eventlet.queue.Full:
             LOG.error(_("Local per-topic backlog buffer full for topic "
                         "%(topic)s. Dropping message.") % {'topic': topic})

     def consume_in_thread(self):
-        """Runs the ZmqProxy service"""
+        """Runs the ZmqProxy service."""
         ipc_dir = CONF.rpc_zmq_ipc_dir
         consume_in = "tcp://%s:%s" % \
             (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
         consumption_proxy = InternalContext(None)

-        if not os.path.isdir(ipc_dir):
-            try:
-                utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
-                utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
-                              ipc_dir, run_as_root=True)
-                utils.execute('chmod', '750', ipc_dir, run_as_root=True)
-            except utils.ProcessExecutionError:
+        try:
+            os.makedirs(ipc_dir)
+        except os.error:
+            if not os.path.isdir(ipc_dir):
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_("Could not create IPC directory %s") %
-                              (ipc_dir, ))
+                    LOG.error(_("Required IPC directory does not exist at"
+                                " %s") % (ipc_dir, ))

         try:
             self.register(consumption_proxy,
                           consume_in,
-                          zmq.PULL,
-                          out_bind=True)
+                          zmq.PULL)
         except zmq.ZMQError:
+            if os.access(ipc_dir, os.X_OK):
+                with excutils.save_and_reraise_exception():
+                    LOG.error(_("Permission denied to IPC directory at"
+                                " %s") % (ipc_dir, ))
             with excutils.save_and_reraise_exception():
                 LOG.error(_("Could not create ZeroMQ receiver daemon. "
                             "Socket may already be in use."))

@@ -541,8 +514,9 @@ class ZmqProxy(ZmqBaseReactor):

 def unflatten_envelope(packenv):
     """Unflattens the RPC envelope.
-       Takes a list and returns a dictionary.
-       i.e. [1,2,3,4] => {1: 2, 3: 4}
+
+    Takes a list and returns a dictionary.
+    i.e. [1,2,3,4] => {1: 2, 3: 4}
     """
     i = iter(packenv)
     h = {}
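The rest of the loop falls outside this hunk; a sketch of the complete helper, reconstructed from the [1,2,3,4] => {1: 2, 3: 4} contract in the docstring (the name is made up, not necessarily the exact upstream body):

    def unflatten_envelope_sketch(packenv):
        i = iter(packenv)
        h = {}
        try:
            while True:
                k = next(i)        # keys and values alternate in the list
                h[k] = next(i)
        except StopIteration:
            return h

    assert unflatten_envelope_sketch([1, 2, 3, 4]) == {1: 2, 3: 4}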
@@ -555,10 +529,9 @@ def unflatten_envelope(packenv):

 class ZmqReactor(ZmqBaseReactor):
-    """
-    A consumer class implementing a
-    consumer for messages. Can also be
-    used as a 1:1 proxy
+    """A consumer class implementing a consumer for messages.
+
+    Can also be used as a 1:1 proxy.
     """

     def __init__(self, conf):

@@ -568,11 +541,6 @@ class ZmqReactor(ZmqBaseReactor):
         #TODO(ewindisch): use zero-copy (i.e. references, not copying)
         data = sock.recv()
         LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
-        if sock in self.mapping:
-            LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
-                'data': data})
-            self.mapping[sock].send(data)
-            return

         proxy = self.proxies[sock]

@@ -685,8 +653,8 @@ def _call(addr, context, topic, msg, timeout=None,
             'method': '-reply',
             'args': {
                 'msg_id': msg_id,
-                'context': mcontext,
                 'topic': reply_topic,
+                # TODO(ewindisch): safe to remove mcontext in I.
                 'msg': [mcontext, msg]
             }
         }

@@ -745,10 +713,9 @@ def _call(addr, context, topic, msg, timeout=None,

 def _multi_send(method, context, topic, msg, timeout=None,
                 envelope=False, _msg_id=None):
-    """
-    Wraps the sending of messages,
-    dispatches to the matchmaker and sends
-    message to all relevant hosts.
+    """Wraps the sending of messages.
+
+    Dispatches to the matchmaker and sends message to all relevant hosts.
     """
     conf = CONF
     LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

@@ -757,7 +724,7 @@ def _multi_send(method, context, topic, msg, timeout=None,
     LOG.debug(_("Sending message(s) to: %s"), queues)

     # Don't stack if we have no matchmaker results
-    if len(queues) == 0:
+    if not queues:
         LOG.warn(_("No matchmaker results. Not casting."))
         # While not strictly a timeout, callers know how to handle
         # this exception and a timeout isn't too big a lie.

@@ -805,8 +772,8 @@ def fanout_cast(conf, context, topic, msg, **kwargs):

 def notify(conf, context, topic, msg, envelope):
-    """
-    Send notification event.
+    """Send notification event.
+
     Notifications are sent to topic-priority.
     This differs from the AMQP drivers which send to topic.priority.
     """

@@ -840,6 +807,11 @@ def _get_ctxt():

 def _get_matchmaker(*args, **kwargs):
     global matchmaker
     if not matchmaker:
-        matchmaker = importutils.import_object(
-            CONF.rpc_zmq_matchmaker, *args, **kwargs)
+        mm = CONF.rpc_zmq_matchmaker
+        if mm.endswith('matchmaker.MatchMakerRing'):
+            mm = mm.replace('matchmaker', 'matchmaker_ring')
+            LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
+                       ' %(new)s instead') % dict(
+                     orig=CONF.rpc_zmq_matchmaker, new=mm))
+        matchmaker = importutils.import_object(mm, *args, **kwargs)
     return matchmaker
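The deprecation shim only rewrites the module path; a quick check of the string transformation it performs (the option value is illustrative):

    old = 'climate.openstack.common.rpc.matchmaker.MatchMakerRing'
    new = old.replace('matchmaker', 'matchmaker_ring')
    assert new == ('climate.openstack.common.rpc.'
                   'matchmaker_ring.MatchMakerRing')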
View File
@@ -19,8 +19,6 @@ return keys for direct exchanges, per (approximate) AMQP parlance.
 """

 import contextlib
-import itertools
-import json

 import eventlet
 from oslo.config import cfg

@@ -30,15 +28,11 @@ from climate.openstack.common import log as logging

 matchmaker_opts = [
-    # Matchmaker ring file
-    cfg.StrOpt('matchmaker_ringfile',
-               default='/etc/nova/matchmaker_ring.json',
-               help='Matchmaker ring file (JSON)'),
     cfg.IntOpt('matchmaker_heartbeat_freq',
-               default='300',
+               default=300,
                help='Heartbeat frequency'),
     cfg.IntOpt('matchmaker_heartbeat_ttl',
-               default='600',
+               default=600,
                help='Heartbeat time-to-live.'),
 ]
@@ -54,8 +48,8 @@ class MatchMakerException(Exception):

 class Exchange(object):
-    """
-    Implements lookups.
+    """Implements lookups.
+
     Subclass this to support hashtables, dns, etc.
     """
     def __init__(self):

@@ -66,9 +60,7 @@ class Exchange(object):

 class Binding(object):
-    """
-    A binding on which to perform a lookup.
-    """
+    """A binding on which to perform a lookup."""
     def __init__(self):
         pass

@@ -77,10 +69,10 @@ class Binding(object):

 class MatchMakerBase(object):
-    """
-    Match Maker Base Class.
-    Build off HeartbeatMatchMakerBase if building a
-    heartbeat-capable MatchMaker.
+    """Match Maker Base Class.
+
+    Build off HeartbeatMatchMakerBase if building a heartbeat-capable
+    MatchMaker.
     """
     def __init__(self):
         # Array of tuples. Index [2] toggles negation, [3] is last-if-true

@@ -90,58 +82,47 @@ class MatchMakerBase(object):
                    'registration or heartbeat.')

     def register(self, key, host):
-        """
-        Register a host on a backend.
+        """Register a host on a backend.
+
         Heartbeats, if applicable, may keepalive registration.
         """
         pass

     def ack_alive(self, key, host):
-        """
-        Acknowledge that a key.host is alive.
-        Used internally for updating heartbeats,
-        but may also be used publically to acknowledge
-        a system is alive (i.e. rpc message successfully
-        sent to host)
+        """Acknowledge that a key.host is alive.
+
+        Used internally for updating heartbeats, but may also be used
+        publicly to acknowledge a system is alive (i.e. rpc message
+        successfully sent to host)
         """
         pass

     def is_alive(self, topic, host):
-        """
-        Checks if a host is alive.
-        """
+        """Checks if a host is alive."""
         pass

     def expire(self, topic, host):
-        """
-        Explicitly expire a host's registration.
-        """
+        """Explicitly expire a host's registration."""
         pass

     def send_heartbeats(self):
-        """
-        Send all heartbeats.
+        """Send all heartbeats.
+
         Use start_heartbeat to spawn a heartbeat greenthread,
         which loops this method.
         """
         pass

     def unregister(self, key, host):
-        """
-        Unregister a topic.
-        """
+        """Unregister a topic."""
         pass

     def start_heartbeat(self):
-        """
-        Spawn heartbeat greenthread.
-        """
+        """Spawn heartbeat greenthread."""
         pass

     def stop_heartbeat(self):
-        """
-        Destroys the heartbeat greenthread.
-        """
+        """Destroys the heartbeat greenthread."""
         pass
     def add_binding(self, binding, rule, last=True):

@@ -168,10 +149,10 @@ class MatchMakerBase(object):

 class HeartbeatMatchMakerBase(MatchMakerBase):
-    """
-    Base for a heart-beat capable MatchMaker.
-    Provides common methods for registering,
-    unregistering, and maintaining heartbeats.
+    """Base for a heart-beat capable MatchMaker.
+
+    Provides common methods for registering, unregistering, and maintaining
+    heartbeats.
     """
     def __init__(self):
         self.hosts = set()

@@ -181,8 +162,8 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
         super(HeartbeatMatchMakerBase, self).__init__()

     def send_heartbeats(self):
-        """
-        Send all heartbeats.
+        """Send all heartbeats.
+
         Use start_heartbeat to spawn a heartbeat greenthread,
         which loops this method.
         """

@@ -190,32 +171,31 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
             self.ack_alive(key, host)

     def ack_alive(self, key, host):
-        """
-        Acknowledge that a host.topic is alive.
-        Used internally for updating heartbeats,
-        but may also be used publically to acknowledge
-        a system is alive (i.e. rpc message successfully
-        sent to host)
+        """Acknowledge that a host.topic is alive.
+
+        Used internally for updating heartbeats, but may also be used
+        publicly to acknowledge a system is alive (i.e. rpc message
+        successfully sent to host)
         """
         raise NotImplementedError("Must implement ack_alive")

     def backend_register(self, key, host):
-        """
-        Implements registration logic.
+        """Implements registration logic.
+
         Called by register(self,key,host)
         """
         raise NotImplementedError("Must implement backend_register")

     def backend_unregister(self, key, key_host):
-        """
-        Implements de-registration logic.
+        """Implements de-registration logic.
+
         Called by unregister(self,key,host)
         """
         raise NotImplementedError("Must implement backend_unregister")

     def register(self, key, host):
-        """
-        Register a host on a backend.
+        """Register a host on a backend.
+
         Heartbeats, if applicable, may keepalive registration.
         """
         self.hosts.add(host)

@@ -227,25 +207,24 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
         self.ack_alive(key, host)

     def unregister(self, key, host):
-        """
-        Unregister a topic.
-        """
+        """Unregister a topic."""
         if (key, host) in self.host_topic:
             del self.host_topic[(key, host)]
         self.hosts.discard(host)
         self.backend_unregister(key, '.'.join((key, host)))

-        LOG.info(_("Matchmaker unregistered: %s, %s" % (key, host)))
+        LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
+                 {'key': key, 'host': host})

     def start_heartbeat(self):
-        """
-        Implementation of MatchMakerBase.start_heartbeat
+        """Implementation of MatchMakerBase.start_heartbeat.
+
         Launches greenthread looping send_heartbeats(),
         yielding for CONF.matchmaker_heartbeat_freq seconds
         between iterations.
         """
-        if len(self.hosts) == 0:
+        if not self.hosts:
             raise MatchMakerException(
                 _("Register before starting heartbeat."))

@@ -257,16 +236,14 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
         self._heart = eventlet.spawn(do_heartbeat)

     def stop_heartbeat(self):
-        """
-        Destroys the heartbeat greenthread.
-        """
+        """Destroys the heartbeat greenthread."""
         if self._heart:
             self._heart.kill()
 class DirectBinding(Binding):
-    """
-    Specifies a host in the key via a '.' character
+    """Specifies a host in the key via a '.' character.
+
     Although dots are used in the key, the behavior here is
     that it maps directly to a host, thus direct.
     """

@@ -277,8 +254,8 @@ class DirectBinding(Binding):

 class TopicBinding(Binding):
-    """
-    Where a 'bare' key without dots.
+    """Where a 'bare' key without dots.
+
     AMQP generally considers topic exchanges to be those *with* dots,
     but we deviate here in terminology as the behavior here matches
     that of a topic exchange (whereas where there are dots, behavior

@@ -304,67 +281,6 @@ class StubExchange(Exchange):
         return [(key, None)]


-class RingExchange(Exchange):
-    """
-    Match Maker where hosts are loaded from a static file containing
-    a hashmap (JSON formatted).
-
-    __init__ takes optional ring dictionary argument, otherwise
-    loads the ringfile from CONF.mathcmaker_ringfile.
-    """
-    def __init__(self, ring=None):
-        super(RingExchange, self).__init__()
-
-        if ring:
-            self.ring = ring
-        else:
-            fh = open(CONF.matchmaker_ringfile, 'r')
-            self.ring = json.load(fh)
-            fh.close()
-
-        self.ring0 = {}
-        for k in self.ring.keys():
-            self.ring0[k] = itertools.cycle(self.ring[k])
-
-    def _ring_has(self, key):
-        if key in self.ring0:
-            return True
-        return False
-
-
-class RoundRobinRingExchange(RingExchange):
-    """A Topic Exchange based on a hashmap."""
-    def __init__(self, ring=None):
-        super(RoundRobinRingExchange, self).__init__(ring)
-
-    def run(self, key):
-        if not self._ring_has(key):
-            LOG.warn(
-                _("No key defining hosts for topic '%s', "
-                  "see ringfile") % (key, )
-            )
-            return []
-        host = next(self.ring0[key])
-        return [(key + '.' + host, host)]
-
-
-class FanoutRingExchange(RingExchange):
-    """Fanout Exchange based on a hashmap."""
-    def __init__(self, ring=None):
-        super(FanoutRingExchange, self).__init__(ring)
-
-    def run(self, key):
-        # Assume starts with "fanout~", strip it for lookup.
-        nkey = key.split('fanout~')[1:][0]
-        if not self._ring_has(nkey):
-            LOG.warn(
-                _("No key defining hosts for topic '%s', "
-                  "see ringfile") % (nkey, )
-            )
-            return []
-        return map(lambda x: (key + '.' + x, x), self.ring[nkey])
-
-
 class LocalhostExchange(Exchange):
     """Exchange where all direct topics are local."""
     def __init__(self, host='localhost'):

@@ -376,8 +292,8 @@ class LocalhostExchange(Exchange):

 class DirectExchange(Exchange):
-    """
-    Exchange where all topic keys are split, sending to second half.
+    """Exchange where all topic keys are split, sending to second half.
+
     i.e. "compute.host" sends a message to "compute.host" running on "host"
     """
     def __init__(self):

@@ -388,20 +304,9 @@ class DirectExchange(Exchange):
         return [(key, e)]


-class MatchMakerRing(MatchMakerBase):
-    """
-    Match Maker where hosts are loaded from a static hashmap.
-    """
-    def __init__(self, ring=None):
-        super(MatchMakerRing, self).__init__()
-        self.add_binding(FanoutBinding(), FanoutRingExchange(ring))
-        self.add_binding(DirectBinding(), DirectExchange())
-        self.add_binding(TopicBinding(), RoundRobinRingExchange(ring))
-
-
 class MatchMakerLocalhost(MatchMakerBase):
-    """
-    Match Maker where all bare topics resolve to localhost.
+    """Match Maker where all bare topics resolve to localhost.
+
     Useful for testing.
     """
     def __init__(self, host='localhost'):

@@ -412,13 +317,13 @@ class MatchMakerLocalhost(MatchMakerBase):

 class MatchMakerStub(MatchMakerBase):
-    """
-    Match Maker where topics are untouched.
+    """Match Maker where topics are untouched.
+
     Useful for testing, or for AMQP/brokered queues.
     Will not work where knowledge of hosts is known (i.e. zeromq)
     """
     def __init__(self):
-        super(MatchMakerLocalhost, self).__init__()
+        super(MatchMakerStub, self).__init__()

         self.add_binding(FanoutBinding(), StubExchange())
         self.add_binding(DirectBinding(), StubExchange())
View File
@@ -55,8 +55,8 @@ class RedisExchange(mm_common.Exchange):

 class RedisTopicExchange(RedisExchange):
-    """
-    Exchange where all topic keys are split, sending to second half.
+    """Exchange where all topic keys are split, sending to second half.
+
     i.e. "compute.host" sends a message to "compute" running on "host"
     """
     def run(self, topic):

@@ -77,9 +77,7 @@ class RedisTopicExchange(RedisExchange):

 class RedisFanoutExchange(RedisExchange):
-    """
-    Return a list of all hosts.
-    """
+    """Return a list of all hosts."""
     def run(self, topic):
         topic = topic.split('~', 1)[1]
         hosts = self.redis.smembers(topic)

@@ -90,9 +88,7 @@ class RedisFanoutExchange(RedisExchange):

 class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
-    """
-    MatchMaker registering and looking-up hosts with a Redis server.
-    """
+    """MatchMaker registering and looking-up hosts with a Redis server."""
     def __init__(self):
         super(MatchMakerRedis, self).__init__()
View File
@@ -0,0 +1,110 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011-2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""

import itertools
import json

from oslo.config import cfg

from climate.openstack.common.gettextutils import _
from climate.openstack.common import log as logging
from climate.openstack.common.rpc import matchmaker as mm


matchmaker_opts = [
    # Matchmaker ring file
    cfg.StrOpt('ringfile',
               deprecated_name='matchmaker_ringfile',
               deprecated_group='DEFAULT',
               default='/etc/oslo/matchmaker_ring.json',
               help='Matchmaker ring file (JSON)'),
]

CONF = cfg.CONF
CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
LOG = logging.getLogger(__name__)


class RingExchange(mm.Exchange):
    """Match Maker where hosts are loaded from a static JSON formatted file.

    __init__ takes optional ring dictionary argument, otherwise
    loads the ringfile from CONF.matchmaker_ring.ringfile.
    """
    def __init__(self, ring=None):
        super(RingExchange, self).__init__()

        if ring:
            self.ring = ring
        else:
            fh = open(CONF.matchmaker_ring.ringfile, 'r')
            self.ring = json.load(fh)
            fh.close()

        self.ring0 = {}
        for k in self.ring.keys():
            self.ring0[k] = itertools.cycle(self.ring[k])

    def _ring_has(self, key):
        if key in self.ring0:
            return True
        return False


class RoundRobinRingExchange(RingExchange):
    """A Topic Exchange based on a hashmap."""
    def __init__(self, ring=None):
        super(RoundRobinRingExchange, self).__init__(ring)

    def run(self, key):
        if not self._ring_has(key):
            LOG.warn(
                _("No key defining hosts for topic '%s', "
                  "see ringfile") % (key, )
            )
            return []
        host = next(self.ring0[key])
        return [(key + '.' + host, host)]


class FanoutRingExchange(RingExchange):
    """Fanout Exchange based on a hashmap."""
    def __init__(self, ring=None):
        super(FanoutRingExchange, self).__init__(ring)

    def run(self, key):
        # Assume starts with "fanout~", strip it for lookup.
        nkey = key.split('fanout~')[1:][0]
        if not self._ring_has(nkey):
            LOG.warn(
                _("No key defining hosts for topic '%s', "
                  "see ringfile") % (nkey, )
            )
            return []
        return map(lambda x: (key + '.' + x, x), self.ring[nkey])


class MatchMakerRing(mm.MatchMakerBase):
    """Match Maker where hosts are loaded from a static hashmap."""
    def __init__(self, ring=None):
        super(MatchMakerRing, self).__init__()
        self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring))
        self.add_binding(mm.DirectBinding(), mm.DirectExchange())
        self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring))
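A minimal usage sketch, assuming the module above is importable as shown; the ring contents and topic names are made up:

    from climate.openstack.common.rpc import matchmaker_ring as mm_ring

    ring = {"scheduler": ["host1", "host2"]}

    rr = mm_ring.RoundRobinRingExchange(ring)
    print(rr.run("scheduler"))   # [('scheduler.host1', 'host1')]
    print(rr.run("scheduler"))   # [('scheduler.host2', 'host2')], then wraps

    fanout = mm_ring.FanoutRingExchange(ring)
    print(list(fanout.run("fanout~scheduler")))
    # [('fanout~scheduler.host1', 'host1'),
    #  ('fanout~scheduler.host2', 'host2')]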
View File
@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

-# Copyright 2012 Red Hat, Inc.
+# Copyright 2012-2013 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain

@@ -23,6 +23,8 @@ For more information about rpc API version numbers, see:

 from climate.openstack.common import rpc
+from climate.openstack.common.rpc import common as rpc_common
+from climate.openstack.common.rpc import serializer as rpc_serializer


 class RpcProxy(object):

@@ -34,16 +36,28 @@ class RpcProxy(object):
     rpc API.
     """

-    def __init__(self, topic, default_version):
+    # The default namespace, which can be overridden in a subclass.
+    RPC_API_NAMESPACE = None
+
+    def __init__(self, topic, default_version, version_cap=None,
+                 serializer=None):
         """Initialize an RpcProxy.

         :param topic: The topic to use for all messages.
         :param default_version: The default API version to request in all
                outgoing messages.  This can be overridden on a per-message
                basis.
+        :param version_cap: Optionally cap the maximum version used for sent
+               messages.
+        :param serializer: Optionally (de-)serialize entities with a
+               provided helper.
         """
         self.topic = topic
         self.default_version = default_version
+        self.version_cap = version_cap
+        if serializer is None:
+            serializer = rpc_serializer.NoOpSerializer()
+        self.serializer = serializer
         super(RpcProxy, self).__init__()

     def _set_version(self, msg, vers):

@@ -52,15 +66,44 @@ class RpcProxy(object):
         :param msg: The message having a version added to it.
         :param vers: The version number to add to the message.
         """
-        msg['version'] = vers if vers else self.default_version
+        v = vers if vers else self.default_version
+        if (self.version_cap and not
+                rpc_common.version_is_compatible(self.version_cap, v)):
+            raise rpc_common.RpcVersionCapError(version=self.version_cap)
+        msg['version'] = v

     def _get_topic(self, topic):
         """Return the topic to use for a message."""
         return topic if topic else self.topic

+    def can_send_version(self, version):
+        """Check to see if a version is compatible with the version cap."""
+        return (not self.version_cap or
+                rpc_common.version_is_compatible(self.version_cap, version))
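A sketch of how a subclass might use the cap; the ComputeAPI class, topic, versions, and method names are hypothetical:

    class ComputeAPI(RpcProxy):
        RPC_API_NAMESPACE = 'compute'

        def __init__(self):
            super(ComputeAPI, self).__init__('compute', '1.0',
                                             version_cap='1.4')

        def resize(self, context, instance_id):
            if self.can_send_version('1.5'):
                msg = self.make_msg('resize_v2', instance_id=instance_id)
                return self.call(context, msg, version='1.5')
            # Capped deployments fall back to the older message form.
            msg = self.make_msg('resize', instance_id=instance_id)
            return self.call(context, msg)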
     @staticmethod
-    def make_msg(method, **kwargs):
-        return {'method': method, 'args': kwargs}
+    def make_namespaced_msg(method, namespace, **kwargs):
+        return {'method': method, 'namespace': namespace, 'args': kwargs}
+
+    def make_msg(self, method, **kwargs):
+        return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
+                                        **kwargs)
+
+    def _serialize_msg_args(self, context, kwargs):
+        """Helper method called to serialize message arguments.
+
+        This calls our serializer on each argument, returning a new
+        set of args that have been serialized.
+
+        :param context: The request context
+        :param kwargs: The arguments to serialize
+        :returns: A new set of serialized arguments
+        """
+        new_kwargs = dict()
+        for argname, arg in kwargs.iteritems():
+            new_kwargs[argname] = self.serializer.serialize_entity(context,
+                                                                   arg)
+        return new_kwargs

     def call(self, context, msg, topic=None, version=None, timeout=None):
         """rpc.call() a remote method.

@@ -68,16 +111,23 @@ class RpcProxy(object):
         :param context: The request context
         :param msg: The message to send, including the method and args.
         :param topic: Override the topic for this message.
+        :param version: (Optional) Override the requested API version in this
+               message.
         :param timeout: (Optional) A timeout to use when waiting for the
                response.  If no timeout is specified, a default timeout will be
                used that is usually sufficient.
-        :param version: (Optional) Override the requested API version in this
-               message.
         :returns: The return value from the remote method.
         """
         self._set_version(msg, version)
-        return rpc.call(context, self._get_topic(topic), msg, timeout)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
+        real_topic = self._get_topic(topic)
+        try:
+            result = rpc.call(context, real_topic, msg, timeout)
+            return self.serializer.deserialize_entity(context, result)
+        except rpc.common.Timeout as exc:
+            raise rpc.common.Timeout(
+                exc.info, real_topic, msg.get('method'))

     def multicall(self, context, msg, topic=None, version=None, timeout=None):
         """rpc.multicall() a remote method.

@@ -85,17 +135,24 @@ class RpcProxy(object):
         :param context: The request context
         :param msg: The message to send, including the method and args.
         :param topic: Override the topic for this message.
+        :param version: (Optional) Override the requested API version in this
+               message.
         :param timeout: (Optional) A timeout to use when waiting for the
                response.  If no timeout is specified, a default timeout will be
                used that is usually sufficient.
-        :param version: (Optional) Override the requested API version in this
-               message.
         :returns: An iterator that lets you process each of the returned values
                   from the remote method as they arrive.
         """
         self._set_version(msg, version)
-        return rpc.multicall(context, self._get_topic(topic), msg, timeout)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
+        real_topic = self._get_topic(topic)
+        try:
+            result = rpc.multicall(context, real_topic, msg, timeout)
+            return self.serializer.deserialize_entity(context, result)
+        except rpc.common.Timeout as exc:
+            raise rpc.common.Timeout(
+                exc.info, real_topic, msg.get('method'))

     def cast(self, context, msg, topic=None, version=None):
         """rpc.cast() a remote method.

@@ -110,6 +167,7 @@ class RpcProxy(object):
                  remote method.
         """
         self._set_version(msg, version)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
         rpc.cast(context, self._get_topic(topic), msg)

     def fanout_cast(self, context, msg, topic=None, version=None):

@@ -125,6 +183,7 @@ class RpcProxy(object):
                  from the remote method.
         """
         self._set_version(msg, version)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
         rpc.fanout_cast(context, self._get_topic(topic), msg)

     def cast_to_server(self, context, server_params, msg, topic=None,

@@ -143,6 +202,7 @@ class RpcProxy(object):
                  return values.
         """
         self._set_version(msg, version)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
         rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)

     def fanout_cast_to_server(self, context, server_params, msg, topic=None,

@@ -161,5 +221,6 @@ class RpcProxy(object):
                  return values.
         """
         self._set_version(msg, version)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
         rpc.fanout_cast_to_server(context, server_params,
                                   self._get_topic(topic), msg)
View File
@@ -0,0 +1,52 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Provides the definition of an RPC serialization handler"""

import abc


class Serializer(object):
    """Generic (de-)serialization definition base class."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def serialize_entity(self, context, entity):
        """Serialize something to primitive form.

        :param context: Security context
        :param entity: Entity to be serialized
        :returns: Serialized form of entity
        """
        pass

    @abc.abstractmethod
    def deserialize_entity(self, context, entity):
        """Deserialize something from primitive form.

        :param context: Security context
        :param entity: Primitive to be deserialized
        :returns: Deserialized form of entity
        """
        pass


class NoOpSerializer(Serializer):
    """A serializer that does nothing."""

    def serialize_entity(self, context, entity):
        return entity

    def deserialize_entity(self, context, entity):
        return entity
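A minimal sketch of a concrete implementation; the Note entity and its wire format are made up for illustration:

    class Note(object):
        def __init__(self, text):
            self.text = text

        def to_dict(self):
            return {'_type': 'note', 'text': self.text}


    class NoteSerializer(Serializer):
        """Flattens Note objects to dicts for the wire, and back."""

        def serialize_entity(self, context, entity):
            return entity.to_dict() if isinstance(entity, Note) else entity

        def deserialize_entity(self, context, entity):
            if isinstance(entity, dict) and entity.get('_type') == 'note':
                return Note(entity['text'])
            return entity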
View File
@@ -30,7 +30,8 @@ LOG = logging.getLogger(__name__)

 class Service(service.Service):
     """Service object for binaries running on hosts.

-    A service enables rpc by listening to queues based on topic and host."""
+    A service enables rpc by listening to queues based on topic and host.
+    """

     def __init__(self, host, topic, manager=None):
         super(Service, self).__init__()
         self.host = host
View File
@ -0,0 +1,41 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
import contextlib
import sys
from oslo.config import cfg
from climate.openstack.common import log as logging
from climate.openstack.common import rpc
from climate.openstack.common.rpc import impl_zmq
CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)
def main():
CONF(sys.argv[1:], project='oslo')
logging.setup("oslo")
with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
reactor.consume_in_thread()
reactor.wait()
View File
@@ -52,7 +52,7 @@ class Launcher(object):
         """
         self._services = threadgroup.ThreadGroup()
-        eventlet_backdoor.initialize_if_enabled()
+        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

     @staticmethod
     def run_service(service):
@@ -72,6 +72,7 @@ class Launcher(object):
         :returns: None

         """
+        service.backdoor_port = self.backdoor_port
         self._services.add_thread(self.run_service, service)

     def stop(self):
@@ -270,7 +271,7 @@ class ProcessLauncher(object):
         return wrap

     def wait(self):
-        """Loop waiting on children to die and respawning as necessary"""
+        """Loop waiting on children to die and respawning as necessary."""
         LOG.debug(_('Full set of CONF:'))
         CONF.log_opt_values(LOG, std_logging.DEBUG)
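
These two Launcher hunks thread the eventlet backdoor's listening port from initialize_if_enabled() down to every launched service, so a service can report where its debugging backdoor lives. A minimal sketch of that flow (FakeBackdoor and the port 4444 are invented):

class FakeBackdoor(object):
    # Hypothetical stand-in for eventlet_backdoor; the real helper
    # returns None when the backdoor is disabled.
    @staticmethod
    def initialize_if_enabled():
        return 4444


class Launcher(object):
    def __init__(self):
        # Remember where the backdoor listens...
        self.backdoor_port = FakeBackdoor.initialize_if_enabled()

    def launch_service(self, service):
        # ...and stamp it on each service before running it.
        service.backdoor_port = self.backdoor_port


class Service(object):
    backdoor_port = None


svc = Service()
Launcher().launch_service(svc)
print(svc.backdoor_port)  # 4444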

View File

@@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__)

 def _thread_done(gt, *args, **kwargs):
-    """ Callback function to be passed to GreenThread.link() when we spawn()
+    """Callback function to be passed to GreenThread.link() when we spawn()

     Calls the :class:`ThreadGroup` to notify if.
     """
@@ -34,7 +34,7 @@ def _thread_done(gt, *args, **kwargs):

 class Thread(object):
-    """ Wrapper around a greenthread, that holds a reference to the
+    """Wrapper around a greenthread, that holds a reference to the
     :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
     it has done so it can be removed from the threads list.
     """
@@ -50,7 +50,7 @@ class Thread(object):

 class ThreadGroup(object):
-    """ The point of the ThreadGroup classis to:
+    """The point of the ThreadGroup classis to:

     * keep track of timers and greenthreads (making it easier to stop them
       when need be).
@@ -61,9 +61,16 @@ class ThreadGroup(object):
         self.threads = []
         self.timers = []

+    def add_dynamic_timer(self, callback, initial_delay=None,
+                          periodic_interval_max=None, *args, **kwargs):
+        timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
+        timer.start(initial_delay=initial_delay,
+                    periodic_interval_max=periodic_interval_max)
+        self.timers.append(timer)
+
     def add_timer(self, interval, callback, initial_delay=None,
                   *args, **kwargs):
-        pulse = loopingcall.LoopingCall(callback, *args, **kwargs)
+        pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
         pulse.start(interval=interval,
                     initial_delay=initial_delay)
         self.timers.append(pulse)
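
The new add_dynamic_timer() pairs with DynamicLoopingCall, which, judging by the names and the periodic_interval_max cap (an inference rather than something documented in this diff), lets the callback choose its own next interval. A standalone sketch of that contract, with run_dynamic() as a made-up driver:

import time


def run_dynamic(callback, initial_delay=None, periodic_interval_max=None,
                rounds=3):
    # Drive the callback a few times; its return value is the delay
    # before the next run, capped by periodic_interval_max.
    if initial_delay:
        time.sleep(initial_delay)
    for _ in range(rounds):
        delay = callback()
        if periodic_interval_max is not None:
            delay = min(delay, periodic_interval_max)
        time.sleep(delay)


def poll():
    print("polled")
    return 0.1  # ask to run again in 0.1 seconds


run_dynamic(poll, initial_delay=0, periodic_interval_max=1)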

View File

@@ -23,6 +23,7 @@ import calendar
 import datetime

 import iso8601
+import six


 # ISO 8601 extended time format with microseconds
@@ -32,7 +33,7 @@ PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND

 def isotime(at=None, subsecond=False):
-    """Stringify time in ISO 8601 format"""
+    """Stringify time in ISO 8601 format."""
     if not at:
         at = utcnow()
     st = at.strftime(_ISO8601_TIME_FORMAT
@@ -44,7 +45,7 @@ def isotime(at=None, subsecond=False):

 def parse_isotime(timestr):
-    """Parse time from ISO 8601 format"""
+    """Parse time from ISO 8601 format."""
     try:
         return iso8601.parse_date(timestr)
     except iso8601.ParseError as e:
@@ -66,7 +67,7 @@ def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):

 def normalize_time(timestamp):
-    """Normalize time in arbitrary timezone to UTC naive object"""
+    """Normalize time in arbitrary timezone to UTC naive object."""
     offset = timestamp.utcoffset()
     if offset is None:
         return timestamp
@@ -75,14 +76,14 @@ def normalize_time(timestamp):

 def is_older_than(before, seconds):
     """Return True if before is older than seconds."""
-    if isinstance(before, basestring):
+    if isinstance(before, six.string_types):
         before = parse_strtime(before).replace(tzinfo=None)
     return utcnow() - before > datetime.timedelta(seconds=seconds)


 def is_newer_than(after, seconds):
     """Return True if after is newer than seconds."""
-    if isinstance(after, basestring):
+    if isinstance(after, six.string_types):
         after = parse_strtime(after).replace(tzinfo=None)
     return after - utcnow() > datetime.timedelta(seconds=seconds)
@@ -103,7 +104,7 @@ def utcnow():

 def iso8601_from_timestamp(timestamp):
-    """Returns a iso8601 formated date from timestamp"""
+    """Returns a iso8601 formated date from timestamp."""
     return isotime(datetime.datetime.utcfromtimestamp(timestamp))
@@ -111,9 +112,9 @@ utcnow.override_time = None

 def set_time_override(override_time=datetime.datetime.utcnow()):
-    """
-    Override utils.utcnow to return a constant time or a list thereof,
-    one at a time.
+    """Overrides utils.utcnow.
+
+    Make it return a constant time or a list thereof, one at a time.
     """
     utcnow.override_time = override_time
@@ -141,7 +142,8 @@ def clear_time_override():

 def marshall_now(now=None):
     """Make an rpc-safe datetime with microseconds.
-    Note: tzinfo is stripped, but not required for relative times."""
+
+    Note: tzinfo is stripped, but not required for relative times.
+    """
     if not now:
         now = utcnow()
     return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
@@ -161,7 +163,8 @@ def unmarshall_time(tyme):

 def delta_seconds(before, after):
-    """
-    Compute the difference in seconds between two date, time, or
+    """Return the difference between two timing objects.
+
+    Compute the difference in seconds between two date, time, or
     datetime objects (as a float, to microsecond resolution).
     """
@@ -174,8 +177,7 @@ def delta_seconds(before, after):

 def is_soon(dt, window):
-    """
-    Determines if time is going to happen in the next window seconds.
+    """Determines if time is going to happen in the next window seconds.

     :params dt: the time
     :params window: minimum seconds to remain to consider the time not soon
View File

@@ -1,4 +1,6 @@
-# Copyright 2012 Red Hat, Inc.
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Intel Corporation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,17 +15,25 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from climate.openstack.common.gettextutils import _
-from climate.openstack.common import log as logging
-from climate.openstack.common.notifier import rpc_notifier
+"""
+UUID related utilities and helper functions.
+"""

-LOG = logging.getLogger(__name__)
+import uuid


-def notify(context, message):
-    """Deprecated in Grizzly. Please use rpc_notifier instead."""
-    LOG.deprecated(_("The rabbit_notifier is now deprecated."
-                     " Please use rpc_notifier instead."))
-    rpc_notifier.notify(context, message)
+def generate_uuid():
+    return str(uuid.uuid4())
+
+
+def is_uuid_like(val):
+    """Returns validation of a value as a UUID.
+
+    For our purposes, a UUID is a canonical form string:
+    aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
+    """
+    try:
+        return str(uuid.UUID(val)) == val
+    except (TypeError, ValueError, AttributeError):
+        return False
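
Note that is_uuid_like() only accepts the canonical lowercase, hyphenated form, because str(uuid.UUID(val)) normalizes the value before comparing it back. A quick stdlib-only check:

import uuid


def is_uuid_like(val):  # copied from the new module above
    try:
        return str(uuid.UUID(val)) == val
    except (TypeError, ValueError, AttributeError):
        return False


print(is_uuid_like(str(uuid.uuid4())))          # True
print(is_uuid_like("not-a-uuid"))               # False
print(is_uuid_like(str(uuid.uuid4()).upper()))  # False: not canonical form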

View File

@@ -1,3 +1,3 @@
 [DEFAULT]
-modules=rpc,service,setup,gettextutils,importutils,local,eventlet_backdoor,log,jsonutils,timeutils,notifier,threadgroup,loopingcall,network_utils,excutils
+modules=rpc,service,setup,gettextutils,importutils,local,eventlet_backdoor,log,jsonutils,policy,timeutils,notifier,threadgroup,loopingcall,network_utils,excutils
 base=climate