Update Oslo

New versions of openstack.common files.

Change-Id: I70fc4846252d1a0612007e7140ba2c21ca891bb2

parent: ceae8641a1
commit: 83a76bff67

climate/openstack/common/context.py (new file, 83 lines)
@@ -0,0 +1,83 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Simple class that stores security context information in the web request.

Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""

import itertools

from climate.openstack.common import uuidutils


def generate_request_id():
    return 'req-%s' % uuidutils.generate_uuid()


class RequestContext(object):

    """Helper class to represent useful information about a request context.

    Stores information about the security context under which the user
    accesses the system, as well as additional request information.
    """

    def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
                 read_only=False, show_deleted=False, request_id=None):
        self.auth_token = auth_token
        self.user = user
        self.tenant = tenant
        self.is_admin = is_admin
        self.read_only = read_only
        self.show_deleted = show_deleted
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id

    def to_dict(self):
        return {'user': self.user,
                'tenant': self.tenant,
                'is_admin': self.is_admin,
                'read_only': self.read_only,
                'show_deleted': self.show_deleted,
                'auth_token': self.auth_token,
                'request_id': self.request_id}


def get_admin_context(show_deleted=False):
    context = RequestContext(None,
                             tenant=None,
                             is_admin=True,
                             show_deleted=show_deleted)
    return context


def get_context_from_function_and_args(function, args, kwargs):
    """Find an arg of type RequestContext and return it.

    This is useful in a couple of decorators where we don't
    know much about the function we're wrapping.
    """

    for arg in itertools.chain(kwargs.values(), args):
        if isinstance(arg, RequestContext):
            return arg

    return None
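
# Usage sketch (reviewer note, not part of the commit): building a context
# per request and serializing it for transport. Values are illustrative.
from climate.openstack.common import context

ctxt = context.RequestContext(auth_token='abc123', user='alice',
                              tenant='demo')
# Request ids come from uuidutils and are prefixed with 'req-'.
assert ctxt.to_dict()['request_id'].startswith('req-')
assert context.get_admin_context().is_admin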
climate/openstack/common/eventlet_backdoor.py

@@ -16,8 +16,13 @@
# License for the specific language governing permissions and limitations
# under the License.

+from __future__ import print_function
+
+import errno
import gc
+import os
import pprint
+import socket
import sys
import traceback

@@ -26,18 +31,38 @@ import eventlet.backdoor
import greenlet
from oslo.config import cfg

from climate.openstack.common.gettextutils import _
from climate.openstack.common import log as logging

+help_for_backdoor_port = 'Acceptable ' + \
+    'values are 0, <port> and <start>:<end>, where 0 results in ' + \
+    'listening on a random tcp port number, <port> results in ' + \
+    'listening on the specified port number and not enabling backdoor' + \
+    'if it is in use and <start>:<end> results in listening on the ' + \
+    'smallest unused port number within the specified range of port ' + \
+    'numbers. The chosen port is displayed in the service\'s log file.'
eventlet_backdoor_opts = [
-    cfg.IntOpt('backdoor_port',
+    cfg.StrOpt('backdoor_port',
               default=None,
-               help='port for eventlet backdoor to listen')
+               help='Enable eventlet backdoor. %s' % help_for_backdoor_port)
]

CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)


+class EventletBackdoorConfigValueError(Exception):
+    def __init__(self, port_range, help_msg, ex):
+        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
+               '%(help)s' %
+               {'range': port_range, 'ex': ex, 'help': help_msg})
+        super(EventletBackdoorConfigValueError, self).__init__(msg)
+        self.port_range = port_range


def _dont_use_this():
-    print "Don't use this, just disconnect instead"
+    print("Don't use this, just disconnect instead")


def _find_objects(t):

@@ -46,16 +71,43 @@ def _find_objects(t):

def _print_greenthreads():
    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
-        print i, gt
+        print(i, gt)
        traceback.print_stack(gt.gr_frame)
-        print
+        print()


def _print_nativethreads():
    for threadId, stack in sys._current_frames().items():
-        print threadId
+        print(threadId)
        traceback.print_stack(stack)
-        print
+        print()


+def _parse_port_range(port_range):
+    if ':' not in port_range:
+        start, end = port_range, port_range
+    else:
+        start, end = port_range.split(':', 1)
+    try:
+        start, end = int(start), int(end)
+        if end < start:
+            raise ValueError
+        return start, end
+    except ValueError as ex:
+        raise EventletBackdoorConfigValueError(port_range, ex,
+                                               help_for_backdoor_port)


+def _listen(host, start_port, end_port, listen_func):
+    try_port = start_port
+    while True:
+        try:
+            return listen_func((host, try_port))
+        except socket.error as exc:
+            if (exc.errno != errno.EADDRINUSE or
+                    try_port >= end_port):
+                raise
+            try_port += 1


def initialize_if_enabled():

@@ -70,6 +122,8 @@ def initialize_if_enabled():
    if CONF.backdoor_port is None:
        return None

+    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
+
    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint

@@ -80,8 +134,13 @@ def initialize_if_enabled():
        pprint.pprint(val)
    sys.displayhook = displayhook

-    sock = eventlet.listen(('localhost', CONF.backdoor_port))
+    sock = _listen('localhost', start_port, end_port, eventlet.listen)

    # In the case of backdoor port being zero, a port number is assigned by
    # listen(). In any case, pull the port number out here.
    port = sock.getsockname()[1]
+    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
+             {'port': port, 'pid': os.getpid()})
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
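
# Usage sketch (reviewer note, not part of the commit): the new backdoor_port
# option accepts '0', '<port>' or '<start>:<end>'.
from climate.openstack.common import eventlet_backdoor

assert eventlet_backdoor._parse_port_range('4444') == (4444, 4444)
assert eventlet_backdoor._parse_port_range('8000:8010') == (8000, 8010)
# A reversed or non-numeric range raises EventletBackdoorConfigValueError:
# eventlet_backdoor._parse_port_range('8010:8000')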
climate/openstack/common/excutils.py

@@ -19,16 +19,15 @@
Exception related utilities.
"""

-import contextlib
import logging
import sys
+import time
import traceback

from climate.openstack.common.gettextutils import _


-@contextlib.contextmanager
-def save_and_reraise_exception():
+class save_and_reraise_exception(object):
    """Save current exception, run some code and then re-raise.

    In some cases the exception context can be cleared, resulting in None

@@ -40,12 +39,60 @@ def save_and_reraise_exception():
    To work around this, we save the exception state, run handler code, and
    then re-raise the original exception. If another exception occurs, the
    saved exception is logged and the new exception is re-raised.
-    """
-    type_, value, tb = sys.exc_info()
-    try:
-        yield
-    except Exception:
-        logging.error(_('Original exception being dropped: %s'),
-                      traceback.format_exception(type_, value, tb))
-        raise
-    raise type_, value, tb
+
+    In some cases the caller may not want to re-raise the exception, and
+    for those circumstances this context provides a reraise flag that
+    can be used to suppress the exception. For example:
+
+    with save_and_reraise_exception() as ctxt:
+        decide_if_need_reraise()
+        if not should_be_reraised:
+            ctxt.reraise = False
+    """
+    def __init__(self):
+        self.reraise = True
+
+    def __enter__(self):
+        self.type_, self.value, self.tb, = sys.exc_info()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if exc_type is not None:
+            logging.error(_('Original exception being dropped: %s'),
+                          traceback.format_exception(self.type_,
+                                                     self.value,
+                                                     self.tb))
+            return False
+        if self.reraise:
+            raise self.type_, self.value, self.tb


+def forever_retry_uncaught_exceptions(infunc):
+    def inner_func(*args, **kwargs):
+        last_log_time = 0
+        last_exc_message = None
+        exc_count = 0
+        while True:
+            try:
+                return infunc(*args, **kwargs)
+            except Exception as exc:
+                if exc.message == last_exc_message:
+                    exc_count += 1
+                else:
+                    exc_count = 1
+                # Do not log any more frequently than once a minute unless
+                # the exception message changes
+                cur_time = int(time.time())
+                if (cur_time - last_log_time > 60 or
+                        exc.message != last_exc_message):
+                    logging.exception(
+                        _('Unexpected exception occurred %d time(s)... '
+                          'retrying.') % exc_count)
+                    last_log_time = cur_time
+                    last_exc_message = exc.message
+                    exc_count = 0
+                # This should be a very rare event. In case it isn't, do
+                # a sleep.
+                time.sleep(1)
+    return inner_func
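
# Usage sketch (reviewer note, not part of the commit): the context manager
# now exposes a reraise flag; already_gone() is a hypothetical helper.
from climate.openstack.common import excutils

def delete_quietly(resource):
    try:
        resource.delete()
    except Exception:
        with excutils.save_and_reraise_exception() as ctxt:
            if resource.already_gone():  # hypothetical check
                ctxt.reraise = False     # suppress the original exception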
climate/openstack/common/fileutils.py (new file, 110 lines)

@@ -0,0 +1,110 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import contextlib
import errno
import os

from climate.openstack.common import excutils
from climate.openstack.common.gettextutils import _
from climate.openstack.common import log as logging

LOG = logging.getLogger(__name__)

_FILE_CACHE = {}


def ensure_tree(path):
    """Create a directory (and any ancestor directories required)

    :param path: Directory to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            if not os.path.isdir(path):
                raise
        else:
            raise


def read_cached_file(filename, force_reload=False):
    """Read from a file if it has been modified.

    :param force_reload: Whether to reload the file.
    :returns: A tuple with a boolean specifying if the data is fresh
              or not.
    """
    global _FILE_CACHE

    if force_reload and filename in _FILE_CACHE:
        del _FILE_CACHE[filename]

    reloaded = False
    mtime = os.path.getmtime(filename)
    cache_info = _FILE_CACHE.setdefault(filename, {})

    if not cache_info or mtime > cache_info.get('mtime', 0):
        LOG.debug(_("Reloading cached file %s") % filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        reloaded = True
    return (reloaded, cache_info['data'])


def delete_if_exists(path):
    """Delete a file, but ignore file not found error.

    :param path: File to delete
    """

    try:
        os.unlink(path)
    except OSError as e:
        if e.errno == errno.ENOENT:
            return
        else:
            raise


@contextlib.contextmanager
def remove_path_on_error(path):
    """Protect code that wants to operate on PATH atomically.
    Any exception will cause PATH to be removed.

    :param path: File to work with
    """
    try:
        yield
    except Exception:
        with excutils.save_and_reraise_exception():
            delete_if_exists(path)


def file_open(*args, **kwargs):
    """Open file

    see built-in file() documentation for more details

    Note: The reason this is kept in a separate module is to easily
          be able to provide a stub module that doesn't alter system
          state at all (for unit tests)
    """
    return file(*args, **kwargs)
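
# Usage sketch (reviewer note, not part of the commit); paths are
# illustrative.
from climate.openstack.common import fileutils

fileutils.ensure_tree('/tmp/demo/nested')

# Partial output is removed if the write fails part-way through.
path = '/tmp/demo/nested/data.txt'
with fileutils.remove_path_on_error(path):
    with open(path, 'w') as f:
        f.write('hello')

reloaded, data = fileutils.read_cached_file(path)  # cached by mtime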
climate/openstack/common/gettextutils.py

@@ -2,6 +2,7 @@

# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
+# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain

@@ -23,11 +24,203 @@ Usual usage in an openstack.common module:
    from climate.openstack.common.gettextutils import _
"""

+import copy
import gettext
+import logging.handlers
+import os
+import UserString

-t = gettext.translation('openstack-common', 'locale', fallback=True)
+_localedir = os.environ.get('climate'.upper() + '_LOCALEDIR')
+_t = gettext.translation('climate', localedir=_localedir, fallback=True)


def _(msg):
-    return t.ugettext(msg)
+    return _t.ugettext(msg)


def install(domain):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).
    """
    gettext.install(domain,
                    localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
                    unicode=True)


"""
Lazy gettext functionality.

The following is an attempt to introduce a deferred way
to do translations on messages in OpenStack. We attempt to
override the standard _() function and % (format string) operation
to build Message objects that can later be translated when we have
more information. Also included is an example LogHandler that
translates Messages to an associated locale, effectively allowing
many logs, each with their own locale.
"""


def get_lazy_gettext(domain):
    """Assemble and return a lazy gettext function for a given domain.

    Factory method for a project/module to get a lazy gettext function
    for its own translation domain (i.e. nova, glance, cinder, etc.)
    """

    def _lazy_gettext(msg):
        """Create and return a Message object.

        Message encapsulates a string so that we can translate it later when
        needed.
        """
        return Message(msg, domain)

    return _lazy_gettext


class Message(UserString.UserString, object):
    """Class used to encapsulate translatable messages."""
    def __init__(self, msg, domain):
        # _msg is the gettext msgid and should never change
        self._msg = msg
        self._left_extra_msg = ''
        self._right_extra_msg = ''
        self.params = None
        self.locale = None
        self.domain = domain

    @property
    def data(self):
        # NOTE(mrodden): this should always resolve to a unicode string
        # that best represents the state of the message currently

        localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
        if self.locale:
            lang = gettext.translation(self.domain,
                                       localedir=localedir,
                                       languages=[self.locale],
                                       fallback=True)
        else:
            # use system locale for translations
            lang = gettext.translation(self.domain,
                                       localedir=localedir,
                                       fallback=True)

        full_msg = (self._left_extra_msg +
                    lang.ugettext(self._msg) +
                    self._right_extra_msg)

        if self.params is not None:
            full_msg = full_msg % self.params

        return unicode(full_msg)

    def _save_parameters(self, other):
        # we check for None later to see if
        # we actually have parameters to inject,
        # so encapsulate if our parameter is actually None
        if other is None:
            self.params = (other, )
        else:
            self.params = copy.deepcopy(other)

        return self

    # overrides to be more string-like
    def __unicode__(self):
        return self.data

    def __str__(self):
        return self.data.encode('utf-8')

    def __getstate__(self):
        to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
                   'domain', 'params', 'locale']
        new_dict = self.__dict__.fromkeys(to_copy)
        for attr in to_copy:
            new_dict[attr] = copy.deepcopy(self.__dict__[attr])

        return new_dict

    def __setstate__(self, state):
        for (k, v) in state.items():
            setattr(self, k, v)

    # operator overloads
    def __add__(self, other):
        copied = copy.deepcopy(self)
        copied._right_extra_msg += other.__str__()
        return copied

    def __radd__(self, other):
        copied = copy.deepcopy(self)
        copied._left_extra_msg += other.__str__()
        return copied

    def __mod__(self, other):
        # do a format string to catch and raise
        # any possible KeyErrors from missing parameters
        self.data % other
        copied = copy.deepcopy(self)
        return copied._save_parameters(other)

    def __mul__(self, other):
        return self.data * other

    def __rmul__(self, other):
        return other * self.data

    def __getitem__(self, key):
        return self.data[key]

    def __getslice__(self, start, end):
        return self.data.__getslice__(start, end)

    def __getattribute__(self, name):
        # NOTE(mrodden): handle lossy operations that we can't deal with yet
        # These override the UserString implementation, since UserString
        # uses our __class__ attribute to try and build a new message
        # after running the inner data string through the operation.
        # At that point, we have lost the gettext message id and can just
        # safely resolve to a string instead.
        ops = ['capitalize', 'center', 'decode', 'encode',
               'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
               'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
        if name in ops:
            return getattr(self.data, name)
        else:
            return UserString.UserString.__getattribute__(self, name)


class LocaleHandler(logging.Handler):
    """Handler that can have a locale associated to translate Messages.

    A quick example of how to utilize the Message class above.
    LocaleHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating the internal Message.
    """

    def __init__(self, locale, target):
        """Initialize a LocaleHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        logging.Handler.__init__(self)
        self.locale = locale
        self.target = target

    def emit(self, record):
        if isinstance(record.msg, Message):
            # set the locale and resolve to a string
            record.msg.locale = self.locale

        self.target.emit(record)
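
# Usage sketch (reviewer note, not part of the commit): lazy translation.
# The Message object defers gettext lookup until it is rendered, e.g. by
# a LocaleHandler attached to a logger.
import logging

from climate.openstack.common import gettextutils

_ = gettextutils.get_lazy_gettext('climate')
msg = _('Lease %(id)s not found') % {'id': 'abc'}  # still a Message

handler = gettextutils.LocaleHandler('es', logging.StreamHandler())
logging.getLogger('demo').addHandler(handler)
logging.getLogger('demo').error(msg)  # translated at emit time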
climate/openstack/common/importutils.py

@@ -24,7 +24,7 @@ import traceback


def import_class(import_str):
-    """Returns a class from a string including module and class"""
+    """Returns a class from a string including module and class."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)

@@ -41,8 +41,9 @@ def import_object(import_str, *args, **kwargs):


def import_object_ns(name_space, import_str, *args, **kwargs):
-    """
-    Import a class and return an instance of it, first by trying
+    """Tries to import object from default namespace.
+
+    Imports a class and return an instance of it, first by trying
    to find the class in a default namespace, then failing back to
    a full path if not found in the default namespace.
    """
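
# Usage sketch (reviewer note, not part of the commit): dotted-path loading.
from climate.openstack.common import importutils

dt_cls = importutils.import_class('datetime.datetime')     # the class itself
delta = importutils.import_object('datetime.timedelta', seconds=30)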
climate/openstack/common/jsonutils.py

@@ -38,11 +38,24 @@ import functools
import inspect
+import itertools
import json
+import types
import xmlrpclib

+import netaddr
+import six

from climate.openstack.common import timeutils


+_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
+                     inspect.isfunction, inspect.isgeneratorfunction,
+                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
+                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
+                     inspect.isabstract]
+
+_simple_types = (types.NoneType, int, basestring, bool, float, long)


def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

@@ -58,19 +71,32 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
    Therefore, convert_instances=True is lossy ... be aware.

    """
-    nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
-             inspect.isfunction, inspect.isgeneratorfunction,
-             inspect.isgenerator, inspect.istraceback, inspect.isframe,
-             inspect.iscode, inspect.isbuiltin, inspect.isroutine,
-             inspect.isabstract]
-    for test in nasty:
-        if test(value):
-            return unicode(value)
+    # handle obvious types first - order of basic types determined by running
+    # full tests on nova project, resulting in the following counts:
+    # 572754 <type 'NoneType'>
+    # 460353 <type 'int'>
+    # 379632 <type 'unicode'>
+    # 274610 <type 'str'>
+    # 199918 <type 'dict'>
+    # 114200 <type 'datetime.datetime'>
+    #  51817 <type 'bool'>
+    #  26164 <type 'list'>
+    #   6491 <type 'float'>
+    #    283 <type 'tuple'>
+    #     19 <type 'long'>
+    if isinstance(value, _simple_types):
+        return value

-    # value of itertools.count doesn't get caught by inspects
-    # above and results in infinite loop when list(value) is called.
+    if isinstance(value, datetime.datetime):
+        if convert_datetime:
+            return timeutils.strtime(value)
+        else:
+            return value
+
+    # value of itertools.count doesn't get caught by nasty_type_tests
+    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
-        return unicode(value)
+        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that

@@ -91,17 +117,18 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
                          convert_datetime=convert_datetime,
                          level=level,
                          max_depth=max_depth)
-        if isinstance(value, dict):
-            return dict((k, recursive(v)) for k, v in value.iteritems())
-        elif isinstance(value, (list, tuple)):
-            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

+        if isinstance(value, (list, tuple)):
+            return [recursive(v) for v in value]
+        elif isinstance(value, dict):
+            return dict((k, recursive(v)) for k, v in value.iteritems())
-        elif convert_datetime and isinstance(value, datetime.datetime):
+        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)

@@ -111,12 +138,16 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
+        elif isinstance(value, netaddr.IPAddress):
+            return six.text_type(value)
        else:
+            if any(test(value) for test in _nasty_type_tests):
+                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
-        return unicode(value)
+        return six.text_type(value)


def dumps(value, default=to_primitive, **kwargs):
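
# Usage sketch (reviewer note, not part of the commit): datetimes are
# serialized via timeutils.strtime() and tuples become lists.
import datetime

from climate.openstack.common import jsonutils

payload = {'when': datetime.datetime(2013, 7, 1, 12, 0),
           'ids': (1, 2, 3)}
print(jsonutils.dumps(payload))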
climate/openstack/common/log.py

@@ -29,6 +29,7 @@ It also allows setting of formatting information through conf.

"""

+import ConfigParser
import cStringIO
import inspect
import itertools

@@ -36,19 +37,17 @@ import logging
import logging.config
import logging.handlers
import os
-import stat
import sys
import traceback

from oslo.config import cfg

from climate.openstack.common.gettextutils import _
+from climate.openstack.common import importutils
from climate.openstack.common import jsonutils
from climate.openstack.common import local
-from climate.openstack.common import notifier


-_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

common_cli_opts = [

@@ -73,11 +72,13 @@ logging_cli_opts = [
                    'documentation for details on logging configuration '
                    'files.'),
    cfg.StrOpt('log-format',
-               default=_DEFAULT_LOG_FORMAT,
+               default=None,
               metavar='FORMAT',
               help='A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
-                    'Default: %(default)s'),
+                    'This option is deprecated. Please use '
+                    'logging_context_format_string and '
+                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',

@@ -103,17 +104,14 @@
generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
-                help='Log output to standard error'),
-    cfg.StrOpt('logfile_mode',
-               default='0644',
-               help='Default file mode used when creating log files'),
+                help='Log output to standard error')
]

log_opts = [
    cfg.StrOpt('logging_context_format_string',
-               default='%(asctime)s.%(msecs)03d %(levelname)s %(name)s '
-                       '[%(request_id)s %(user)s %(tenant)s] %(instance)s'
-                       '%(message)s',
+               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
+                       '%(name)s [%(request_id)s %(user)s %(tenant)s] '
+                       '%(instance)s%(message)s',
               help='format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '

@@ -210,7 +208,27 @@ def _get_log_file_path(binary=None):
    return '%s.log' % (os.path.join(logdir, binary),)


-class ContextAdapter(logging.LoggerAdapter):
+class BaseLoggerAdapter(logging.LoggerAdapter):
+
+    def audit(self, msg, *args, **kwargs):
+        self.log(logging.AUDIT, msg, *args, **kwargs)
+
+
+class LazyAdapter(BaseLoggerAdapter):
+    def __init__(self, name='unknown', version='unknown'):
+        self._logger = None
+        self.extra = {}
+        self.name = name
+        self.version = version
+
+    @property
+    def logger(self):
+        if not self._logger:
+            self._logger = getLogger(self.name, self.version)
+        return self._logger
+
+
+class ContextAdapter(BaseLoggerAdapter):
    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):

@@ -218,8 +236,9 @@ class ContextAdapter(logging.LoggerAdapter):
        self.project = project_name
        self.version = version_string

-    def audit(self, msg, *args, **kwargs):
-        self.log(logging.AUDIT, msg, *args, **kwargs)
+    @property
+    def handlers(self):
+        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        stdmsg = _("Deprecated: %s") % msg

@@ -303,17 +322,6 @@ class JSONFormatter(logging.Formatter):
        return jsonutils.dumps(message)


-class PublishErrorsHandler(logging.Handler):
-    def emit(self, record):
-        if ('climate.openstack.common.notifier.log_notifier' in
-                CONF.notification_driver):
-            return
-        notifier.api.notify(None, 'error.publisher',
-                            'error_notification',
-                            notifier.api.ERROR,
-                            dict(error=record.msg))
-
-
def _create_logging_excepthook(product_name):
    def logging_excepthook(type, value, tb):
        extra = {}

@@ -323,10 +331,30 @@ def _create_logging_excepthook(product_name):
    return logging_excepthook


+class LogConfigError(Exception):
+
+    message = _('Error loading logging config %(log_config)s: %(err_msg)s')
+
+    def __init__(self, log_config, err_msg):
+        self.log_config = log_config
+        self.err_msg = err_msg
+
+    def __str__(self):
+        return self.message % dict(log_config=self.log_config,
+                                   err_msg=self.err_msg)
+
+
+def _load_log_config(log_config):
+    try:
+        logging.config.fileConfig(log_config)
+    except ConfigParser.Error as exc:
+        raise LogConfigError(log_config, str(exc))
+
+
def setup(product_name):
    """Setup logging."""
    if CONF.log_config:
-        logging.config.fileConfig(CONF.log_config)
+        _load_log_config(CONF.log_config)
    else:
        _setup_logging_from_conf()
    sys.excepthook = _create_logging_excepthook(product_name)

@@ -378,11 +406,6 @@ def _setup_logging_from_conf():
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

-        mode = int(CONF.logfile_mode, 8)
-        st = os.stat(logpath)
-        if st.st_mode != (stat.S_IFREG | mode):
-            os.chmod(logpath, mode)
-
    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

@@ -394,15 +417,22 @@ def _setup_logging_from_conf():
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
-        log_root.addHandler(PublishErrorsHandler(logging.ERROR))
+        handler = importutils.import_object(
+            "climate.openstack.common.log_handler.PublishErrorsHandler",
+            logging.ERROR)
+        log_root.addHandler(handler)

+    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
-        datefmt = CONF.log_date_format
+        # NOTE(alaski): CONF.log_format overrides everything currently. This
+        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
+            log_root.info('Deprecated: log_format is now deprecated and will '
+                          'be removed in the next release')
        else:
-            handler.setFormatter(LegacyFormatter(datefmt=datefmt))
+            handler.setFormatter(ContextFormatter(datefmt=datefmt))

    if CONF.debug:
        log_root.setLevel(logging.DEBUG)

@@ -411,14 +441,11 @@ def _setup_logging_from_conf():
    else:
        log_root.setLevel(logging.WARNING)

-    level = logging.NOTSET
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)
-        for handler in log_root.handlers:
-            logger.addHandler(handler)

_loggers = {}

@@ -431,6 +458,16 @@ def getLogger(name='unknown', version='unknown'):
    return _loggers[name]


+def getLazyLogger(name='unknown', version='unknown'):
+    """Returns lazy logger.
+
+    Creates a pass-through logger that does not create the real logger
+    until it is really needed and delegates all calls to the real logger
+    once it is created.
+    """
+    return LazyAdapter(name, version)
+
+
class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

@@ -442,7 +479,7 @@ class WritableLogger(object):
        self.logger.log(self.level, msg)


-class LegacyFormatter(logging.Formatter):
+class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
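
# Usage sketch (reviewer note, not part of the commit): the lazy adapter
# defers creating the real logger, useful for modules imported before
# logging.setup() has run.
from climate.openstack.common import log as logging

LOG = logging.getLazyLogger(__name__)

def main():
    logging.setup('climate')
    LOG.audit('service starting')  # AUDIT level comes from the adapters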
climate/openstack/common/loopingcall.py

@@ -46,12 +46,23 @@ class LoopingCallDone(Exception):
        self.retvalue = retvalue


-class LoopingCall(object):
+class LoopingCallBase(object):
    def __init__(self, f=None, *args, **kw):
        self.args = args
        self.kw = kw
        self.f = f
        self._running = False
+        self.done = None
+
+    def stop(self):
+        self._running = False
+
+    def wait(self):
+        return self.done.wait()
+
+
+class FixedIntervalLoopingCall(LoopingCallBase):
+    """A fixed interval looping call."""

    def start(self, interval, initial_delay=None):
        self._running = True

@@ -73,11 +84,11 @@ class LoopingCall(object):
                        LOG.warn(_('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
-            except LoopingCallDone, e:
+            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
-                LOG.exception(_('in looping call'))
+                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:

@@ -88,8 +99,49 @@ class LoopingCall(object):
        greenthread.spawn_n(_inner)
        return self.done

-    def stop(self):
-        self._running = False
-
-    def wait(self):
-        return self.done.wait()

+# TODO(mikal): this class name is deprecated in Havana and should be removed
+# in the I release
+LoopingCall = FixedIntervalLoopingCall
+
+
+class DynamicLoopingCall(LoopingCallBase):
+    """A looping call which sleeps until the next known event.
+
+    The function called should return how long to sleep for before being
+    called again.
+    """
+
+    def start(self, initial_delay=None, periodic_interval_max=None):
+        self._running = True
+        done = event.Event()
+
+        def _inner():
+            if initial_delay:
+                greenthread.sleep(initial_delay)
+
+            try:
+                while self._running:
+                    idle = self.f(*self.args, **self.kw)
+                    if not self._running:
+                        break
+
+                    if periodic_interval_max is not None:
+                        idle = min(idle, periodic_interval_max)
+                    LOG.debug(_('Dynamic looping call sleeping for %.02f '
+                                'seconds'), idle)
+                    greenthread.sleep(idle)
+            except LoopingCallDone as e:
+                self.stop()
+                done.send(e.retvalue)
+            except Exception:
+                LOG.exception(_('in dynamic looping call'))
+                done.send_exception(*sys.exc_info())
+                return
+            else:
+                done.send(True)
+
+        self.done = done
+
+        greenthread.spawn(_inner)
+        return self.done
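
# Usage sketch (reviewer note, not part of the commit); _heartbeat is a
# hypothetical periodic task.
from climate.openstack.common import loopingcall

def _heartbeat():
    pass  # report state, refresh a lease, etc.

timer = loopingcall.FixedIntervalLoopingCall(_heartbeat)
timer.start(interval=10, initial_delay=5)
# ...
timer.stop()
timer.wait()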
climate/openstack/common/network_utils.py

@@ -19,14 +19,12 @@
Network-related utilities and helper functions.
"""

-import logging
-
-LOG = logging.getLogger(__name__)
+import urlparse


def parse_host_port(address, default_port=None):
-    """
-    Interpret a string as a host:port pair.
+    """Interpret a string as a host:port pair.

    An IPv6 address MUST be escaped if accompanied by a port,
    because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
    means both [2001:db8:85a3::8a2e:370:7334] and

@@ -66,3 +64,18 @@ def parse_host_port(address, default_port=None):
        port = default_port

    return (host, None if port is None else int(port))


+def urlsplit(url, scheme='', allow_fragments=True):
+    """Parse a URL using urlparse.urlsplit(), splitting query and fragments.
+    This function papers over Python issue9374 when needed.
+
+    The parameters are the same as urlparse.urlsplit.
+    """
+    scheme, netloc, path, query, fragment = urlparse.urlsplit(
+        url, scheme, allow_fragments)
+    if allow_fragments and '#' in path:
+        path, fragment = path.split('#', 1)
+    if '?' in path:
+        path, query = path.split('?', 1)
+    return urlparse.SplitResult(scheme, netloc, path, query, fragment)
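
# Usage sketch (reviewer note, not part of the commit).
from climate.openstack.common import network_utils

assert network_utils.parse_host_port('example.com:8080') == \
    ('example.com', 8080)

# urlsplit() matches urlparse.urlsplit() but also splits query/fragment
# for schemes the stdlib mishandles (Python issue9374):
parts = network_utils.urlsplit('myproto://host/path?q=1#frag')
assert parts.query == 'q=1' and parts.fragment == 'frag'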
climate/openstack/common/notifier/api.py

@@ -30,7 +30,6 @@ LOG = logging.getLogger(__name__)
notifier_opts = [
    cfg.MultiStrOpt('notification_driver',
                    default=[],
-                    deprecated_name='list_notifier_drivers',
                    help='Driver or drivers to handle sending notifications'),
    cfg.StrOpt('default_notification_level',
               default='INFO',

@@ -57,7 +56,7 @@ class BadPriorityException(Exception):


def notify_decorator(name, fn):
-    """ decorator for notify which is used from utils.monkey_patch()
+    """Decorator for notify which is used from utils.monkey_patch().

    :param name: name of the function
    :param function: - object of the function

climate/openstack/common/notifier/log_notifier.py

@@ -24,7 +24,9 @@ CONF = cfg.CONF


def notify(_context, message):
    """Notifies the recipient of the desired event given the model.
-    Log notifications using openstack's default logging system"""
+
+    Log notifications using openstack's default logging system.
+    """

    priority = message.get('priority',
                           CONF.default_notification_level)

climate/openstack/common/notifier/no_op_notifier.py

@@ -15,5 +15,5 @@


def notify(_context, message):
-    """Notifies the recipient of the desired event given the model"""
+    """Notifies the recipient of the desired event given the model."""
    pass

climate/openstack/common/notifier/rpc_notifier.py

@@ -31,7 +31,7 @@ CONF.register_opt(notification_topic_opt)


def notify(context, message):
-    """Sends a notification via RPC"""
+    """Sends a notification via RPC."""
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority',

climate/openstack/common/notifier/rpc_notifier2.py

@@ -37,7 +37,7 @@ CONF.register_opt(notification_topic_opt, opt_group)


def notify(context, message):
-    """Sends a notification via RPC"""
+    """Sends a notification via RPC."""
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority',
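
# Usage sketch (reviewer note, not part of the commit): same calling
# convention as the notifier.api.notify() call removed from log.py above.
# publisher_id/event_type/payload are illustrative, and INFO is assumed to
# be defined alongside the ERROR constant used there.
from climate.openstack.common import context
from climate.openstack.common.notifier import api as notifier_api

notifier_api.notify(context.get_admin_context(),
                    'climate.manager',
                    'lease.create.end',
                    notifier_api.INFO,
                    {'lease_id': 'abc'})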
846
climate/openstack/common/policy.py
Normal file
846
climate/openstack/common/policy.py
Normal file
@ -0,0 +1,846 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (c) 2012 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Common Policy Engine Implementation
|
||||
|
||||
Policies can be expressed in one of two forms: A list of lists, or a
|
||||
string written in the new policy language.
|
||||
|
||||
In the list-of-lists representation, each check inside the innermost
|
||||
list is combined as with an "and" conjunction--for that check to pass,
|
||||
all the specified checks must pass. These innermost lists are then
|
||||
combined as with an "or" conjunction. This is the original way of
|
||||
expressing policies, but there now exists a new way: the policy
|
||||
language.
|
||||
|
||||
In the policy language, each check is specified the same way as in the
|
||||
list-of-lists representation: a simple "a:b" pair that is matched to
|
||||
the correct code to perform that check. However, conjunction
|
||||
operators are available, allowing for more expressiveness in crafting
|
||||
policies.
|
||||
|
||||
As an example, take the following rule, expressed in the list-of-lists
|
||||
representation::
|
||||
|
||||
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
|
||||
|
||||
In the policy language, this becomes::
|
||||
|
||||
role:admin or (project_id:%(project_id)s and role:projectadmin)
|
||||
|
||||
The policy language also has the "not" operator, allowing a richer
|
||||
policy rule::
|
||||
|
||||
project_id:%(project_id)s and not role:dunce
|
||||
|
||||
Finally, two special policy checks should be mentioned; the policy
|
||||
check "@" will always accept an access, and the policy check "!" will
|
||||
always reject an access. (Note that if a rule is either the empty
|
||||
list ("[]") or the empty string, this is equivalent to the "@" policy
|
||||
check.) Of these, the "!" policy check is probably the most useful,
|
||||
as it allows particular rules to be explicitly disabled.
|
||||
"""
|
||||
|
||||
import abc
|
||||
import re
|
||||
import urllib
|
||||
import urllib2
|
||||
|
||||
from oslo.config import cfg
|
||||
import six
|
||||
|
||||
from climate.openstack.common import fileutils
|
||||
from climate.openstack.common.gettextutils import _
|
||||
from climate.openstack.common import jsonutils
|
||||
from climate.openstack.common import log as logging
|
||||
|
||||
policy_opts = [
|
||||
cfg.StrOpt('policy_file',
|
||||
default='policy.json',
|
||||
help=_('JSON file containing policy')),
|
||||
cfg.StrOpt('policy_default_rule',
|
||||
default='default',
|
||||
help=_('Rule enforced when requested rule is not found')),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(policy_opts)
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
_checks = {}
|
||||
|
||||
|
||||
class PolicyNotAuthorized(Exception):
|
||||
|
||||
def __init__(self, rule):
|
||||
msg = _("Policy doesn't allow %s to be performed.") % rule
|
||||
super(PolicyNotAuthorized, self).__init__(msg)
|
||||
|
||||
|
||||
class Rules(dict):
|
||||
"""A store for rules. Handles the default_rule setting directly."""
|
||||
|
||||
@classmethod
|
||||
def load_json(cls, data, default_rule=None):
|
||||
"""Allow loading of JSON rule data."""
|
||||
|
||||
# Suck in the JSON data and parse the rules
|
||||
rules = dict((k, parse_rule(v)) for k, v in
|
||||
jsonutils.loads(data).items())
|
||||
|
||||
return cls(rules, default_rule)
|
||||
|
||||
def __init__(self, rules=None, default_rule=None):
|
||||
"""Initialize the Rules store."""
|
||||
|
||||
super(Rules, self).__init__(rules or {})
|
||||
self.default_rule = default_rule
|
||||
|
||||
def __missing__(self, key):
|
||||
"""Implements the default rule handling."""
|
||||
|
||||
# If the default rule isn't actually defined, do something
|
||||
# reasonably intelligent
|
||||
if not self.default_rule or self.default_rule not in self:
|
||||
raise KeyError(key)
|
||||
|
||||
return self[self.default_rule]
|
||||
|
||||
def __str__(self):
|
||||
"""Dumps a string representation of the rules."""
|
||||
|
||||
# Start by building the canonical strings for the rules
|
||||
out_rules = {}
|
||||
for key, value in self.items():
|
||||
# Use empty string for singleton TrueCheck instances
|
||||
if isinstance(value, TrueCheck):
|
||||
out_rules[key] = ''
|
||||
else:
|
||||
out_rules[key] = str(value)
|
||||
|
||||
# Dump a pretty-printed JSON representation
|
||||
return jsonutils.dumps(out_rules, indent=4)
|
||||
|
||||
|
||||
class Enforcer(object):
|
||||
"""Responsible for loading and enforcing rules.
|
||||
|
||||
:param policy_file: Custom policy file to use, if none is
|
||||
specified, `CONF.policy_file` will be
|
||||
used.
|
||||
:param rules: Default dictionary / Rules to use. It will be
|
||||
considered just in the first instantiation. If
|
||||
`load_rules(True)`, `clear()` or `set_rules(True)`
|
||||
is called this will be overwritten.
|
||||
:param default_rule: Default rule to use, CONF.default_rule will
|
||||
be used if none is specified.
|
||||
"""
|
||||
|
||||
def __init__(self, policy_file=None, rules=None, default_rule=None):
|
||||
self.rules = Rules(rules)
|
||||
self.default_rule = default_rule or CONF.policy_default_rule
|
||||
|
||||
self.policy_path = None
|
||||
self.policy_file = policy_file or CONF.policy_file
|
||||
|
||||
def set_rules(self, rules, overwrite=True):
|
||||
"""Create a new Rules object based on the provided dict of rules.
|
||||
|
||||
:param rules: New rules to use. It should be an instance of dict.
|
||||
:param overwrite: Whether to overwrite current rules or update them
|
||||
with the new rules.
|
||||
"""
|
||||
|
||||
if not isinstance(rules, dict):
|
||||
raise TypeError(_("Rules must be an instance of dict or Rules, "
|
||||
"got %s instead") % type(rules))
|
||||
|
||||
if overwrite:
|
||||
self.rules = Rules(rules)
|
||||
else:
|
||||
self.update(rules)
|
||||
|
||||
def clear(self):
|
||||
"""Clears Enforcer rules, policy's cache and policy's path."""
|
||||
self.set_rules({})
|
||||
self.policy_path = None
|
||||
|
||||
def load_rules(self, force_reload=False):
|
||||
"""Loads policy_path's rules.
|
||||
|
||||
Policy file is cached and will be reloaded if modified.
|
||||
|
||||
:param force_reload: Whether to overwrite current rules.
|
||||
"""
|
||||
|
||||
if not self.policy_path:
|
||||
self.policy_path = self._get_policy_path()
|
||||
|
||||
reloaded, data = fileutils.read_cached_file(self.policy_path,
|
||||
force_reload=force_reload)
|
||||
|
||||
if reloaded:
|
||||
rules = Rules.load_json(data, self.default_rule)
|
||||
self.set_rules(rules)
|
||||
LOG.debug(_("Rules successfully reloaded"))
|
||||
|
||||
def _get_policy_path(self):
|
||||
"""Locate the policy json data file.
|
||||
|
||||
:param policy_file: Custom policy file to locate.
|
||||
|
||||
:returns: The policy path
|
||||
|
||||
:raises: ConfigFilesNotFoundError if the file couldn't
|
||||
be located.
|
||||
"""
|
||||
policy_file = CONF.find_file(self.policy_file)
|
||||
|
||||
if policy_file:
|
||||
return policy_file
|
||||
|
||||
raise cfg.ConfigFilesNotFoundError(path=CONF.policy_file)
|
||||
|
||||
def enforce(self, rule, target, creds, do_raise=False,
|
||||
exc=None, *args, **kwargs):
|
||||
"""Checks authorization of a rule against the target and credentials.
|
||||
|
||||
:param rule: A string or BaseCheck instance specifying the rule
|
||||
to evaluate.
|
||||
:param target: As much information about the object being operated
|
||||
on as possible, as a dictionary.
|
||||
:param creds: As much information about the user performing the
|
||||
action as possible, as a dictionary.
|
||||
:param do_raise: Whether to raise an exception or not if check
|
||||
fails.
|
||||
:param exc: Class of the exception to raise if the check fails.
|
||||
Any remaining arguments passed to check() (both
|
||||
positional and keyword arguments) will be passed to
|
||||
the exception class. If not specified, PolicyNotAuthorized
|
||||
will be used.
|
||||
|
||||
:return: Returns False if the policy does not allow the action and
|
||||
exc is not provided; otherwise, returns a value that
|
||||
evaluates to True. Note: for rules using the "case"
|
||||
expression, this True value will be the specified string
|
||||
from the expression.
|
||||
"""
|
||||
|
||||
# NOTE(flaper87): Not logging target or creds to avoid
|
||||
# potential security issues.
|
||||
LOG.debug(_("Rule %s will be now enforced") % rule)
|
||||
|
||||
self.load_rules()
|
||||
|
||||
# Allow the rule to be a Check tree
|
||||
if isinstance(rule, BaseCheck):
|
||||
result = rule(target, creds, self)
|
||||
elif not self.rules:
|
||||
# No rules to reference means we're going to fail closed
|
||||
result = False
|
||||
else:
|
||||
try:
|
||||
# Evaluate the rule
|
||||
result = self.rules[rule](target, creds, self)
|
||||
except KeyError:
|
||||
LOG.debug(_("Rule [%s] doesn't exist") % rule)
|
||||
# If the rule doesn't exist, fail closed
|
||||
result = False
|
||||
|
||||
# If it is False, raise the exception if requested
|
||||
if do_raise and not result:
|
||||
if exc:
|
||||
raise exc(*args, **kwargs)
|
||||
|
||||
raise PolicyNotAuthorized(rule)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
class BaseCheck(object):
|
||||
"""Abstract base class for Check classes."""
|
||||
|
||||
__metaclass__ = abc.ABCMeta
|
||||
|
||||
@abc.abstractmethod
|
||||
def __str__(self):
|
||||
"""String representation of the Check tree rooted at this node."""
|
||||
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def __call__(self, target, cred):
|
||||
"""Triggers if instance of the class is called.
|
||||
|
||||
Performs the check. Returns False to reject the access or a
|
||||
true value (not necessary True) to accept the access.
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class FalseCheck(BaseCheck):
|
||||
"""A policy check that always returns False (disallow)."""
|
||||
|
||||
def __str__(self):
|
||||
"""Return a string representation of this check."""
|
||||
|
||||
return "!"
|
||||
|
||||
def __call__(self, target, cred):
|
||||
"""Check the policy."""
|
||||
|
||||
return False
|
||||
|
||||
|
||||
class TrueCheck(BaseCheck):
|
||||
"""A policy check that always returns True (allow)."""
|
||||
|
||||
def __str__(self):
|
||||
"""Return a string representation of this check."""
|
||||
|
||||
return "@"
|
||||
|
||||
def __call__(self, target, cred):
|
||||
"""Check the policy."""
|
||||
|
||||
return True
|
||||
|
||||
|
||||
class Check(BaseCheck):
|
||||
"""A base class to allow for user-defined policy checks."""
|
||||
|
||||
def __init__(self, kind, match):
|
||||
"""Initiates Check instance.
|
||||
|
||||
:param kind: The kind of the check, i.e., the field before the
|
||||
':'.
|
||||
:param match: The match of the check, i.e., the field after
|
||||
the ':'.
|
||||
"""
|
||||
|
||||
self.kind = kind
|
||||
self.match = match
|
||||
|
||||
def __str__(self):
|
||||
"""Return a string representation of this check."""
|
||||
|
||||
return "%s:%s" % (self.kind, self.match)
|
||||
|
||||
|
||||
class NotCheck(BaseCheck):
|
||||
"""Implements the "not" logical operator.
|
||||
|
||||
A policy check that inverts the result of another policy check.
|
||||
"""
|
||||
|
||||
def __init__(self, rule):
|
||||
"""Initialize the 'not' check.
|
||||
|
||||
:param rule: The rule to negate. Must be a Check.
|
||||
"""
|
||||
|
||||
self.rule = rule
|
||||
|
||||
def __str__(self):
|
||||
"""Return a string representation of this check."""
|
||||
|
||||
return "not %s" % self.rule
|
||||
|
||||
def __call__(self, target, cred):
|
||||
"""Check the policy.
|
||||
|
||||
Returns the logical inverse of the wrapped check.
|
||||
"""
|
||||
|
||||
return not self.rule(target, cred)
|
||||
|
||||
|
||||
class AndCheck(BaseCheck):
|
||||
"""Implements the "and" logical operator.
|
||||
|
||||
A policy check that requires that a list of other checks all return True.
|
||||
"""
|
||||
|
||||
def __init__(self, rules):
|
||||
"""Initialize the 'and' check.
|
||||
|
||||
:param rules: A list of rules that will be tested.
|
||||
"""
|
||||
|
||||
self.rules = rules
|
||||
|
||||
def __str__(self):
|
||||
"""Return a string representation of this check."""
|
||||
|
||||
return "(%s)" % ' and '.join(str(r) for r in self.rules)
|
||||
|
||||
def __call__(self, target, cred):
|
||||
"""Check the policy.
|
||||
|
||||
Requires that all rules accept in order to return True.
|
||||
"""
|
||||
|
||||
for rule in self.rules:
|
||||
if not rule(target, cred):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def add_check(self, rule):
|
||||
"""Adds rule to be tested.
|
||||
|
||||
Allows addition of another rule to the list of rules that will
|
||||
be tested. Returns the AndCheck object for convenience.
|
||||
"""
|
||||
|
||||
self.rules.append(rule)
|
||||
return self
|
||||
|
||||
|
||||
class OrCheck(BaseCheck):
|
||||
"""Implements the "or" operator.
|
||||
|
||||
A policy check that requires that at least one of a list of other
|
||||
checks returns True.
|
||||
"""
|
||||
|
||||
def __init__(self, rules):
|
||||
"""Initialize the 'or' check.
|
||||
|
||||
:param rules: A list of rules that will be tested.
|
||||
"""
|
||||
|
||||
self.rules = rules
|
||||
|
||||
def __str__(self):
|
||||
"""Return a string representation of this check."""
|
||||
|
||||
return "(%s)" % ' or '.join(str(r) for r in self.rules)
|
||||
|
||||
def __call__(self, target, cred):
|
||||
"""Check the policy.
|
||||
|
||||
Requires that at least one rule accept in order to return True.
|
||||
"""
|
||||
|
||||
for rule in self.rules:
|
||||
if rule(target, cred):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def add_check(self, rule):
|
||||
"""Adds rule to be tested.
|
||||
|
||||
Allows addition of another rule to the list of rules that will
|
||||
be tested. Returns the OrCheck object for convenience.
|
||||
"""
|
||||
|
||||
self.rules.append(rule)
|
||||
return self
|
||||
|
||||
|
||||
def _parse_check(rule):
|
||||
"""Parse a single base check rule into an appropriate Check object."""
|
||||
|
||||
# Handle the special checks
|
||||
if rule == '!':
|
||||
return FalseCheck()
|
||||
elif rule == '@':
|
||||
return TrueCheck()
|
||||
|
||||
try:
|
||||
kind, match = rule.split(':', 1)
|
||||
except Exception:
|
||||
LOG.exception(_("Failed to understand rule %s") % rule)
|
||||
# If the rule is invalid, we'll fail closed
|
||||
return FalseCheck()
|
||||
|
||||
# Find what implements the check
|
||||
if kind in _checks:
|
||||
return _checks[kind](kind, match)
|
||||
elif None in _checks:
|
||||
return _checks[None](kind, match)
|
||||
else:
|
||||
LOG.error(_("No handler for matches of kind %s") % kind)
|
||||
return FalseCheck()
|
||||
|
||||
|
||||
def _parse_list_rule(rule):
|
||||
"""Translates the old list-of-lists syntax into a tree of Check objects.
|
||||
|
||||
Provided for backwards compatibility.
|
||||
"""
|
||||
|
||||
# Empty rule defaults to True
|
||||
if not rule:
|
||||
return TrueCheck()
|
||||
|
||||
# Outer list is joined by "or"; inner list by "and"
|
||||
or_list = []
|
||||
for inner_rule in rule:
|
||||
# Elide empty inner lists
|
||||
if not inner_rule:
|
||||
continue
|
||||
|
||||
# Handle bare strings
|
||||
if isinstance(inner_rule, basestring):
|
||||
inner_rule = [inner_rule]
|
||||
|
||||
# Parse the inner rules into Check objects
|
||||
and_list = [_parse_check(r) for r in inner_rule]
|
||||
|
||||
# Append the appropriate check to the or_list
|
||||
if len(and_list) == 1:
|
||||
or_list.append(and_list[0])
|
||||
else:
|
||||
or_list.append(AndCheck(and_list))
|
||||
|
||||
# If we have only one check, omit the "or"
|
||||
if not or_list:
|
||||
return FalseCheck()
|
||||
elif len(or_list) == 1:
|
||||
return or_list[0]
|
||||
|
||||
return OrCheck(or_list)
|
||||
|
||||
|
||||
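Illustrative sketch, not part of the commit: how _parse_list_rule() maps the
legacy list-of-lists syntax onto the Check tree built above, assuming the
module is importable at the path this change installs.

    from climate.openstack.common import policy

    # The outer list is OR'ed together, each inner list is AND'ed, so this
    # reads as "role:admin or (role:member and tenant:%(tenant_id)s)".
    rule = policy._parse_list_rule(
        [["role:admin"], ["role:member", "tenant:%(tenant_id)s"]])
    print(str(rule))
    # (role:admin or (role:member and tenant:%(tenant_id)s))
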
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')


def _parse_tokenize(rule):
    """Tokenizer for the policy language.

    Most of the single-character tokens are specified in the
    _tokenize_re; however, parentheses need to be handled specially,
    because they can appear inside a check string. Thankfully, those
    parentheses that appear inside a check string can never occur at
    the very beginning or end ("%(variable)s" is the correct syntax).
    """

    for tok in _tokenize_re.split(rule):
        # Skip empty tokens
        if not tok or tok.isspace():
            continue

        # Handle leading parens on the token
        clean = tok.lstrip('(')
        for i in range(len(tok) - len(clean)):
            yield '(', '('

        # If it was only parentheses, continue
        if not clean:
            continue
        else:
            tok = clean

        # Handle trailing parens on the token
        clean = tok.rstrip(')')
        trail = len(tok) - len(clean)

        # Yield the cleaned token
        lowered = clean.lower()
        if lowered in ('and', 'or', 'not'):
            # Special tokens
            yield lowered, clean
        elif clean:
            # Not a special token, but not composed solely of ')'
            if len(tok) >= 2 and ((tok[0], tok[-1]) in
                                  [('"', '"'), ("'", "'")]):
                # It's a quoted string
                yield 'string', tok[1:-1]
            else:
                yield 'check', _parse_check(clean)

        # Yield the trailing parens
        for i in range(trail):
            yield ')', ')'

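Illustrative sketch, not part of the commit: what the tokenizer yields for a
parenthesized rule. 'check' tokens carry already-parsed Check objects, so
printing the value shows the kind:match form.

    from climate.openstack.common import policy

    for tok, value in policy._parse_tokenize("(role:admin or not rule:deny)"):
        print('%s %s' % (tok, value))
    # (      (
    # check  role:admin
    # or     or
    # not    not
    # check  rule:deny
    # )      )
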
class ParseStateMeta(type):
    """Metaclass for the ParseState class.

    Facilitates identifying reduction methods.
    """

    def __new__(mcs, name, bases, cls_dict):
        """Create the class.

        Injects the 'reducers' list, a list of tuples matching token sequences
        to the names of the corresponding reduction methods.
        """

        reducers = []

        for key, value in cls_dict.items():
            if not hasattr(value, 'reducers'):
                continue
            for reduction in value.reducers:
                reducers.append((reduction, key))

        cls_dict['reducers'] = reducers

        return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)


def reducer(*tokens):
    """Decorator for reduction methods.

    Arguments are a sequence of tokens, in order, which should trigger running
    this reduction method.
    """

    def decorator(func):
        # Make sure we have a list of reducer sequences
        if not hasattr(func, 'reducers'):
            func.reducers = []

        # Add the tokens to the list of reducer sequences
        func.reducers.append(list(tokens))

        return func

    return decorator


class ParseState(object):
    """Implement the core of parsing the policy language.

    Uses a greedy reduction algorithm to reduce a sequence of tokens into
    a single terminal, the value of which will be the root of the Check tree.

    Note: error reporting is rather lacking. The best we can get with
    this parser formulation is an overall "parse failed" error.
    Fortunately, the policy language is simple enough that this
    shouldn't be that big a problem.
    """

    __metaclass__ = ParseStateMeta

    def __init__(self):
        """Initialize the ParseState."""

        self.tokens = []
        self.values = []

    def reduce(self):
        """Perform a greedy reduction of the token stream.

        If a reducer method matches, it will be executed, then the
        reduce() method will be called recursively to search for any more
        possible reductions.
        """

        for reduction, methname in self.reducers:
            if (len(self.tokens) >= len(reduction) and
                    self.tokens[-len(reduction):] == reduction):
                # Get the reduction method
                meth = getattr(self, methname)

                # Reduce the token stream
                results = meth(*self.values[-len(reduction):])

                # Update the tokens and values
                self.tokens[-len(reduction):] = [r[0] for r in results]
                self.values[-len(reduction):] = [r[1] for r in results]

                # Check for any more reductions
                return self.reduce()

    def shift(self, tok, value):
        """Adds one more token to the state. Calls reduce()."""

        self.tokens.append(tok)
        self.values.append(value)

        # Do a greedy reduce...
        self.reduce()

    @property
    def result(self):
        """Obtain the final result of the parse.

        Raises ValueError if the parse failed to reduce to a single result.
        """

        if len(self.values) != 1:
            raise ValueError("Could not parse rule")
        return self.values[0]

    @reducer('(', 'check', ')')
    @reducer('(', 'and_expr', ')')
    @reducer('(', 'or_expr', ')')
    def _wrap_check(self, _p1, check, _p2):
        """Turn parenthesized expressions into a 'check' token."""

        return [('check', check)]

    @reducer('check', 'and', 'check')
    def _make_and_expr(self, check1, _and, check2):
        """Create an 'and_expr'.

        Join two checks by the 'and' operator.
        """

        return [('and_expr', AndCheck([check1, check2]))]

    @reducer('and_expr', 'and', 'check')
    def _extend_and_expr(self, and_expr, _and, check):
        """Extend an 'and_expr' by adding one more check."""

        return [('and_expr', and_expr.add_check(check))]

    @reducer('check', 'or', 'check')
    def _make_or_expr(self, check1, _or, check2):
        """Create an 'or_expr'.

        Join two checks by the 'or' operator.
        """

        return [('or_expr', OrCheck([check1, check2]))]

    @reducer('or_expr', 'or', 'check')
    def _extend_or_expr(self, or_expr, _or, check):
        """Extend an 'or_expr' by adding one more check."""

        return [('or_expr', or_expr.add_check(check))]

    @reducer('not', 'check')
    def _make_not_expr(self, _not, check):
        """Invert the result of another check."""

        return [('check', NotCheck(check))]


def _parse_text_rule(rule):
    """Parses policy to the tree.

    Translates a policy written in the policy language into a tree of
    Check objects.
    """

    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_("Failed to understand rule %(rule)r") % locals())

        # Fail closed
        return FalseCheck()


def parse_rule(rule):
    """Parses a policy rule into a tree of Check objects."""

    # If the rule is a string, it's in the policy language
    if isinstance(rule, basestring):
        return _parse_text_rule(rule)
    return _parse_list_rule(rule)

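Illustrative sketch, not part of the commit: parse_rule() accepts either
syntax and returns the root Check of the parsed tree.

    from climate.openstack.common import policy

    check = policy.parse_rule("role:admin or (role:member and not rule:deny)")
    print(str(check))
    # (role:admin or (role:member and not rule:deny))
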
def register(name, func=None):
    """Register a function or Check class as a policy check.

    :param name: Gives the name of the check type, e.g., 'rule',
                 'role', etc. If name is None, a default check type
                 will be registered.
    :param func: If given, provides the function or class to register.
                 If not given, returns a function taking one argument
                 to specify the function or class to register,
                 allowing use as a decorator.
    """

    # Perform the actual decoration by registering the function or
    # class. Returns the function or class for compliance with the
    # decorator interface.
    def decorator(func):
        _checks[name] = func
        return func

    # If the function or class is given, do the registration
    if func:
        return decorator(func)

    return decorator

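Illustrative sketch, not part of the commit: register() doubles as a
decorator, mirroring the built-in registrations below. The 'is_owner' kind
and its semantics are hypothetical.

    from climate.openstack.common import policy

    @policy.register('is_owner')
    class IsOwnerCheck(policy.Check):
        def __call__(self, target, creds, enforcer):
            # Accept when the caller's user matches the target's owner.
            return creds.get('user') == target.get('owner')

    # "is_owner:" rules now resolve to IsOwnerCheck via _parse_check().
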
@register("rule")
|
||||
class RuleCheck(Check):
|
||||
def __call__(self, target, creds, enforcer):
|
||||
"""Recursively checks credentials based on the defined rules."""
|
||||
|
||||
try:
|
||||
return enforcer.rules[self.match](target, creds, enforcer)
|
||||
except KeyError:
|
||||
# We don't have any matching rule; fail closed
|
||||
return False
|
||||
|
||||
|
||||
@register("role")
|
||||
class RoleCheck(Check):
|
||||
def __call__(self, target, creds, enforcer):
|
||||
"""Check that there is a matching role in the cred dict."""
|
||||
|
||||
return self.match.lower() in [x.lower() for x in creds['roles']]
|
||||
|
||||
|
||||
@register('http')
|
||||
class HttpCheck(Check):
|
||||
def __call__(self, target, creds, enforcer):
|
||||
"""Check http: rules by calling to a remote server.
|
||||
|
||||
This example implementation simply verifies that the response
|
||||
is exactly 'True'.
|
||||
"""
|
||||
|
||||
url = ('http:' + self.match) % target
|
||||
data = {'target': jsonutils.dumps(target),
|
||||
'credentials': jsonutils.dumps(creds)}
|
||||
post_data = urllib.urlencode(data)
|
||||
f = urllib2.urlopen(url, post_data)
|
||||
return f.read() == "True"
|
||||
|
||||
|
||||
@register(None)
|
||||
class GenericCheck(Check):
|
||||
def __call__(self, target, creds, enforcer):
|
||||
"""Check an individual match.
|
||||
|
||||
Matches look like:
|
||||
|
||||
tenant:%(tenant_id)s
|
||||
role:compute:admin
|
||||
"""
|
||||
|
||||
# TODO(termie): do dict inspection via dot syntax
|
||||
match = self.match % target
|
||||
if self.kind in creds:
|
||||
return match == six.text_type(creds[self.kind])
|
||||
return False
|
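Illustrative sketch, not part of the commit: how GenericCheck resolves a
"tenant:%(tenant_id)s" rule. The kind names a credentials key; the match is
%-interpolated against the target dict before comparison.

    from climate.openstack.common import policy

    check = policy._parse_check("tenant:%(tenant_id)s")
    print(check({'tenant_id': 'abc123'}, {'tenant': 'abc123'}, None))
    # True
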
@@ -26,13 +26,13 @@ For some wrappers that add message versioning to rpc, see:
"""

import inspect
import logging

from oslo.config import cfg

from climate.openstack.common.gettextutils import _
from climate.openstack.common import importutils
from climate.openstack.common import local
from climate.openstack.common import log as logging


LOG = logging.getLogger(__name__)

@@ -34,10 +34,6 @@ from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
# TODO(pekowsk): Remove import cfg and below comment in Havana.
# This import should no longer be needed when the amqp_rpc_single_reply_queue
# option is removed.
from oslo.config import cfg

from climate.openstack.common import excutils
from climate.openstack.common.gettextutils import _
@@ -46,16 +42,6 @@ from climate.openstack.common import log as logging
from climate.openstack.common.rpc import common as rpc_common


# TODO(pekowski): Remove this option in Havana.
amqp_opts = [
    cfg.BoolOpt('amqp_rpc_single_reply_queue',
                default=False,
                help='Enable a fast single reply queue if using AMQP based '
                     'RPC like RabbitMQ or Qpid.'),
]

cfg.CONF.register_opts(amqp_opts)

UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)

@@ -83,7 +69,7 @@ class Pool(pools.Pool):
    # is the above "while loop" gets all the cached connections from the
    # pool and closes them, but never returns them to the pool, a pool
    # leak. The unit tests hang waiting for an item to be returned to the
    # pool. The unit tests get here via the teatDown() method. In the run
    # pool. The unit tests get here via the tearDown() method. In the run
    # time code, it gets here via cleanup() and only appears in service.py
    # just before doing a sys.exit(), so cleanup() only happens once and
    # the leakage is not a problem.

@@ -102,19 +88,19 @@ def get_connection_pool(conf, connection_cls):


class ConnectionContext(rpc_common.Connection):
    """The class that is actually returned to the caller of
    create_connection(). This is essentially a wrapper around
    Connection that supports 'with'. It can also return a new
    Connection, or one from a pool. The function will also catch
    when an instance of this class is to be deleted. With that
    we can return Connections to the pool on exceptions and so
    forth without making the caller be responsible for catching
    them. If possible the function makes sure to return a
    connection to the pool.
    """The class that is actually returned to the create_connection() caller.

    This is essentially a wrapper around Connection that supports 'with'.
    It can also return a new Connection, or one from a pool.

    The function will also catch when an instance of this class is to be
    deleted. With that we can return Connections to the pool on exceptions
    and so forth without making the caller be responsible for catching them.
    If possible the function makes sure to return a connection to the pool.
    """

    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
        """Create a new connection, or get one from the pool"""
        """Create a new connection, or get one from the pool."""
        self.connection = None
        self.conf = conf
        self.connection_pool = connection_pool
@@ -127,7 +113,7 @@ class ConnectionContext(rpc_common.Connection):
        self.pooled = pooled

    def __enter__(self):
        """When with ConnectionContext() is used, return self"""
        """When with ConnectionContext() is used, return self."""
        return self

    def _done(self):
@@ -165,17 +151,19 @@ class ConnectionContext(rpc_common.Connection):
    def create_worker(self, topic, proxy, pool_name):
        self.connection.create_worker(topic, proxy, pool_name)

    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
    def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
                           ack_on_error=True):
        self.connection.join_consumer_pool(callback,
                                           pool_name,
                                           topic,
                                           exchange_name)
                                           exchange_name,
                                           ack_on_error)

    def consume_in_thread(self):
        self.connection.consume_in_thread()

    def __getattr__(self, key):
        """Proxy all other calls to the Connection instance"""
        """Proxy all other calls to the Connection instance."""
        if self.connection:
            return getattr(self.connection, key)
        else:
@@ -183,7 +171,7 @@ class ConnectionContext(rpc_common.Connection):


class ReplyProxy(ConnectionContext):
    """ Connection class for RPC replies / callbacks """
    """Connection class for RPC replies / callbacks."""
    def __init__(self, conf, connection_pool):
        self._call_waiters = {}
        self._num_call_waiters = 0
@@ -197,8 +185,10 @@ class ReplyProxy(ConnectionContext):
        msg_id = message_data.pop('_msg_id', None)
        waiter = self._call_waiters.get(msg_id)
        if not waiter:
            LOG.warn(_('no calling threads waiting for msg_id : %s'
                       ', message : %s') % (msg_id, message_data))
            LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
                       ', message : %(data)s'), {'msg_id': msg_id,
                                                 'data': message_data})
            LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
        else:
            waiter.put(message_data)

@@ -231,12 +221,7 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
        failure = rpc_common.serialize_remote_exception(failure,
                                                        log_failure)

    try:
        msg = {'result': reply, 'failure': failure}
    except TypeError:
        msg = {'result': dict((k, repr(v))
               for k, v in reply.__dict__.iteritems()),
               'failure': failure}
    msg = {'result': reply, 'failure': failure}
    if ending:
        msg['ending'] = True
    _add_unique_id(msg)
@@ -251,7 +236,7 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,


class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call"""
    """Context that supports replying to a rpc.call."""
    def __init__(self, **kwargs):
        self.msg_id = kwargs.pop('msg_id', None)
        self.reply_q = kwargs.pop('reply_q', None)
@@ -338,8 +323,9 @@ def _add_unique_id(msg):


class _ThreadPoolWithWait(object):
    """Base class for a delayed invocation manager used by
    the Connection class to start up green threads
    """Base class for a delayed invocation manager.

    Used by the Connection class to start up green threads
    to handle incoming messages.
    """

@@ -354,12 +340,14 @@ class _ThreadPoolWithWait(object):


class CallbackWrapper(_ThreadPoolWithWait):
    """Wraps a straight callback to allow it to be invoked in a green
    thread.
    """Wraps a straight callback.

    Allows it to be invoked in a green thread.
    """

    def __init__(self, conf, callback, connection_pool):
        """
        """Initiates CallbackWrapper object.

        :param conf: cfg.CONF instance
        :param callback: a callable (probably a function)
        :param connection_pool: connection pool as returned by
@@ -408,15 +396,17 @@ class ProxyCallback(_ThreadPoolWithWait):
        ctxt = unpack_context(self.conf, message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        version = message_data.get('version', None)
        version = message_data.get('version')
        namespace = message_data.get('namespace')
        if not method:
            LOG.warn(_('no method for message: %s') % message_data)
            ctxt.reply(_('No method for message: %s') % message_data,
                       connection_pool=self.connection_pool)
            return
        self.pool.spawn_n(self._process_data, ctxt, version, method, args)
        self.pool.spawn_n(self._process_data, ctxt, version, method,
                          namespace, args)

    def _process_data(self, ctxt, version, method, args):
    def _process_data(self, ctxt, version, method, namespace, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
@@ -427,7 +417,8 @@ class ProxyCallback(_ThreadPoolWithWait):
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, **args)
            rval = self.proxy.dispatch(ctxt, version, method, namespace,
                                       **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                for x in rval:
@@ -487,7 +478,7 @@ class MulticallProxyWaiter(object):
        return result

    def __iter__(self):
        """Return a result until we get a reply with an 'ending" flag"""
        """Return a result until we get a reply with an 'ending' flag."""
        if self._done:
            raise StopIteration
        while True:
@@ -495,7 +486,6 @@ class MulticallProxyWaiter(object):
                data = self._dataqueue.get(timeout=self._timeout)
                result = self._process_data(data)
            except queue.Empty:
                LOG.exception(_('Timed out waiting for RPC response.'))
                self.done()
                raise rpc_common.Timeout()
            except Exception:
@@ -510,61 +500,8 @@ class MulticallProxyWaiter(object):
            yield result


#TODO(pekowski): Remove MulticallWaiter() in Havana.
class MulticallWaiter(object):
    def __init__(self, conf, connection, timeout):
        self._connection = connection
        self._iterator = connection.iterconsume(timeout=timeout or
                                                conf.rpc_response_timeout)
        self._result = None
        self._done = False
        self._got_ending = False
        self._conf = conf
        self.msg_id_cache = _MsgIdCache()

    def done(self):
        if self._done:
            return
        self._done = True
        self._iterator.close()
        self._iterator = None
        self._connection.close()

    def __call__(self, data):
        """The consume() callback will call this. Store the result."""
        self.msg_id_cache.check_duplicate_message(data)
        if data['failure']:
            failure = data['failure']
            self._result = rpc_common.deserialize_remote_exception(self._conf,
                                                                   failure)

        elif data.get('ending', False):
            self._got_ending = True
        else:
            self._result = data['result']

    def __iter__(self):
        """Return a result until we get a 'None' response from consumer"""
        if self._done:
            raise StopIteration
        while True:
            try:
                self._iterator.next()
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.done()
            if self._got_ending:
                self.done()
                raise StopIteration
            result = self._result
            if isinstance(result, Exception):
                self.done()
                raise result
            yield result


def create_connection(conf, new, connection_pool):
    """Create a connection"""
    """Create a connection."""
    return ConnectionContext(conf, connection_pool, pooled=not new)


@@ -573,14 +510,6 @@ _reply_proxy_create_sem = semaphore.Semaphore()

def multicall(conf, context, topic, msg, timeout, connection_pool):
    """Make a call that returns multiple times."""
    # TODO(pekowski): Remove all these comments in Havana.
    # For amqp_rpc_single_reply_queue = False,
    # Can't use 'with' for multicall, as it returns an iterator
    # that will continue to use the connection. When it's done,
    # connection.close() will get called which will put it back into
    # the pool
    # For amqp_rpc_single_reply_queue = True,
    # The 'with' statement is mandatory for closing the connection
    LOG.debug(_('Making synchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
@@ -588,21 +517,13 @@ def multicall(conf, context, topic, msg, timeout, connection_pool):
    _add_unique_id(msg)
    pack_context(msg, context)

    # TODO(pekowski): Remove this flag and the code under the if clause
    # in Havana.
    if not conf.amqp_rpc_single_reply_queue:
        conn = ConnectionContext(conf, connection_pool)
        wait_msg = MulticallWaiter(conf, conn, timeout)
        conn.declare_direct_consumer(msg_id, wait_msg)
    with _reply_proxy_create_sem:
        if not connection_pool.reply_proxy:
            connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
    msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
    wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    else:
        with _reply_proxy_create_sem:
            if not connection_pool.reply_proxy:
                connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
        msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
        wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
        with ConnectionContext(conf, connection_pool) as conn:
            conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    return wait_msg

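With the amqp_rpc_single_reply_queue branch gone, every call now flows
through the shared ReplyProxy above. An illustrative sketch, not part of the
commit, of how callers consume the returned MulticallProxyWaiter; the topic
and helper names are placeholders.

    for reply in multicall(conf, context, 'climate-topic', msg,
                           timeout=60, connection_pool=pool):
        handle(reply)  # one iteration per reply, until the 'ending' message
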
@@ -662,7 +583,7 @@ def notify(conf, context, topic, msg, connection_pool, envelope):
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        if envelope:
            msg = rpc_common.serialize_msg(msg, force_envelope=True)
            msg = rpc_common.serialize_msg(msg)
        conn.notify_send(topic, msg)


@@ -22,6 +22,7 @@ import sys
import traceback

from oslo.config import cfg
import six

from climate.openstack.common.gettextutils import _
from climate.openstack.common import importutils
@@ -69,9 +70,7 @@ _RPC_ENVELOPE_VERSION = '2.0'
_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'


# TODO(russellb) Turn this on after Grizzly.
_SEND_RPC_ENVELOPE = False
_REMOTE_POSTFIX = '_Remote'


class RPCException(Exception):
@@ -122,7 +121,26 @@ class Timeout(RPCException):
    This exception is raised if the rpc_response_timeout is reached while
    waiting for a response from the remote side.
    """
    message = _("Timeout while waiting on RPC response.")
    message = _('Timeout while waiting on RPC response - '
                'topic: "%(topic)s", RPC method: "%(method)s" '
                'info: "%(info)s"')

    def __init__(self, info=None, topic=None, method=None):
        """Initiates Timeout object.

        :param info: Extra info to convey to the user
        :param topic: The topic that the rpc call was sent to
        :param rpc_method_name: The name of the rpc method being
                                called
        """
        self.info = info
        self.topic = topic
        self.method = method
        super(Timeout, self).__init__(
            None,
            info=info or _('<unknown>'),
            topic=topic or _('<unknown>'),
            method=method or _('<unknown>'))

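Illustrative sketch, not part of the commit: the reworked Timeout carries
topic and method, and unset fields fall back to '<unknown>'. The topic and
method names here are placeholders.

    from climate.openstack.common.rpc import common as rpc_common

    try:
        raise rpc_common.Timeout(topic='climate-topic', method='list_leases')
    except rpc_common.Timeout as exc:
        print(exc)
    # Timeout while waiting on RPC response - topic: "climate-topic",
    # RPC method: "list_leases" info: "<unknown>"
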
class DuplicateMessageError(RPCException):
@@ -143,6 +161,10 @@ class UnsupportedRpcEnvelopeVersion(RPCException):
                "not supported by this endpoint.")


class RpcVersionCapError(RPCException):
    message = _("Specified RPC version cap, %(version_cap)s, is too low")


class Connection(object):
    """A connection, returned by rpc.create_connection().

@@ -202,9 +224,9 @@ class Connection(object):
        raise NotImplementedError()

    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
        """Register as a member of a group of consumers for a given topic from
        the specified exchange.
        """Register as a member of a group of consumers.

        Uses given topic from the specified exchange.
        Exactly one member of a given pool will receive each message.

        A message will be delivered to multiple pools, if more than
@@ -262,7 +284,7 @@ def _safe_log(log_func, msg, msg_data):
                for elem in arg[:-1]:
                    d = d[elem]
                d[arg[-1]] = '<SANITIZED>'
            except KeyError, e:
            except KeyError as e:
                LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'),
                         {'item': arg,
                          'err': e})
@@ -285,17 +307,27 @@ def serialize_remote_exception(failure_info, log_failure=True):
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    if log_failure:
        LOG.error(_("Returning exception %s to caller"), unicode(failure))
        LOG.error(_("Returning exception %s to caller"),
                  six.text_type(failure))
        LOG.error(tb)

    kwargs = {}
    if hasattr(failure, 'kwargs'):
        kwargs = failure.kwargs

    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Lets turn it back into the original exception type.
    cls_name = str(failure.__class__.__name__)
    mod_name = str(failure.__class__.__module__)
    if (cls_name.endswith(_REMOTE_POSTFIX) and
            mod_name.endswith(_REMOTE_POSTFIX)):
        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]

    data = {
        'class': str(failure.__class__.__name__),
        'module': str(failure.__class__.__module__),
        'message': unicode(failure),
        'class': cls_name,
        'module': mod_name,
        'message': six.text_type(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs
@@ -325,14 +357,15 @@ def deserialize_remote_exception(conf, data):
        if not issubclass(klass, Exception):
            raise TypeError("Can only deserialize Exceptions")

        failure = klass(**failure.get('kwargs', {}))
        failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
    except (AttributeError, TypeError, ImportError):
        return RemoteError(name, failure.get('message'), trace)

    ex_type = type(failure)
    str_override = lambda self: message
    new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
    new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
                       {'__str__': str_override, '__unicode__': str_override})
    new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
    try:
        # NOTE(ameade): Dynamically create a new exception type and swap it in
        # as the new type for the exception. This only works on user defined
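
Hedged sketch, not part of the commit: the _REMOTE_POSTFIX handling above
lets a remote exception round-trip without stacking '_Remote' suffixes. It
assumes conf is a loaded cfg.CONF whose allowed_rpc_exception_modules
default still includes 'exceptions'.

    import sys
    from climate.openstack.common.rpc import common as rpc_common

    try:
        raise ValueError('boom')
    except ValueError:
        wire = rpc_common.serialize_remote_exception(sys.exc_info(),
                                                     log_failure=False)
    exc = rpc_common.deserialize_remote_exception(conf, wire)
    # type(exc).__name__ == 'ValueError_Remote'; serializing exc again strips
    # the postfix, so the peer sees plain 'ValueError'.
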
@@ -394,10 +427,11 @@ class CommonRpcContext(object):


class ClientException(Exception):
    """This encapsulates some actual exception that is expected to be
    hit by an RPC proxy object. Merely instantiating it records the
    current exception information, which will be passed back to the
    RPC client without exceptional logging."""
    """Encapsulates actual exception expected to be hit by a RPC proxy object.

    Merely instantiating it records the current exception information, which
    will be passed back to the RPC client without exceptional logging.
    """
    def __init__(self):
        self._exc_info = sys.exc_info()

@@ -405,7 +439,7 @@ class ClientException(Exception):
def catch_client_exception(exceptions, func, *args, **kwargs):
    try:
        return func(*args, **kwargs)
    except Exception, e:
    except Exception as e:
        if type(e) in exceptions:
            raise ClientException()
        else:
@@ -414,11 +448,13 @@ def catch_client_exception(exceptions, func, *args, **kwargs):

def client_exceptions(*exceptions):
    """Decorator for manager methods that raise expected exceptions.

    Marking a Manager method with this decorator allows the declaration
    of expected exceptions that the RPC layer should not consider fatal,
    and not log as if they were generated in a real error scenario. Note
    that this will cause listed exceptions to be wrapped in a
    ClientException, which is used internally by the RPC layer."""
    ClientException, which is used internally by the RPC layer.
    """
    def outer(func):
        def inner(*args, **kwargs):
            return catch_client_exception(exceptions, func, *args, **kwargs)
@@ -441,10 +477,7 @@ def version_is_compatible(imp_version, version):
    return True


def serialize_msg(raw_msg, force_envelope=False):
    if not _SEND_RPC_ENVELOPE and not force_envelope:
        return raw_msg

def serialize_msg(raw_msg):
    # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
    # information about this format.
    msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,

@@ -84,6 +84,7 @@ minimum version that supports the new parameter should be specified.
"""

from climate.openstack.common.rpc import common as rpc_common
from climate.openstack.common.rpc import serializer as rpc_serializer


class RpcDispatcher(object):
@@ -93,23 +94,48 @@ class RpcDispatcher(object):
    contains a list of underlying managers that have an API_VERSION attribute.
    """

    def __init__(self, callbacks):
    def __init__(self, callbacks, serializer=None):
        """Initialize the rpc dispatcher.

        :param callbacks: List of proxy objects that are an instance
                          of a class with rpc methods exposed. Each proxy
                          object should have an RPC_API_VERSION attribute.
        :param serializer: The Serializer object that will be used to
                           deserialize arguments before the method call and
                           to serialize the result after it returns.
        """
        self.callbacks = callbacks
        if serializer is None:
            serializer = rpc_serializer.NoOpSerializer()
        self.serializer = serializer
        super(RpcDispatcher, self).__init__()

    def dispatch(self, ctxt, version, method, **kwargs):
    def _deserialize_args(self, context, kwargs):
        """Helper method called to deserialize args before dispatch.

        This calls our serializer on each argument, returning a new set of
        args that have been deserialized.

        :param context: The request context
        :param kwargs: The arguments to be deserialized
        :returns: A new set of deserialized args
        """
        new_kwargs = dict()
        for argname, arg in kwargs.iteritems():
            new_kwargs[argname] = self.serializer.deserialize_entity(context,
                                                                     arg)
        return new_kwargs

    def dispatch(self, ctxt, version, method, namespace, **kwargs):
        """Dispatch a message based on a requested version.

        :param ctxt: The request context
        :param version: The requested API version from the incoming message
        :param method: The method requested to be called by the incoming
                       message.
        :param namespace: The namespace for the requested method. If None,
                          the dispatcher will look for a method on a callback
                          object with no namespace set.
        :param kwargs: A dict of keyword arguments to be passed to the method.

        :returns: Whatever is returned by the underlying method that gets
@@ -120,17 +146,31 @@ class RpcDispatcher(object):

        had_compatible = False
        for proxyobj in self.callbacks:
            if hasattr(proxyobj, 'RPC_API_VERSION'):
            # Check for namespace compatibility
            try:
                cb_namespace = proxyobj.RPC_API_NAMESPACE
            except AttributeError:
                cb_namespace = None

            if namespace != cb_namespace:
                continue

            # Check for version compatibility
            try:
                rpc_api_version = proxyobj.RPC_API_VERSION
            else:
            except AttributeError:
                rpc_api_version = '1.0'

            is_compatible = rpc_common.version_is_compatible(rpc_api_version,
                                                             version)
            had_compatible = had_compatible or is_compatible

            if not hasattr(proxyobj, method):
                continue
            if is_compatible:
                return getattr(proxyobj, method)(ctxt, **kwargs)
                kwargs = self._deserialize_args(ctxt, kwargs)
                result = getattr(proxyobj, method)(ctxt, **kwargs)
                return self.serializer.serialize_entity(ctxt, result)

        if had_compatible:
            raise AttributeError("No such RPC function '%s'" % method)

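Illustrative sketch, not part of the commit: namespace-aware dispatch. A
callback only receives calls whose namespace matches its RPC_API_NAMESPACE
(None when the attribute is absent); ctxt stands in for a request context.

    class BaseAPI(object):
        RPC_API_VERSION = '1.0'

        def ping(self, ctxt):
            return 'pong'

    class AdminAPI(object):
        RPC_API_NAMESPACE = 'admin'
        RPC_API_VERSION = '1.0'

        def ping(self, ctxt):
            return 'admin-pong'

    disp = RpcDispatcher([BaseAPI(), AdminAPI()])
    disp.dispatch(ctxt, '1.0', 'ping', None)     # -> 'pong'
    disp.dispatch(ctxt, '1.0', 'ping', 'admin')  # -> 'admin-pong'
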
@@ -57,13 +57,14 @@ class Consumer(object):
        self.topic = topic
        self.proxy = proxy

    def call(self, context, version, method, args, timeout):
    def call(self, context, version, method, namespace, args, timeout):
        done = eventlet.event.Event()

        def _inner():
            ctxt = RpcContext.from_dict(context.to_dict())
            try:
                rval = self.proxy.dispatch(context, version, method, **args)
                rval = self.proxy.dispatch(context, version, method,
                                           namespace, **args)
                res = []
                # Caller might have called ctxt.reply() manually
                for (reply, failure) in ctxt._response:
@@ -121,7 +122,7 @@ class Connection(object):


def create_connection(conf, new=True):
    """Create a connection"""
    """Create a connection."""
    return Connection()


@@ -140,13 +141,15 @@ def multicall(conf, context, topic, msg, timeout=None):
        return
    args = msg.get('args', {})
    version = msg.get('version', None)
    namespace = msg.get('namespace', None)

    try:
        consumer = CONSUMERS[topic][0]
    except (KeyError, IndexError):
        return iter([None])
    else:
        return consumer.call(context, version, method, args, timeout)
        return consumer.call(context, version, method, namespace, args,
                             timeout)


def call(conf, context, topic, msg, timeout=None):
@@ -176,16 +179,17 @@ def cleanup():


def fanout_cast(conf, context, topic, msg):
    """Cast to all consumers of a topic"""
    """Cast to all consumers of a topic."""
    check_serialize(msg)
    method = msg.get('method')
    if not method:
        return
    args = msg.get('args', {})
    version = msg.get('version', None)
    namespace = msg.get('namespace', None)

    for consumer in CONSUMERS.get(topic, []):
        try:
            consumer.call(context, version, method, args, None)
            consumer.call(context, version, method, namespace, args, None)
        except Exception:
            pass

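Illustrative note, not part of the commit: because the fake driver now
threads namespace through Consumer.call() exactly like the real drivers, an
in-process test message only needs the extra key; the topic is a placeholder.

    msg = {'method': 'ping', 'namespace': None, 'version': '1.0', 'args': {}}
    # multicall(conf, context, 'test-topic', msg) extracts 'namespace' and
    # passes it to consumer.call(context, version, method, namespace, ...).
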
@@ -18,7 +18,6 @@ import functools
import itertools
import socket
import ssl
import sys
import time
import uuid

@@ -30,6 +29,7 @@ import kombu.entity
import kombu.messaging
from oslo.config import cfg

from climate.openstack.common import excutils
from climate.openstack.common.gettextutils import _
from climate.openstack.common import network_utils
from climate.openstack.common.rpc import amqp as rpc_amqp
@@ -129,15 +129,46 @@ class ConsumerBase(object):
        self.tag = str(tag)
        self.kwargs = kwargs
        self.queue = None
        self.ack_on_error = kwargs.get('ack_on_error', True)
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-declare the queue after a rabbit reconnect"""
        """Re-declare the queue after a rabbit reconnect."""
        self.channel = channel
        self.kwargs['channel'] = channel
        self.queue = kombu.entity.Queue(**self.kwargs)
        self.queue.declare()

    def _callback_handler(self, message, callback):
        """Call callback with deserialized message.

        Messages that are processed without exception are ack'ed.

        If the message processing generates an exception, it will be
        ack'ed if ack_on_error=True. Otherwise it will be .reject()'ed.
        Rejection is better than waiting for the message to timeout.
        Rejected messages are immediately requeued.
        """

        ack_msg = False
        try:
            msg = rpc_common.deserialize_msg(message.payload)
            callback(msg)
            ack_msg = True
        except Exception:
            if self.ack_on_error:
                ack_msg = True
                LOG.exception(_("Failed to process message"
                                " ... skipping it."))
            else:
                LOG.exception(_("Failed to process message"
                                " ... will requeue."))
        finally:
            if ack_msg:
                message.ack()
            else:
                message.reject()

    def consume(self, *args, **kwargs):
        """Actually declare the consumer on the amqp channel. This will
        start the flow of messages from the queue. Using the
@@ -150,8 +181,6 @@ class ConsumerBase(object):
        If kwargs['nowait'] is True, then this call will block until
        a message is read.

        Messages will automatically be acked if the callback doesn't
        raise an exception
        """

        options = {'consumer_tag': self.tag}
@@ -162,21 +191,15 @@ class ConsumerBase(object):

        def _callback(raw_message):
            message = self.channel.message_to_python(raw_message)
            try:
                msg = rpc_common.deserialize_msg(message.payload)
                callback(msg)
            except Exception:
                LOG.exception(_("Failed to process message... skipping it."))
            finally:
                message.ack()
            self._callback_handler(message, callback)

        self.queue.consume(*args, callback=_callback, **options)

    def cancel(self):
        """Cancel the consuming from the queue, if it has started"""
        """Cancel the consuming from the queue, if it has started."""
        try:
            self.queue.cancel(self.tag)
        except KeyError, e:
        except KeyError as e:
            # NOTE(comstud): Kludge to get around a amqplib bug
            if str(e) != "u'%s'" % self.tag:
                raise
@@ -184,7 +207,7 @@ class ConsumerBase(object):


class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'"""
    """Queue/consumer class for 'direct'."""

    def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
        """Init a 'direct' queue.
@@ -216,7 +239,7 @@ class DirectConsumer(ConsumerBase):


class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'"""
    """Consumer class for 'topic'."""

    def __init__(self, conf, channel, topic, callback, tag, name=None,
                 exchange_name=None, **kwargs):
@@ -253,7 +276,7 @@ class TopicConsumer(ConsumerBase):


class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'"""
    """Consumer class for 'fanout'."""

    def __init__(self, conf, channel, topic, callback, tag, **kwargs):
        """Init a 'fanout' queue.
@@ -286,7 +309,7 @@ class FanoutConsumer(ConsumerBase):


class Publisher(object):
    """Base Publisher class"""
    """Base Publisher class."""

    def __init__(self, channel, exchange_name, routing_key, **kwargs):
        """Init the Publisher class with the exchange_name, routing_key,
@@ -298,7 +321,7 @@ class Publisher(object):
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-establish the Producer after a rabbit reconnection"""
        """Re-establish the Producer after a rabbit reconnection."""
        self.exchange = kombu.entity.Exchange(name=self.exchange_name,
                                              **self.kwargs)
        self.producer = kombu.messaging.Producer(exchange=self.exchange,
@@ -306,7 +329,7 @@ class Publisher(object):
                                                 routing_key=self.routing_key)

    def send(self, msg, timeout=None):
        """Send a message"""
        """Send a message."""
        if timeout:
            #
            # AMQP TTL is in milliseconds when set in the header.
@@ -317,7 +340,7 @@ class Publisher(object):


class DirectPublisher(Publisher):
    """Publisher class for 'direct'"""
    """Publisher class for 'direct'."""
    def __init__(self, conf, channel, msg_id, **kwargs):
        """init a 'direct' publisher.

@@ -333,7 +356,7 @@ class DirectPublisher(Publisher):


class TopicPublisher(Publisher):
    """Publisher class for 'topic'"""
    """Publisher class for 'topic'."""
    def __init__(self, conf, channel, topic, **kwargs):
        """init a 'topic' publisher.

@@ -352,7 +375,7 @@ class TopicPublisher(Publisher):


class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'"""
    """Publisher class for 'fanout'."""
    def __init__(self, conf, channel, topic, **kwargs):
        """init a 'fanout' publisher.

@@ -367,7 +390,7 @@ class FanoutPublisher(Publisher):


class NotifyPublisher(TopicPublisher):
    """Publisher class for 'notify'"""
    """Publisher class for 'notify'."""

    def __init__(self, conf, channel, topic, **kwargs):
        self.durable = kwargs.pop('durable', conf.rabbit_durable_queues)
@@ -447,8 +470,9 @@ class Connection(object):
        self.reconnect()

    def _fetch_ssl_params(self):
        """Handles fetching what ssl params
        should be used for the connection (if any)"""
        """Handles fetching what ssl params should be used for the connection
        (if any).
        """
        ssl_params = dict()

        # http://docs.python.org/library/ssl.html - ssl.wrap_socket
@@ -520,7 +544,7 @@ class Connection(object):
                return
            except (IOError, self.connection_errors) as e:
                pass
            except Exception, e:
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
@@ -536,13 +560,11 @@ class Connection(object):
            log_info.update(params)

            if self.max_retries and attempt == self.max_retries:
                LOG.error(_('Unable to connect to AMQP server on '
                            '%(hostname)s:%(port)d after %(max_retries)d '
                            'tries: %(err_str)s') % log_info)
                # NOTE(comstud): Copied from original code. There's
                # really no better recourse because if this was a queue we
                # need to consume on, we have no way to consume anymore.
                sys.exit(1)
                msg = _('Unable to connect to AMQP server on '
                        '%(hostname)s:%(port)d after %(max_retries)d '
                        'tries: %(err_str)s') % log_info
                LOG.error(msg)
                raise rpc_common.RPCException(msg)

            if attempt == 1:
                sleep_time = self.interval_start or 1
@@ -561,10 +583,10 @@ class Connection(object):
        while True:
            try:
                return method(*args, **kwargs)
            except (self.connection_errors, socket.timeout, IOError), e:
            except (self.connection_errors, socket.timeout, IOError) as e:
                if error_callback:
                    error_callback(e)
            except Exception, e:
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
@@ -578,18 +600,18 @@ class Connection(object):
            self.reconnect()

    def get_channel(self):
        """Convenience call for bin/clear_rabbit_queues"""
        """Convenience call for bin/clear_rabbit_queues."""
        return self.channel

    def close(self):
        """Close/release this connection"""
        """Close/release this connection."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.connection.release()
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again"""
        """Reset a connection so it can be used again."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.channel.close()
@@ -618,7 +640,7 @@ class Connection(object):
        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers"""
        """Return an iterator that will consume from all queues/consumers."""

        info = {'do_consume': True}

@@ -634,8 +656,8 @@ class Connection(object):

        def _consume():
            if info['do_consume']:
                queues_head = self.consumers[:-1]
                queues_tail = self.consumers[-1]
                queues_head = self.consumers[:-1]  # not fanout.
                queues_tail = self.consumers[-1]  # fanout
                for queue in queues_head:
                    queue.consume(nowait=True)
                queues_tail.consume(nowait=False)
@@ -648,7 +670,7 @@ class Connection(object):
            yield self.ensure(_error_callback, _consume)

    def cancel_consumer_thread(self):
        """Cancel a consumer thread"""
        """Cancel a consumer thread."""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
@@ -663,7 +685,7 @@ class Connection(object):
            proxy_cb.wait()

    def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
        """Send to a publisher based on the publisher class"""
        """Send to a publisher based on the publisher class."""

        def _error_callback(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
@@ -684,36 +706,37 @@ class Connection(object):
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
                               exchange_name=None):
                               exchange_name=None, ack_on_error=True):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ack_on_error=ack_on_error,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer"""
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message"""
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg, timeout=None):
        """Send a 'topic' message"""
        """Send a 'topic' message."""
        self.publisher_send(TopicPublisher, topic, msg, timeout)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message"""
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic"""
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)

    def consume(self, limit=None):
        """Consume from all queues/consumers"""
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit)
        while True:
            try:
@@ -722,7 +745,8 @@ class Connection(object):
                return

    def consume_in_thread(self):
        """Consumer from all queues/consumers in a greenthread"""
        """Consumer from all queues/consumers in a greenthread."""
        @excutils.forever_retry_uncaught_exceptions
        def _consumer_thread():
            try:
                self.consume()
@@ -733,7 +757,7 @@ class Connection(object):
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object"""
        """Create a consumer that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
@@ -745,7 +769,7 @@ class Connection(object):
        self.declare_topic_consumer(topic, proxy_cb)

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object"""
        """Create a worker that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
@@ -753,7 +777,7 @@ class Connection(object):
        self.declare_topic_consumer(topic, proxy_cb, pool_name)

    def join_consumer_pool(self, callback, pool_name, topic,
                           exchange_name=None):
                           exchange_name=None, ack_on_error=True):
        """Register as a member of a group of consumers for a given topic from
        the specified exchange.

@@ -774,11 +798,12 @@ class Connection(object):
            topic=topic,
            exchange_name=exchange_name,
            callback=callback_wrapper,
            ack_on_error=ack_on_error,
        )


def create_connection(conf, new=True):
    """Create a connection"""
    """Create a connection."""
    return rpc_amqp.create_connection(
        conf, new,
        rpc_amqp.get_connection_pool(conf, Connection))

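Illustrative sketch, not part of the commit: opting out of ack-on-error via
the new keyword, so a callback that raises causes the message to be
reject()'ed and requeued instead of dropped. All names except the keyword
are placeholders.

    connection.join_consumer_pool(callback=handle_notification,
                                  pool_name='climate-workers',
                                  topic='notifications',
                                  ack_on_error=False)
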
@ -24,6 +24,7 @@ import eventlet
|
||||
import greenlet
|
||||
from oslo.config import cfg
|
||||
|
||||
from climate.openstack.common import excutils
|
||||
from climate.openstack.common.gettextutils import _
|
||||
from climate.openstack.common import importutils
|
||||
from climate.openstack.common import jsonutils
|
||||
@ -31,6 +32,7 @@ from climate.openstack.common import log as logging
|
||||
from climate.openstack.common.rpc import amqp as rpc_amqp
|
||||
from climate.openstack.common.rpc import common as rpc_common
|
||||
|
||||
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")

@@ -40,8 +42,8 @@ qpid_opts = [
    cfg.StrOpt('qpid_hostname',
               default='localhost',
               help='Qpid broker hostname'),
    cfg.StrOpt('qpid_port',
               default='5672',
    cfg.IntOpt('qpid_port',
               default=5672,
               help='Qpid broker port'),
    cfg.ListOpt('qpid_hosts',
                default=['$qpid_hostname:$qpid_port'],
@@ -69,6 +71,8 @@ qpid_opts = [

cfg.CONF.register_opts(qpid_opts)

JSON_CONTENT_TYPE = 'application/json; charset=utf8'


class ConsumerBase(object):
    """Consumer base class."""
@@ -115,31 +119,59 @@ class ConsumerBase(object):

        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))

        self.reconnect(session)
        self.connect(session)

    def connect(self, session):
        """Declare the receiver on connect."""
        self._declare_receiver(session)

    def reconnect(self, session):
        """Re-declare the receiver after a qpid reconnect"""
        """Re-declare the receiver after a qpid reconnect."""
        self._declare_receiver(session)

    def _declare_receiver(self, session):
        self.session = session
        self.receiver = session.receiver(self.address)
        self.receiver.capacity = 1

    def _unpack_json_msg(self, msg):
        """Load the JSON data in msg if msg.content_type indicates that it
        is necessary.  Put the loaded data back into msg.content and
        update msg.content_type appropriately.

        A Qpid Message containing a dict will have a content_type of
        'amqp/map', whereas one containing a string that needs to be converted
        back from JSON will have a content_type of JSON_CONTENT_TYPE.

        :param msg: a Qpid Message object
        :returns: None
        """
        if msg.content_type == JSON_CONTENT_TYPE:
            msg.content = jsonutils.loads(msg.content)
            msg.content_type = 'amqp/map'

    def consume(self):
        """Fetch the message and pass it to the callback object"""
        """Fetch the message and pass it to the callback object."""
        message = self.receiver.fetch()
        try:
            self._unpack_json_msg(message)
            msg = rpc_common.deserialize_msg(message.content)
            self.callback(msg)
        except Exception:
            LOG.exception(_("Failed to process message... skipping it."))
        finally:
            # TODO(sandy): Need support for optional ack_on_error.
            self.session.acknowledge(message)

    def get_receiver(self):
        return self.receiver

    def get_node_name(self):
        return self.address.split(';')[0]


class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'"""
    """Queue/consumer class for 'direct'."""

    def __init__(self, conf, session, msg_id, callback):
        """Init a 'direct' queue.
@@ -157,7 +189,7 @@ class DirectConsumer(ConsumerBase):


class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'"""
    """Consumer class for 'topic'."""

    def __init__(self, conf, session, topic, callback, name=None,
                 exchange_name=None):
@@ -177,7 +209,7 @@ class TopicConsumer(ConsumerBase):


class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'"""
    """Consumer class for 'fanout'."""

    def __init__(self, conf, session, topic, callback):
        """Init a 'fanout' queue.
@@ -186,6 +218,7 @@ class FanoutConsumer(ConsumerBase):
        'topic' is the topic to listen on
        'callback' is the callback to call when messages are received
        """
        self.conf = conf

        super(FanoutConsumer, self).__init__(
            session, callback,
@@ -194,9 +227,21 @@ class FanoutConsumer(ConsumerBase):
            "%s_fanout_%s" % (topic, uuid.uuid4().hex),
            {"exclusive": True})

    def reconnect(self, session):
        topic = self.get_node_name()
        params = {
            'session': session,
            'topic': topic,
            'callback': self.callback,
        }

        self.__init__(conf=self.conf, **params)

        super(FanoutConsumer, self).reconnect(session)


class Publisher(object):
    """Base Publisher class"""
    """Base Publisher class."""

    def __init__(self, session, node_name, node_opts=None):
        """Init the Publisher class with the exchange_name, routing_key,
@@ -225,16 +270,43 @@ class Publisher(object):
        self.reconnect(session)

    def reconnect(self, session):
        """Re-establish the Sender after a reconnection"""
        """Re-establish the Sender after a reconnection."""
        self.sender = session.sender(self.address)

    def _pack_json_msg(self, msg):
        """Qpid cannot serialize dicts containing strings longer than 65535
        characters.  This function dumps the message content to a JSON
        string, which Qpid is able to handle.

        :param msg: May be either a Qpid Message object or a bare dict.
        :returns: A Qpid Message with its content field JSON encoded.
        """
        try:
            msg.content = jsonutils.dumps(msg.content)
        except AttributeError:
            # Need to have a Qpid message so we can set the content_type.
            msg = qpid_messaging.Message(jsonutils.dumps(msg))
        msg.content_type = JSON_CONTENT_TYPE
        return msg
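To make the new JSON fallback concrete, here is a minimal sketch (not part of the patch) of how `Publisher._pack_json_msg` and `ConsumerBase._unpack_json_msg` pair up; the `FakeMessage` stand-in is hypothetical and only mimics the two Qpid `Message` attributes the helpers touch:

```python
import json


class FakeMessage(object):
    """Hypothetical stand-in for qpid.messaging.Message: just the
    content and content_type attributes the helpers use."""
    def __init__(self, content, content_type='amqp/map'):
        self.content = content
        self.content_type = content_type


JSON_CONTENT_TYPE = 'application/json; charset=utf8'

# Publisher side (_pack_json_msg): a dict Qpid refuses to encode is
# dumped to a JSON string and relabelled.
msg = FakeMessage({'payload': 'x' * 70000})
msg.content = json.dumps(msg.content)
msg.content_type = JSON_CONTENT_TYPE

# Consumer side (_unpack_json_msg): the exact inverse.
if msg.content_type == JSON_CONTENT_TYPE:
    msg.content = json.loads(msg.content)
    msg.content_type = 'amqp/map'

assert msg.content == {'payload': 'x' * 70000}
```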
    def send(self, msg):
        """Send a message"""
        """Send a message."""
        try:
            # Check if Qpid can encode the message
            check_msg = msg
            if not hasattr(check_msg, 'content_type'):
                check_msg = qpid_messaging.Message(msg)
            content_type = check_msg.content_type
            enc, dec = qpid_messaging.message.get_codec(content_type)
            enc(check_msg.content)
        except qpid_codec.CodecException:
            # This means the message couldn't be serialized as a dict.
            msg = self._pack_json_msg(msg)
        self.sender.send(msg)


class DirectPublisher(Publisher):
    """Publisher class for 'direct'"""
    """Publisher class for 'direct'."""
    def __init__(self, conf, session, msg_id):
        """Init a 'direct' publisher."""
        super(DirectPublisher, self).__init__(session, msg_id,
@@ -242,7 +314,7 @@ class DirectPublisher(Publisher):


class TopicPublisher(Publisher):
    """Publisher class for 'topic'"""
    """Publisher class for 'topic'."""
    def __init__(self, conf, session, topic):
        """init a 'topic' publisher.
        """
@@ -252,7 +324,7 @@ class TopicPublisher(Publisher):


class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'"""
    """Publisher class for 'fanout'."""
    def __init__(self, conf, session, topic):
        """init a 'fanout' publisher.
        """
@@ -262,7 +334,7 @@ class FanoutPublisher(Publisher):


class NotifyPublisher(Publisher):
    """Publisher class for notifications"""
    """Publisher class for notifications."""
    def __init__(self, conf, session, topic):
        """init a 'topic' publisher.
        """
@@ -320,7 +392,7 @@ class Connection(object):
        # Reconnection is done by self.reconnect()
        self.connection.reconnect = False
        self.connection.heartbeat = self.conf.qpid_heartbeat
        self.connection.protocol = self.conf.qpid_protocol
        self.connection.transport = self.conf.qpid_protocol
        self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay

    def _register_consumer(self, consumer):
@@ -330,23 +402,24 @@ class Connection(object):
        return self.consumers[str(receiver)]

    def reconnect(self):
        """Handles reconnecting and re-establishing sessions and queues"""
        if self.connection.opened():
            try:
                self.connection.close()
            except qpid_exceptions.ConnectionError:
                pass

        """Handles reconnecting and re-establishing sessions and queues."""
        attempt = 0
        delay = 1
        while True:
            # Close the session if necessary
            if self.connection.opened():
                try:
                    self.connection.close()
                except qpid_exceptions.ConnectionError:
                    pass

            broker = self.brokers[attempt % len(self.brokers)]
            attempt += 1

            try:
                self.connection_create(broker)
                self.connection.open()
            except qpid_exceptions.ConnectionError, e:
            except qpid_exceptions.ConnectionError as e:
                msg_dict = dict(e=e, delay=delay)
                msg = _("Unable to connect to AMQP server: %(e)s. "
                        "Sleeping %(delay)s seconds") % msg_dict
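The rewritten loop rotates through the configured broker list instead of retrying one host forever. A small sketch of that selection policy (broker names are invented, and the doubling backoff with a cap reflects what the truncated hunk implies rather than quoting it):

```python
brokers = ['amqp-1:5672', 'amqp-2:5672', 'amqp-3:5672']  # hypothetical hosts

attempt = 0
delay = 1
for _ in range(5):  # the real loop runs until a connection succeeds
    broker = brokers[attempt % len(brokers)]  # round-robin over brokers
    attempt += 1
    print('trying %s; next backoff %ss' % (broker, delay))
    delay = min(delay * 2, 60)  # assumed doubling/cap; the hunk truncates here
```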
@@ -374,20 +447,26 @@ class Connection(object):
        try:
            return method(*args, **kwargs)
        except (qpid_exceptions.Empty,
                qpid_exceptions.ConnectionError), e:
                qpid_exceptions.ConnectionError) as e:
            if error_callback:
                error_callback(e)
            self.reconnect()

    def close(self):
        """Close/release this connection"""
        """Close/release this connection."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.connection.close()
        try:
            self.connection.close()
        except Exception:
            # NOTE(dripton) Logging exceptions that happen during cleanup just
            # causes confusion; there's really nothing useful we can do with
            # them.
            pass
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again"""
        """Reset a connection so it can be used again."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.session.close()
@@ -411,7 +490,7 @@ class Connection(object):
        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers"""
        """Return an iterator that will consume from all queues/consumers."""

        def _error_callback(exc):
            if isinstance(exc, qpid_exceptions.Empty):
@@ -435,7 +514,7 @@ class Connection(object):
            yield self.ensure(_error_callback, _consume)

    def cancel_consumer_thread(self):
        """Cancel a consumer thread"""
        """Cancel a consumer thread."""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
@@ -450,7 +529,7 @@ class Connection(object):
            proxy_cb.wait()

    def publisher_send(self, cls, topic, msg):
        """Send to a publisher based on the publisher class"""
        """Send to a publisher based on the publisher class."""

        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
@@ -480,15 +559,15 @@ class Connection(object):
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer"""
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message"""
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg, timeout=None):
        """Send a 'topic' message"""
        """Send a 'topic' message."""
        #
        # We want to create a message with attributes, e.g. a TTL. We
        # don't really need to keep 'msg' in its JSON format any longer
@@ -503,15 +582,15 @@ class Connection(object):
        self.publisher_send(TopicPublisher, topic, qpid_message)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message"""
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic"""
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic, msg)

    def consume(self, limit=None):
        """Consume from all queues/consumers"""
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit)
        while True:
            try:
@@ -520,7 +599,8 @@ class Connection(object):
                return

    def consume_in_thread(self):
        """Consume from all queues/consumers in a greenthread"""
        """Consume from all queues/consumers in a greenthread."""
        @excutils.forever_retry_uncaught_exceptions
        def _consumer_thread():
            try:
                self.consume()
@@ -531,7 +611,7 @@ class Connection(object):
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object"""
        """Create a consumer that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
@@ -547,7 +627,7 @@ class Connection(object):
        return consumer

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object"""
        """Create a worker that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
@@ -561,7 +641,7 @@ class Connection(object):
        return consumer

    def join_consumer_pool(self, callback, pool_name, topic,
                           exchange_name=None):
                           exchange_name=None, ack_on_error=True):
        """Register as a member of a group of consumers for a given topic from
        the specified exchange.

@@ -590,7 +670,7 @@ class Connection(object):


def create_connection(conf, new=True):
    """Create a connection"""
    """Create a connection."""
    return rpc_amqp.create_connection(
        conf, new,
        rpc_amqp.get_connection_pool(conf, Connection))
@@ -30,7 +30,6 @@ from climate.openstack.common import excutils
from climate.openstack.common.gettextutils import _
from climate.openstack.common import importutils
from climate.openstack.common import jsonutils
from climate.openstack.common import processutils as utils
from climate.openstack.common.rpc import common as rpc_common

zmq = importutils.try_import('eventlet.green.zmq')
@@ -85,8 +84,8 @@ matchmaker = None  # memoized matchmaker object


def _serialize(data):
    """
    Serialization wrapper
    """Serialization wrapper.

    We prefer using JSON, but it cannot encode all types.
    Error if a developer passes us bad data.
    """
@@ -98,18 +97,15 @@ def _serialize(data):


def _deserialize(data):
    """
    Deserialization wrapper
    """
    """Deserialization wrapper."""
    LOG.debug(_("Deserializing: %s"), data)
    return jsonutils.loads(data)
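A quick illustration of what these wrappers guarantee (the standard library's `json` stands in for the project's `jsonutils`, which wraps it):

```python
import json

payload = {'method': 'ping', 'args': {'x': 1}}
wire = json.dumps(payload)          # what _serialize produces for JSON-able data
assert json.loads(wire) == payload  # _deserialize is the exact inverse
```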
class ZmqSocket(object):
    """
    A tiny wrapper around ZeroMQ to simplify the send/recv protocol
    and connection management.
    """A tiny wrapper around ZeroMQ.

    Simplifies the send/recv protocol and connection management.
    Can be used as a Context (supports the 'with' statement).
    """

@@ -180,7 +176,7 @@ class ZmqSocket(object):
            return

        # We must unsubscribe, or we'll leak descriptors.
        if len(self.subscriptions) > 0:
        if self.subscriptions:
            for f in self.subscriptions:
                try:
                    self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
@@ -199,29 +195,27 @@ class ZmqSocket(object):
            LOG.error("ZeroMQ socket could not be closed.")
        self.sock = None

    def recv(self):
    def recv(self, **kwargs):
        if not self.can_recv:
            raise RPCException(_("You cannot recv on this socket."))
        return self.sock.recv_multipart()
        return self.sock.recv_multipart(**kwargs)

    def send(self, data):
    def send(self, data, **kwargs):
        if not self.can_send:
            raise RPCException(_("You cannot send on this socket."))
        self.sock.send_multipart(data)
        self.sock.send_multipart(data, **kwargs)
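The `**kwargs` pass-through exists so callers can reach pyzmq options such as `copy=False` (zero-copy receive, which yields `zmq.Frame` objects with a `.bytes` view), which the `ZmqProxy.consume` hunk further down relies on. A hedged sketch against raw pyzmq, with an invented endpoint:

```python
import zmq

ctx = zmq.Context.instance()
sock = ctx.socket(zmq.PULL)
sock.bind('ipc:///tmp/zmq-copy-demo')  # hypothetical endpoint

# With copy=False, recv_multipart() returns zmq.Frame objects rather than
# bytes; frame.bytes materializes a copy only for the parts you inspect.
# frames = sock.recv_multipart(copy=False)   # would block until a message
# topic = frames[1].bytes
sock.close()
```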
class ZmqClient(object):
    """Client for ZMQ sockets."""

    def __init__(self, addr, socket_type=None, bind=False):
        if socket_type is None:
            socket_type = zmq.PUSH
        self.outq = ZmqSocket(addr, socket_type, bind=bind)
    def __init__(self, addr):
        self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)

    def cast(self, msg_id, topic, data, envelope=False):
    def cast(self, msg_id, topic, data, envelope):
        msg_id = msg_id or 0

        if not (envelope or rpc_common._SEND_RPC_ENVELOPE):
        if not envelope:
            self.outq.send(map(bytes,
                           (msg_id, topic, 'cast', _serialize(data))))
            return
@@ -276,12 +270,13 @@ class InternalContext(object):

        try:
            result = proxy.dispatch(
                ctx, data['version'], data['method'], **data['args'])
                ctx, data['version'], data['method'],
                data.get('namespace'), **data['args'])
            return ConsumerBase.normalize_reply(result, ctx.replies)
        except greenlet.GreenletExit:
            # ignore these since they are just from shutdowns
            pass
        except rpc_common.ClientException, e:
        except rpc_common.ClientException as e:
            LOG.debug(_("Expected exception during message handling (%s)") %
                      e._exc_info[1])
            return {'exc':
@@ -295,11 +290,16 @@ class InternalContext(object):
    def reply(self, ctx, proxy,
              msg_id=None, context=None, topic=None, msg=None):
        """Reply to a casted call."""
        # Our real method is curried into msg['args']
        # NOTE(ewindisch): context kwarg exists for Grizzly compat.
        #                  this may be able to be removed earlier than
        #                  'I' if ConsumerBase.process were refactored.
        if type(msg) is list:
            payload = msg[-1]
        else:
            payload = msg

        child_ctx = RpcContext.unmarshal(msg[0])
        response = ConsumerBase.normalize_reply(
            self._get_response(child_ctx, proxy, topic, msg[1]),
            self._get_response(ctx, proxy, topic, payload),
            ctx.replies)

        LOG.debug(_("Sending reply"))
@@ -346,20 +346,18 @@ class ConsumerBase(object):
            return

        proxy.dispatch(ctx, data['version'],
                       data['method'], **data['args'])
                       data['method'], data.get('namespace'), **data['args'])


class ZmqBaseReactor(ConsumerBase):
    """
    A consumer class implementing a
    centralized casting broker (PULL-PUSH)
    for RoundRobin requests.
    """A consumer class implementing a centralized casting broker (PULL-PUSH).

    Used for RoundRobin requests.
    """

    def __init__(self, conf):
        super(ZmqBaseReactor, self).__init__()

        self.mapping = {}
        self.proxies = {}
        self.threads = []
        self.sockets = []
@@ -367,9 +365,8 @@ class ZmqBaseReactor(ConsumerBase):

        self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)

    def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
                 zmq_type_out=None, in_bind=True, out_bind=True,
                 subscribe=None):
    def register(self, proxy, in_addr, zmq_type_in,
                 in_bind=True, subscribe=None):

        LOG.info(_("Registering reactor"))

@@ -385,21 +382,6 @@ class ZmqBaseReactor(ConsumerBase):

        LOG.info(_("In reactor registered"))

        if not out_addr:
            return

        if zmq_type_out not in (zmq.PUSH, zmq.PUB):
            raise RPCException("Bad output socktype")

        # Items push out.
        outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)

        self.mapping[inq] = outq
        self.mapping[outq] = inq
        self.sockets.append(outq)

        LOG.info(_("Out reactor registered"))

    def consume_in_thread(self):
        def _consume(sock):
            LOG.info(_("Consuming socket"))
@@ -424,10 +406,9 @@ class ZmqBaseReactor(ConsumerBase):


class ZmqProxy(ZmqBaseReactor):
    """
    A consumer class implementing a
    topic-based proxy, forwarding to
    IPC sockets.
    """A consumer class implementing a topic-based proxy.

    Forwards to IPC sockets.
    """

    def __init__(self, conf):
@@ -440,11 +421,8 @@ class ZmqProxy(ZmqBaseReactor):
    def consume(self, sock):
        ipc_dir = CONF.rpc_zmq_ipc_dir

        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        topic = data[1]

        LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
        data = sock.recv(copy=False)
        topic = data[1].bytes

        if topic.startswith('fanout~'):
            sock_type = zmq.PUB
@@ -486,9 +464,7 @@ class ZmqProxy(ZmqBaseReactor):

                while(True):
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data)
                    LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
                              {'data': data})
                    out_sock.send(data, copy=False)

            wait_sock_creation = eventlet.event.Event()
            eventlet.spawn(publisher, wait_sock_creation)
@@ -501,37 +477,34 @@ class ZmqProxy(ZmqBaseReactor):

        try:
            self.topic_proxy[topic].put_nowait(data)
            LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
                      {'data': data})
        except eventlet.queue.Full:
            LOG.error(_("Local per-topic backlog buffer full for topic "
                        "%(topic)s. Dropping message.") % {'topic': topic})

    def consume_in_thread(self):
        """Runs the ZmqProxy service"""
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        if not os.path.isdir(ipc_dir):
            try:
                utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
                utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
                              ipc_dir, run_as_root=True)
                utils.execute('chmod', '750', ipc_dir, run_as_root=True)
            except utils.ProcessExecutionError:
            try:
                os.makedirs(ipc_dir)
            except os.error:
                if not os.path.isdir(ipc_dir):
                    with excutils.save_and_reraise_exception():
                        LOG.error(_("Could not create IPC directory %s") %
                                  (ipc_dir, ))

                        LOG.error(_("Required IPC directory does not exist at"
                                    " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL,
                          out_bind=True)
                          zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Permission denied to IPC directory at"
                                " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))
@@ -541,8 +514,9 @@ class ZmqProxy(ZmqBaseReactor):

def unflatten_envelope(packenv):
    """Unflattens the RPC envelope.
    Takes a list and returns a dictionary.
    i.e. [1,2,3,4] => {1: 2, 3: 4}

    Takes a list and returns a dictionary.
    i.e. [1,2,3,4] => {1: 2, 3: 4}
    """
    i = iter(packenv)
    h = {}
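The docstring's `[1, 2, 3, 4] => {1: 2, 3: 4}` pairing is just consecutive items taken two at a time; the hunk truncates the loop, but the behaviour can be reproduced in one line:

```python
packenv = ['version', '2.0', 'serial', '42']

# Pair each even-indexed item with its successor, as unflatten_envelope does.
envelope = dict(zip(packenv[::2], packenv[1::2]))
assert envelope == {'version': '2.0', 'serial': '42'}
```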
@@ -555,10 +529,9 @@ def unflatten_envelope(packenv):


class ZmqReactor(ZmqBaseReactor):
    """
    A consumer class implementing a
    consumer for messages. Can also be
    used as a 1:1 proxy
    """A consumer class implementing a consumer for messages.

    Can also be used as a 1:1 proxy
    """

    def __init__(self, conf):
@@ -568,11 +541,6 @@ class ZmqReactor(ZmqBaseReactor):
        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
        if sock in self.mapping:
            LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
                'data': data})
            self.mapping[sock].send(data)
            return

        proxy = self.proxies[sock]

@@ -685,8 +653,8 @@ def _call(addr, context, topic, msg, timeout=None,
            'method': '-reply',
            'args': {
                'msg_id': msg_id,
                'context': mcontext,
                'topic': reply_topic,
                # TODO(ewindisch): safe to remove mcontext in I.
                'msg': [mcontext, msg]
            }
        }
@@ -745,10 +713,9 @@ def _call(addr, context, topic, msg, timeout=None,

def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """
    Wraps the sending of messages,
    dispatches to the matchmaker and sends
    message to all relevant hosts.
    """Wraps the sending of messages.

    Dispatches to the matchmaker and sends message to all relevant hosts.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
@@ -757,7 +724,7 @@ def _multi_send(method, context, topic, msg, timeout=None,
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if len(queues) == 0:
    if not queues:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
@@ -805,8 +772,8 @@ def fanout_cast(conf, context, topic, msg, **kwargs):


def notify(conf, context, topic, msg, envelope):
    """
    Send notification event.
    """Send notification event.

    Notifications are sent to topic-priority.
    This differs from the AMQP drivers which send to topic.priority.
    """
@@ -840,6 +807,11 @@ def _get_ctxt():

def _get_matchmaker(*args, **kwargs):
    global matchmaker
    if not matchmaker:
        matchmaker = importutils.import_object(
            CONF.rpc_zmq_matchmaker, *args, **kwargs)
        mm = CONF.rpc_zmq_matchmaker
        if mm.endswith('matchmaker.MatchMakerRing'):
            mm = mm.replace('matchmaker', 'matchmaker_ring')
            LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
                       ' %(new)s instead') % dict(
                orig=CONF.rpc_zmq_matchmaker, new=mm))
        matchmaker = importutils.import_object(mm, *args, **kwargs)
    return matchmaker
@@ -19,8 +19,6 @@ return keys for direct exchanges, per (approximate) AMQP parlance.
"""

import contextlib
import itertools
import json

import eventlet
from oslo.config import cfg
@@ -30,15 +28,11 @@ from climate.openstack.common import log as logging


matchmaker_opts = [
    # Matchmaker ring file
    cfg.StrOpt('matchmaker_ringfile',
               default='/etc/nova/matchmaker_ring.json',
               help='Matchmaker ring file (JSON)'),
    cfg.IntOpt('matchmaker_heartbeat_freq',
               default='300',
               default=300,
               help='Heartbeat frequency'),
    cfg.IntOpt('matchmaker_heartbeat_ttl',
               default='600',
               default=600,
               help='Heartbeat time-to-live.'),
]

@@ -54,8 +48,8 @@ class MatchMakerException(Exception):


class Exchange(object):
    """
    Implements lookups.
    """Implements lookups.

    Subclass this to support hashtables, dns, etc.
    """
    def __init__(self):
@@ -66,9 +60,7 @@ class Exchange(object):


class Binding(object):
    """
    A binding on which to perform a lookup.
    """
    """A binding on which to perform a lookup."""
    def __init__(self):
        pass

@@ -77,10 +69,10 @@ class Binding(object):


class MatchMakerBase(object):
    """
    Match Maker Base Class.
    Build off HeartbeatMatchMakerBase if building a
    heartbeat-capable MatchMaker.
    """Match Maker Base Class.

    Build off HeartbeatMatchMakerBase if building a heartbeat-capable
    MatchMaker.
    """
    def __init__(self):
        # Array of tuples. Index [2] toggles negation, [3] is last-if-true
@@ -90,58 +82,47 @@ class MatchMakerBase(object):
                          'registration or heartbeat.')

    def register(self, key, host):
        """
        Register a host on a backend.
        """Register a host on a backend.

        Heartbeats, if applicable, may keepalive registration.
        """
        pass

    def ack_alive(self, key, host):
        """
        Acknowledge that a key.host is alive.
        Used internally for updating heartbeats,
        but may also be used publicly to acknowledge
        a system is alive (i.e. rpc message successfully
        sent to host)
        """Acknowledge that a key.host is alive.

        Used internally for updating heartbeats, but may also be used
        publicly to acknowledge a system is alive (i.e. rpc message
        successfully sent to host)
        """
        pass

    def is_alive(self, topic, host):
        """
        Checks if a host is alive.
        """
        """Checks if a host is alive."""
        pass

    def expire(self, topic, host):
        """
        Explicitly expire a host's registration.
        """
        """Explicitly expire a host's registration."""
        pass

    def send_heartbeats(self):
        """
        Send all heartbeats.
        """Send all heartbeats.

        Use start_heartbeat to spawn a heartbeat greenthread,
        which loops this method.
        """
        pass

    def unregister(self, key, host):
        """
        Unregister a topic.
        """
        """Unregister a topic."""
        pass

    def start_heartbeat(self):
        """
        Spawn heartbeat greenthread.
        """
        """Spawn heartbeat greenthread."""
        pass

    def stop_heartbeat(self):
        """
        Destroys the heartbeat greenthread.
        """
        """Destroys the heartbeat greenthread."""
        pass

    def add_binding(self, binding, rule, last=True):
@@ -168,10 +149,10 @@ class MatchMakerBase(object):


class HeartbeatMatchMakerBase(MatchMakerBase):
    """
    Base for a heart-beat capable MatchMaker.
    Provides common methods for registering,
    unregistering, and maintaining heartbeats.
    """Base for a heart-beat capable MatchMaker.

    Provides common methods for registering, unregistering, and maintaining
    heartbeats.
    """
    def __init__(self):
        self.hosts = set()
@@ -181,8 +162,8 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
        super(HeartbeatMatchMakerBase, self).__init__()

    def send_heartbeats(self):
        """
        Send all heartbeats.
        """Send all heartbeats.

        Use start_heartbeat to spawn a heartbeat greenthread,
        which loops this method.
        """
@@ -190,32 +171,31 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
            self.ack_alive(key, host)

    def ack_alive(self, key, host):
        """
        Acknowledge that a host.topic is alive.
        Used internally for updating heartbeats,
        but may also be used publicly to acknowledge
        a system is alive (i.e. rpc message successfully
        sent to host)
        """Acknowledge that a host.topic is alive.

        Used internally for updating heartbeats, but may also be used
        publicly to acknowledge a system is alive (i.e. rpc message
        successfully sent to host)
        """
        raise NotImplementedError("Must implement ack_alive")

    def backend_register(self, key, host):
        """
        Implements registration logic.
        """Implements registration logic.

        Called by register(self, key, host)
        """
        raise NotImplementedError("Must implement backend_register")

    def backend_unregister(self, key, key_host):
        """
        Implements de-registration logic.
        """Implements de-registration logic.

        Called by unregister(self, key, host)
        """
        raise NotImplementedError("Must implement backend_unregister")

    def register(self, key, host):
        """
        Register a host on a backend.
        """Register a host on a backend.

        Heartbeats, if applicable, may keepalive registration.
        """
        self.hosts.add(host)
@@ -227,25 +207,24 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
        self.ack_alive(key, host)

    def unregister(self, key, host):
        """
        Unregister a topic.
        """
        """Unregister a topic."""
        if (key, host) in self.host_topic:
            del self.host_topic[(key, host)]

        self.hosts.discard(host)
        self.backend_unregister(key, '.'.join((key, host)))

        LOG.info(_("Matchmaker unregistered: %s, %s" % (key, host)))
        LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
                 {'key': key, 'host': host})

    def start_heartbeat(self):
        """
        Implementation of MatchMakerBase.start_heartbeat
        """Implementation of MatchMakerBase.start_heartbeat.

        Launches greenthread looping send_heartbeats(),
        yielding for CONF.matchmaker_heartbeat_freq seconds
        between iterations.
        """
        if len(self.hosts) == 0:
        if not self.hosts:
            raise MatchMakerException(
                _("Register before starting heartbeat."))

@@ -257,16 +236,14 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
        self._heart = eventlet.spawn(do_heartbeat)

    def stop_heartbeat(self):
        """
        Destroys the heartbeat greenthread.
        """
        """Destroys the heartbeat greenthread."""
        if self._heart:
            self._heart.kill()


class DirectBinding(Binding):
    """
    Specifies a host in the key via a '.' character
    """Specifies a host in the key via a '.' character.

    Although dots are used in the key, the behavior here is
    that it maps directly to a host, thus direct.
    """
@@ -277,8 +254,8 @@ class DirectBinding(Binding):


class TopicBinding(Binding):
    """
    Where a 'bare' key without dots.
    """Where a 'bare' key without dots.

    AMQP generally considers topic exchanges to be those *with* dots,
    but we deviate here in terminology as the behavior here matches
    that of a topic exchange (whereas where there are dots, behavior
@@ -304,67 +281,6 @@ class StubExchange(Exchange):
        return [(key, None)]


class RingExchange(Exchange):
    """
    Match Maker where hosts are loaded from a static file containing
    a hashmap (JSON formatted).

    __init__ takes optional ring dictionary argument, otherwise
    loads the ringfile from CONF.matchmaker_ringfile.
    """
    def __init__(self, ring=None):
        super(RingExchange, self).__init__()

        if ring:
            self.ring = ring
        else:
            fh = open(CONF.matchmaker_ringfile, 'r')
            self.ring = json.load(fh)
            fh.close()

        self.ring0 = {}
        for k in self.ring.keys():
            self.ring0[k] = itertools.cycle(self.ring[k])

    def _ring_has(self, key):
        if key in self.ring0:
            return True
        return False


class RoundRobinRingExchange(RingExchange):
    """A Topic Exchange based on a hashmap."""
    def __init__(self, ring=None):
        super(RoundRobinRingExchange, self).__init__(ring)

    def run(self, key):
        if not self._ring_has(key):
            LOG.warn(
                _("No key defining hosts for topic '%s', "
                  "see ringfile") % (key, )
            )
            return []
        host = next(self.ring0[key])
        return [(key + '.' + host, host)]


class FanoutRingExchange(RingExchange):
    """Fanout Exchange based on a hashmap."""
    def __init__(self, ring=None):
        super(FanoutRingExchange, self).__init__(ring)

    def run(self, key):
        # Assume starts with "fanout~", strip it for lookup.
        nkey = key.split('fanout~')[1:][0]
        if not self._ring_has(nkey):
            LOG.warn(
                _("No key defining hosts for topic '%s', "
                  "see ringfile") % (nkey, )
            )
            return []
        return map(lambda x: (key + '.' + x, x), self.ring[nkey])


class LocalhostExchange(Exchange):
    """Exchange where all direct topics are local."""
    def __init__(self, host='localhost'):
@@ -376,8 +292,8 @@ class LocalhostExchange(Exchange):


class DirectExchange(Exchange):
    """
    Exchange where all topic keys are split, sending to second half.
    """Exchange where all topic keys are split, sending to second half.

    i.e. "compute.host" sends a message to "compute.host" running on "host"
    """
    def __init__(self):
@@ -388,20 +304,9 @@ class DirectExchange(Exchange):
        return [(key, e)]


class MatchMakerRing(MatchMakerBase):
    """
    Match Maker where hosts are loaded from a static hashmap.
    """
    def __init__(self, ring=None):
        super(MatchMakerRing, self).__init__()
        self.add_binding(FanoutBinding(), FanoutRingExchange(ring))
        self.add_binding(DirectBinding(), DirectExchange())
        self.add_binding(TopicBinding(), RoundRobinRingExchange(ring))


class MatchMakerLocalhost(MatchMakerBase):
    """
    Match Maker where all bare topics resolve to localhost.
    """Match Maker where all bare topics resolve to localhost.

    Useful for testing.
    """
    def __init__(self, host='localhost'):
@@ -412,13 +317,13 @@ class MatchMakerLocalhost(MatchMakerBase):


class MatchMakerStub(MatchMakerBase):
    """
    Match Maker where topics are untouched.
    """Match Maker where topics are untouched.

    Useful for testing, or for AMQP/brokered queues.
    Will not work where knowledge of hosts is known (i.e. zeromq)
    """
    def __init__(self):
        super(MatchMakerLocalhost, self).__init__()
        super(MatchMakerStub, self).__init__()

        self.add_binding(FanoutBinding(), StubExchange())
        self.add_binding(DirectBinding(), StubExchange())
@@ -55,8 +55,8 @@ class RedisExchange(mm_common.Exchange):


class RedisTopicExchange(RedisExchange):
    """
    Exchange where all topic keys are split, sending to second half.
    """Exchange where all topic keys are split, sending to second half.

    i.e. "compute.host" sends a message to "compute" running on "host"
    """
    def run(self, topic):
@@ -77,9 +77,7 @@ class RedisTopicExchange(RedisExchange):


class RedisFanoutExchange(RedisExchange):
    """
    Return a list of all hosts.
    """
    """Return a list of all hosts."""
    def run(self, topic):
        topic = topic.split('~', 1)[1]
        hosts = self.redis.smembers(topic)
@@ -90,9 +88,7 @@ class RedisFanoutExchange(RedisExchange):


class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
    """
    MatchMaker registering and looking-up hosts with a Redis server.
    """
    """MatchMaker registering and looking-up hosts with a Redis server."""
    def __init__(self):
        super(MatchMakerRedis, self).__init__()

110 climate/openstack/common/rpc/matchmaker_ring.py Normal file
@@ -0,0 +1,110 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011-2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""

import itertools
import json

from oslo.config import cfg

from climate.openstack.common.gettextutils import _
from climate.openstack.common import log as logging
from climate.openstack.common.rpc import matchmaker as mm


matchmaker_opts = [
    # Matchmaker ring file
    cfg.StrOpt('ringfile',
               deprecated_name='matchmaker_ringfile',
               deprecated_group='DEFAULT',
               default='/etc/oslo/matchmaker_ring.json',
               help='Matchmaker ring file (JSON)'),
]

CONF = cfg.CONF
CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
LOG = logging.getLogger(__name__)


class RingExchange(mm.Exchange):
    """Match Maker where hosts are loaded from a static JSON formatted file.

    __init__ takes optional ring dictionary argument, otherwise
    loads the ringfile from CONF.matchmaker_ring.ringfile.
    """
    def __init__(self, ring=None):
        super(RingExchange, self).__init__()

        if ring:
            self.ring = ring
        else:
            fh = open(CONF.matchmaker_ring.ringfile, 'r')
            self.ring = json.load(fh)
            fh.close()

        self.ring0 = {}
        for k in self.ring.keys():
            self.ring0[k] = itertools.cycle(self.ring[k])

    def _ring_has(self, key):
        if key in self.ring0:
            return True
        return False


class RoundRobinRingExchange(RingExchange):
    """A Topic Exchange based on a hashmap."""
    def __init__(self, ring=None):
        super(RoundRobinRingExchange, self).__init__(ring)

    def run(self, key):
        if not self._ring_has(key):
            LOG.warn(
                _("No key defining hosts for topic '%s', "
                  "see ringfile") % (key, )
            )
            return []
        host = next(self.ring0[key])
        return [(key + '.' + host, host)]


class FanoutRingExchange(RingExchange):
    """Fanout Exchange based on a hashmap."""
    def __init__(self, ring=None):
        super(FanoutRingExchange, self).__init__(ring)

    def run(self, key):
        # Assume starts with "fanout~", strip it for lookup.
        nkey = key.split('fanout~')[1:][0]
        if not self._ring_has(nkey):
            LOG.warn(
                _("No key defining hosts for topic '%s', "
                  "see ringfile") % (nkey, )
            )
            return []
        return map(lambda x: (key + '.' + x, x), self.ring[nkey])


class MatchMakerRing(mm.MatchMakerBase):
    """Match Maker where hosts are loaded from a static hashmap."""
    def __init__(self, ring=None):
        super(MatchMakerRing, self).__init__()
        self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring))
        self.add_binding(mm.DirectBinding(), mm.DirectExchange())
        self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring))
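To see how the ring bindings resolve, here is a small self-contained sketch mirroring the lookup logic above without the oslo.config plumbing (topics and hosts are invented):

```python
import itertools

# A ring file maps bare topics to candidate hosts.
ring = {'compute': ['host-a', 'host-b']}
ring0 = dict((k, itertools.cycle(v)) for k, v in ring.items())

# A topic lookup round-robins across the hosts...
host = next(ring0['compute'])
assert ('compute.' + host, host) == ('compute.host-a', 'host-a')

# ...while a fanout lookup returns every host at once.
fanout = [('fanout~compute.' + h, h) for h in ring['compute']]
assert fanout == [('fanout~compute.host-a', 'host-a'),
                  ('fanout~compute.host-b', 'host-b')]
```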
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Red Hat, Inc.
# Copyright 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -23,6 +23,8 @@ For more information about rpc API version numbers, see:


from climate.openstack.common import rpc
from climate.openstack.common.rpc import common as rpc_common
from climate.openstack.common.rpc import serializer as rpc_serializer


class RpcProxy(object):
@@ -34,16 +36,28 @@ class RpcProxy(object):
    rpc API.
    """

    def __init__(self, topic, default_version):
    # The default namespace, which can be overridden in a subclass.
    RPC_API_NAMESPACE = None

    def __init__(self, topic, default_version, version_cap=None,
                 serializer=None):
        """Initialize an RpcProxy.

        :param topic: The topic to use for all messages.
        :param default_version: The default API version to request in all
               outgoing messages.  This can be overridden on a per-message
               basis.
        :param version_cap: Optionally cap the maximum version used for sent
               messages.
        :param serializer: Optionally (de-)serialize entities with a
               provided helper.
        """
        self.topic = topic
        self.default_version = default_version
        self.version_cap = version_cap
        if serializer is None:
            serializer = rpc_serializer.NoOpSerializer()
        self.serializer = serializer
        super(RpcProxy, self).__init__()

    def _set_version(self, msg, vers):
@@ -52,15 +66,44 @@ class RpcProxy(object):
        :param msg: The message having a version added to it.
        :param vers: The version number to add to the message.
        """
        msg['version'] = vers if vers else self.default_version
        v = vers if vers else self.default_version
        if (self.version_cap and not
                rpc_common.version_is_compatible(self.version_cap, v)):
            raise rpc_common.RpcVersionCapError(version=self.version_cap)
        msg['version'] = v

    def _get_topic(self, topic):
        """Return the topic to use for a message."""
        return topic if topic else self.topic

    def can_send_version(self, version):
        """Check to see if a version is compatible with the version cap."""
        return (not self.version_cap or
                rpc_common.version_is_compatible(self.version_cap, version))
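The cap works the same way in both helpers. A sketch of the comparison rule, assuming the usual oslo major.minor semantics that `rpc_common.version_is_compatible` implements:

```python
def version_is_compatible(cap, version):
    # Sketch of the usual oslo rule: same major version, and the requested
    # minor version must not exceed the capped minor version.
    cap_major, cap_minor = map(int, cap.split('.'))
    v_major, v_minor = map(int, version.split('.'))
    return v_major == cap_major and v_minor <= cap_minor

# With version_cap='1.5', a 1.4 message may be sent, but a 1.8 one makes
# _set_version raise rpc_common.RpcVersionCapError before hitting the wire.
assert version_is_compatible('1.5', '1.4')
assert not version_is_compatible('1.5', '1.8')
```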
    @staticmethod
    def make_msg(method, **kwargs):
        return {'method': method, 'args': kwargs}
    def make_namespaced_msg(method, namespace, **kwargs):
        return {'method': method, 'namespace': namespace, 'args': kwargs}

    def make_msg(self, method, **kwargs):
        return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
                                        **kwargs)
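Concretely, the envelope now always carries a namespace key. For a subclass that sets `RPC_API_NAMESPACE = 'baseapi'` (an invented value), `make_msg('ping', payload=1)` would produce:

```python
msg = {'method': 'ping', 'namespace': 'baseapi', 'args': {'payload': 1}}

# With the default RPC_API_NAMESPACE = None the key is still present, which
# lets dispatchers route namespaced and plain APIs uniformly.
assert sorted(msg) == ['args', 'method', 'namespace']
```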
    def _serialize_msg_args(self, context, kwargs):
        """Helper method called to serialize message arguments.

        This calls our serializer on each argument, returning a new
        set of args that have been serialized.

        :param context: The request context
        :param kwargs: The arguments to serialize
        :returns: A new set of serialized arguments
        """
        new_kwargs = dict()
        for argname, arg in kwargs.iteritems():
            new_kwargs[argname] = self.serializer.serialize_entity(context,
                                                                   arg)
        return new_kwargs

    def call(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.call() a remote method.
@@ -68,16 +111,23 @@ class RpcProxy(object):
        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response.  If no timeout is specified, a default timeout will be
               used that is usually sufficient.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: The return value from the remote method.
        """
        self._set_version(msg, version)
        return rpc.call(context, self._get_topic(topic), msg, timeout)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        real_topic = self._get_topic(topic)
        try:
            result = rpc.call(context, real_topic, msg, timeout)
            return self.serializer.deserialize_entity(context, result)
        except rpc.common.Timeout as exc:
            raise rpc.common.Timeout(
                exc.info, real_topic, msg.get('method'))

    def multicall(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.multicall() a remote method.
@@ -85,17 +135,24 @@ class RpcProxy(object):
        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response.  If no timeout is specified, a default timeout will be
               used that is usually sufficient.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: An iterator that lets you process each of the returned values
               from the remote method as they arrive.
        """
        self._set_version(msg, version)
        return rpc.multicall(context, self._get_topic(topic), msg, timeout)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        real_topic = self._get_topic(topic)
        try:
            result = rpc.multicall(context, real_topic, msg, timeout)
            return self.serializer.deserialize_entity(context, result)
        except rpc.common.Timeout as exc:
            raise rpc.common.Timeout(
                exc.info, real_topic, msg.get('method'))

    def cast(self, context, msg, topic=None, version=None):
        """rpc.cast() a remote method.
@@ -110,6 +167,7 @@ class RpcProxy(object):
               remote method.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.cast(context, self._get_topic(topic), msg)

    def fanout_cast(self, context, msg, topic=None, version=None):
@@ -125,6 +183,7 @@ class RpcProxy(object):
               from the remote method.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.fanout_cast(context, self._get_topic(topic), msg)

    def cast_to_server(self, context, server_params, msg, topic=None,
@@ -143,6 +202,7 @@ class RpcProxy(object):
               return values.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)

    def fanout_cast_to_server(self, context, server_params, msg, topic=None,
@@ -161,5 +221,6 @@ class RpcProxy(object):
               return values.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.fanout_cast_to_server(context, server_params,
                                  self._get_topic(topic), msg)

52 climate/openstack/common/rpc/serializer.py Normal file
@@ -0,0 +1,52 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Provides the definition of an RPC serialization handler"""

import abc


class Serializer(object):
    """Generic (de-)serialization definition base class."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def serialize_entity(self, context, entity):
        """Serialize something to primitive form.

        :param context: Security context
        :param entity: Entity to be serialized
        :returns: Serialized form of entity
        """
        pass

    @abc.abstractmethod
    def deserialize_entity(self, context, entity):
        """Deserialize something from primitive form.

        :param context: Security context
        :param entity: Primitive to be deserialized
        :returns: Deserialized form of entity
        """
        pass


class NoOpSerializer(Serializer):
    """A serializer that does nothing."""

    def serialize_entity(self, context, entity):
        return entity

    def deserialize_entity(self, context, entity):
        return entity
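A subclass only has to implement the two hooks. As an illustrative sketch against the `Serializer` base above (not part of the patch), a serializer that primitives datetimes for transport, in the same Python 2 idiom:

```python
import datetime


class DatetimeSerializer(Serializer):
    """Hypothetical example: primitive datetimes for transport."""

    def serialize_entity(self, context, entity):
        if isinstance(entity, datetime.datetime):
            return entity.strftime('%Y-%m-%dT%H:%M:%S.%f')
        return entity

    def deserialize_entity(self, context, entity):
        try:
            return datetime.datetime.strptime(entity, '%Y-%m-%dT%H:%M:%S.%f')
        except (TypeError, ValueError):
            return entity

# Passing serializer=DatetimeSerializer() to RpcProxy would then round-trip
# datetime arguments transparently: _serialize_msg_args on the way out,
# deserialize_entity on call/multicall results.
```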
@@ -30,7 +30,8 @@ LOG = logging.getLogger(__name__)
class Service(service.Service):
    """Service object for binaries running on hosts.

    A service enables rpc by listening to queues based on topic and host."""
    A service enables rpc by listening to queues based on topic and host.
    """
    def __init__(self, host, topic, manager=None):
        super(Service, self).__init__()
        self.host = host

41 climate/openstack/common/rpc/zmq_receiver.py Executable file
@@ -0,0 +1,41 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import eventlet
eventlet.monkey_patch()

import contextlib
import sys

from oslo.config import cfg

from climate.openstack.common import log as logging
from climate.openstack.common import rpc
from climate.openstack.common.rpc import impl_zmq

CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)


def main():
    CONF(sys.argv[1:], project='oslo')
    logging.setup("oslo")

    with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
        reactor.consume_in_thread()
        reactor.wait()

@@ -52,7 +52,7 @@ class Launcher(object):

        """
        self._services = threadgroup.ThreadGroup()
        eventlet_backdoor.initialize_if_enabled()
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

    @staticmethod
    def run_service(service):
@@ -72,6 +72,7 @@ class Launcher(object):
        :returns: None

        """
        service.backdoor_port = self.backdoor_port
        self._services.add_thread(self.run_service, service)

    def stop(self):
@@ -270,7 +271,7 @@ class ProcessLauncher(object):
        return wrap

    def wait(self):
        """Loop waiting on children to die and respawning as necessary"""
        """Loop waiting on children to die and respawning as necessary."""

        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

@@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__)


def _thread_done(gt, *args, **kwargs):
    """ Callback function to be passed to GreenThread.link() when we spawn()
    """Callback function to be passed to GreenThread.link() when we spawn()
    Calls the :class:`ThreadGroup` to notify it.

    """
@@ -34,7 +34,7 @@ def _thread_done(gt, *args, **kwargs):


class Thread(object):
    """ Wrapper around a greenthread, that holds a reference to the
    """Wrapper around a greenthread, that holds a reference to the
    :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
    it has done so it can be removed from the threads list.
    """
@@ -50,7 +50,7 @@ class Thread(object):


class ThreadGroup(object):
    """ The point of the ThreadGroup class is to:
    """The point of the ThreadGroup class is to:

    * keep track of timers and greenthreads (making it easier to stop them
      when need be).
@@ -61,9 +61,16 @@ class ThreadGroup(object):
        self.threads = []
        self.timers = []

    def add_dynamic_timer(self, callback, initial_delay=None,
                          periodic_interval_max=None, *args, **kwargs):
        timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
        timer.start(initial_delay=initial_delay,
                    periodic_interval_max=periodic_interval_max)
        self.timers.append(timer)

    def add_timer(self, interval, callback, initial_delay=None,
                  *args, **kwargs):
        pulse = loopingcall.LoopingCall(callback, *args, **kwargs)
        pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
        pulse.start(interval=interval,
                    initial_delay=initial_delay)
        self.timers.append(pulse)
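Usage is unchanged for callers. A hedged sketch of how a service might drive the two timer flavours (the callbacks are invented, so the calls are left as comments):

```python
# tg = threadgroup.ThreadGroup()
#
# Fixed cadence: run report_state() every 10 seconds after a 5 second
# initial delay (FixedIntervalLoopingCall under the hood):
# tg.add_timer(10, report_state, initial_delay=5)
#
# Adaptive cadence: periodic_tasks() returns how long to idle before its
# next run, bounded above by periodic_interval_max:
# tg.add_dynamic_timer(periodic_tasks, periodic_interval_max=60)
```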
@@ -23,6 +23,7 @@ import calendar
import datetime

import iso8601
import six


# ISO 8601 extended time format with microseconds
@@ -32,7 +33,7 @@ PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND


def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format"""
    """Stringify time in ISO 8601 format."""
    if not at:
        at = utcnow()
    st = at.strftime(_ISO8601_TIME_FORMAT
@@ -44,7 +45,7 @@ def isotime(at=None, subsecond=False):


def parse_isotime(timestr):
    """Parse time from ISO 8601 format"""
    """Parse time from ISO 8601 format."""
    try:
        return iso8601.parse_date(timestr)
    except iso8601.ParseError as e:
@@ -66,7 +67,7 @@ def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):


def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC naive object"""
    """Normalize time in arbitrary timezone to UTC naive object."""
    offset = timestamp.utcoffset()
    if offset is None:
        return timestamp
@@ -75,14 +76,14 @@ def normalize_time(timestamp):

def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    if isinstance(before, basestring):
    if isinstance(before, six.string_types):
        before = parse_strtime(before).replace(tzinfo=None)
    return utcnow() - before > datetime.timedelta(seconds=seconds)


def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    if isinstance(after, basestring):
    if isinstance(after, six.string_types):
        after = parse_strtime(after).replace(tzinfo=None)
    return after - utcnow() > datetime.timedelta(seconds=seconds)

@@ -103,7 +104,7 @@ def utcnow():


def iso8601_from_timestamp(timestamp):
    """Returns an iso8601 formatted date from timestamp"""
    """Returns an iso8601 formatted date from timestamp."""
    return isotime(datetime.datetime.utcfromtimestamp(timestamp))
@ -111,9 +112,9 @@ utcnow.override_time = None
|
||||
|
||||
|
||||
def set_time_override(override_time=datetime.datetime.utcnow()):
|
||||
"""
|
||||
Override utils.utcnow to return a constant time or a list thereof,
|
||||
one at a time.
|
||||
"""Overrides utils.utcnow.
|
||||
|
||||
Make it return a constant time or a list thereof, one at a time.
|
||||
"""
|
||||
utcnow.override_time = override_time
|
||||
|
||||
@ -141,7 +142,8 @@ def clear_time_override():
|
||||
def marshall_now(now=None):
|
||||
"""Make an rpc-safe datetime with microseconds.
|
||||
|
||||
Note: tzinfo is stripped, but not required for relative times."""
|
||||
Note: tzinfo is stripped, but not required for relative times.
|
||||
"""
|
||||
if not now:
|
||||
now = utcnow()
|
||||
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
|
||||
@ -161,7 +163,8 @@ def unmarshall_time(tyme):
|
||||
|
||||
|
||||
def delta_seconds(before, after):
|
||||
"""
|
||||
"""Return the difference between two timing objects.
|
||||
|
||||
Compute the difference in seconds between two date, time, or
|
||||
datetime objects (as a float, to microsecond resolution).
|
||||
"""
|
||||
@ -174,8 +177,7 @@ def delta_seconds(before, after):
|
||||
|
||||
|
||||
def is_soon(dt, window):
|
||||
"""
|
||||
Determines if time is going to happen in the next window seconds.
|
||||
"""Determines if time is going to happen in the next window seconds.
|
||||
|
||||
:params dt: the time
|
||||
:params window: minimum seconds to remain to consider the time not soon
|
||||
|
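A short sketch of what the basestring to six.string_types switch buys: string timestamps now pass the isinstance() check on Python 3 as well as Python 2. Illustrative only; it uses set_time_override and clear_time_override from the hunks above, and assumes the module's PERFECT_TIME_FORMAT is the subsecond ISO 8601 form ('%Y-%m-%dT%H:%M:%S.%f'), as in the upstream oslo code.

    import datetime

    from climate.openstack.common import timeutils

    # Freeze "now" so the comparison below is deterministic.
    timeutils.set_time_override(datetime.datetime(2013, 1, 1))
    # parse_strtime() is reached because the string passes the
    # six.string_types check on both Python 2 and 3.
    print(timeutils.is_older_than('2012-12-31T23:00:00.000000', 60))  # True
    timeutils.clear_time_override()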
climate/openstack/common/uuidutils.py
@@ -1,4 +1,6 @@
-# Copyright 2012 Red Hat, Inc.
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Intel Corporation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,17 +15,25 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from climate.openstack.common.gettextutils import _
-from climate.openstack.common import log as logging
-from climate.openstack.common.notifier import rpc_notifier
+"""
+UUID related utilities and helper functions.
+"""

-LOG = logging.getLogger(__name__)
+import uuid


-def notify(context, message):
-    """Deprecated in Grizzly. Please use rpc_notifier instead."""
-
-    LOG.deprecated(_("The rabbit_notifier is now deprecated."
-                     " Please use rpc_notifier instead."))
-    rpc_notifier.notify(context, message)
+def generate_uuid():
+    return str(uuid.uuid4())
+
+
+def is_uuid_like(val):
+    """Returns validation of a value as a UUID.
+
+    For our purposes, a UUID is a canonical form string:
+    aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
+
+    """
+    try:
+        return str(uuid.UUID(val)) == val
+    except (TypeError, ValueError, AttributeError):
+        return False
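The new helpers in use; behaviour follows directly from the added code above. Illustrative only, not part of the commit:

    from climate.openstack.common import uuidutils

    val = uuidutils.generate_uuid()
    print(uuidutils.is_uuid_like(val))           # True: canonical form round-trips
    print(uuidutils.is_uuid_like('not-a-uuid'))  # False: uuid.UUID() raises ValueError
    print(uuidutils.is_uuid_like(None))          # False: the TypeError is caught too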
openstack-common.conf
@@ -1,3 +1,3 @@
 [DEFAULT]
-modules=rpc,service,setup,gettextutils,importutils,local,eventlet_backdoor,log,jsonutils,timeutils,notifier,threadgroup,loopingcall,network_utils,excutils
+modules=rpc,service,setup,gettextutils,importutils,local,eventlet_backdoor,log,jsonutils,policy,timeutils,notifier,threadgroup,loopingcall,network_utils,excutils
 base=climate
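For reference, this manifest is plain INI, so the effect of the change (the policy module joining the sync list) can be checked with the standard library. A sketch, not part of oslo's own tooling:

    try:
        import configparser                  # Python 3
    except ImportError:
        import ConfigParser as configparser  # Python 2

    cp = configparser.ConfigParser()
    cp.read('openstack-common.conf')
    modules = cp.get('DEFAULT', 'modules').split(',')
    print('policy' in modules)        # True after this change
    print(cp.get('DEFAULT', 'base'))  # climate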