Refreshed Oslo Code

Changed auth_tok to auth_token.
Added a dependency on the oslo.config package.
Removed reddwarf.openstack.common.cfg.

Change-Id: I61af7a54d09f3d1d6b6efe470ea6308be8ace2e7
Fixes: bug #1175757

parent 08a57d7035
commit 770c0fd83b
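Both changes follow one mechanical pattern across every file below: the config import moves from the incubated openstack-common copy to the released oslo.config library, and every auth_tok reference becomes auth_token. A minimal sketch of the pattern (the forward_token helper is illustrative, not code from this tree):

    # Before: config came from the incubated copy
    #   from reddwarf.openstack.common import cfg
    # After: config comes from the oslo.config package
    from oslo.config import cfg

    CONF = cfg.CONF

    def forward_token(context, client):
        # Illustrative helper: callers that used to read context.auth_tok
        # now read the renamed context.auth_token attribute.
        client.client.auth_token = context.auth_token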
@@ -39,7 +39,7 @@ if os.path.exists(os.path.join(possible_topdir, 'reddwarf', '__init__.py')):
 from reddwarf.common import cfg
 from reddwarf.common import rpc
 from reddwarf.guestagent import dbaas
-from reddwarf.openstack.common import cfg as openstack_cfg
+from oslo.config import cfg as openstack_cfg
 from reddwarf.openstack.common import log as logging
 from reddwarf.openstack.common import service
 from reddwarf.db import get_db_api
@@ -62,4 +62,4 @@ if __name__ == '__main__':
     except RuntimeError as error:
         import traceback
         print traceback.format_exc()
-        sys.exit("ERROR: %s" % error)
+    sys.exit("ERROR: %s" % error)
@@ -38,7 +38,7 @@ if os.path.exists(os.path.join(possible_topdir, 'reddwarf', '__init__.py')):

 from reddwarf.common import cfg
 from reddwarf.common import rpc
-from reddwarf.openstack.common import cfg as openstack_cfg
+from oslo.config import cfg as openstack_cfg
 from reddwarf.openstack.common import log as logging
 from reddwarf.openstack.common import service
 from reddwarf.db import get_db_api
@@ -34,8 +34,7 @@ if os.path.exists(os.path.join(possible_topdir, 'reddwarf', '__init__.py')):
     sys.path.insert(0, possible_topdir)

 from reddwarf.common import cfg
-from reddwarf.openstack.common import cfg as openstack_cfg
-from reddwarf.openstack.common import service
+from oslo.config import cfg as openstack_cfg
 from reddwarf.openstack.common import log as logging
 from reddwarf.common import wsgi
 from reddwarf.db import get_db_api
@@ -38,7 +38,7 @@ if os.path.exists(os.path.join(possible_topdir, 'reddwarf', '__init__.py')):

 from reddwarf.common import cfg
 from reddwarf.common import rpc
-from reddwarf.openstack.common import cfg as openstack_cfg
+from oslo.config import cfg as openstack_cfg
 from reddwarf.openstack.common import log as logging
 from reddwarf.openstack.common import service
 from reddwarf.db import get_db_api
@@ -2,6 +2,5 @@

 # The list of modules to copy from openstack-common
-modules=middleware,notifier,rpc,cfg,context,eventlet_backdoor,exception,excutils,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,loopingcall,network_utils,pastedeploy,periodic_task,policy,processutils,service,setup,testutils,threadgroup,timeutils,utils,uuidutils,version,wsgi

 # The base module to hold the copy of openstack.common
 base=reddwarf
@@ -16,7 +16,7 @@
 # under the License.
 """Routines for configuring Reddwarf."""

-from reddwarf.openstack.common import cfg
+from oslo.config import cfg

 common_opts = [
     cfg.StrOpt('sql_connection',
@@ -49,8 +49,7 @@ class ReddwarfContext(context.RequestContext):
     def to_dict(self):
         parent_dict = super(ReddwarfContext, self).to_dict()
         parent_dict.update({'limit': self.limit,
-                            'marker': self.marker,
-                            'auth_tok': parent_dict['auth_token'],
+                            'marker': self.marker
                             })
         return parent_dict

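With the duplicate 'auth_tok' entry gone, the serialized context carries the token under the single 'auth_token' key. A rough sketch of the resulting dictionary, with illustrative values:

    # Keys come from RequestContext.to_dict() plus the Reddwarf-specific
    # 'limit' and 'marker' entries added above; the values are made up.
    context_dict = {
        'auth_token': 'abc123',
        'user': 'demo',
        'tenant': 'tenant-1',
        'is_admin': False,
        'request_id': 'req-...',
        'limit': 20,
        'marker': None,
    }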
@@ -37,9 +37,9 @@ def create_guest_client(context, id):


 def create_nova_client(context):
-    client = Client(context.user, context.auth_tok, project_id=context.tenant,
-                    auth_url=PROXY_AUTH_URL)
-    client.client.auth_token = context.auth_tok
+    client = Client(context.user, context.auth_token,
+                    project_id=context.tenant, auth_url=PROXY_AUTH_URL)
+    client.client.auth_token = context.auth_token
     client.client.management_url = "%s/%s/" % (COMPUTE_URL, context.tenant)

     return client
@@ -48,9 +48,9 @@ def create_nova_client(context):
 def create_nova_volume_client(context):
     # Quite annoying but due to a paste config loading bug.
     # TODO(hub-cap): talk to the openstack-common people about this
-    client = Client(context.user, context.auth_tok,
+    client = Client(context.user, context.auth_token,
                     project_id=context.tenant, auth_url=PROXY_AUTH_URL)
-    client.client.auth_token = context.auth_tok
+    client.client.auth_token = context.auth_token
     client.client.management_url = "%s/%s/" % (VOLUME_URL, context.tenant)

     return client
@@ -307,7 +307,9 @@ class Resource(openstack_wsgi.Resource):

         except exception.ReddwarfError as reddwarf_error:
+            LOG.debug(traceback.format_exc())
             LOG.debug("Caught Reddwarf Error %s", reddwarf_error)
             httpError = self._get_http_error(reddwarf_error)
             LOG.debug("Mapped Error to %s", httpError)
             return Fault(httpError(str(reddwarf_error), request=request))
         except webob.exc.HTTPError as http_error:
+            LOG.debug(traceback.format_exc())
@@ -600,7 +602,7 @@ class ContextMiddleware(openstack_wsgi.Middleware):

     def process_request(self, request):
         tenant_id = request.headers.get('X-Tenant-Id', None)
-        auth_tok = request.headers["X-Auth-Token"]
+        auth_token = request.headers["X-Auth-Token"]
         user = request.headers.get('X-User', None)
         roles = request.headers.get('X-Role', '').split(',')
         is_admin = False
@@ -609,7 +611,7 @@ class ContextMiddleware(openstack_wsgi.Middleware):
                 is_admin = True
                 break
         limits = self._extract_limits(request.params)
-        context = rd_context.ReddwarfContext(auth_tok=auth_tok,
+        context = rd_context.ReddwarfContext(auth_token=auth_token,
                                              tenant=tenant_id,
                                              user=user,
                                              is_admin=is_admin,
(File diff suppressed because it is too large.)
@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -23,11 +23,12 @@ context or provide additional information in their specific WSGI pipeline.
 """

 import itertools
 import uuid

+from reddwarf.openstack.common import uuidutils
+

 def generate_request_id():
-    return 'req-' + str(uuid.uuid4())
+    return 'req-%s' % uuidutils.generate_uuid()


 class RequestContext(object):
@@ -37,9 +38,9 @@ class RequestContext(object):
     accesses the system, as well as additional request information.
     """

-    def __init__(self, auth_tok=None, user=None, tenant=None, is_admin=False,
+    def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
                  read_only=False, show_deleted=False, request_id=None):
-        self.auth_tok = auth_tok
+        self.auth_token = auth_token
         self.user = user
         self.tenant = tenant
         self.is_admin = is_admin
@@ -55,7 +56,7 @@ class RequestContext(object):
                 'is_admin': self.is_admin,
                 'read_only': self.read_only,
                 'show_deleted': self.show_deleted,
-                'auth_token': self.auth_tok,
+                'auth_token': self.auth_token,
                 'request_id': self.request_id}

@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

-# Copyright (c) 2012 Openstack, LLC.
+# Copyright (c) 2012 OpenStack Foundation.
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
 #
@@ -24,8 +24,7 @@ import traceback
 import eventlet
 import eventlet.backdoor
 import greenlet

-from reddwarf.openstack.common import cfg
-
+from oslo.config import cfg

 eventlet_backdoor_opts = [
     cfg.IntOpt('backdoor_port',
@@ -52,12 +51,20 @@ def _print_greenthreads():
         print


+def _print_nativethreads():
+    for threadId, stack in sys._current_frames().items():
+        print threadId
+        traceback.print_stack(stack)
+        print
+
+
 def initialize_if_enabled():
     backdoor_locals = {
         'exit': _dont_use_this,      # So we don't exit the entire process
         'quit': _dont_use_this,      # So we don't exit the entire process
         'fo': _find_objects,
         'pgt': _print_greenthreads,
+        'pnt': _print_nativethreads,
     }

     if CONF.backdoor_port is None:
@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # Copyright 2012, Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -24,10 +24,27 @@ Usual usage in an openstack.common module:
 """

 import gettext
+import os


-t = gettext.translation('openstack-common', 'locale', fallback=True)
+_localedir = os.environ.get('reddwarf'.upper() + '_LOCALEDIR')
+_t = gettext.translation('reddwarf', localedir=_localedir, fallback=True)


 def _(msg):
-    return t.ugettext(msg)
+    return _t.ugettext(msg)
+
+
+def install(domain):
+    """Install a _() function using the given translation domain.
+
+    Given a translation domain, install a _() function using gettext's
+    install() function.
+
+    The main difference from gettext.install() is that we allow
+    overriding the default localedir (e.g. /usr/share/locale) using
+    a translation-domain-specific environment variable (e.g.
+    NOVA_LOCALEDIR).
+    """
+    gettext.install(domain,
+                    localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
+                    unicode=True)
@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -57,3 +57,11 @@ def import_module(import_str):
     """Import a module."""
     __import__(import_str)
     return sys.modules[import_str]
+
+
+def try_import(import_str, default=None):
+    """Try to import a module and if it fails return default."""
+    try:
+        return import_module(import_str)
+    except ImportError:
+        return default
@@ -34,15 +34,27 @@ This module provides a few things:


 import datetime
+import functools
+import inspect
 import itertools
 import json
+import types
 import xmlrpclib

 from reddwarf.openstack.common import timeutils


-def to_primitive(value, convert_instances=False, level=0):
+_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
+                     inspect.isfunction, inspect.isgeneratorfunction,
+                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
+                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
+                     inspect.isabstract]
+
+_simple_types = (types.NoneType, int, basestring, bool, float, long)
+
+
+def to_primitive(value, convert_instances=False, convert_datetime=True,
+                 level=0, max_depth=3):
     """Convert a complex object into primitives.

     Handy for JSON serialization. We can optionally handle instances,
@@ -56,17 +68,30 @@ def to_primitive(value, convert_instances=False, level=0):
     Therefore, convert_instances=True is lossy ... be aware.

     """
-    nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
-             inspect.isfunction, inspect.isgeneratorfunction,
-             inspect.isgenerator, inspect.istraceback, inspect.isframe,
-             inspect.iscode, inspect.isbuiltin, inspect.isroutine,
-             inspect.isabstract]
-    for test in nasty:
-        if test(value):
-            return unicode(value)
+    # handle obvious types first - order of basic types determined by running
+    # full tests on nova project, resulting in the following counts:
+    # 572754 <type 'NoneType'>
+    # 460353 <type 'int'>
+    # 379632 <type 'unicode'>
+    # 274610 <type 'str'>
+    # 199918 <type 'dict'>
+    # 114200 <type 'datetime.datetime'>
+    #  51817 <type 'bool'>
+    #  26164 <type 'list'>
+    #   6491 <type 'float'>
+    #    283 <type 'tuple'>
+    #     19 <type 'long'>
+    if isinstance(value, _simple_types):
+        return value
+
+    if isinstance(value, datetime.datetime):
+        if convert_datetime:
+            return timeutils.strtime(value)
+        else:
+            return value

-    # value of itertools.count doesn't get caught by inspects
-    # above and results in infinite loop when list(value) is called.
+    # value of itertools.count doesn't get caught by nasty_type_tests
+    # and results in infinite loop when list(value) is called.
     if type(value) == itertools.count:
         return unicode(value)

@@ -78,47 +103,41 @@ def to_primitive(value, convert_instances=False, level=0):
     if getattr(value, '__module__', None) == 'mox':
         return 'mock'

-    if level > 3:
+    if level > max_depth:
         return '?'

     # The try block may not be necessary after the class check above,
     # but just in case ...
     try:
+        recursive = functools.partial(to_primitive,
+                                      convert_instances=convert_instances,
+                                      convert_datetime=convert_datetime,
+                                      level=level,
+                                      max_depth=max_depth)
+        if isinstance(value, dict):
+            return dict((k, recursive(v)) for k, v in value.iteritems())
+        elif isinstance(value, (list, tuple)):
+            return [recursive(lv) for lv in value]
+
         # It's not clear why xmlrpclib created their own DateTime type, but
         # for our purposes, make it a datetime type which is explicitly
         # handled
         if isinstance(value, xmlrpclib.DateTime):
             value = datetime.datetime(*tuple(value.timetuple())[:6])

-        if isinstance(value, (list, tuple)):
-            o = []
-            for v in value:
-                o.append(to_primitive(v, convert_instances=convert_instances,
-                                      level=level))
-            return o
-        elif isinstance(value, dict):
-            o = {}
-            for k, v in value.iteritems():
-                o[k] = to_primitive(v, convert_instances=convert_instances,
-                                    level=level)
-            return o
-        elif isinstance(value, datetime.datetime):
+        if convert_datetime and isinstance(value, datetime.datetime):
             return timeutils.strtime(value)
         elif hasattr(value, 'iteritems'):
-            return to_primitive(dict(value.iteritems()),
-                                convert_instances=convert_instances,
-                                level=level + 1)
+            return recursive(dict(value.iteritems()), level=level + 1)
         elif hasattr(value, '__iter__'):
-            return to_primitive(list(value),
-                                convert_instances=convert_instances,
-                                level=level)
+            return recursive(list(value))
         elif convert_instances and hasattr(value, '__dict__'):
             # Likely an instance of something. Watch for cycles.
             # Ignore class member vars.
-            return to_primitive(value.__dict__,
-                                convert_instances=convert_instances,
-                                level=level + 1)
+            return recursive(value.__dict__, level=level + 1)
         else:
+            if any(test(value) for test in _nasty_type_tests):
+                return unicode(value)
             return value
     except TypeError:
         # Class objects are tricky since they may define something like
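The rewritten to_primitive() threads every option through a single functools.partial, so nested values are converted with the same convert_datetime and max_depth settings at each level. A rough usage sketch of the new signature (values illustrative):

    import datetime

    to_primitive({'when': datetime.datetime(2013, 5, 2), 'tags': ('a', 'b')})
    # => roughly {'when': '2013-05-02T00:00:00.000000', 'tags': ['a', 'b']}

    # Depth is configurable now, instead of the old hard-coded `level > 3`;
    # some_deep_structure is a hypothetical stand-in for real data.
    to_primitive(some_deep_structure, max_depth=5)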
@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -26,6 +26,9 @@ class WeakLocal(corolocal.local):
     def __getattribute__(self, attr):
         rval = corolocal.local.__getattribute__(self, attr)
         if rval:
+            # NOTE(mikal): this bit is confusing. What is stored is a weak
+            # reference, not the value itself. We therefore need to lookup
+            # the weak reference and return the inner value here.
             rval = rval()
         return rval

@@ -34,4 +37,12 @@ class WeakLocal(corolocal.local):
         return corolocal.local.__setattr__(self, attr, value)


+# NOTE(mikal): the name "store" should be deprecated in the future
 store = WeakLocal()
+
+# A "weak" store uses weak references and allows an object to fall out of scope
+# when it falls out of scope in the code that uses the thread local storage. A
+# "strong" store will hold a reference to the object so that it never falls out
+# of scope.
+weak_store = WeakLocal()
+strong_store = corolocal.local
@@ -26,7 +26,7 @@ import weakref

 from eventlet import semaphore

-from reddwarf.openstack.common import cfg
+from oslo.config import cfg
 from reddwarf.openstack.common import fileutils
 from reddwarf.openstack.common.gettextutils import _
 from reddwarf.openstack.common import log as logging
@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -29,6 +29,7 @@ It also allows setting of formatting information through conf.

 """

+import ConfigParser
 import cStringIO
 import inspect
 import itertools
@@ -40,28 +41,91 @@ import stat
 import sys
 import traceback

-from reddwarf.openstack.common import cfg
+from oslo.config import cfg
+
 from reddwarf.openstack.common.gettextutils import _
 from reddwarf.openstack.common import jsonutils
 from reddwarf.openstack.common import local
 from reddwarf.openstack.common import notifier


+_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
+_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+common_cli_opts = [
+    cfg.BoolOpt('debug',
+                short='d',
+                default=False,
+                help='Print debugging output (set logging level to '
+                     'DEBUG instead of default WARNING level).'),
+    cfg.BoolOpt('verbose',
+                short='v',
+                default=False,
+                help='Print more verbose output (set logging level to '
+                     'INFO instead of default WARNING level).'),
+]
+
+logging_cli_opts = [
+    cfg.StrOpt('log-config',
+               metavar='PATH',
+               help='If this option is specified, the logging configuration '
+                    'file specified is used and overrides any other logging '
+                    'options specified. Please see the Python logging module '
+                    'documentation for details on logging configuration '
+                    'files.'),
+    cfg.StrOpt('log-format',
+               default=_DEFAULT_LOG_FORMAT,
+               metavar='FORMAT',
+               help='A logging.Formatter log message format string which may '
+                    'use any of the available logging.LogRecord attributes. '
+                    'Default: %(default)s'),
+    cfg.StrOpt('log-date-format',
+               default=_DEFAULT_LOG_DATE_FORMAT,
+               metavar='DATE_FORMAT',
+               help='Format string for %%(asctime)s in log records. '
+                    'Default: %(default)s'),
+    cfg.StrOpt('log-file',
+               metavar='PATH',
+               deprecated_name='logfile',
+               help='(Optional) Name of log file to output to. '
+                    'If no default is set, logging will go to stdout.'),
+    cfg.StrOpt('log-dir',
+               deprecated_name='logdir',
+               help='(Optional) The base directory used for relative '
+                    '--log-file paths'),
+    cfg.BoolOpt('use-syslog',
+                default=False,
+                help='Use syslog for logging.'),
+    cfg.StrOpt('syslog-log-facility',
+               default='LOG_USER',
+               help='syslog facility to receive log lines')
+]
+
+generic_log_opts = [
+    cfg.BoolOpt('use_stderr',
+                default=True,
+                help='Log output to standard error'),
+    cfg.StrOpt('logfile_mode',
+               default='0644',
+               help='Default file mode used when creating log files'),
+]
+
 log_opts = [
     cfg.StrOpt('logging_context_format_string',
-               default='%(asctime)s %(levelname)s %(name)s [%(request_id)s '
-                       '%(user)s %(tenant)s] %(instance)s'
-                       '%(message)s',
+               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
+                       '%(name)s [%(request_id)s %(user)s %(tenant)s] '
+                       '%(instance)s%(message)s',
               help='format string to use for log messages with context'),
     cfg.StrOpt('logging_default_format_string',
-               default='%(asctime)s %(process)d %(levelname)s %(name)s [-]'
-                       ' %(instance)s%(message)s',
+               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
+                       '%(name)s [-] %(instance)s%(message)s',
               help='format string to use for log messages without context'),
     cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
     cfg.StrOpt('logging_exception_prefix',
-               default='%(asctime)s %(process)d TRACE %(name)s %(instance)s',
+               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
+                       '%(instance)s',
               help='prefix each line of exception output with this format'),
     cfg.ListOpt('default_log_levels',
                 default=[
@@ -93,24 +157,9 @@ log_opts = [
                      'format it like this'),
 ]

-
-generic_log_opts = [
-    cfg.StrOpt('logdir',
-               default=None,
-               help='Log output to a per-service log file in named directory'),
-    cfg.StrOpt('logfile',
-               default=None,
-               help='Log output to a named file'),
-    cfg.BoolOpt('use_stderr',
-                default=True,
-                help='Log output to standard error'),
-    cfg.StrOpt('logfile_mode',
-               default='0644',
-               help='Default file mode used when creating log files'),
-]
-

 CONF = cfg.CONF
+CONF.register_cli_opts(common_cli_opts)
+CONF.register_cli_opts(logging_cli_opts)
 CONF.register_opts(generic_log_opts)
 CONF.register_opts(log_opts)

@@ -148,8 +197,8 @@ def _get_binary_name():


 def _get_log_file_path(binary=None):
-    logfile = CONF.log_file or CONF.logfile
-    logdir = CONF.log_dir or CONF.logdir
+    logfile = CONF.log_file
+    logdir = CONF.log_dir

     if logfile and not logdir:
         return logfile
@@ -258,7 +307,7 @@ class JSONFormatter(logging.Formatter):
 class PublishErrorsHandler(logging.Handler):
     def emit(self, record):
         if ('reddwarf.openstack.common.notifier.log_notifier' in
-            CONF.notification_driver):
+                CONF.notification_driver):
             return
         notifier.api.notify(None, 'error.publisher',
                             'error_notification',
@@ -275,18 +324,33 @@ def _create_logging_excepthook(product_name):
     return logging_excepthook


+class LogConfigError(Exception):
+
+    message = _('Error loading logging config %(log_config)s: %(err_msg)s')
+
+    def __init__(self, log_config, err_msg):
+        self.log_config = log_config
+        self.err_msg = err_msg
+
+    def __str__(self):
+        return self.message % dict(log_config=self.log_config,
+                                   err_msg=self.err_msg)
+
+
+def _load_log_config(log_config):
+    try:
+        logging.config.fileConfig(log_config)
+    except ConfigParser.Error as exc:
+        raise LogConfigError(log_config, str(exc))
+
+
 def setup(product_name):
     """Setup logging."""
-    sys.excepthook = _create_logging_excepthook(product_name)
-
     if CONF.log_config:
-        try:
-            logging.config.fileConfig(CONF.log_config)
-        except Exception:
-            traceback.print_exc()
-            raise
+        _load_log_config(CONF.log_config)
     else:
-        _setup_logging_from_conf(product_name)
+        _setup_logging_from_conf()
+    sys.excepthook = _create_logging_excepthook(product_name)


 def set_defaults(logging_context_format_string):
@@ -319,8 +383,8 @@ def _find_facility_from_conf():
     return facility


-def _setup_logging_from_conf(product_name):
-    log_root = getLogger(product_name).logger
+def _setup_logging_from_conf():
+    log_root = getLogger(None).logger
     for handler in log_root.handlers:
         log_root.removeHandler(handler)

@@ -358,21 +422,21 @@ def _setup_logging_from_conf(product_name):
         if CONF.log_format:
             handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                    datefmt=datefmt))
-        handler.setFormatter(LegacyFormatter(datefmt=datefmt))
+        else:
+            handler.setFormatter(LegacyFormatter(datefmt=datefmt))

-    if CONF.verbose or CONF.debug:
+    if CONF.debug:
         log_root.setLevel(logging.DEBUG)
-    else:
+    elif CONF.verbose:
         log_root.setLevel(logging.INFO)
+    else:
+        log_root.setLevel(logging.WARNING)

+    level = logging.NOTSET
     for pair in CONF.default_log_levels:
         mod, _sep, level_name = pair.partition('=')
         level = logging.getLevelName(level_name)
         logger = logging.getLogger(mod)
         logger.setLevel(level)
-        for handler in log_root.handlers:
-            logger.addHandler(handler)

 _loggers = {}

@@ -424,7 +488,7 @@ class LegacyFormatter(logging.Formatter):
             self._fmt = CONF.logging_default_format_string

         if (record.levelno == logging.DEBUG and
-            CONF.logging_debug_format_suffix):
+                CONF.logging_debug_format_suffix):
             self._fmt += " " + CONF.logging_debug_format_suffix

         # Cache this on the record, Logger will respect our formated copy
@@ -46,12 +46,23 @@ class LoopingCallDone(Exception):
         self.retvalue = retvalue


-class LoopingCall(object):
+class LoopingCallBase(object):
     def __init__(self, f=None, *args, **kw):
         self.args = args
         self.kw = kw
         self.f = f
         self._running = False
+        self.done = None
+
+    def stop(self):
+        self._running = False
+
+    def wait(self):
+        return self.done.wait()
+
+
+class FixedIntervalLoopingCall(LoopingCallBase):
+    """A fixed interval looping call."""

     def start(self, interval, initial_delay=None):
         self._running = True
@@ -73,11 +84,11 @@ class LoopingCall(object):
                         LOG.warn(_('task run outlasted interval by %s sec') %
                                  -delay)
                     greenthread.sleep(delay if delay > 0 else 0)
-            except LoopingCallDone, e:
+            except LoopingCallDone as e:
                 self.stop()
                 done.send(e.retvalue)
             except Exception:
-                LOG.exception(_('in looping call'))
+                LOG.exception(_('in fixed duration looping call'))
                 done.send_exception(*sys.exc_info())
                 return
             else:
@@ -88,8 +99,49 @@ class LoopingCall(object):
         greenthread.spawn_n(_inner)
         return self.done

-    def stop(self):
-        self._running = False
-
-    def wait(self):
-        return self.done.wait()
+
+# TODO(mikal): this class name is deprecated in Havana and should be removed
+# in the I release
+LoopingCall = FixedIntervalLoopingCall
+
+
+class DynamicLoopingCall(LoopingCallBase):
+    """A looping call which sleeps until the next known event.
+
+    The function called should return how long to sleep for before being
+    called again.
+    """
+
+    def start(self, initial_delay=None, periodic_interval_max=None):
+        self._running = True
+        done = event.Event()
+
+        def _inner():
+            if initial_delay:
+                greenthread.sleep(initial_delay)
+
+            try:
+                while self._running:
+                    idle = self.f(*self.args, **self.kw)
+                    if not self._running:
+                        break
+
+                    if periodic_interval_max is not None:
+                        idle = min(idle, periodic_interval_max)
+                    LOG.debug(_('Dynamic looping call sleeping for %.02f '
+                                'seconds'), idle)
+                    greenthread.sleep(idle)
+            except LoopingCallDone as e:
+                self.stop()
+                done.send(e.retvalue)
+            except Exception:
+                LOG.exception(_('in dynamic looping call'))
+                done.send_exception(*sys.exc_info())
+                return
+            else:
+                done.send(True)
+
+        self.done = done
+
+        greenthread.spawn(_inner)
+        return self.done
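DynamicLoopingCall lets the wrapped task decide how long to sleep before its next run, where FixedIntervalLoopingCall ticks on a constant interval. A rough usage sketch, with an illustrative task:

    def poll_once():
        # Illustrative: do one unit of work, then propose the next
        # sleep in seconds.
        return 30.0

    timer = DynamicLoopingCall(poll_once)
    # periodic_interval_max caps whatever delay the task returns.
    timer.start(initial_delay=5, periodic_interval_max=60).wait()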
@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

-# Copyright 2012 OpenStack LLC.
+# Copyright 2012 OpenStack Foundation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,7 +19,8 @@
 Network-related utilities and helper functions.
 """

-import logging
+from reddwarf.openstack.common import log as logging
+

 LOG = logging.getLogger(__name__)

@@ -15,7 +15,8 @@

 import uuid

-from reddwarf.openstack.common import cfg
+from oslo.config import cfg
+
 from reddwarf.openstack.common import context
 from reddwarf.openstack.common.gettextutils import _
 from reddwarf.openstack.common import importutils
@@ -29,7 +30,6 @@ LOG = logging.getLogger(__name__)
 notifier_opts = [
     cfg.MultiStrOpt('notification_driver',
                     default=[],
-                    deprecated_name='list_notifier_drivers',
                     help='Driver or drivers to handle sending notifications'),
     cfg.StrOpt('default_notification_level',
                default='INFO',
@@ -13,8 +13,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from reddwarf.openstack.common import cfg
+from oslo.config import cfg
+
 from reddwarf.openstack.common import jsonutils
 from reddwarf.openstack.common import log as logging

@@ -13,8 +13,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from reddwarf.openstack.common import cfg
+from oslo.config import cfg
+
 from reddwarf.openstack.common import context as req_context
 from reddwarf.openstack.common.gettextutils import _
 from reddwarf.openstack.common import log as logging

reddwarf/openstack/common/notifier/rpc_notifier2.py (new file, 52 lines)
@@ -0,0 +1,52 @@
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+'''messaging based notification driver, with message envelopes'''
+
+from oslo.config import cfg
+
+from reddwarf.openstack.common import context as req_context
+from reddwarf.openstack.common.gettextutils import _
+from reddwarf.openstack.common import log as logging
+from reddwarf.openstack.common import rpc
+
+LOG = logging.getLogger(__name__)
+
+notification_topic_opt = cfg.ListOpt(
+    'topics', default=['notifications', ],
+    help='AMQP topic(s) used for openstack notifications')
+
+opt_group = cfg.OptGroup(name='rpc_notifier2',
+                         title='Options for rpc_notifier2')
+
+CONF = cfg.CONF
+CONF.register_group(opt_group)
+CONF.register_opt(notification_topic_opt, opt_group)
+
+
+def notify(context, message):
+    """Sends a notification via RPC"""
+    if not context:
+        context = req_context.get_admin_context()
+    priority = message.get('priority',
+                           CONF.default_notification_level)
+    priority = priority.lower()
+    for topic in CONF.rpc_notifier2.topics:
+        topic = '%s.%s' % (topic, priority)
+        try:
+            rpc.notify(context, topic, message, envelope=True)
+        except Exception:
+            LOG.exception(_("Could not send notification to %(topic)s. "
+                            "Payload=%(message)s"), locals())
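The new driver publishes each notification to '<topic>.<priority>' with the message envelope enabled. A rough sketch of how it gets exercised (the driver wiring and payload here are illustrative assumptions, not configuration from this commit):

    # Assumed configuration: route notifications through the new driver, e.g.
    #   notification_driver = reddwarf.openstack.common.notifier.rpc_notifier2
    from reddwarf.openstack.common.notifier import api as notifier_api

    notifier_api.notify(context, 'reddwarf.host1', 'instance.create',
                        notifier_api.INFO, {'instance_id': 'abc-123'})
    # rpc_notifier2.notify() then calls
    # rpc.notify(context, 'notifications.info', message, envelope=True).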
@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,7 +19,6 @@
 System-level utilities and helper functions.
 """

-import logging
 import random
 import shlex

@@ -27,6 +26,7 @@ from eventlet.green import subprocess
 from eventlet import greenthread

 from reddwarf.openstack.common.gettextutils import _
+from reddwarf.openstack.common import log as logging


 LOG = logging.getLogger(__name__)
@@ -25,8 +25,17 @@ For some wrappers that add message versioning to rpc, see:
     rpc.proxy
 """

-from reddwarf.openstack.common import cfg
+import inspect
+
+from oslo.config import cfg
+
 from reddwarf.openstack.common.gettextutils import _
 from reddwarf.openstack.common import importutils
 from reddwarf.openstack.common import local
 from reddwarf.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+

 rpc_opts = [
@@ -57,17 +66,18 @@ rpc_opts = [
     cfg.BoolOpt('fake_rabbit',
                 default=False,
                 help='If passed, use a fake RabbitMQ provider'),
-    #
-    # The following options are not registered here, but are expected to be
-    # present. The project using this library must register these options with
-    # the configuration so that project-specific defaults may be defined.
-    #
-    #cfg.StrOpt('control_exchange',
-    #           default='nova',
-    #           help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
+    cfg.StrOpt('control_exchange',
+               default='openstack',
+               help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
 ]

-cfg.CONF.register_opts(rpc_opts)
+CONF = cfg.CONF
+CONF.register_opts(rpc_opts)
+
+
+def set_defaults(control_exchange):
+    cfg.set_defaults(rpc_opts,
+                     control_exchange=control_exchange)


 def create_connection(new=True):
@@ -83,10 +93,27 @@ def create_connection(new=True):

     :returns: An instance of openstack.common.rpc.common.Connection
     """
-    return _get_impl().create_connection(cfg.CONF, new=new)
+    return _get_impl().create_connection(CONF, new=new)


-def call(context, topic, msg, timeout=None):
+def _check_for_lock():
+    if not CONF.debug:
+        return None
+
+    if ((hasattr(local.strong_store, 'locks_held')
+         and local.strong_store.locks_held)):
+        stack = ' :: '.join([frame[3] for frame in inspect.stack()])
+        LOG.warn(_('A RPC is being made while holding a lock. The locks '
+                   'currently held are %(locks)s. This is probably a bug. '
+                   'Please report it. Include the following: [%(stack)s].'),
+                 {'locks': local.strong_store.locks_held,
+                  'stack': stack})
+        return True
+
+    return False
+
+
+def call(context, topic, msg, timeout=None, check_for_lock=False):
     """Invoke a remote method that returns something.

     :param context: Information that identifies the user that has made this
@@ -100,13 +127,17 @@ def call(context, topic, msg, timeout=None):
                        "args" : dict_of_kwargs }
     :param timeout: int, number of seconds to use for a response timeout.
                     If set, this overrides the rpc_response_timeout option.
+    :param check_for_lock: if True, a warning is emitted if a RPC call is made
+                           with a lock held.

     :returns: A dict from the remote method.

     :raises: openstack.common.rpc.common.Timeout if a complete response
              is not received before the timeout is reached.
     """
-    return _get_impl().call(cfg.CONF, context, topic, msg, timeout)
+    if check_for_lock:
+        _check_for_lock()
+    return _get_impl().call(CONF, context, topic, msg, timeout)


 def cast(context, topic, msg):
@@ -124,7 +155,7 @@ def cast(context, topic, msg):

     :returns: None
     """
-    return _get_impl().cast(cfg.CONF, context, topic, msg)
+    return _get_impl().cast(CONF, context, topic, msg)


 def fanout_cast(context, topic, msg):
@@ -145,10 +176,10 @@ def fanout_cast(context, topic, msg):

     :returns: None
     """
-    return _get_impl().fanout_cast(cfg.CONF, context, topic, msg)
+    return _get_impl().fanout_cast(CONF, context, topic, msg)


-def multicall(context, topic, msg, timeout=None):
+def multicall(context, topic, msg, timeout=None, check_for_lock=False):
     """Invoke a remote method and get back an iterator.

     In this case, the remote method will be returning multiple values in
@@ -166,6 +197,8 @@ def multicall(context, topic, msg, timeout=None):
                        "args" : dict_of_kwargs }
     :param timeout: int, number of seconds to use for a response timeout.
                     If set, this overrides the rpc_response_timeout option.
+    :param check_for_lock: if True, a warning is emitted if a RPC call is made
+                           with a lock held.

     :returns: An iterator. The iterator will yield a tuple (N, X) where N is
               an index that starts at 0 and increases by one for each value
@@ -175,20 +208,23 @@ def multicall(context, topic, msg, timeout=None):
     :raises: openstack.common.rpc.common.Timeout if a complete response
              is not received before the timeout is reached.
     """
-    return _get_impl().multicall(cfg.CONF, context, topic, msg, timeout)
+    if check_for_lock:
+        _check_for_lock()
+    return _get_impl().multicall(CONF, context, topic, msg, timeout)


-def notify(context, topic, msg):
+def notify(context, topic, msg, envelope=False):
     """Send notification event.

     :param context: Information that identifies the user that has made this
                     request.
     :param topic: The topic to send the notification to.
     :param msg: This is a dict of content of event.
+    :param envelope: Set to True to enable message envelope for notifications.

     :returns: None
     """
-    return _get_impl().notify(cfg.CONF, context, topic, msg)
+    return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)


 def cleanup():
@@ -216,7 +252,7 @@ def cast_to_server(context, server_params, topic, msg):

     :returns: None
     """
-    return _get_impl().cast_to_server(cfg.CONF, context, server_params, topic,
+    return _get_impl().cast_to_server(CONF, context, server_params, topic,
                                       msg)


@@ -232,7 +268,7 @@ def fanout_cast_to_server(context, server_params, topic, msg):

     :returns: None
     """
-    return _get_impl().fanout_cast_to_server(cfg.CONF, context, server_params,
+    return _get_impl().fanout_cast_to_server(CONF, context, server_params,
                                              topic, msg)


@@ -262,10 +298,10 @@ def _get_impl():
     global _RPCIMPL
     if _RPCIMPL is None:
         try:
-            _RPCIMPL = importutils.import_module(cfg.CONF.rpc_backend)
+            _RPCIMPL = importutils.import_module(CONF.rpc_backend)
         except ImportError:
             # For backwards compatibility with older nova config.
-            impl = cfg.CONF.rpc_backend.replace('nova.rpc',
-                                                'nova.openstack.common.rpc')
+            impl = CONF.rpc_backend.replace('nova.rpc',
+                                            'nova.openstack.common.rpc')
             _RPCIMPL = importutils.import_module(impl)
     return _RPCIMPL
@@ -25,15 +25,20 @@ Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
 AMQP, but is deprecated and predates this code.
 """

+import collections
 import inspect
 import sys
 import uuid

 from eventlet import greenpool
 from eventlet import pools
+from eventlet import queue
 from eventlet import semaphore
+# TODO(pekowsk): Remove import cfg and below comment in Havana.
+# This import should no longer be needed when the amqp_rpc_single_reply_queue
+# option is removed.
+from oslo.config import cfg

-from reddwarf.openstack.common import cfg
 from reddwarf.openstack.common import excutils
 from reddwarf.openstack.common.gettextutils import _
 from reddwarf.openstack.common import local
@@ -41,6 +46,17 @@ from reddwarf.openstack.common import log as logging
 from reddwarf.openstack.common.rpc import common as rpc_common


+# TODO(pekowski): Remove this option in Havana.
+amqp_opts = [
+    cfg.BoolOpt('amqp_rpc_single_reply_queue',
+                default=False,
+                help='Enable a fast single reply queue if using AMQP based '
+                     'RPC like RabbitMQ or Qpid.'),
+]
+
+cfg.CONF.register_opts(amqp_opts)
+
+UNIQUE_ID = '_unique_id'
 LOG = logging.getLogger(__name__)


@@ -52,6 +68,7 @@ class Pool(pools.Pool):
         kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
         kwargs.setdefault("order_as_stack", True)
         super(Pool, self).__init__(*args, **kwargs)
+        self.reply_proxy = None

     # TODO(comstud): Timeout connections not used in a while
     def create(self):
@@ -61,6 +78,16 @@ class Pool(pools.Pool):
     def empty(self):
         while self.free_items:
             self.get().close()
+        # Force a new connection pool to be created.
+        # Note that this was added due to failing unit test cases. The issue
+        # is the above "while loop" gets all the cached connections from the
+        # pool and closes them, but never returns them to the pool, a pool
+        # leak. The unit tests hang waiting for an item to be returned to the
+        # pool. The unit tests get here via the teatDown() method. In the run
+        # time code, it gets here via cleanup() and only appears in service.py
+        # just before doing a sys.exit(), so cleanup() only happens once and
+        # the leakage is not a problem.
+        self.connection_cls.pool = None


 _pool_create_sem = semaphore.Semaphore()
@@ -138,6 +165,12 @@ class ConnectionContext(rpc_common.Connection):
     def create_worker(self, topic, proxy, pool_name):
         self.connection.create_worker(topic, proxy, pool_name)

+    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
+        self.connection.join_consumer_pool(callback,
+                                           pool_name,
+                                           topic,
+                                           exchange_name)
+
     def consume_in_thread(self):
         self.connection.consume_in_thread()

@@ -149,8 +182,45 @@ class ConnectionContext(rpc_common.Connection):
             raise rpc_common.InvalidRPCConnectionReuse()


-def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
-              ending=False, log_failure=True):
+class ReplyProxy(ConnectionContext):
+    """ Connection class for RPC replies / callbacks """
+    def __init__(self, conf, connection_pool):
+        self._call_waiters = {}
+        self._num_call_waiters = 0
+        self._num_call_waiters_wrn_threshhold = 10
+        self._reply_q = 'reply_' + uuid.uuid4().hex
+        super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
+        self.declare_direct_consumer(self._reply_q, self._process_data)
+        self.consume_in_thread()
+
+    def _process_data(self, message_data):
+        msg_id = message_data.pop('_msg_id', None)
+        waiter = self._call_waiters.get(msg_id)
+        if not waiter:
+            LOG.warn(_('no calling threads waiting for msg_id : %s'
+                       ', message : %s') % (msg_id, message_data))
+        else:
+            waiter.put(message_data)
+
+    def add_call_waiter(self, waiter, msg_id):
+        self._num_call_waiters += 1
+        if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
+            LOG.warn(_('Number of call waiters is greater than warning '
+                       'threshhold: %d. There could be a MulticallProxyWaiter '
+                       'leak.') % self._num_call_waiters_wrn_threshhold)
+            self._num_call_waiters_wrn_threshhold *= 2
+        self._call_waiters[msg_id] = waiter
+
+    def del_call_waiter(self, msg_id):
+        self._num_call_waiters -= 1
+        del self._call_waiters[msg_id]
+
+    def get_reply_q(self):
+        return self._reply_q
+
+
+def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
+              failure=None, ending=False, log_failure=True):
     """Sends a reply or an error on the channel signified by msg_id.

     Failure should be a sys.exc_info() tuple.
@@ -169,13 +239,22 @@ def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
                    'failure': failure}
         if ending:
             msg['ending'] = True
-        conn.direct_send(msg_id, msg)
+        _add_unique_id(msg)
+        # If a reply_q exists, add the msg_id to the reply and pass the
+        # reply_q to direct_send() to use it as the response queue.
+        # Otherwise use the msg_id for backward compatibilty.
+        if reply_q:
+            msg['_msg_id'] = msg_id
+            conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
+        else:
+            conn.direct_send(msg_id, rpc_common.serialize_msg(msg))


 class RpcContext(rpc_common.CommonRpcContext):
     """Context that supports replying to a rpc.call"""
     def __init__(self, **kwargs):
         self.msg_id = kwargs.pop('msg_id', None)
+        self.reply_q = kwargs.pop('reply_q', None)
         self.conf = kwargs.pop('conf')
         super(RpcContext, self).__init__(**kwargs)

@@ -183,13 +262,14 @@ class RpcContext(rpc_common.CommonRpcContext):
         values = self.to_dict()
         values['conf'] = self.conf
         values['msg_id'] = self.msg_id
+        values['reply_q'] = self.reply_q
         return self.__class__(**values)

     def reply(self, reply=None, failure=None, ending=False,
               connection_pool=None, log_failure=True):
         if self.msg_id:
-            msg_reply(self.conf, self.msg_id, connection_pool, reply, failure,
-                      ending, log_failure)
+            msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
+                      reply, failure, ending, log_failure)
             if ending:
                 self.msg_id = None

@@ -205,6 +285,7 @@ def unpack_context(conf, msg):
             value = msg.pop(key)
             context_dict[key[9:]] = value
     context_dict['msg_id'] = msg.pop('_msg_id', None)
+    context_dict['reply_q'] = msg.pop('_reply_q', None)
     context_dict['conf'] = conf
     ctx = RpcContext.from_dict(context_dict)
     rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
@@ -225,15 +306,86 @@ def pack_context(msg, context):
     msg.update(context_d)


-class ProxyCallback(object):
-    """Calls methods on a proxy object based on method and args."""
-
-    def __init__(self, conf, proxy, connection_pool):
-        self.proxy = proxy
+class _MsgIdCache(object):
+    """This class checks any duplicate messages."""
+
+    # NOTE: This value is considered can be a configuration item, but
+    #       it is not necessary to change its value in most cases,
+    #       so let this value as static for now.
+    DUP_MSG_CHECK_SIZE = 16
+
+    def __init__(self, **kwargs):
+        self.prev_msgids = collections.deque([],
+                                             maxlen=self.DUP_MSG_CHECK_SIZE)
+
+    def check_duplicate_message(self, message_data):
+        """AMQP consumers may read same message twice when exceptions occur
+           before ack is returned. This method prevents doing it.
+        """
+        if UNIQUE_ID in message_data:
+            msg_id = message_data[UNIQUE_ID]
+            if msg_id not in self.prev_msgids:
+                self.prev_msgids.append(msg_id)
+            else:
+                raise rpc_common.DuplicateMessageError(msg_id=msg_id)
+
+
+def _add_unique_id(msg):
+    """Add unique_id for checking duplicate messages."""
+    unique_id = uuid.uuid4().hex
+    msg.update({UNIQUE_ID: unique_id})
+    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
+
+
+class _ThreadPoolWithWait(object):
+    """Base class for a delayed invocation manager used by
+       the Connection class to start up green threads
+       to handle incoming messages.
+    """
+
+    def __init__(self, conf, connection_pool):
+        self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
+        self.connection_pool = connection_pool
+        self.conf = conf
+
+    def wait(self):
+        """Wait for all callback threads to exit."""
+        self.pool.waitall()
+
+
+class CallbackWrapper(_ThreadPoolWithWait):
+    """Wraps a straight callback to allow it to be invoked in a green
+       thread.
+    """
+
+    def __init__(self, conf, callback, connection_pool):
+        """
+        :param conf: cfg.CONF instance
+        :param callback: a callable (probably a function)
+        :param connection_pool: connection pool as returned by
+                                get_connection_pool()
+        """
+        super(CallbackWrapper, self).__init__(
+            conf=conf,
+            connection_pool=connection_pool,
+        )
+        self.callback = callback
+
+    def __call__(self, message_data):
+        self.pool.spawn_n(self.callback, message_data)
+
+
+class ProxyCallback(_ThreadPoolWithWait):
+    """Calls methods on a proxy object based on method and args."""
+
+    def __init__(self, conf, proxy, connection_pool):
+        super(ProxyCallback, self).__init__(
+            conf=conf,
+            connection_pool=connection_pool,
+        )
+        self.proxy = proxy
+        self.msg_id_cache = _MsgIdCache()

     def __call__(self, message_data):
         """Consumer callback to call a method on a proxy object.

@@ -252,18 +404,21 @@ class ProxyCallback(object):
         if hasattr(local.store, 'context'):
             del local.store.context
         rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
+        self.msg_id_cache.check_duplicate_message(message_data)
         ctxt = unpack_context(self.conf, message_data)
         method = message_data.get('method')
         args = message_data.get('args', {})
-        version = message_data.get('version', None)
+        version = message_data.get('version')
+        namespace = message_data.get('namespace')
         if not method:
             LOG.warn(_('no method for message: %s') % message_data)
             ctxt.reply(_('No method for message: %s') % message_data,
                        connection_pool=self.connection_pool)
             return
-        self.pool.spawn_n(self._process_data, ctxt, version, method, args)
+        self.pool.spawn_n(self._process_data, ctxt, version, method,
+                          namespace, args)

-    def _process_data(self, ctxt, version, method, args):
+    def _process_data(self, ctxt, version, method, namespace, args):
         """Process a message in a new thread.

         If the proxy object we have has a dispatch method
@@ -274,7 +429,8 @@ class ProxyCallback(object):
         """
         ctxt.update_store()
         try:
-            rval = self.proxy.dispatch(ctxt, version, method, **args)
+            rval = self.proxy.dispatch(ctxt, version, method, namespace,
+                                       **args)
             # Check if the result was a generator
             if inspect.isgenerator(rval):
                 for x in rval:
@@ -290,11 +446,73 @@ class ProxyCallback(object):
                            connection_pool=self.connection_pool,
                            log_failure=False)
         except Exception:
-            LOG.exception(_('Exception during message handling'))
-            ctxt.reply(None, sys.exc_info(),
-                       connection_pool=self.connection_pool)
+            # sys.exc_info() is deleted by LOG.exception().
+            exc_info = sys.exc_info()
+            LOG.error(_('Exception during message handling'),
+                      exc_info=exc_info)
+            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
+
+
+class MulticallProxyWaiter(object):
+    def __init__(self, conf, msg_id, timeout, connection_pool):
+        self._msg_id = msg_id
+        self._timeout = timeout or conf.rpc_response_timeout
+        self._reply_proxy = connection_pool.reply_proxy
+        self._done = False
+        self._got_ending = False
+        self._conf = conf
+        self._dataqueue = queue.LightQueue()
+        # Add this caller to the reply proxy's call_waiters
+        self._reply_proxy.add_call_waiter(self, self._msg_id)
+        self.msg_id_cache = _MsgIdCache()
+
+    def put(self, data):
+        self._dataqueue.put(data)
+
+    def done(self):
+        if self._done:
+            return
+        self._done = True
+        # Remove this caller from reply proxy's call_waiters
+        self._reply_proxy.del_call_waiter(self._msg_id)
+
+    def _process_data(self, data):
+        result = None
+        self.msg_id_cache.check_duplicate_message(data)
+        if data['failure']:
+            failure = data['failure']
+            result = rpc_common.deserialize_remote_exception(self._conf,
+                                                             failure)
+        elif data.get('ending', False):
+            self._got_ending = True
+        else:
+            result = data['result']
+        return result
+
+    def __iter__(self):
+        """Return a result until we get a reply with an 'ending" flag"""
+        if self._done:
+            raise StopIteration
+        while True:
+            try:
+                data = self._dataqueue.get(timeout=self._timeout)
+                result = self._process_data(data)
+            except queue.Empty:
+                self.done()
+                raise rpc_common.Timeout()
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    self.done()
+            if self._got_ending:
+                self.done()
+                raise StopIteration
+            if isinstance(result, Exception):
+                self.done()
+                raise result
+            yield result


+#TODO(pekowski): Remove MulticallWaiter() in Havana.
 class MulticallWaiter(object):
     def __init__(self, conf, connection, timeout):
         self._connection = connection
@@ -304,6 +522,7 @@ class MulticallWaiter(object):
         self._done = False
         self._got_ending = False
         self._conf = conf
+        self.msg_id_cache = _MsgIdCache()

     def done(self):
         if self._done:
@@ -315,6 +534,7 @@ class MulticallWaiter(object):

     def __call__(self, data):
         """The consume() callback will call this. Store the result."""
+        self.msg_id_cache.check_duplicate_message(data)
         if data['failure']:
             failure = data['failure']
             self._result = rpc_common.deserialize_remote_exception(self._conf,
@ -350,22 +570,41 @@ def create_connection(conf, new, connection_pool):
|
||||
return ConnectionContext(conf, connection_pool, pooled=not new)
|
||||
|
||||
|
||||
_reply_proxy_create_sem = semaphore.Semaphore()
|
||||
|
||||
|
||||
def multicall(conf, context, topic, msg, timeout, connection_pool):
|
||||
"""Make a call that returns multiple times."""
|
||||
# TODO(pekowski): Remove all these comments in Havana.
|
||||
# For amqp_rpc_single_reply_queue = False,
|
||||
# Can't use 'with' for multicall, as it returns an iterator
|
||||
# that will continue to use the connection. When it's done,
|
||||
# connection.close() will get called which will put it back into
|
||||
# the pool
|
||||
LOG.debug(_('Making asynchronous call on %s ...'), topic)
|
||||
# For amqp_rpc_single_reply_queue = True,
|
||||
# The 'with' statement is mandatory for closing the connection
|
||||
LOG.debug(_('Making synchronous call on %s ...'), topic)
|
||||
msg_id = uuid.uuid4().hex
|
||||
msg.update({'_msg_id': msg_id})
|
||||
LOG.debug(_('MSG_ID is %s') % (msg_id))
|
||||
_add_unique_id(msg)
|
||||
pack_context(msg, context)
|
||||
|
||||
conn = ConnectionContext(conf, connection_pool)
|
||||
wait_msg = MulticallWaiter(conf, conn, timeout)
|
||||
conn.declare_direct_consumer(msg_id, wait_msg)
|
||||
conn.topic_send(topic, msg)
|
||||
# TODO(pekowski): Remove this flag and the code under the if clause
|
||||
# in Havana.
|
||||
if not conf.amqp_rpc_single_reply_queue:
|
||||
conn = ConnectionContext(conf, connection_pool)
|
||||
wait_msg = MulticallWaiter(conf, conn, timeout)
|
||||
conn.declare_direct_consumer(msg_id, wait_msg)
|
||||
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
|
||||
else:
|
||||
with _reply_proxy_create_sem:
|
||||
if not connection_pool.reply_proxy:
|
||||
connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
|
||||
msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
|
||||
wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
|
||||
with ConnectionContext(conf, connection_pool) as conn:
|
||||
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
|
||||
return wait_msg
|
||||
|
||||
|
||||
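Not part of the diff: on the new reply-proxy path, multicall() returns a MulticallProxyWaiter, which callers drain exactly like the old waiter. A minimal usage sketch, assuming conf, context, and a connection pool have been set up elsewhere:

    # Illustrative only: draining a multicall iterator. Each yielded item
    # is one reply from the remote side; iteration stops when a reply
    # carrying the 'ending' flag arrives, or rpc_common.Timeout is raised.
    waiter = multicall(conf, context, 'some.topic',
                       {'method': 'list_items', 'args': {}},
                       timeout=30, connection_pool=pool)
    for reply in waiter:
        print reply  # Python 2 print statement, matching this codebase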
@ -382,43 +621,50 @@ def call(conf, context, topic, msg, timeout, connection_pool):
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, msg)
conn.topic_send(topic, rpc_common.serialize_msg(msg))


def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.fanout_send(topic, msg)
conn.fanout_send(topic, rpc_common.serialize_msg(msg))


def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
"""Sends a message on a topic to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.topic_send(topic, msg)
conn.topic_send(topic, rpc_common.serialize_msg(msg))


def fanout_cast_to_server(conf, context, server_params, topic, msg,
connection_pool):
"""Sends a message on a fanout exchange to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.fanout_send(topic, msg)
conn.fanout_send(topic, rpc_common.serialize_msg(msg))


def notify(conf, context, topic, msg, connection_pool):
def notify(conf, context, topic, msg, connection_pool, envelope):
"""Sends a notification event on a topic."""
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
dict(event_type=msg.get('event_type'),
topic=topic))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
if envelope:
msg = rpc_common.serialize_msg(msg)
conn.notify_send(topic, msg)


@ -428,7 +674,4 @@ def cleanup(connection_pool):


def get_control_exchange(conf):
try:
return conf.control_exchange
except cfg.NoSuchOptError:
return 'openstack'
return conf.control_exchange
@ -21,6 +21,8 @@ import copy
import sys
import traceback

from oslo.config import cfg

from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import importutils
from reddwarf.openstack.common import jsonutils
@ -28,9 +30,46 @@ from reddwarf.openstack.common import local
from reddwarf.openstack.common import log as logging


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


'''RPC Envelope Version.

This version number applies to the top level structure of messages sent out.
It does *not* apply to the message payload, which must be versioned
independently.  For example, when using rpc APIs, a version number is applied
for changes to the API being exposed over rpc.  This version number is handled
in the rpc proxy and dispatcher modules.

This version number applies to the message envelope that is used in the
serialization done inside the rpc layer.  See serialize_msg() and
deserialize_msg().

The current message format (version 2.0) is very simple.  It is:

{
'oslo.version': <RPC Envelope Version as a String>,
'oslo.message': <Application Message Payload, JSON encoded>
}

Message format version '1.0' is just considered to be the messages we sent
without a message envelope.

So, the current message envelope just includes the envelope version.  It may
eventually contain additional information, such as a signature for the message
payload.

We will JSON encode the application message payload.  The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
_RPC_ENVELOPE_VERSION = '2.0'

_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'


class RPCException(Exception):
message = _("An unknown RPC related exception occurred.")

@ -79,7 +118,29 @@ class Timeout(RPCException):
This exception is raised if the rpc_response_timeout is reached while
waiting for a response from the remote side.
"""
message = _("Timeout while waiting on RPC response.")
message = _('Timeout while waiting on RPC response - '
'topic: "%(topic)s", RPC method: "%(method)s" '
'info: "%(info)s"')

def __init__(self, info=None, topic=None, method=None):
"""
:param info: Extra info to convey to the user
:param topic: The topic that the rpc call was sent to
:param rpc_method_name: The name of the rpc method being
called
"""
self.info = info
self.topic = topic
self.method = method
super(Timeout, self).__init__(
None,
info=info or _('<unknown>'),
topic=topic or _('<unknown>'),
method=method or _('<unknown>'))


class DuplicateMessageError(RPCException):
message = _("Found duplicate message(%(msg_id)s). Skipping it.")


class InvalidRPCConnectionReuse(RPCException):
@ -91,6 +152,11 @@ class UnsupportedRpcVersion(RPCException):
"this endpoint.")


class UnsupportedRpcEnvelopeVersion(RPCException):
message = _("Specified RPC envelope version, %(version)s, "
"not supported by this endpoint.")


class Connection(object):
"""A connection, returned by rpc.create_connection().

@ -149,6 +215,28 @@ class Connection(object):
"""
raise NotImplementedError()

def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.

Exactly one member of a given pool will receive each message.

A message will be delivered to multiple pools, if more than
one is created.

:param callback: Callable to be invoked for each message.
:type callback: callable accepting one argument
:param pool_name: The name of the consumer pool.
:type pool_name: str
:param topic: The routing topic for desired messages.
:type topic: str
:param exchange_name: The name of the message exchange where
the client should attach. Defaults to
the configured exchange.
:type exchange_name: str
"""
raise NotImplementedError()

def consume_in_thread(self):
"""Spawn a thread to handle incoming messages.

@ -165,8 +253,12 @@ class Connection(object):

def _safe_log(log_func, msg, msg_data):
"""Sanitizes the msg_data field before logging."""
SANITIZE = {'set_admin_password': ('new_pass',),
'run_instance': ('admin_password',), }
SANITIZE = {'set_admin_password': [('args', 'new_pass')],
'run_instance': [('args', 'admin_password')],
'route_message': [('args', 'message', 'args', 'method_info',
'method_kwargs', 'password'),
('args', 'message', 'args', 'method_info',
'method_kwargs', 'admin_password')]}

has_method = 'method' in msg_data and msg_data['method'] in SANITIZE
has_context_token = '_context_auth_token' in msg_data
@ -178,14 +270,16 @@ def _safe_log(log_func, msg, msg_data):
msg_data = copy.deepcopy(msg_data)

if has_method:
method = msg_data['method']
if method in SANITIZE:
args_to_sanitize = SANITIZE[method]
for arg in args_to_sanitize:
try:
msg_data['args'][arg] = "<SANITIZED>"
except KeyError:
pass
for arg in SANITIZE.get(msg_data['method'], []):
try:
d = msg_data
for elem in arg[:-1]:
d = d[elem]
d[arg[-1]] = '<SANITIZED>'
except KeyError as e:
LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'),
{'item': arg,
'err': e})

if has_context_token:
msg_data['_context_auth_token'] = '<SANITIZED>'
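A standalone sketch (not part of the commit) of the nested walk the new SANITIZE handling performs: each entry is a key path, the code descends to the parent dict, then masks only the leaf key. The message below is hypothetical:

    # Standalone sketch of the nested sanitize walk (hypothetical message).
    msg_data = {'method': 'run_instance',
                'args': {'admin_password': 'secret', 'image': 'img-1'}}
    for arg in [('args', 'admin_password')]:
        d = msg_data
        for elem in arg[:-1]:
            d = d[elem]              # descend to the parent dict
        d[arg[-1]] = '<SANITIZED>'   # mask only the leaf key
    # msg_data['args']['admin_password'] is now '<SANITIZED>'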
@ -236,7 +330,7 @@ def deserialize_remote_exception(conf, data):

# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
# order to prevent arbitrary code execution.
if not module in conf.allowed_rpc_exception_modules:
if module not in conf.allowed_rpc_exception_modules:
return RemoteError(name, failure.get('message'), trace)

try:
@ -245,7 +339,7 @@ def deserialize_remote_exception(conf, data):
if not issubclass(klass, Exception):
raise TypeError("Can only deserialize Exceptions")

failure = klass(**failure.get('kwargs', {}))
failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
except (AttributeError, TypeError, ImportError):
return RemoteError(name, failure.get('message'), trace)

@ -325,7 +419,7 @@ class ClientException(Exception):
def catch_client_exception(exceptions, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception, e:
except Exception as e:
if type(e) in exceptions:
raise ClientException()
else:
@ -344,3 +438,71 @@ def client_exceptions(*exceptions):
return catch_client_exception(exceptions, func, *args, **kwargs)
return inner
return outer


def version_is_compatible(imp_version, version):
"""Determine whether versions are compatible.

:param imp_version: The version implemented
:param version: The version requested by an incoming message.
"""
version_parts = version.split('.')
imp_version_parts = imp_version.split('.')
if int(version_parts[0]) != int(imp_version_parts[0]):  # Major
return False
if int(version_parts[1]) > int(imp_version_parts[1]):  # Minor
return False
return True

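version_is_compatible() encodes the usual semantic rule for RPC APIs: the major versions must match exactly, and the implemented minor version must be at least the requested one. Illustratively:

    # Illustrative checks (not part of the commit):
    assert version_is_compatible('1.3', '1.0')      # older request: compatible
    assert not version_is_compatible('1.3', '1.4')  # requested minor too new
    assert not version_is_compatible('2.0', '1.9')  # major version mismatch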
def serialize_msg(raw_msg):
# NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
# information about this format.
msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
_MESSAGE_KEY: jsonutils.dumps(raw_msg)}

return msg


def deserialize_msg(msg):
# NOTE(russellb): Hang on to your hats, this road is about to
# get a little bumpy.
#
# Robustness Principle:
#    "Be strict in what you send, liberal in what you accept."
#
# At this point we have to do a bit of guessing about what it
# is we just received.  Here is the set of possibilities:
#
# 1) We received a dict.  This could be 2 things:
#
#   a) Inspect it to see if it looks like a standard message envelope.
#      If so, great!
#
#   b) If it doesn't look like a standard message envelope, it could either
#      be a notification, or a message from before we added a message
#      envelope (referred to as version 1.0).
#      Just return the message as-is.
#
# 2) It's any other non-dict type.  Just return it and hope for the best.
#    This case covers return values from rpc.call() from before message
#    envelopes were used.  (messages to call a method were always a dict)

if not isinstance(msg, dict):
# See #2 above.
return msg

base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
if not all(map(lambda key: key in msg, base_envelope_keys)):
# See #1.b above.
return msg

# At this point we think we have the message envelope
# format we were expecting. (#1.a above)

if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])

raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])

return raw_msg
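serialize_msg() and deserialize_msg() together round-trip the 2.0 envelope described in the module docstring. A minimal self-contained sketch (not part of the commit), with the standard json module standing in for jsonutils:

    import json

    # The 2.0 envelope: a version marker plus the JSON-encoded payload,
    # using the same keys as _VERSION_KEY and _MESSAGE_KEY above.
    payload = {'method': 'ping', 'args': {}}
    envelope = {'oslo.version': '2.0',
                'oslo.message': json.dumps(payload)}
    assert json.loads(envelope['oslo.message']) == payload
    # A dict lacking both envelope keys is returned unchanged, which is
    # exactly how pre-envelope (1.0) messages keep working.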
@ -103,28 +103,16 @@ class RpcDispatcher(object):
self.callbacks = callbacks
super(RpcDispatcher, self).__init__()

@staticmethod
def _is_compatible(mversion, version):
"""Determine whether versions are compatible.

:param mversion: The API version implemented by a callback.
:param version: The API version requested by an incoming message.
"""
version_parts = version.split('.')
mversion_parts = mversion.split('.')
if int(version_parts[0]) != int(mversion_parts[0]):  # Major
return False
if int(version_parts[1]) > int(mversion_parts[1]):  # Minor
return False
return True

def dispatch(self, ctxt, version, method, **kwargs):
def dispatch(self, ctxt, version, method, namespace, **kwargs):
"""Dispatch a message based on a requested version.

:param ctxt: The request context
:param version: The requested API version from the incoming message
:param method: The method requested to be called by the incoming
message.
:param namespace: The namespace for the requested method.  If None,
the dispatcher will look for a method on a callback
object with no namespace set.
:param kwargs: A dict of keyword arguments to be passed to the method.

:returns: Whatever is returned by the underlying method that gets
@ -135,12 +123,25 @@ class RpcDispatcher(object):

had_compatible = False
for proxyobj in self.callbacks:
if hasattr(proxyobj, 'RPC_API_VERSION'):
# Check for namespace compatibility
try:
cb_namespace = proxyobj.RPC_API_NAMESPACE
except AttributeError:
cb_namespace = None

if namespace != cb_namespace:
continue

# Check for version compatibility
try:
rpc_api_version = proxyobj.RPC_API_VERSION
else:
except AttributeError:
rpc_api_version = '1.0'
is_compatible = self._is_compatible(rpc_api_version, version)

is_compatible = rpc_common.version_is_compatible(rpc_api_version,
version)
had_compatible = had_compatible or is_compatible

if not hasattr(proxyobj, method):
continue
if is_compatible:
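With the namespace argument, a callback is now selected only when both its namespace and its version match the incoming message. A hypothetical callback showing the attributes dispatch() inspects (names are illustrative, and ctxt is a request context created elsewhere):

    # Hypothetical callback; RPC_API_VERSION/RPC_API_NAMESPACE are the
    # attributes the dispatcher reads.
    class ComputeAPI(object):
        RPC_API_VERSION = '1.2'        # checked with version_is_compatible()
        RPC_API_NAMESPACE = 'compute'  # must equal the message's namespace

        def ping(self, ctxt, arg):
            return arg

    dispatcher = RpcDispatcher([ComputeAPI()])
    dispatcher.dispatch(ctxt, '1.0', 'ping', 'compute', arg='hello')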
@ -57,13 +57,14 @@ class Consumer(object):
self.topic = topic
self.proxy = proxy

def call(self, context, version, method, args, timeout):
def call(self, context, version, method, namespace, args, timeout):
done = eventlet.event.Event()

def _inner():
ctxt = RpcContext.from_dict(context.to_dict())
try:
rval = self.proxy.dispatch(context, version, method, **args)
rval = self.proxy.dispatch(context, version, method,
namespace, **args)
res = []
# Caller might have called ctxt.reply() manually
for (reply, failure) in ctxt._response:
@ -140,13 +141,15 @@ def multicall(conf, context, topic, msg, timeout=None):
return
args = msg.get('args', {})
version = msg.get('version', None)
namespace = msg.get('namespace', None)

try:
consumer = CONSUMERS[topic][0]
except (KeyError, IndexError):
return iter([None])
else:
return consumer.call(context, version, method, args, timeout)
return consumer.call(context, version, method, namespace, args,
timeout)


def call(conf, context, topic, msg, timeout=None):
@ -167,7 +170,7 @@ def cast(conf, context, topic, msg):
pass


def notify(conf, context, topic, msg):
def notify(conf, context, topic, msg, envelope):
check_serialize(msg)


@ -183,9 +186,10 @@ def fanout_cast(conf, context, topic, msg):
return
args = msg.get('args', {})
version = msg.get('version', None)
namespace = msg.get('namespace', None)

for consumer in CONSUMERS.get(topic, []):
try:
consumer.call(context, version, method, args, None)
consumer.call(context, version, method, namespace, args, None)
except Exception:
pass
@ -28,8 +28,8 @@ import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from oslo.config import cfg

from reddwarf.openstack.common import cfg
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import network_utils
from reddwarf.openstack.common.rpc import amqp as rpc_amqp
@ -66,7 +66,8 @@ kombu_opts = [
help='the RabbitMQ userid'),
cfg.StrOpt('rabbit_password',
default='guest',
help='the RabbitMQ password'),
help='the RabbitMQ password',
secret=True),
cfg.StrOpt('rabbit_virtual_host',
default='/',
help='the RabbitMQ virtual host'),
@ -162,10 +163,12 @@ class ConsumerBase(object):
def _callback(raw_message):
message = self.channel.message_to_python(raw_message)
try:
callback(message.payload)
message.ack()
msg = rpc_common.deserialize_msg(message.payload)
callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
message.ack()

self.queue.consume(*args, callback=_callback, **options)

@ -173,7 +176,7 @@ class ConsumerBase(object):
"""Cancel the consuming from the queue, if it has started"""
try:
self.queue.cancel(self.tag)
except KeyError, e:
except KeyError as e:
# NOTE(comstud): Kludge to get around a amqplib bug
if str(e) != "u'%s'" % self.tag:
raise
@ -195,8 +198,9 @@ class DirectConsumer(ConsumerBase):
"""
# Default options
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': True}
'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=msg_id,
type='direct',
@ -269,7 +273,7 @@ class FanoutConsumer(ConsumerBase):
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': True}
'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
durable=options['durable'],
@ -301,9 +305,15 @@ class Publisher(object):
channel=channel,
routing_key=self.routing_key)

def send(self, msg):
def send(self, msg, timeout=None):
"""Send a message"""
self.producer.publish(msg)
if timeout:
#
# AMQP TTL is in milliseconds when set in the header.
#
self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
else:
self.producer.publish(msg)

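The timeout now reaches RabbitMQ as a per-message TTL header, converted from seconds to the milliseconds AMQP expects, so an expired request is dropped by the broker rather than delivered late. Roughly (a sketch assuming an existing kombu producer and message):

    # Sketch only: a 30 s RPC timeout becomes a 30000 ms per-message TTL.
    timeout = 30
    producer.publish(msg, headers={'ttl': timeout * 1000})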
class DirectPublisher(Publisher):
@ -316,7 +326,7 @@ class DirectPublisher(Publisher):

options = {'durable': False,
'auto_delete': True,
'exclusive': True}
'exclusive': False}
options.update(kwargs)
super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
type='direct', **options)
@ -350,7 +360,7 @@ class FanoutPublisher(Publisher):
"""
options = {'durable': False,
'auto_delete': True,
'exclusive': True}
'exclusive': False}
options.update(kwargs)
super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
None, type='fanout', **options)
@ -387,6 +397,7 @@ class Connection(object):
def __init__(self, conf, server_params=None):
self.consumers = []
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf
self.max_retries = self.conf.rabbit_max_retries
# Try forever?
@ -469,7 +480,7 @@ class Connection(object):
LOG.info(_("Reconnecting to AMQP server on "
"%(hostname)s:%(port)d") % params)
try:
self.connection.close()
self.connection.release()
except self.connection_errors:
pass
# Setting this in case the next statement fails, though
@ -509,7 +520,7 @@ class Connection(object):
return
except (IOError, self.connection_errors) as e:
pass
except Exception, e:
except Exception as e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for
@ -550,10 +561,10 @@ class Connection(object):
while True:
try:
return method(*args, **kwargs)
except (self.connection_errors, socket.timeout, IOError), e:
except (self.connection_errors, socket.timeout, IOError) as e:
if error_callback:
error_callback(e)
except Exception, e:
except Exception as e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for
@ -573,12 +584,14 @@ class Connection(object):
def close(self):
"""Close/release this connection"""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.connection.release()
self.connection = None

def reset(self):
"""Reset a connection so it can be used again"""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.channel.close()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
@ -611,8 +624,8 @@ class Connection(object):

def _error_callback(exc):
if isinstance(exc, socket.timeout):
LOG.exception(_('Timed out waiting for RPC response: %s') %
str(exc))
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
@ -644,7 +657,12 @@ class Connection(object):
pass
self.consumer_thread = None

def publisher_send(self, cls, topic, msg, **kwargs):
def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()

def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
"""Send to a publisher based on the publisher class"""

def _error_callback(exc):
@ -654,7 +672,7 @@ class Connection(object):

def _publish():
publisher = cls(self.conf, self.channel, topic, **kwargs)
publisher.send(msg)
publisher.send(msg, timeout)

self.ensure(_error_callback, _publish)

@ -682,9 +700,9 @@ class Connection(object):
"""Send a 'direct' message"""
self.publisher_send(DirectPublisher, msg_id, msg)

def topic_send(self, topic, msg):
def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message"""
self.publisher_send(TopicPublisher, topic, msg)
self.publisher_send(TopicPublisher, topic, msg, timeout)

def fanout_send(self, topic, msg):
"""Send a 'fanout' message"""
@ -692,7 +710,7 @@ class Connection(object):

def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic"""
self.publisher_send(NotifyPublisher, topic, msg, **kwargs)
self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)

def consume(self, limit=None):
"""Consume from all queues/consumers"""
@ -719,6 +737,7 @@ class Connection(object):
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)

if fanout:
self.declare_fanout_consumer(topic, proxy_cb)
@ -730,8 +749,33 @ class Connection(object):
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
self.declare_topic_consumer(topic, proxy_cb, pool_name)

def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.

Exactly one member of a given pool will receive each message.

A message will be delivered to multiple pools, if more than
one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
)
self.proxy_callbacks.append(callback_wrapper)
self.declare_topic_consumer(
queue_name=pool_name,
topic=topic,
exchange_name=exchange_name,
callback=callback_wrapper,
)

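join_consumer_pool() layers work-queue semantics on a topic: every pool receives each message, but only one member per pool consumes it. A hypothetical registration (the callback body, pool name, and topic are placeholders):

    # Hypothetical usage; handle() and the names are placeholders.
    def on_notification(message):
        handle(message)  # exactly one member of the pool gets each message

    conn = create_connection(conf)
    conn.join_consumer_pool(callback=on_notification,
                            pool_name='notification_workers',
                            topic='notifications.info')
    conn.consume_in_thread()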
def create_connection(conf, new=True):
"""Create a connection"""
@ -782,11 +826,12 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg):
rpc_amqp.get_connection_pool(conf, Connection))


def notify(conf, context, topic, msg):
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
rpc_amqp.get_connection_pool(conf, Connection),
envelope)


def cleanup():
@ -22,24 +22,26 @@ import uuid

import eventlet
import greenlet
import qpid.messaging
import qpid.messaging.exceptions
from oslo.config import cfg

from reddwarf.openstack.common import cfg
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import importutils
from reddwarf.openstack.common import jsonutils
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.rpc import amqp as rpc_amqp
from reddwarf.openstack.common.rpc import common as rpc_common

qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")

LOG = logging.getLogger(__name__)

qpid_opts = [
cfg.StrOpt('qpid_hostname',
default='localhost',
help='Qpid broker hostname'),
cfg.StrOpt('qpid_port',
default='5672',
cfg.IntOpt('qpid_port',
default=5672,
help='Qpid broker port'),
cfg.ListOpt('qpid_hosts',
default=['$qpid_hostname:$qpid_port'],
@ -49,7 +51,8 @@ qpid_opts = [
help='Username for qpid connection'),
cfg.StrOpt('qpid_password',
default='',
help='Password for qpid connection'),
help='Password for qpid connection',
secret=True),
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
@ -124,7 +127,8 @@ class ConsumerBase(object):
"""Fetch the message and pass it to the callback object"""
message = self.receiver.fetch()
try:
self.callback(message.content)
msg = rpc_common.deserialize_msg(message.content)
self.callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
@ -274,9 +278,13 @@ class Connection(object):
pool = None

def __init__(self, conf, server_params=None):
if not qpid_messaging:
raise ImportError("Failed to import qpid.messaging")

self.session = None
self.consumers = {}
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf

if server_params and 'hostname' in server_params:
@ -301,7 +309,7 @@ class Connection(object):

def connection_create(self, broker):
# Create the connection - this does not open the connection
self.connection = qpid.messaging.Connection(broker)
self.connection = qpid_messaging.Connection(broker)

# Check if flags are set and if so set them for the connection
# before we call open
@ -312,7 +320,7 @@ class Connection(object):
# Reconnection is done by self.reconnect()
self.connection.reconnect = False
self.connection.heartbeat = self.conf.qpid_heartbeat
self.connection.protocol = self.conf.qpid_protocol
self.connection.transport = self.conf.qpid_protocol
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay

def _register_consumer(self, consumer):
@ -326,7 +334,7 @@ class Connection(object):
if self.connection.opened():
try:
self.connection.close()
except qpid.messaging.exceptions.ConnectionError:
except qpid_exceptions.ConnectionError:
pass

attempt = 0
@ -338,7 +346,7 @@ class Connection(object):
try:
self.connection_create(broker)
self.connection.open()
except qpid.messaging.exceptions.ConnectionError, e:
except qpid_exceptions.ConnectionError as e:
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
@ -365,8 +373,8 @@ class Connection(object):
while True:
try:
return method(*args, **kwargs)
except (qpid.messaging.exceptions.Empty,
qpid.messaging.exceptions.ConnectionError), e:
except (qpid_exceptions.Empty,
qpid_exceptions.ConnectionError), e:
if error_callback:
error_callback(e)
self.reconnect()
@ -374,12 +382,14 @@ class Connection(object):
def close(self):
"""Close/release this connection"""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.connection.close()
self.connection = None

def reset(self):
"""Reset a connection so it can be used again"""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.session.close()
self.session = self.connection.session()
self.consumers = {}
@ -404,9 +414,9 @@ class Connection(object):
"""Return an iterator that will consume from all queues/consumers"""

def _error_callback(exc):
if isinstance(exc, qpid.messaging.exceptions.Empty):
LOG.exception(_('Timed out waiting for RPC response: %s') %
str(exc))
if isinstance(exc, qpid_exceptions.Empty):
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
@ -434,6 +444,11 @@ class Connection(object):
pass
self.consumer_thread = None

def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()

def publisher_send(self, cls, topic, msg):
"""Send to a publisher based on the publisher class"""

@ -472,9 +487,20 @@ class Connection(object):
"""Send a 'direct' message"""
self.publisher_send(DirectPublisher, msg_id, msg)

def topic_send(self, topic, msg):
def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message"""
self.publisher_send(TopicPublisher, topic, msg)
#
# We want to create a message with attributes, e.g. a TTL. We
# don't really need to keep 'msg' in its JSON format any longer
# so let's create an actual qpid message here and get some
# value-add on the go.
#
# WARNING: Request timeout happens to be in the same units as
# qpid's TTL (seconds). If this changes in the future, then this
# will need to be altered accordingly.
#
qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
self.publisher_send(TopicPublisher, topic, qpid_message)

def fanout_send(self, topic, msg):
"""Send a 'fanout' message"""
@ -509,6 +535,7 @@ class Connection(object):
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)

if fanout:
consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
@ -524,6 +551,7 @@ class Connection(object):
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)

consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
name=pool_name)
@ -532,6 +560,34 @@ class Connection(object):

return consumer

def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.

Exactly one member of a given pool will receive each message.

A message will be delivered to multiple pools, if more than
one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
)
self.proxy_callbacks.append(callback_wrapper)

consumer = TopicConsumer(conf=self.conf,
session=self.session,
topic=topic,
callback=callback_wrapper,
name=pool_name,
exchange_name=exchange_name)

self._register_consumer(consumer)
return consumer


def create_connection(conf, new=True):
"""Create a connection"""
@ -582,10 +638,11 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg):
rpc_amqp.get_connection_pool(conf, Connection))


def notify(conf, context, topic, msg):
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
rpc_amqp.get_connection_pool(conf, Connection),
envelope)


def cleanup():
@ -14,23 +14,26 @@
# License for the specific language governing permissions and limitations
# under the License.

import os
import pprint
import re
import socket
import string
import sys
import types
import uuid

import eventlet
from eventlet.green import zmq
import greenlet
from oslo.config import cfg

from reddwarf.openstack.common import cfg
from reddwarf.openstack.common import excutils
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import importutils
from reddwarf.openstack.common import jsonutils
from reddwarf.openstack.common import processutils as utils
from reddwarf.openstack.common.rpc import common as rpc_common

zmq = importutils.try_import('eventlet.green.zmq')

# for convenience, are not modified.
pformat = pprint.pformat
@ -61,6 +64,10 @@ zmq_opts = [
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),

cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),

cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),

@ -70,9 +77,9 @@ zmq_opts = [
]


# These globals are defined in register_opts(conf),
# a mandatory initialization call
CONF = None
CONF = cfg.CONF
CONF.register_opts(zmq_opts)

ZMQ_CTX = None  # ZeroMQ Context, must be global.
matchmaker = None  # memoized matchmaker object

@ -84,10 +91,10 @@ def _serialize(data):
Error if a developer passes us bad data.
"""
try:
return str(jsonutils.dumps(data, ensure_ascii=True))
return jsonutils.dumps(data, ensure_ascii=True)
except TypeError:
LOG.error(_("JSON serialization failed."))
raise
with excutils.save_and_reraise_exception():
LOG.error(_("JSON serialization failed."))


def _deserialize(data):
@ -107,7 +114,7 @@ class ZmqSocket(object):
"""

def __init__(self, addr, zmq_type, bind=True, subscribe=None):
self.sock = ZMQ_CTX.socket(zmq_type)
self.sock = _get_ctxt().socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
@ -181,11 +188,15 @@ class ZmqSocket(object):
pass
self.subscriptions = []

# Linger -1 prevents lost/dropped messages
try:
self.sock.close(linger=-1)
# Default is to linger
self.sock.close()
except Exception:
pass
# While this is a bad thing to happen,
# it would be much worse if some of the code calling this
# were to fail. For now, lets log, and later evaluate
# if we can safely raise here.
LOG.error("ZeroMQ socket could not be closed.")
self.sock = None

def recv(self):
@ -202,12 +213,23 @@ class ZmqSocket(object):
class ZmqClient(object):
"""Client for ZMQ sockets."""

def __init__(self, addr, socket_type=zmq.PUSH, bind=False):
def __init__(self, addr, socket_type=None, bind=False):
if socket_type is None:
socket_type = zmq.PUSH
self.outq = ZmqSocket(addr, socket_type, bind=bind)

def cast(self, msg_id, topic, data):
self.outq.send([str(msg_id), str(topic), str('cast'),
_serialize(data)])
def cast(self, msg_id, topic, data, envelope=False):
msg_id = msg_id or 0

if not envelope:
self.outq.send(map(bytes,
(msg_id, topic, 'cast', _serialize(data))))
return

rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
self.outq.send(map(bytes,
(msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))

def close(self):
self.outq.close()
@ -250,16 +272,17 @@ class InternalContext(object):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', [])
data.setdefault('args', {})

try:
result = proxy.dispatch(
ctx, data['version'], data['method'], **data['args'])
ctx, data['version'], data['method'],
data.get('namespace'), **data['args'])
return ConsumerBase.normalize_reply(result, ctx.replies)
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException, e:
except rpc_common.ClientException as e:
LOG.debug(_("Expected exception during message handling (%s)") %
e._exc_info[1])
return {'exc':
@ -273,21 +296,26 @@ class InternalContext(object):
def reply(self, ctx, proxy,
msg_id=None, context=None, topic=None, msg=None):
"""Reply to a casted call."""
# Our real method is curried into msg['args']
# NOTE(ewindisch): context kwarg exists for Grizzly compat.
#                  this may be able to be removed earlier than
#                  'I' if ConsumerBase.process were refactored.
if type(msg) is list:
payload = msg[-1]
else:
payload = msg

child_ctx = RpcContext.unmarshal(msg[0])
response = ConsumerBase.normalize_reply(
self._get_response(child_ctx, proxy, topic, msg[1]),
self._get_response(ctx, proxy, topic, payload),
ctx.replies)

LOG.debug(_("Sending reply"))
cast(CONF, ctx, topic, {
_multi_send(_cast, ctx, topic, {
'method': '-process_reply',
'args': {
'msg_id': msg_id,
'msg_id': msg_id,  # Include for Folsom compat.
'response': response
}
})
}, _msg_id=msg_id)


class ConsumerBase(object):
@ -306,24 +334,25 @@ class ConsumerBase(object):
else:
return [result]

def process(self, style, target, proxy, ctx, data):
def process(self, proxy, ctx, data):
data.setdefault('version', None)
data.setdefault('args', {})

# Method starting with - are
# processed internally. (non-valid method name)
method = data['method']
method = data.get('method')
if not method:
LOG.error(_("RPC message did not include method."))
return

# Internal method
# uses internal context for safety.
if data['method'][0] == '-':
# For reply / process_reply
method = method[1:]
if method == 'reply':
self.private_ctx.reply(ctx, proxy, **data['args'])
if method == '-reply':
self.private_ctx.reply(ctx, proxy, **data['args'])
return

data.setdefault('version', None)
data.setdefault('args', [])
proxy.dispatch(ctx, data['version'],
data['method'], **data['args'])
data['method'], data.get('namespace'), **data['args'])

class ZmqBaseReactor(ConsumerBase):
@ -409,53 +438,126 @@ class ZmqProxy(ZmqBaseReactor):

def __init__(self, conf):
super(ZmqProxy, self).__init__(conf)
pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))

self.topic_proxy = {}
ipc_dir = CONF.rpc_zmq_ipc_dir

self.topic_proxy['zmq_replies'] = \
ZmqSocket("ipc://%s/zmq_topic_zmq_replies" % (ipc_dir, ),
zmq.PUB, bind=True)
self.sockets.append(self.topic_proxy['zmq_replies'])

def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir

#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
msg_id, topic, style, in_msg = data
topic = topic.split('.', 1)[0]
topic = data[1]

LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))

# Handle zmq_replies magic
if topic.startswith('fanout~'):
sock_type = zmq.PUB
topic = topic.split('.', 1)[0]
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
inside = _deserialize(in_msg)
msg_id = inside[-1]['args']['msg_id']
response = inside[-1]['args']['response']
LOG.debug(_("->response->%s"), response)
data = [str(msg_id), _serialize(response)]
else:
sock_type = zmq.PUSH

if not topic in self.topic_proxy:
outq = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic),
sock_type, bind=True)
self.topic_proxy[topic] = outq
self.sockets.append(outq)
LOG.info(_("Created topic proxy: %s"), topic)
if topic not in self.topic_proxy:
def publisher(waiter):
LOG.info(_("Creating proxy for topic: %s"), topic)

# It takes some time for a pub socket to open,
# before we can have any faith in doing a send() to it.
if sock_type == zmq.PUB:
eventlet.sleep(.5)
try:
# The topic is received over the network,
# don't trust this input.
if self.badchars.search(topic) is not None:
emsg = _("Topic contained dangerous characters.")
LOG.warn(emsg)
raise RPCException(emsg)

LOG.debug(_("ROUTER RELAY-OUT START %(data)s") % {'data': data})
self.topic_proxy[topic].send(data)
LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % {'data': data})
out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
(ipc_dir, topic),
sock_type, bind=True)
except RPCException:
waiter.send_exception(*sys.exc_info())
return

self.topic_proxy[topic] = eventlet.queue.LightQueue(
CONF.rpc_zmq_topic_backlog)
self.sockets.append(out_sock)

# It takes some time for a pub socket to open,
# before we can have any faith in doing a send() to it.
if sock_type == zmq.PUB:
eventlet.sleep(.5)

waiter.send(True)

while(True):
data = self.topic_proxy[topic].get()
out_sock.send(data)
LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
{'data': data})

wait_sock_creation = eventlet.event.Event()
eventlet.spawn(publisher, wait_sock_creation)

try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_("Topic socket file creation failed."))
return

try:
self.topic_proxy[topic].put_nowait(data)
LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
{'data': data})
except eventlet.queue.Full:
LOG.error(_("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})

def consume_in_thread(self):
"""Runs the ZmqProxy service"""
ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None)

if not os.path.isdir(ipc_dir):
try:
utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
ipc_dir, run_as_root=True)
utils.execute('chmod', '750', ipc_dir, run_as_root=True)
except utils.ProcessExecutionError:
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create IPC directory %s") %
(ipc_dir, ))

try:
self.register(consumption_proxy,
consume_in,
zmq.PULL,
out_bind=True)
except zmq.ZMQError:
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))

super(ZmqProxy, self).consume_in_thread()


def unflatten_envelope(packenv):
"""Unflattens the RPC envelope.
Takes a list and returns a dictionary.
i.e. [1,2,3,4] => {1: 2, 3: 4}
"""
i = iter(packenv)
h = {}
try:
while True:
k = i.next()
h[k] = i.next()
except StopIteration:
return h

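unflatten_envelope() is the inverse of the flattening in ZmqClient.cast(), where the envelope dict is reduced to an alternating key/value sequence so it can ride along as extra message frames. An illustrative round trip (Python 2, matching the i.next() calls above):

    # Illustrative round trip (not part of the commit).
    envelope = {'oslo.version': '2.0', 'oslo.message': '{"method": "ping"}'}
    flat = reduce(lambda x, y: x + y, envelope.items())  # as in ZmqClient.cast
    # flat is now an alternating key/value tuple, e.g.
    # ('oslo.version', '2.0', 'oslo.message', '{"method": "ping"}')
    assert unflatten_envelope(flat) == envelope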
class ZmqReactor(ZmqBaseReactor):
|
||||
@ -478,38 +580,53 @@ class ZmqReactor(ZmqBaseReactor):
|
||||
self.mapping[sock].send(data)
|
||||
return
|
||||
|
||||
msg_id, topic, style, in_msg = data
|
||||
|
||||
ctx, request = _deserialize(in_msg)
|
||||
ctx = RpcContext.unmarshal(ctx)
|
||||
|
||||
proxy = self.proxies[sock]
|
||||
|
||||
self.pool.spawn_n(self.process, style, topic,
|
||||
proxy, ctx, request)
|
||||
if data[2] == 'cast': # Legacy protocol
|
||||
packenv = data[3]
|
||||
|
||||
ctx, msg = _deserialize(packenv)
|
||||
request = rpc_common.deserialize_msg(msg)
|
||||
ctx = RpcContext.unmarshal(ctx)
|
||||
elif data[2] == 'impl_zmq_v2':
|
||||
packenv = data[4:]
|
||||
|
||||
msg = unflatten_envelope(packenv)
|
||||
request = rpc_common.deserialize_msg(msg)
|
||||
|
||||
# Unmarshal only after verifying the message.
|
||||
ctx = RpcContext.unmarshal(data[3])
|
||||
else:
|
||||
LOG.error(_("ZMQ Envelope version unsupported or unknown."))
|
||||
return
|
||||
|
||||
self.pool.spawn_n(self.process, proxy, ctx, request)
|
||||
|
||||
|
||||
class Connection(rpc_common.Connection):
|
||||
"""Manages connections and threads."""
|
||||
|
||||
def __init__(self, conf):
|
||||
self.topics = []
|
||||
self.reactor = ZmqReactor(conf)
|
||||
|
||||
def create_consumer(self, topic, proxy, fanout=False):
|
||||
# Only consume on the base topic name.
|
||||
topic = topic.split('.', 1)[0]
|
||||
|
||||
LOG.info(_("Create Consumer for topic (%(topic)s)") %
|
||||
{'topic': topic})
|
||||
# Register with matchmaker.
|
||||
_get_matchmaker().register(topic, CONF.rpc_zmq_host)
|
||||
|
||||
# Subscription scenarios
|
||||
if fanout:
|
||||
subscribe = ('', fanout)[type(fanout) == str]
|
||||
sock_type = zmq.SUB
|
||||
topic = 'fanout~' + topic
|
||||
subscribe = ('', fanout)[type(fanout) == str]
|
||||
topic = 'fanout~' + topic.split('.', 1)[0]
|
||||
else:
|
||||
sock_type = zmq.PULL
|
||||
subscribe = None
|
||||
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
|
||||
|
||||
if topic in self.topics:
|
||||
LOG.info(_("Skipping topic registration. Already registered."))
|
||||
return
|
||||
|
||||
# Receive messages from (local) proxy
|
||||
inaddr = "ipc://%s/zmq_topic_%s" % \
|
||||
@ -520,18 +637,26 @@ class Connection(rpc_common.Connection):
|
||||
|
||||
self.reactor.register(proxy, inaddr, sock_type,
|
||||
subscribe=subscribe, in_bind=False)
|
||||
self.topics.append(topic)
|
||||
|
||||
def close(self):
|
||||
_get_matchmaker().stop_heartbeat()
|
||||
for topic in self.topics:
|
||||
_get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
|
||||
|
||||
self.reactor.close()
|
||||
self.topics = []
|
||||
|
||||
def wait(self):
|
||||
self.reactor.wait()
|
||||
|
||||
def consume_in_thread(self):
|
||||
_get_matchmaker().start_heartbeat()
|
||||
self.reactor.consume_in_thread()
|
||||
|
||||
|
||||
def _cast(addr, context, msg_id, topic, msg, timeout=None):
|
||||
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
|
||||
_msg_id=None):
|
||||
timeout_cast = timeout or CONF.rpc_cast_timeout
|
||||
payload = [RpcContext.marshal(context), msg]
|
||||
|
||||
@ -540,7 +665,7 @@ def _cast(addr, context, msg_id, topic, msg, timeout=None):
|
||||
conn = ZmqClient(addr)
|
||||
|
||||
# assumes cast can't return an exception
|
||||
conn.cast(msg_id, topic, payload)
|
||||
conn.cast(_msg_id, topic, payload, envelope)
|
||||
except zmq.ZMQError:
|
||||
raise RPCException("Cast failed. ZMQ Socket Exception")
|
||||
finally:
|
||||
@ -548,7 +673,8 @@ def _cast(addr, context, msg_id, topic, msg, timeout=None):
|
||||
conn.close()
|
||||
|
||||
|
||||
def _call(addr, context, msg_id, topic, msg, timeout=None):
|
||||
def _call(addr, context, topic, msg, timeout=None,
|
||||
envelope=False):
|
||||
# timeout_response is how long we wait for a response
|
||||
timeout = timeout or CONF.rpc_response_timeout
|
||||
|
||||
@ -565,8 +691,8 @@ def _call(addr, context, msg_id, topic, msg, timeout=None):
|
||||
'method': '-reply',
|
||||
'args': {
|
||||
'msg_id': msg_id,
|
||||
'context': mcontext,
|
||||
'topic': reply_topic,
|
||||
# TODO(ewindisch): safe to remove mcontext in I.
|
||||
'msg': [mcontext, msg]
|
||||
}
|
||||
}
|
||||
@ -578,22 +704,36 @@ def _call(addr, context, msg_id, topic, msg, timeout=None):
|
||||
with Timeout(timeout, exception=rpc_common.Timeout):
|
||||
try:
|
||||
msg_waiter = ZmqSocket(
|
||||
"ipc://%s/zmq_topic_zmq_replies" % CONF.rpc_zmq_ipc_dir,
|
||||
"ipc://%s/zmq_topic_zmq_replies.%s" %
|
||||
(CONF.rpc_zmq_ipc_dir,
|
||||
CONF.rpc_zmq_host),
|
||||
zmq.SUB, subscribe=msg_id, bind=False
|
||||
)
|
||||
|
||||
LOG.debug(_("Sending cast"))
|
||||
_cast(addr, context, msg_id, topic, payload)
|
||||
_cast(addr, context, topic, payload, envelope)
|
||||
|
||||
LOG.debug(_("Cast sent; Waiting reply"))
|
||||
# Blocks until receives reply
|
||||
msg = msg_waiter.recv()
|
||||
LOG.debug(_("Received message: %s"), msg)
|
||||
LOG.debug(_("Unpacking response"))
|
||||
responses = _deserialize(msg[-1])
|
||||
|
||||
if msg[2] == 'cast': # Legacy version
|
||||
raw_msg = _deserialize(msg[-1])[-1]
|
||||
elif msg[2] == 'impl_zmq_v2':
|
||||
rpc_envelope = unflatten_envelope(msg[4:])
|
||||
raw_msg = rpc_common.deserialize_msg(rpc_envelope)
|
||||
else:
|
||||
raise rpc_common.UnsupportedRpcEnvelopeVersion(
|
||||
_("Unsupported or unknown ZMQ envelope returned."))
|
||||
|
||||
responses = raw_msg['args']['response']
|
||||
# ZMQError trumps the Timeout error.
|
||||
except zmq.ZMQError:
|
||||
raise RPCException("ZMQ Socket Error")
|
||||
except (IndexError, KeyError):
|
||||
raise RPCException(_("RPC Message Invalid."))
|
||||
finally:
|
||||
if 'msg_waiter' in vars():
|
||||
msg_waiter.close()
|
||||
@ -609,7 +749,8 @@ def _call(addr, context, msg_id, topic, msg, timeout=None):
    return responses[-1]


def _multi_send(method, context, topic, msg, timeout=None):
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """
    Wraps the sending of messages,
    dispatches to the matchmaker and sends
@ -618,7 +759,7 @@ def _multi_send(method, context, topic, msg, timeout=None):
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = matchmaker.queues(topic)
    queues = _get_matchmaker().queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
@ -626,7 +767,7 @@
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout, "No match from matchmaker."
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
@ -635,9 +776,11 @@

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, _topic, msg, timeout)
                             _topic, msg, timeout, envelope,
                             _msg_id)
            return
        return method(_addr, context, _topic, _topic, msg, timeout)
        return method(_addr, context, _topic, msg, timeout,
                      envelope)


def create_connection(conf, new=True):
@ -667,7 +810,7 @@ def fanout_cast(conf, context, topic, msg, **kwargs):
    _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)


def notify(conf, context, topic, msg, **kwargs):
def notify(conf, context, topic, msg, envelope):
    """
    Send notification event.
    Notifications are sent to topic-priority.
@ -675,51 +818,34 @@
    """
    # NOTE(ewindisch): dot-priority in rpc notifier does not
    # work with our assumptions.
    topic.replace('.', '-')
    cast(conf, context, topic, msg, **kwargs)
    topic = topic.replace('.', '-')
    cast(conf, context, topic, msg, envelope=envelope)
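Worth noting: the old notify() called topic.replace('.', '-') without binding the result. Python strings are immutable, so the dot-to-dash rewrite silently never happened; assigning the return value, as the new line does, is the actual fix:

    topic = 'notifications.info'
    topic.replace('.', '-')          # returns a new string; topic is unchanged
    topic = topic.replace('.', '-')  # topic is now 'notifications-info'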
def cleanup():
    """Clean up resources in use by implementation."""
    global ZMQ_CTX
    global matchmaker
    matchmaker = None
    ZMQ_CTX.term()
    if ZMQ_CTX:
        ZMQ_CTX.term()
    ZMQ_CTX = None


def register_opts(conf):
    """Registration of options for this driver."""
    #NOTE(ewindisch): ZMQ_CTX and matchmaker
    # are initialized here as this is as good
    # an initialization method as any.

    # We memoize through these globals
    global ZMQ_CTX
    global matchmaker
    global CONF
    matchmaker = None

    if not CONF:
        conf.register_opts(zmq_opts)
        CONF = conf
    # Don't re-set, if this method is called twice.

def _get_ctxt():
    if not zmq:
        raise ImportError("Failed to import eventlet.green.zmq")

    global ZMQ_CTX
    if not ZMQ_CTX:
        ZMQ_CTX = zmq.Context(conf.rpc_zmq_contexts)
        ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
    return ZMQ_CTX


def _get_matchmaker(*args, **kwargs):
    global matchmaker
    if not matchmaker:
        # rpc_zmq_matchmaker should be set to a 'module.Class'
        mm_path = conf.rpc_zmq_matchmaker.split('.')
        mm_module = '.'.join(mm_path[:-1])
        mm_class = mm_path[-1]

        # Only initialize a class.
        if mm_path[-1][0] not in string.ascii_uppercase:
            LOG.error(_("Matchmaker could not be loaded.\n"
                        "rpc_zmq_matchmaker is not a class."))
            raise RPCException(_("Error loading Matchmaker."))

        mm_impl = importutils.import_module(mm_module)
        mm_constructor = getattr(mm_impl, mm_class)
        matchmaker = mm_constructor()


register_opts(cfg.CONF)
        matchmaker = importutils.import_object(
            CONF.rpc_zmq_matchmaker, *args, **kwargs)
    return matchmaker
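_get_matchmaker now defers the whole 'module.Class' dance to importutils.import_object, which imports the dotted path and instantiates the class in one call. Roughly, and assuming oslo-incubator's importutils semantics, it amounts to:

    # Sketch of what importutils.import_object does under the hood
    # (simplified; error handling omitted).
    import sys

    def import_object(import_str, *args, **kwargs):
        mod_str, _sep, class_str = import_str.rpartition('.')
        __import__(mod_str)                             # import the module
        cls = getattr(sys.modules[mod_str], class_str)  # look up the class
        return cls(*args, **kwargs)                     # instantiate it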
@ -22,7 +22,9 @@ import contextlib
import itertools
import json

from reddwarf.openstack.common import cfg
import eventlet
from oslo.config import cfg

from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import log as logging

@ -32,6 +34,12 @@ matchmaker_opts = [
    cfg.StrOpt('matchmaker_ringfile',
               default='/etc/nova/matchmaker_ring.json',
               help='Matchmaker ring file (JSON)'),
    cfg.IntOpt('matchmaker_heartbeat_freq',
               default=300,
               help='Heartbeat frequency'),
    cfg.IntOpt('matchmaker_heartbeat_ttl',
               default=600,
               help='Heartbeat time-to-live.'),
]

CONF = cfg.CONF
@ -69,12 +77,73 @@ class Binding(object):


class MatchMakerBase(object):
    """Match Maker Base Class."""

    """
    Match Maker Base Class.
    Build off HeartbeatMatchMakerBase if building a
    heartbeat-capable MatchMaker.
    """
    def __init__(self):
        # Array of tuples. Index [2] toggles negation, [3] is last-if-true
        self.bindings = []

        self.no_heartbeat_msg = _('Matchmaker does not implement '
                                  'registration or heartbeat.')

    def register(self, key, host):
        """
        Register a host on a backend.
        Heartbeats, if applicable, may keepalive registration.
        """
        pass

    def ack_alive(self, key, host):
        """
        Acknowledge that a key.host is alive.
        Used internally for updating heartbeats,
        but may also be used publicly to acknowledge
        a system is alive (i.e. rpc message successfully
        sent to host)
        """
        pass

    def is_alive(self, topic, host):
        """
        Checks if a host is alive.
        """
        pass

    def expire(self, topic, host):
        """
        Explicitly expire a host's registration.
        """
        pass

    def send_heartbeats(self):
        """
        Send all heartbeats.
        Use start_heartbeat to spawn a heartbeat greenthread,
        which loops this method.
        """
        pass

    def unregister(self, key, host):
        """
        Unregister a topic.
        """
        pass

    def start_heartbeat(self):
        """
        Spawn heartbeat greenthread.
        """
        pass

    def stop_heartbeat(self):
        """
        Destroys the heartbeat greenthread.
        """
        pass

    def add_binding(self, binding, rule, last=True):
        self.bindings.append((binding, rule, False, last))

@ -98,6 +167,103 @@ class MatchMakerBase(object):
        return workers


class HeartbeatMatchMakerBase(MatchMakerBase):
    """
    Base for a heart-beat capable MatchMaker.
    Provides common methods for registering,
    unregistering, and maintaining heartbeats.
    """
    def __init__(self):
        self.hosts = set()
        self._heart = None
        self.host_topic = {}

        super(HeartbeatMatchMakerBase, self).__init__()

    def send_heartbeats(self):
        """
        Send all heartbeats.
        Use start_heartbeat to spawn a heartbeat greenthread,
        which loops this method.
        """
        for key, host in self.host_topic:
            self.ack_alive(key, host)

    def ack_alive(self, key, host):
        """
        Acknowledge that a host.topic is alive.
        Used internally for updating heartbeats,
        but may also be used publicly to acknowledge
        a system is alive (i.e. rpc message successfully
        sent to host)
        """
        raise NotImplementedError("Must implement ack_alive")

    def backend_register(self, key, host):
        """
        Implements registration logic.
        Called by register(self, key, host)
        """
        raise NotImplementedError("Must implement backend_register")

    def backend_unregister(self, key, key_host):
        """
        Implements de-registration logic.
        Called by unregister(self, key, host)
        """
        raise NotImplementedError("Must implement backend_unregister")

    def register(self, key, host):
        """
        Register a host on a backend.
        Heartbeats, if applicable, may keepalive registration.
        """
        self.hosts.add(host)
        self.host_topic[(key, host)] = host
        key_host = '.'.join((key, host))

        self.backend_register(key, key_host)

        self.ack_alive(key, host)

    def unregister(self, key, host):
        """
        Unregister a topic.
        """
        if (key, host) in self.host_topic:
            del self.host_topic[(key, host)]

        self.hosts.discard(host)
        self.backend_unregister(key, '.'.join((key, host)))

        LOG.info(_("Matchmaker unregistered: %s, %s" % (key, host)))

    def start_heartbeat(self):
        """
        Implementation of MatchMakerBase.start_heartbeat
        Launches greenthread looping send_heartbeats(),
        yielding for CONF.matchmaker_heartbeat_freq seconds
        between iterations.
        """
        if len(self.hosts) == 0:
            raise MatchMakerException(
                _("Register before starting heartbeat."))

        def do_heartbeat():
            while True:
                self.send_heartbeats()
                eventlet.sleep(CONF.matchmaker_heartbeat_freq)

        self._heart = eventlet.spawn(do_heartbeat)

    def stop_heartbeat(self):
        """
        Destroys the heartbeat greenthread.
        """
        if self._heart:
            self._heart.kill()


class DirectBinding(Binding):
    """
    Specifies a host in the key via a '.' character
@ -201,24 +367,25 @@ class FanoutRingExchange(RingExchange):

class LocalhostExchange(Exchange):
    """Exchange where all direct topics are local."""
    def __init__(self):
    def __init__(self, host='localhost'):
        self.host = host
        super(Exchange, self).__init__()

    def run(self, key):
        return [(key.split('.')[0] + '.localhost', 'localhost')]
        return [('.'.join((key.split('.')[0], self.host)), self.host)]


class DirectExchange(Exchange):
    """
    Exchange where all topic keys are split, sending to second half.
    i.e. "compute.host" sends a message to "compute" running on "host"
    i.e. "compute.host" sends a message to "compute.host" running on "host"
    """
    def __init__(self):
        super(Exchange, self).__init__()

    def run(self, key):
        b, e = key.split('.', 1)
        return [(b, e)]
        e = key.split('.', 1)[1]
        return [(key, e)]


class MatchMakerRing(MatchMakerBase):
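The two exchange changes are easiest to see side by side: LocalhostExchange now parameterizes the host, and DirectExchange keeps the full key instead of truncating it. Illustrative calls (the outputs follow directly from the run() bodies above):

    LocalhostExchange('node1').run('compute.anything')
    # -> [('compute.node1', 'node1')]

    DirectExchange().run('compute.host1')
    # old: [('compute', 'host1')]
    # new: [('compute.host1', 'host1')]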
@ -237,11 +404,11 @@ class MatchMakerLocalhost(MatchMakerBase):
    Match Maker where all bare topics resolve to localhost.
    Useful for testing.
    """
    def __init__(self):
    def __init__(self, host='localhost'):
        super(MatchMakerLocalhost, self).__init__()
        self.add_binding(FanoutBinding(), LocalhostExchange())
        self.add_binding(FanoutBinding(), LocalhostExchange(host))
        self.add_binding(DirectBinding(), DirectExchange())
        self.add_binding(TopicBinding(), LocalhostExchange())
        self.add_binding(TopicBinding(), LocalhostExchange(host))


class MatchMakerStub(MatchMakerBase):
reddwarf/openstack/common/rpc/matchmaker_redis.py (new file, 149 lines)
@ -0,0 +1,149 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""

from oslo.config import cfg

from reddwarf.openstack.common import importutils
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.rpc import matchmaker as mm_common

redis = importutils.try_import('redis')


matchmaker_redis_opts = [
    cfg.StrOpt('host',
               default='127.0.0.1',
               help='Host to locate redis'),
    cfg.IntOpt('port',
               default=6379,
               help='Use this port to connect to redis host.'),
    cfg.StrOpt('password',
               default=None,
               help='Password for Redis server. (optional)'),
]

CONF = cfg.CONF
opt_group = cfg.OptGroup(name='matchmaker_redis',
                         title='Options for Redis-based MatchMaker')
CONF.register_group(opt_group)
CONF.register_opts(matchmaker_redis_opts, opt_group)
LOG = logging.getLogger(__name__)


class RedisExchange(mm_common.Exchange):
    def __init__(self, matchmaker):
        self.matchmaker = matchmaker
        self.redis = matchmaker.redis
        super(RedisExchange, self).__init__()


class RedisTopicExchange(RedisExchange):
    """
    Exchange where all topic keys are split, sending to second half.
    i.e. "compute.host" sends a message to "compute" running on "host"
    """
    def run(self, topic):
        while True:
            member_name = self.redis.srandmember(topic)

            if not member_name:
                # If this happens, there are no
                # longer any members.
                break

            if not self.matchmaker.is_alive(topic, member_name):
                continue

            host = member_name.split('.', 1)[1]
            return [(member_name, host)]
        return []


class RedisFanoutExchange(RedisExchange):
    """
    Return a list of all hosts.
    """
    def run(self, topic):
        topic = topic.split('~', 1)[1]
        hosts = self.redis.smembers(topic)
        good_hosts = filter(
            lambda host: self.matchmaker.is_alive(topic, host), hosts)

        return [(x, x.split('.', 1)[1]) for x in good_hosts]


class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
    """
    MatchMaker registering and looking-up hosts with a Redis server.
    """
    def __init__(self):
        super(MatchMakerRedis, self).__init__()

        if not redis:
            raise ImportError("Failed to import module redis.")

        self.redis = redis.StrictRedis(
            host=CONF.matchmaker_redis.host,
            port=CONF.matchmaker_redis.port,
            password=CONF.matchmaker_redis.password)

        self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self))
        self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange())
        self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self))

    def ack_alive(self, key, host):
        topic = "%s.%s" % (key, host)
        if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl):
            # If we could not update the expiration, the key
            # might have been pruned. Re-register, creating a new
            # key in Redis.
            self.register(self.topic_host[host], host)

    def is_alive(self, topic, host):
        if self.redis.ttl(host) == -1:
            self.expire(topic, host)
            return False
        return True

    def expire(self, topic, host):
        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.delete(host)
            pipe.srem(topic, host)
            pipe.execute()

    def backend_register(self, key, key_host):
        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.sadd(key, key_host)

            # No value is needed, we just
            # care if it exists. Sets aren't viable
            # because only keys can expire.
            pipe.set(key_host, '')

            pipe.execute()

    def backend_unregister(self, key, key_host):
        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.srem(key, key_host)
            pipe.delete(key_host)
            pipe.execute()
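For reference, the Redis state this class maintains is one set per topic whose members are 'topic.host' strings, plus one plain key per 'topic.host' whose TTL serves as the heartbeat. A sketch of what a registration leaves behind (values illustrative):

    r = redis.StrictRedis(host='127.0.0.1', port=6379)
    # After MatchMakerRedis().register('compute', 'host1'):
    r.smembers('compute')    # set(['compute.host1'])
    r.ttl('compute.host1')   # seconds until the host is considered dead;
                             # refreshed by each ack_alive heartbeat

One caveat worth flagging: the fallback in ack_alive references self.topic_host, but the base class only defines host_topic, so that re-registration path would raise AttributeError as written.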
@ -58,9 +58,13 @@ class RpcProxy(object):
        """Return the topic to use for a message."""
        return topic if topic else self.topic

    @staticmethod
    def make_namespaced_msg(method, namespace, **kwargs):
        return {'method': method, 'namespace': namespace, 'args': kwargs}

    @staticmethod
    def make_msg(method, **kwargs):
        return {'method': method, 'args': kwargs}
        return RpcProxy.make_namespaced_msg(method, None, **kwargs)

    def call(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.call() a remote method.
@ -68,16 +72,21 @@ class RpcProxy(object):
        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response. If no timeout is specified, a default timeout will be
               used that is usually sufficient.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: The return value from the remote method.
        """
        self._set_version(msg, version)
        return rpc.call(context, self._get_topic(topic), msg, timeout)
        real_topic = self._get_topic(topic)
        try:
            return rpc.call(context, real_topic, msg, timeout)
        except rpc.common.Timeout as exc:
            raise rpc.common.Timeout(
                exc.info, real_topic, msg.get('method'))

    def multicall(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.multicall() a remote method.
@ -85,17 +94,22 @@ class RpcProxy(object):
        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response. If no timeout is specified, a default timeout will be
               used that is usually sufficient.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: An iterator that lets you process each of the returned values
               from the remote method as they arrive.
        """
        self._set_version(msg, version)
        return rpc.multicall(context, self._get_topic(topic), msg, timeout)
        real_topic = self._get_topic(topic)
        try:
            return rpc.multicall(context, real_topic, msg, timeout)
        except rpc.common.Timeout as exc:
            raise rpc.common.Timeout(
                exc.info, real_topic, msg.get('method'))

    def cast(self, context, msg, topic=None, version=None):
        """rpc.cast() a remote method.
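Since make_msg now routes through make_namespaced_msg with a None namespace, every message dict gains a 'namespace' key; for example:

    RpcProxy.make_msg('create_database', name='db1')
    # old: {'method': 'create_database', 'args': {'name': 'db1'}}
    # new: {'method': 'create_database', 'namespace': None,
    #       'args': {'name': 'db1'}}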
reddwarf/openstack/common/rpc/zmq_receiver.py (new file, 41 lines, executable)
@ -0,0 +1,41 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import eventlet
eventlet.monkey_patch()

import contextlib
import sys

from oslo.config import cfg

from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import rpc
from reddwarf.openstack.common.rpc import impl_zmq

CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)


def main():
    CONF(sys.argv[1:], project='oslo')
    logging.setup("oslo")

    with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
        reactor.consume_in_thread()
        reactor.wait()
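This receiver is the per-host ZeroMQ proxy process; one instance runs on every node that sends or consumes RPC over the zmq driver. A hypothetical invocation (the flag names are among the zmq options registered above; the values are illustrative):

    # python reddwarf/openstack/common/rpc/zmq_receiver.py \
    #     --rpc_zmq_host=node1 --rpc_zmq_ipc_dir=/var/run/openstack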
@ -27,17 +27,17 @@ import sys
import time

import eventlet
import extras
import logging as std_logging
from oslo.config import cfg

from reddwarf.openstack.common import cfg
from reddwarf.openstack.common import eventlet_backdoor
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import importutils
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common import threadgroup


rpc = extras.try_import('reddwarf.openstack.common.rpc')
rpc = importutils.try_import('reddwarf.openstack.common.rpc')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

@ -51,7 +51,7 @@ class Launcher(object):
    :returns: None

    """
    self._services = threadgroup.ThreadGroup('launcher')
    self._services = threadgroup.ThreadGroup()
    eventlet_backdoor.initialize_if_enabled()

    @staticmethod
@ -243,7 +243,10 @@ class ProcessLauncher(object):

    def _wait_child(self):
        try:
            pid, status = os.wait()
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
@ -275,6 +278,10 @@ class ProcessLauncher(object):
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(.01)
                continue

            while self.running and len(wrap.children) < wrap.workers:
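The _wait_child change swaps the blocking os.wait() for os.waitpid(0, os.WNOHANG), which returns (0, 0) when children exist but none has exited; that is what the `if not pid` check catches, and the short sleep in the caller keeps the poll loop from spinning. A standalone sketch of the same pattern (the helper name is hypothetical, not part of the diff):

    import errno
    import os

    def reap_one_child():
        """Reap at most one exited child without blocking (sketch)."""
        try:
            pid, status = os.waitpid(0, os.WNOHANG)  # 0 = any child of ours
        except OSError as exc:
            if exc.errno != errno.ECHILD:  # ECHILD: no children at all
                raise
            return None
        return (pid, status) if pid else None  # pid == 0: none exited yet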
@ -303,7 +310,7 @@ class Service(object):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        self.tg = threadgroup.ThreadGroup('service', threads)
        self.tg = threadgroup.ThreadGroup(threads)

    def start(self):
        pass
@ -38,8 +38,7 @@ class Thread(object):
    :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
    it is done, so that it can be removed from the threads list.
    """
    def __init__(self, name, thread, group):
        self.name = name
    def __init__(self, thread, group):
        self.thread = thread
        self.thread.link(_thread_done, group=group, thread=self)

@ -57,22 +56,21 @@ class ThreadGroup(object):
    when need be).
    * provide an easy API to add timers.
    """
    def __init__(self, name, thread_pool_size=10):
        self.name = name
    def __init__(self, thread_pool_size=10):
        self.pool = greenpool.GreenPool(thread_pool_size)
        self.threads = []
        self.timers = []

    def add_timer(self, interval, callback, initial_delay=None,
                  *args, **kwargs):
        pulse = loopingcall.LoopingCall(callback, *args, **kwargs)
        pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
        pulse.start(interval=interval,
                    initial_delay=initial_delay)
        self.timers.append(pulse)

    def add_thread(self, callback, *args, **kwargs):
        gt = self.pool.spawn(callback, *args, **kwargs)
        th = Thread(callback.__name__, gt, self)
        th = Thread(gt, self)
        self.threads.append(th)

    def thread_done(self, thread):
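With the name parameters gone, construction is positional on pool size only. A usage sketch (the callables worker_main and report_state are hypothetical):

    from reddwarf.openstack.common import threadgroup

    tg = threadgroup.ThreadGroup()   # was: ThreadGroup('service')
    tg.add_thread(worker_main)       # spawns worker_main in the greenpool
    tg.add_timer(60, report_state)   # runs report_state every 60s via
                                     # FixedIntervalLoopingCall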
@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -25,18 +25,22 @@ import datetime
import iso8601


TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND


def isotime(at=None):
def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format"""
    if not at:
        at = utcnow()
    str = at.strftime(TIME_FORMAT)
    st = at.strftime(_ISO8601_TIME_FORMAT
                     if not subsecond
                     else _ISO8601_TIME_FORMAT_SUBSECOND)
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    str += ('Z' if tz == 'UTC' else tz)
    return str
    st += ('Z' if tz == 'UTC' else tz)
    return st
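The renamed st variable also stops shadowing the built-in str; behaviour-wise, the only visible change is the new subsecond flag. For a naive UTC datetime:

    import datetime

    at = datetime.datetime(2013, 5, 2, 12, 0, 0, 123456)
    isotime(at)                  # '2013-05-02T12:00:00Z'
    isotime(at, subsecond=True)  # '2013-05-02T12:00:00.123456Z'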
def parse_isotime(timestr):
@ -71,11 +75,15 @@ def normalize_time(timestamp):

def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    if isinstance(before, basestring):
        before = parse_strtime(before).replace(tzinfo=None)
    return utcnow() - before > datetime.timedelta(seconds=seconds)


def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    if isinstance(after, basestring):
        after = parse_strtime(after).replace(tzinfo=None)
    return after - utcnow() > datetime.timedelta(seconds=seconds)


@ -94,6 +102,11 @@ def utcnow():
    return datetime.datetime.utcnow()


def iso8601_from_timestamp(timestamp):
    """Returns an ISO 8601 formatted date from a POSIX timestamp."""
    return isotime(datetime.datetime.utcfromtimestamp(timestamp))


utcnow.override_time = None


@ -158,3 +171,16 @@ def delta_seconds(before, after):
    except AttributeError:
        return ((delta.days * 24 * 3600) + delta.seconds +
                float(delta.microseconds) / (10 ** 6))


def is_soon(dt, window):
    """
    Determines whether a time is going to happen in the next window seconds.

    :params dt: the time
    :params window: minimum seconds to remain to consider the time not soon

    :return: True if expiration is within the given duration
    """
    soon = (utcnow() + datetime.timedelta(seconds=window))
    return normalize_time(dt) <= soon
@ -19,11 +19,9 @@

from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import cfg
import datetime
from oslo.config import cfg
from reddwarf.common import exception
from reddwarf.openstack.common import importutils
from reddwarf.openstack.common import timeutils
from reddwarf.quota.models import Quota
from reddwarf.quota.models import QuotaUsage
from reddwarf.quota.models import Reservation
@ -14,3 +14,4 @@ lxml
python-novaclient
python-keystoneclient
iso8601
oslo.config>=1.1.0