Sync with oslo-incubator

Modules held back:

- openstack.common.policy. Bug #1268315.

Compatibility changes made:

- Additional arguments added to the Context object.
- RPC services no longer need to explicitly wait on the RPC consumer
  thread.
- Our openstack-common.conf file explicitly includes transitive
  dependencies; this is no longer necessary. Bug #1268314.

Additional changes still necessary:

- We use some deprecated modules (WSGI); we need to remove our
  dependence on these. Bug #1268313.

Change-Id: Ia52b386a928e1d03709dfdb23f0d504c7d01bc19
Oslo-SHA1: 8710dbacfd7d8dad58fccbdd4ffda7246dcbbb7c
Closes-Bug: 1268314
commit 6cc6019ace (parent 4aa6edee6f)
@@ -40,10 +40,6 @@ class Service(rpc_service.Service):
         self.manager.start()
         super(Service, self).start()
 
-    def wait(self):
-        super(Service, self).wait()
-        self.conn.consumer_thread.wait()
-
     def stop(self):
         super(Service, self).stop()
         self.manager.stop()
@@ -15,7 +15,7 @@
 # under the License.
 from paste import deploy
 from designate.openstack.common import log as logging
-from designate.openstack.common import wsgi
+from designate.openstack.deprecated import wsgi
 from oslo.config import cfg
 from designate import exceptions
 from designate import utils

@@ -74,10 +74,6 @@ class Service(rpc_service.Service):
 
         super(Service, self).start()
 
-    def wait(self):
-        super(Service, self).wait()
-        self.conn.consumer_thread.wait()
-
     def stop(self):
         super(Service, self).stop()
 
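Note for downstream consumers: both service hunks above drop the wait()
override because, after this sync, oslo's rpc_service.Service waits on the
RPC consumer thread itself. A minimal sketch of the pattern a service
subclass is left with (the manager attribute is illustrative, taken from the
first hunk):

    class Service(rpc_service.Service):
        def start(self):
            self.manager.start()
            super(Service, self).start()

        def stop(self):
            super(Service, self).stop()
            self.manager.stop()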
@@ -21,17 +21,23 @@ LOG = logging.getLogger(__name__)
 
 
 class DesignateContext(context.RequestContext):
-    def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
+    def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
+                 user_domain=None, project_domain=None, is_admin=False,
                  read_only=False, show_deleted=False, request_id=None,
-                 roles=[], service_catalog=None, all_tenants=False):
+                 instance_uuid=None, roles=[], service_catalog=None,
+                 all_tenants=False):
         super(DesignateContext, self).__init__(
             auth_token=auth_token,
             user=user,
             tenant=tenant,
+            domain=domain,
+            user_domain=user_domain,
+            project_domain=project_domain,
             is_admin=is_admin,
             read_only=read_only,
             show_deleted=show_deleted,
-            request_id=request_id)
+            request_id=request_id,
+            instance_uuid=instance_uuid)
 
         self.roles = roles
         self.service_catalog = service_catalog

@@ -43,6 +49,7 @@ class DesignateContext(context.RequestContext):
         # Remove the user and tenant id fields, this map to user and tenant
         d.pop('user_id')
         d.pop('tenant_id')
+        d.pop('user_identity')
 
         return self.from_dict(d)
 

@@ -61,6 +68,9 @@ class DesignateContext(context.RequestContext):
 
     @classmethod
     def from_dict(cls, values):
+        if 'user_identity' in values:
+            values.pop('user_identity')
+
         return cls(**values)
 
     def elevated(self, show_deleted=None):
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #

@@ -27,39 +25,60 @@ import uuid
 
 
 def generate_request_id():
-    return 'req-' + str(uuid.uuid4())
+    return 'req-%s' % str(uuid.uuid4())
 
 
 class RequestContext(object):
 
-    """
+    """Helper class to represent useful information about a request context.
+
     Stores information about the security context under which the user
     accesses the system, as well as additional request information.
     """
 
-    def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
-                 read_only=False, show_deleted=False, request_id=None):
+    user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'
+
+    def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
+                 user_domain=None, project_domain=None, is_admin=False,
+                 read_only=False, show_deleted=False, request_id=None,
+                 instance_uuid=None):
         self.auth_token = auth_token
         self.user = user
         self.tenant = tenant
+        self.domain = domain
+        self.user_domain = user_domain
+        self.project_domain = project_domain
         self.is_admin = is_admin
         self.read_only = read_only
         self.show_deleted = show_deleted
+        self.instance_uuid = instance_uuid
         if not request_id:
             request_id = generate_request_id()
         self.request_id = request_id
 
     def to_dict(self):
+        user_idt = (
+            self.user_idt_format.format(user=self.user or '-',
+                                        tenant=self.tenant or '-',
+                                        domain=self.domain or '-',
+                                        user_domain=self.user_domain or '-',
+                                        p_domain=self.project_domain or '-'))
+
         return {'user': self.user,
                 'tenant': self.tenant,
+                'domain': self.domain,
+                'user_domain': self.user_domain,
+                'project_domain': self.project_domain,
                 'is_admin': self.is_admin,
                 'read_only': self.read_only,
                 'show_deleted': self.show_deleted,
                 'auth_token': self.auth_token,
-                'request_id': self.request_id}
+                'request_id': self.request_id,
+                'instance_uuid': self.instance_uuid,
+                'user_identity': user_idt}
 
 
-def get_admin_context(show_deleted="no"):
+def get_admin_context(show_deleted=False):
     context = RequestContext(None,
                              tenant=None,
                              is_admin=True,
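Together these context changes mean a serialized context now carries a
composite user_identity field (which DesignateContext strips again on
round-trips, as the pops above show). A minimal sketch of the new base-class
behaviour, using only arguments visible in this diff:

    from designate.openstack.common import context

    ctxt = context.RequestContext(user='alice', tenant='t1',
                                  domain='default')
    d = ctxt.to_dict()
    # d['user_identity'] == 'alice t1 default - -'
    # format is '{user} {tenant} {domain} {user_domain} {p_domain}',
    # with '-' substituted for any unset field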
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2012 OpenStack Foundation.
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.

@@ -16,8 +14,13 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from __future__ import print_function
+
+import errno
 import gc
+import os
 import pprint
+import socket
 import sys
 import traceback
 

@@ -26,36 +29,83 @@ import eventlet.backdoor
 import greenlet
 from oslo.config import cfg
 
+from designate.openstack.common.gettextutils import _
+from designate.openstack.common import log as logging
+
+help_for_backdoor_port = (
+    "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
+    "in listening on a random tcp port number; <port> results in listening "
+    "on the specified port number (and not enabling backdoor if that port "
+    "is in use); and <start>:<end> results in listening on the smallest "
+    "unused port number within the specified range of port numbers. The "
+    "chosen port is displayed in the service's log file.")
 eventlet_backdoor_opts = [
-    cfg.IntOpt('backdoor_port',
+    cfg.StrOpt('backdoor_port',
                default=None,
-               help='port for eventlet backdoor to listen')
+               help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
 ]
 
 CONF = cfg.CONF
 CONF.register_opts(eventlet_backdoor_opts)
+LOG = logging.getLogger(__name__)
+
+
+class EventletBackdoorConfigValueError(Exception):
+    def __init__(self, port_range, help_msg, ex):
+        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
+               '%(help)s' %
+               {'range': port_range, 'ex': ex, 'help': help_msg})
+        super(EventletBackdoorConfigValueError, self).__init__(msg)
+        self.port_range = port_range
 
 
 def _dont_use_this():
-    print "Don't use this, just disconnect instead"
+    print("Don't use this, just disconnect instead")
 
 
 def _find_objects(t):
-    return filter(lambda o: isinstance(o, t), gc.get_objects())
+    return [o for o in gc.get_objects() if isinstance(o, t)]
 
 
 def _print_greenthreads():
     for i, gt in enumerate(_find_objects(greenlet.greenlet)):
-        print i, gt
+        print(i, gt)
         traceback.print_stack(gt.gr_frame)
-        print
+        print()
 
 
 def _print_nativethreads():
     for threadId, stack in sys._current_frames().items():
-        print threadId
+        print(threadId)
         traceback.print_stack(stack)
-        print
+        print()
+
+
+def _parse_port_range(port_range):
+    if ':' not in port_range:
+        start, end = port_range, port_range
+    else:
+        start, end = port_range.split(':', 1)
+    try:
+        start, end = int(start), int(end)
+        if end < start:
+            raise ValueError
+        return start, end
+    except ValueError as ex:
+        raise EventletBackdoorConfigValueError(port_range, ex,
+                                               help_for_backdoor_port)
+
+
+def _listen(host, start_port, end_port, listen_func):
+    try_port = start_port
+    while True:
+        try:
+            return listen_func((host, try_port))
+        except socket.error as exc:
+            if (exc.errno != errno.EADDRINUSE or
+                    try_port >= end_port):
+                raise
+            try_port += 1
 
 
 def initialize_if_enabled():

@@ -70,6 +120,8 @@ def initialize_if_enabled():
     if CONF.backdoor_port is None:
         return None
 
+    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
+
     # NOTE(johannes): The standard sys.displayhook will print the value of
     # the last expression and set it to __builtin__._, which overwrites
     # the __builtin__._ that gettext sets. Let's switch to using pprint

@@ -80,8 +132,13 @@ def initialize_if_enabled():
         pprint.pprint(val)
     sys.displayhook = displayhook
 
-    sock = eventlet.listen(('localhost', CONF.backdoor_port))
+    sock = _listen('localhost', start_port, end_port, eventlet.listen)
+
+    # In the case of backdoor port being zero, a port number is assigned by
+    # listen(). In any case, pull the port number out here.
     port = sock.getsockname()[1]
+    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
+             {'port': port, 'pid': os.getpid()})
     eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                      locals=backdoor_locals)
     return port
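backdoor_port changes from an IntOpt to a StrOpt so a port range can be
configured. A sketch of what the new _parse_port_range() helper returns for
the three documented forms (values are illustrative):

    _parse_port_range('0')          # (0, 0): listen on a random free port
    _parse_port_range('8800')       # (8800, 8800): exactly this port
    _parse_port_range('8800:8899')  # (8800, 8899): smallest unused port in range
    _parse_port_range('8899:8800')  # raises EventletBackdoorConfigValueError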
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # Copyright 2012, Red Hat, Inc.
 #

@@ -19,16 +17,17 @@
 Exception related utilities.
 """
 
-import contextlib
 import logging
 import sys
+import time
 import traceback
 
+import six
+
 from designate.openstack.common.gettextutils import _
 
 
-@contextlib.contextmanager
-def save_and_reraise_exception():
+class save_and_reraise_exception(object):
     """Save current exception, run some code and then re-raise.
 
     In some cases the exception context can be cleared, resulting in None

@@ -40,12 +39,61 @@ def save_and_reraise_exception():
     To work around this, we save the exception state, run handler code, and
     then re-raise the original exception. If another exception occurs, the
     saved exception is logged and the new exception is re-raised.
 
+    In some cases the caller may not want to re-raise the exception, and
+    for those circumstances this context provides a reraise flag that
+    can be used to suppress the exception. For example::
+
+      except Exception:
+          with save_and_reraise_exception() as ctxt:
+              decide_if_need_reraise()
+              if not should_be_reraised:
+                  ctxt.reraise = False
     """
-    type_, value, tb = sys.exc_info()
-    try:
-        yield
-    except Exception:
-        logging.error(_('Original exception being dropped: %s'),
-                      traceback.format_exception(type_, value, tb))
-        raise
-    raise type_, value, tb
+    def __init__(self):
+        self.reraise = True
+
+    def __enter__(self):
+        self.type_, self.value, self.tb, = sys.exc_info()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if exc_type is not None:
+            logging.error(_('Original exception being dropped: %s'),
+                          traceback.format_exception(self.type_,
+                                                     self.value,
+                                                     self.tb))
+            return False
+        if self.reraise:
+            six.reraise(self.type_, self.value, self.tb)
+
+
+def forever_retry_uncaught_exceptions(infunc):
+    def inner_func(*args, **kwargs):
+        last_log_time = 0
+        last_exc_message = None
+        exc_count = 0
+        while True:
+            try:
+                return infunc(*args, **kwargs)
+            except Exception as exc:
+                this_exc_message = six.u(str(exc))
+                if this_exc_message == last_exc_message:
+                    exc_count += 1
+                else:
+                    exc_count = 1
+                # Do not log any more frequently than once a minute unless
+                # the exception message changes
+                cur_time = int(time.time())
+                if (cur_time - last_log_time > 60 or
+                        this_exc_message != last_exc_message):
+                    logging.exception(
+                        _('Unexpected exception occurred %d time(s)... '
+                          'retrying.') % exc_count)
+                    last_log_time = cur_time
+                    last_exc_message = this_exc_message
+                    exc_count = 0
+                # This should be a very rare event. In case it isn't, do
+                # a sleep.
+                time.sleep(1)
+    return inner_func
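save_and_reraise_exception() keeps its context-manager interface but, as a
class, gains the reraise flag described in its docstring. A minimal usage
sketch (risky_operation(), cleanup() and the harmless check are
hypothetical):

    from designate.openstack.common import excutils

    try:
        risky_operation()
    except Exception:
        with excutils.save_and_reraise_exception() as ctxt:
            cleanup()
            if exception_is_harmless:
                ctxt.reraise = False  # suppress instead of re-raising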
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #

@@ -22,7 +20,7 @@ import os
 import tempfile
 
 from designate.openstack.common import excutils
-from designate.openstack.common.gettextutils import _  # noqa
+from designate.openstack.common.gettextutils import _
 from designate.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
 #
 # Copyright 2013 Mirantis, Inc.
 # Copyright 2013 OpenStack Foundation
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #

@@ -17,7 +15,7 @@
 
 import fixtures
 
-from designate.openstack.common.lockutils import lock
+from designate.openstack.common import lockutils
 
 
 class LockFixture(fixtures.Fixture):

@@ -45,7 +43,7 @@ class LockFixture(fixtures.Fixture):
     test method exits. (either by completing or raising an exception)
     """
     def __init__(self, name, lock_file_prefix=None):
-        self.mgr = lock(name, lock_file_prefix, True)
+        self.mgr = lockutils.lock(name, lock_file_prefix, True)
 
     def setUp(self):
         super(LockFixture, self).setUp()
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2013 Hewlett-Packard Development Company, L.P.

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2013 Hewlett-Packard Development Company, L.P.
@@ -1,6 +1,5 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 Red Hat, Inc.
+# Copyright 2013 IBM Corp.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may

@@ -23,11 +22,390 @@ Usual usage in an openstack.common module:
     from designate.openstack.common.gettextutils import _
 """
 
+import copy
 import gettext
+import locale
+from logging import handlers
+import os
+import re
 
+from babel import localedata
+import six
 
-t = gettext.translation('openstack-common', 'locale', fallback=True)
+_localedir = os.environ.get('designate'.upper() + '_LOCALEDIR')
+_t = gettext.translation('designate', localedir=_localedir, fallback=True)
+
+_AVAILABLE_LANGUAGES = {}
+USE_LAZY = False
+
+
+def enable_lazy():
+    """Convenience function for configuring _() to use lazy gettext
+
+    Call this at the start of execution to enable the gettextutils._
+    function to use lazy gettext functionality. This is useful if
+    your project is importing _ directly instead of using the
+    gettextutils.install() way of importing the _ function.
+    """
+    global USE_LAZY
+    USE_LAZY = True
 
 
 def _(msg):
-    return t.ugettext(msg)
+    if USE_LAZY:
+        return Message(msg, domain='designate')
+    else:
+        if six.PY3:
+            return _t.gettext(msg)
+        return _t.ugettext(msg)
+
+
+def install(domain, lazy=False):
+    """Install a _() function using the given translation domain.
+
+    Given a translation domain, install a _() function using gettext's
+    install() function.
+
+    The main difference from gettext.install() is that we allow
+    overriding the default localedir (e.g. /usr/share/locale) using
+    a translation-domain-specific environment variable (e.g.
+    NOVA_LOCALEDIR).
+
+    :param domain: the translation domain
+    :param lazy: indicates whether or not to install the lazy _() function.
+                 The lazy _() introduces a way to do deferred translation
+                 of messages by installing a _ that builds Message objects,
+                 instead of strings, which can then be lazily translated into
+                 any available locale.
+    """
+    if lazy:
+        # NOTE(mrodden): Lazy gettext functionality.
+        #
+        # The following introduces a deferred way to do translations on
+        # messages in OpenStack. We override the standard _() function
+        # and % (format string) operation to build Message objects that can
+        # later be translated when we have more information.
+        def _lazy_gettext(msg):
+            """Create and return a Message object.
+
+            Lazy gettext function for a given domain, it is a factory method
+            for a project/module to get a lazy gettext function for its own
+            translation domain (i.e. nova, glance, cinder, etc.)
+
+            Message encapsulates a string so that we can translate
+            it later when needed.
+            """
+            return Message(msg, domain=domain)
+
+        from six import moves
+        moves.builtins.__dict__['_'] = _lazy_gettext
+    else:
+        localedir = '%s_LOCALEDIR' % domain.upper()
+        if six.PY3:
+            gettext.install(domain,
+                            localedir=os.environ.get(localedir))
+        else:
+            gettext.install(domain,
+                            localedir=os.environ.get(localedir),
+                            unicode=True)
+
+
+class Message(six.text_type):
+    """A Message object is a unicode object that can be translated.
+
+    Translation of Message is done explicitly using the translate() method.
+    For all non-translation intents and purposes, a Message is simply unicode,
+    and can be treated as such.
+    """
+
+    def __new__(cls, msgid, msgtext=None, params=None, domain='designate', *args):
+        """Create a new Message object.
+
+        In order for translation to work gettext requires a message ID, this
+        msgid will be used as the base unicode text. It is also possible
+        for the msgid and the base unicode text to be different by passing
+        the msgtext parameter.
+        """
+        # If the base msgtext is not given, we use the default translation
+        # of the msgid (which is in English) just in case the system locale is
+        # not English, so that the base text will be in that locale by default.
+        if not msgtext:
+            msgtext = Message._translate_msgid(msgid, domain)
+        # We want to initialize the parent unicode with the actual object that
+        # would have been plain unicode if 'Message' was not enabled.
+        msg = super(Message, cls).__new__(cls, msgtext)
+        msg.msgid = msgid
+        msg.domain = domain
+        msg.params = params
+        return msg
+
+    def translate(self, desired_locale=None):
+        """Translate this message to the desired locale.
+
+        :param desired_locale: The desired locale to translate the message to,
+                               if no locale is provided the message will be
+                               translated to the system's default locale.
+
+        :returns: the translated message in unicode
+        """
+
+        translated_message = Message._translate_msgid(self.msgid,
+                                                      self.domain,
+                                                      desired_locale)
+        if self.params is None:
+            # No need for more translation
+            return translated_message
+
+        # This Message object may have been formatted with one or more
+        # Message objects as substitution arguments, given either as a single
+        # argument, part of a tuple, or as one or more values in a dictionary.
+        # When translating this Message we need to translate those Messages too
+        translated_params = _translate_args(self.params, desired_locale)
+
+        translated_message = translated_message % translated_params
+
+        return translated_message
+
+    @staticmethod
+    def _translate_msgid(msgid, domain, desired_locale=None):
+        if not desired_locale:
+            system_locale = locale.getdefaultlocale()
+            # If the system locale is not available to the runtime use English
+            if not system_locale[0]:
+                desired_locale = 'en_US'
+            else:
+                desired_locale = system_locale[0]
+
+        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
+        lang = gettext.translation(domain,
+                                   localedir=locale_dir,
+                                   languages=[desired_locale],
+                                   fallback=True)
+        if six.PY3:
+            translator = lang.gettext
+        else:
+            translator = lang.ugettext
+
+        translated_message = translator(msgid)
+        return translated_message
+
+    def __mod__(self, other):
+        # When we mod a Message we want the actual operation to be performed
+        # by the parent class (i.e. unicode()), the only thing  we do here is
+        # save the original msgid and the parameters in case of a translation
+        unicode_mod = super(Message, self).__mod__(other)
+        modded = Message(self.msgid,
+                         msgtext=unicode_mod,
+                         params=self._sanitize_mod_params(other),
+                         domain=self.domain)
+        return modded
+
+    def _sanitize_mod_params(self, other):
+        """Sanitize the object being modded with this Message.
+
+        - Add support for modding 'None' so translation supports it
+        - Trim the modded object, which can be a large dictionary, to only
+          those keys that would actually be used in a translation
+        - Snapshot the object being modded, in case the message is
+          translated, it will be used as it was when the Message was created
+        """
+        if other is None:
+            params = (other,)
+        elif isinstance(other, dict):
+            params = self._trim_dictionary_parameters(other)
+        else:
+            params = self._copy_param(other)
+        return params
+
+    def _trim_dictionary_parameters(self, dict_param):
+        """Return a dict that only has matching entries in the msgid."""
+        # NOTE(luisg): Here we trim down the dictionary passed as parameters
+        # to avoid carrying a lot of unnecessary weight around in the message
+        # object, for example if someone passes in Message() % locals() but
+        # only some params are used, and additionally we prevent errors for
+        # non-deepcopyable objects by unicoding() them.
+
+        # Look for %(param) keys in msgid;
+        # Skip %% and deal with the case where % is first character on the line
+        keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
+
+        # If we don't find any %(param) keys but have a %s
+        if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
+            # Apparently the full dictionary is the parameter
+            params = self._copy_param(dict_param)
+        else:
+            params = {}
+            for key in keys:
+                params[key] = self._copy_param(dict_param[key])
+
+        return params
+
+    def _copy_param(self, param):
+        try:
+            return copy.deepcopy(param)
+        except TypeError:
+            # Fallback to casting to unicode this will handle the
+            # python code-like objects that can't be deep-copied
+            return six.text_type(param)
+
+    def __add__(self, other):
+        msg = _('Message objects do not support addition.')
+        raise TypeError(msg)
+
+    def __radd__(self, other):
+        return self.__add__(other)
+
+    def __str__(self):
+        # NOTE(luisg): Logging in python 2.6 tries to str() log records,
+        # and it expects specifically a UnicodeError in order to proceed.
+        msg = _('Message objects do not support str() because they may '
+                'contain non-ascii characters. '
+                'Please use unicode() or translate() instead.')
+        raise UnicodeError(msg)
+
+
+def get_available_languages(domain):
+    """Lists the available languages for the given translation domain.
+
+    :param domain: the domain to get languages for
+    """
+    if domain in _AVAILABLE_LANGUAGES:
+        return copy.copy(_AVAILABLE_LANGUAGES[domain])
+
+    localedir = '%s_LOCALEDIR' % domain.upper()
+    find = lambda x: gettext.find(domain,
+                                  localedir=os.environ.get(localedir),
+                                  languages=[x])
+
+    # NOTE(mrodden): en_US should always be available (and first in case
+    # order matters) since our in-line message strings are en_US
+    language_list = ['en_US']
+    # NOTE(luisg): Babel <1.0 used a function called list(), which was
+    # renamed to locale_identifiers() in >=1.0, the requirements master list
+    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
+    # this check when the master list updates to >=1.0, and update all projects
+    list_identifiers = (getattr(localedata, 'list', None) or
+                        getattr(localedata, 'locale_identifiers'))
+    locale_identifiers = list_identifiers()
+    for i in locale_identifiers:
+        if find(i) is not None:
+            language_list.append(i)
+    _AVAILABLE_LANGUAGES[domain] = language_list
+    return copy.copy(language_list)
+
+
+def translate(obj, desired_locale=None):
+    """Gets the translated unicode representation of the given object.
+
+    If the object is not translatable it is returned as-is.
+    If the locale is None the object is translated to the system locale.
+
+    :param obj: the object to translate
+    :param desired_locale: the locale to translate the message to, if None the
+                           default system locale will be used
+    :returns: the translated object in unicode, or the original object if
+              it could not be translated
+    """
+    message = obj
+    if not isinstance(message, Message):
+        # If the object to translate is not already translatable,
+        # let's first get its unicode representation
+        message = six.text_type(obj)
+    if isinstance(message, Message):
+        # Even after unicoding() we still need to check if we are
+        # running with translatable unicode before translating
+        return message.translate(desired_locale)
+    return obj
+
+
+def _translate_args(args, desired_locale=None):
+    """Translates all the translatable elements of the given arguments object.
+
+    This method is used for translating the translatable values in method
+    arguments which include values of tuples or dictionaries.
+    If the object is not a tuple or a dictionary the object itself is
+    translated if it is translatable.
+
+    If the locale is None the object is translated to the system locale.
+
+    :param args: the args to translate
+    :param desired_locale: the locale to translate the args to, if None the
+                           default system locale will be used
+    :returns: a new args object with the translated contents of the original
+    """
+    if isinstance(args, tuple):
+        return tuple(translate(v, desired_locale) for v in args)
+    if isinstance(args, dict):
+        translated_dict = {}
+        for (k, v) in six.iteritems(args):
+            translated_v = translate(v, desired_locale)
+            translated_dict[k] = translated_v
+        return translated_dict
+    return translate(args, desired_locale)
+
+
+class TranslationHandler(handlers.MemoryHandler):
+    """Handler that translates records before logging them.
+
+    The TranslationHandler takes a locale and a target logging.Handler object
+    to forward LogRecord objects to after translating them. This handler
+    depends on Message objects being logged, instead of regular strings.
+
+    The handler can be configured declaratively in the logging.conf as follows:
+
+        [handlers]
+        keys = translatedlog, translator
+
+        [handler_translatedlog]
+        class = handlers.WatchedFileHandler
+        args = ('/var/log/api-localized.log',)
+        formatter = context
+
+        [handler_translator]
+        class = openstack.common.log.TranslationHandler
+        target = translatedlog
+        args = ('zh_CN',)
+
+    If the specified locale is not available in the system, the handler will
+    log in the default locale.
+    """
+
+    def __init__(self, locale=None, target=None):
+        """Initialize a TranslationHandler
+
+        :param locale: locale to use for translating messages
+        :param target: logging.Handler object to forward
+                       LogRecord objects to after translation
+        """
+        # NOTE(luisg): In order to allow this handler to be a wrapper for
+        # other handlers, such as a FileHandler, and still be able to
+        # configure it using logging.conf, this handler has to extend
+        # MemoryHandler because only the MemoryHandlers' logging.conf
+        # parsing is implemented such that it accepts a target handler.
+        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
+        self.locale = locale
+
+    def setFormatter(self, fmt):
+        self.target.setFormatter(fmt)
+
+    def emit(self, record):
+        # We save the message from the original record to restore it
+        # after translation, so other handlers are not affected by this
+        original_msg = record.msg
+        original_args = record.args
+
+        try:
+            self._translate_and_log_record(record)
+        finally:
+            record.msg = original_msg
+            record.args = original_args
+
+    def _translate_and_log_record(self, record):
+        record.msg = translate(record.msg, self.locale)
+
+        # In addition to translating the message, we also need to translate
+        # arguments that were passed to the log method that were not part
+        # of the main message e.g., log.info(_('Some message %s'), this_one))
+        record.args = _translate_args(record.args, self.locale)
+
+        self.target.emit(record)
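The rewritten gettextutils adds deferred ("lazy") translation: once enabled,
_() returns Message objects (a unicode subclass) that are rendered into a
locale only when asked. A minimal sketch using only functions added above
(with no translation catalogs installed, the en_US text comes back
unchanged):

    from designate.openstack.common import gettextutils

    gettextutils.enable_lazy()
    msg = gettextutils._('Unexpected error: %s') % 'boom'
    text = gettextutils.translate(msg, desired_locale='en_US')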
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #

@@ -24,7 +22,7 @@ import traceback
 
 
 def import_class(import_str):
-    """Returns a class from a string including module and class"""
+    """Returns a class from a string including module and class."""
     mod_str, _sep, class_str = import_str.rpartition('.')
     try:
         __import__(mod_str)

@@ -41,8 +39,9 @@ def import_object(import_str, *args, **kwargs):
 
 
 def import_object_ns(name_space, import_str, *args, **kwargs):
-    """
-    Import a class and return an instance of it, first by trying
+    """Tries to import object from default namespace.
+
+    Imports a class and return an instance of it, first by trying
     to find the class in a default namespace, then failing back to
     a full path if not found in the default namespace.
     """
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2011 Justin Santa Barbara

@@ -38,10 +36,33 @@ import functools
 import inspect
 import itertools
 import json
-import xmlrpclib
+try:
+    import xmlrpclib
+except ImportError:
+    # NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3
+    #                 however the function and object call signatures
+    #                 remained the same. This whole try/except block should
+    #                 be removed and replaced with a call to six.moves once
+    #                 six 1.4.2 is released. See http://bit.ly/1bqrVzu
+    import xmlrpc.client as xmlrpclib
+
+import six
+
+from designate.openstack.common import gettextutils
+from designate.openstack.common import importutils
 from designate.openstack.common import timeutils
 
+netaddr = importutils.try_import("netaddr")
+
+_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
+                     inspect.isfunction, inspect.isgeneratorfunction,
+                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
+                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
+                     inspect.isabstract]
+
+_simple_types = (six.string_types + six.integer_types
+                 + (type(None), bool, float))
+
 
 def to_primitive(value, convert_instances=False, convert_datetime=True,
                  level=0, max_depth=3):

@@ -58,19 +79,32 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
     Therefore, convert_instances=True is lossy ... be aware.
 
     """
-    nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
-             inspect.isfunction, inspect.isgeneratorfunction,
-             inspect.isgenerator, inspect.istraceback, inspect.isframe,
-             inspect.iscode, inspect.isbuiltin, inspect.isroutine,
-             inspect.isabstract]
-    for test in nasty:
-        if test(value):
-            return unicode(value)
+    # handle obvious types first - order of basic types determined by running
+    # full tests on nova project, resulting in the following counts:
+    # 572754 <type 'NoneType'>
+    # 460353 <type 'int'>
+    # 379632 <type 'unicode'>
+    # 274610 <type 'str'>
+    # 199918 <type 'dict'>
+    # 114200 <type 'datetime.datetime'>
+    #  51817 <type 'bool'>
+    #  26164 <type 'list'>
+    #   6491 <type 'float'>
+    #    283 <type 'tuple'>
+    #     19 <type 'long'>
+    if isinstance(value, _simple_types):
+        return value
+
+    if isinstance(value, datetime.datetime):
+        if convert_datetime:
+            return timeutils.strtime(value)
+        else:
+            return value
 
-    # value of itertools.count doesn't get caught by inspects
-    # above and results in infinite loop when list(value) is called.
+    # value of itertools.count doesn't get caught by nasty_type_tests
+    # and results in infinite loop when list(value) is called.
     if type(value) == itertools.count:
-        return unicode(value)
+        return six.text_type(value)
 
     # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
     #              tests that raise an exception in a mocked method that

@@ -91,18 +125,21 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
                                       convert_datetime=convert_datetime,
                                       level=level,
                                       max_depth=max_depth)
+        if isinstance(value, dict):
+            return dict((k, recursive(v)) for k, v in six.iteritems(value))
+        elif isinstance(value, (list, tuple)):
+            return [recursive(lv) for lv in value]
+
         # It's not clear why xmlrpclib created their own DateTime type, but
         # for our purposes, make it a datetime type which is explicitly
         # handled
         if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])
 
-        if isinstance(value, (list, tuple)):
-            return [recursive(v) for v in value]
-        elif isinstance(value, dict):
-            return dict((k, recursive(v)) for k, v in value.iteritems())
-        elif convert_datetime and isinstance(value, datetime.datetime):
+        if convert_datetime and isinstance(value, datetime.datetime):
             return timeutils.strtime(value)
+        elif isinstance(value, gettextutils.Message):
+            return value.data
         elif hasattr(value, 'iteritems'):
             return recursive(dict(value.iteritems()), level=level + 1)
         elif hasattr(value, '__iter__'):

@@ -111,12 +148,16 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
             # Likely an instance of something. Watch for cycles.
             # Ignore class member vars.
             return recursive(value.__dict__, level=level + 1)
+        elif netaddr and isinstance(value, netaddr.IPAddress):
+            return six.text_type(value)
         else:
+            if any(test(value) for test in _nasty_type_tests):
+                return six.text_type(value)
             return value
     except TypeError:
         # Class objects are tricky since they may define something like
         # __iter__ defined but it isn't callable as list().
-        return unicode(value)
+        return six.text_type(value)
 
 
 def dumps(value, default=to_primitive, **kwargs):
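to_primitive() now short-circuits simple types and handles a few new ones. A
sketch of the added branches (assuming the optional netaddr import
succeeds):

    import datetime
    import netaddr
    from designate.openstack.common import jsonutils

    jsonutils.to_primitive(42)                          # 42, returned as-is
    jsonutils.to_primitive(datetime.datetime.utcnow())  # timeutils string form
    jsonutils.to_primitive(netaddr.IPAddress('192.0.2.1'))  # u'192.0.2.1'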
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #

@@ -15,16 +13,15 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-"""Greenthread local storage of variables using weak references"""
+"""Local storage of variables using weak references"""
 
+import threading
 import weakref
 
-from eventlet import corolocal
-
 
-class WeakLocal(corolocal.local):
+class WeakLocal(threading.local):
     def __getattribute__(self, attr):
-        rval = corolocal.local.__getattribute__(self, attr)
+        rval = super(WeakLocal, self).__getattribute__(attr)
         if rval:
             # NOTE(mikal): this bit is confusing. What is stored is a weak
             # reference, not the value itself. We therefore need to lookup

@@ -34,7 +31,7 @@ class WeakLocal(corolocal.local):
 
     def __setattr__(self, attr, value):
         value = weakref.ref(value)
-        return corolocal.local.__setattr__(self, attr, value)
+        return super(WeakLocal, self).__setattr__(attr, value)
 
 
 # NOTE(mikal): the name "store" should be deprecated in the future

@@ -45,4 +42,4 @@ store = WeakLocal()
 # "strong" store will hold a reference to the object so that it never falls out
 # of scope.
 weak_store = WeakLocal()
-strong_store = corolocal.local
+strong_store = threading.local()
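Dropping eventlet's corolocal makes these stores plain thread-locals; note
the old code bound strong_store to the corolocal.local class itself, while
the new code correctly binds a threading.local() instance. Usage is
unchanged (some_context is illustrative):

    from designate.openstack.common import local

    local.strong_store.context = some_context  # visible only in this thread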
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #

@@ -31,7 +29,7 @@ import weakref
 from oslo.config import cfg
 
 from designate.openstack.common import fileutils
-from designate.openstack.common.gettextutils import _  # noqa
+from designate.openstack.common.gettextutils import _
 from designate.openstack.common import local
 from designate.openstack.common import log as logging
 

@@ -140,25 +138,46 @@ _semaphores_lock = threading.Lock()
 
 
 @contextlib.contextmanager
-def lock(name, lock_file_prefix=None, external=False, lock_path=None):
-    """Context based lock
-
-    This function yields a `threading.Semaphore` instance (if we don't use
-    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
-    True, in which case, it'll yield an InterProcessLock instance.
-
-    :param lock_file_prefix: The lock_file_prefix argument is used to provide
-    lock files on disk with a meaningful prefix.
-
-    :param external: The external keyword argument denotes whether this lock
-    should work across multiple processes. This means that if two different
-    workers both run a a method decorated with @synchronized('mylock',
-    external=True), only one of them will execute at a time.
-
-    :param lock_path: The lock_path keyword argument is used to specify a
-    special location for external lock files to live. If nothing is set, then
-    CONF.lock_path is used as a default.
-    """
+def external_lock(name, lock_file_prefix=None, lock_path=None):
+    with internal_lock(name):
+        LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
+                  {'lock': name})
+
+        # We need a copy of lock_path because it is non-local
+        local_lock_path = lock_path or CONF.lock_path
+        if not local_lock_path:
+            raise cfg.RequiredOptError('lock_path')
+
+        if not os.path.exists(local_lock_path):
+            fileutils.ensure_tree(local_lock_path)
+            LOG.info(_('Created lock path: %s'), local_lock_path)
+
+        def add_prefix(name, prefix):
+            if not prefix:
+                return name
+            sep = '' if prefix.endswith('-') else '-'
+            return '%s%s%s' % (prefix, sep, name)
+
+        # NOTE(mikal): the lock name cannot contain directory
+        # separators
+        lock_file_name = add_prefix(name.replace(os.sep, '_'),
+                                    lock_file_prefix)
+
+        lock_file_path = os.path.join(local_lock_path, lock_file_name)
+
+        try:
+            lock = InterProcessLock(lock_file_path)
+            with lock as lock:
+                LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
+                          {'lock': name, 'path': lock_file_path})
+                yield lock
+        finally:
+            LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
+                      {'lock': name, 'path': lock_file_path})
+
+
+@contextlib.contextmanager
+def internal_lock(name):
     with _semaphores_lock:
         try:
             sem = _semaphores[name]

@@ -175,48 +194,39 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None):
         local.strong_store.locks_held.append(name)
 
     try:
-        if external and not CONF.disable_process_locking:
-            LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
-                      {'lock': name})
-
-            # We need a copy of lock_path because it is non-local
-            local_lock_path = lock_path or CONF.lock_path
-            if not local_lock_path:
-                raise cfg.RequiredOptError('lock_path')
-
-            if not os.path.exists(local_lock_path):
-                fileutils.ensure_tree(local_lock_path)
-                LOG.info(_('Created lock path: %s'), local_lock_path)
-
-            def add_prefix(name, prefix):
-                if not prefix:
-                    return name
-                sep = '' if prefix.endswith('-') else '-'
-                return '%s%s%s' % (prefix, sep, name)
-
-            # NOTE(mikal): the lock name cannot contain directory
-            # separators
-            lock_file_name = add_prefix(name.replace(os.sep, '_'),
-                                        lock_file_prefix)
-
-            lock_file_path = os.path.join(local_lock_path, lock_file_name)
-
-            try:
-                lock = InterProcessLock(lock_file_path)
-                with lock as lock:
-                    LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
-                              {'lock': name, 'path': lock_file_path})
-                    yield lock
-            finally:
-                LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
-                          {'lock': name, 'path': lock_file_path})
-        else:
-            yield sem
-
+        yield sem
     finally:
         local.strong_store.locks_held.remove(name)
 
 
+@contextlib.contextmanager
+def lock(name, lock_file_prefix=None, external=False, lock_path=None):
+    """Context based lock
+
+    This function yields a `threading.Semaphore` instance (if we don't use
+    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
+    True, in which case, it'll yield an InterProcessLock instance.
+
+    :param lock_file_prefix: The lock_file_prefix argument is used to provide
+    lock files on disk with a meaningful prefix.
+
+    :param external: The external keyword argument denotes whether this lock
+    should work across multiple processes. This means that if two different
+    workers both run a a method decorated with @synchronized('mylock',
+    external=True), only one of them will execute at a time.
+
+    :param lock_path: The lock_path keyword argument is used to specify a
+    special location for external lock files to live. If nothing is set, then
+    CONF.lock_path is used as a default.
+    """
+    if external and not CONF.disable_process_locking:
+        with external_lock(name, lock_file_prefix, lock_path) as lock:
+            yield lock
+    else:
+        with internal_lock(name) as lock:
+            yield lock
+
+
 def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
     """Synchronization decorator.
 
@ -1,5 +1,3 @@
|
|||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2011 OpenStack Foundation.
|
# Copyright 2011 OpenStack Foundation.
|
||||||
# Copyright 2010 United States Government as represented by the
|
# Copyright 2010 United States Government as represented by the
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
@ -29,28 +27,46 @@ It also allows setting of formatting information through conf.
|
|||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import cStringIO
|
|
||||||
import inspect
|
import inspect
|
||||||
import itertools
|
import itertools
|
||||||
import logging
|
import logging
|
||||||
import logging.config
|
import logging.config
|
||||||
import logging.handlers
|
import logging.handlers
|
||||||
import os
|
import os
|
||||||
import stat
|
import re
|
||||||
import sys
|
import sys
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
import six
|
||||||
|
from six import moves
|
||||||
|
|
||||||
from designate.openstack.common.gettextutils import _
|
from designate.openstack.common.gettextutils import _
|
||||||
|
from designate.openstack.common import importutils
|
||||||
from designate.openstack.common import jsonutils
|
from designate.openstack.common import jsonutils
|
||||||
from designate.openstack.common import local
|
from designate.openstack.common import local
|
||||||
from designate.openstack.common import notifier
|
|
||||||
|
|
||||||
|
|
||||||
_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
|
|
||||||
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
|
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
|
||||||
|
|
||||||
|
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
|
||||||
|
|
||||||
|
# NOTE(ldbragst): Let's build a list of regex objects using the list of
|
||||||
|
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
|
||||||
|
# to the list of _SANITIZE_KEYS and we can generate regular expressions
|
||||||
|
# for XML and JSON automatically.
|
||||||
|
_SANITIZE_PATTERNS = []
|
||||||
|
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
|
||||||
|
r'(<%(key)s>).*?(</%(key)s>)',
|
||||||
|
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
|
||||||
|
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
|
||||||
|
|
||||||
|
for key in _SANITIZE_KEYS:
|
||||||
|
for pattern in _FORMAT_PATTERNS:
|
||||||
|
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
|
||||||
|
_SANITIZE_PATTERNS.append(reg_ex)
|
||||||
|
|
||||||
|
|
||||||
common_cli_opts = [
|
common_cli_opts = [
|
||||||
cfg.BoolOpt('debug',
|
cfg.BoolOpt('debug',
|
||||||
short='d',
|
short='d',
|
||||||
@ -65,19 +81,24 @@ common_cli_opts = [
|
|||||||
]
|
]
|
||||||
|
|
||||||
logging_cli_opts = [
|
logging_cli_opts = [
|
||||||
cfg.StrOpt('log-config',
|
cfg.StrOpt('log-config-append',
|
||||||
metavar='PATH',
|
metavar='PATH',
|
||||||
help='If this option is specified, the logging configuration '
|
deprecated_name='log-config',
|
||||||
'file specified is used and overrides any other logging '
|
help='The name of logging configuration file. It does not '
|
||||||
'options specified. Please see the Python logging module '
|
'disable existing loggers, but just appends specified '
|
||||||
|
'logging configuration to any other existing logging '
|
||||||
|
'options. Please see the Python logging module '
|
||||||
'documentation for details on logging configuration '
|
'documentation for details on logging configuration '
|
||||||
'files.'),
|
'files.'),
|
||||||
cfg.StrOpt('log-format',
|
cfg.StrOpt('log-format',
|
||||||
default=_DEFAULT_LOG_FORMAT,
|
default=None,
|
||||||
metavar='FORMAT',
|
metavar='FORMAT',
|
||||||
help='A logging.Formatter log message format string which may '
|
help='DEPRECATED. '
|
||||||
|
'A logging.Formatter log message format string which may '
|
||||||
'use any of the available logging.LogRecord attributes. '
|
'use any of the available logging.LogRecord attributes. '
|
||||||
'Default: %(default)s'),
|
'This option is deprecated. Please use '
|
||||||
|
'logging_context_format_string and '
|
||||||
|
'logging_default_format_string instead.'),
|
||||||
cfg.StrOpt('log-date-format',
|
cfg.StrOpt('log-date-format',
|
||||||
default=_DEFAULT_LOG_DATE_FORMAT,
|
default=_DEFAULT_LOG_DATE_FORMAT,
|
||||||
metavar='DATE_FORMAT',
|
metavar='DATE_FORMAT',
|
||||||
@ -87,11 +108,11 @@ logging_cli_opts = [
|
|||||||
metavar='PATH',
|
metavar='PATH',
|
||||||
deprecated_name='logfile',
|
deprecated_name='logfile',
|
||||||
help='(Optional) Name of log file to output to. '
|
help='(Optional) Name of log file to output to. '
|
||||||
'If not set, logging will go to stdout.'),
|
'If no default is set, logging will go to stdout.'),
|
||||||
cfg.StrOpt('log-dir',
|
cfg.StrOpt('log-dir',
|
||||||
deprecated_name='logdir',
|
deprecated_name='logdir',
|
||||||
help='(Optional) The directory to keep log files in '
|
help='(Optional) The base directory used for relative '
|
||||||
'(will be prepended to --log-file)'),
|
'--log-file paths'),
|
||||||
cfg.BoolOpt('use-syslog',
|
cfg.BoolOpt('use-syslog',
|
||||||
default=False,
|
default=False,
|
||||||
help='Use syslog for logging.'),
|
help='Use syslog for logging.'),
|
||||||
@ -103,17 +124,14 @@ logging_cli_opts = [
|
|||||||
generic_log_opts = [
|
generic_log_opts = [
|
||||||
cfg.BoolOpt('use_stderr',
|
cfg.BoolOpt('use_stderr',
|
||||||
default=True,
|
default=True,
|
||||||
help='Log output to standard error'),
|
help='Log output to standard error')
|
||||||
cfg.StrOpt('logfile_mode',
|
|
||||||
default='0644',
|
|
||||||
help='Default file mode used when creating log files'),
|
|
||||||
]
|
]
|
||||||
|
|
||||||
log_opts = [
|
log_opts = [
|
||||||
cfg.StrOpt('logging_context_format_string',
|
cfg.StrOpt('logging_context_format_string',
|
||||||
default='%(asctime)s.%(msecs)03d %(levelname)s %(name)s '
|
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
|
||||||
'[%(request_id)s %(user)s %(tenant)s] %(instance)s'
|
'%(name)s [%(request_id)s %(user_identity)s] '
|
||||||
'%(message)s',
|
'%(instance)s%(message)s',
|
||||||
help='format string to use for log messages with context'),
|
help='format string to use for log messages with context'),
|
||||||
cfg.StrOpt('logging_default_format_string',
|
cfg.StrOpt('logging_default_format_string',
|
||||||
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
|
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
|
||||||
@ -128,12 +146,13 @@ log_opts = [
|
|||||||
help='prefix each line of exception output with this format'),
|
help='prefix each line of exception output with this format'),
|
||||||
cfg.ListOpt('default_log_levels',
|
cfg.ListOpt('default_log_levels',
|
||||||
default=[
|
default=[
|
||||||
|
'amqp=WARN',
|
||||||
'amqplib=WARN',
|
'amqplib=WARN',
|
||||||
'sqlalchemy=WARN',
|
|
||||||
'boto=WARN',
|
'boto=WARN',
|
||||||
|
'qpid=WARN',
|
||||||
|
'sqlalchemy=WARN',
|
||||||
'suds=INFO',
|
'suds=INFO',
|
||||||
'keystone=INFO',
|
'iso8601=WARN',
|
||||||
'eventlet.wsgi.server=WARN'
|
|
||||||
],
|
],
|
||||||
help='list of logger=LEVEL pairs'),
|
help='list of logger=LEVEL pairs'),
|
||||||
cfg.BoolOpt('publish_errors',
|
cfg.BoolOpt('publish_errors',
|
||||||
@ -209,8 +228,64 @@ def _get_log_file_path(binary=None):
|
|||||||
binary = binary or _get_binary_name()
|
binary = binary or _get_binary_name()
|
||||||
return '%s.log' % (os.path.join(logdir, binary),)
|
return '%s.log' % (os.path.join(logdir, binary),)
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
class ContextAdapter(logging.LoggerAdapter):
|
|
||||||
|
def mask_password(message, secret="***"):
|
||||||
|
"""Replace password with 'secret' in message.
|
||||||
|
|
||||||
|
:param message: The string which includes security information.
|
||||||
|
:param secret: value with which to replace passwords.
|
||||||
|
:returns: The unicode value of message with the password fields masked.
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
>>> mask_password("'adminPass' : 'aaaaa'")
|
||||||
|
"'adminPass' : '***'"
|
||||||
|
>>> mask_password("'admin_pass' : 'aaaaa'")
|
||||||
|
"'admin_pass' : '***'"
|
||||||
|
>>> mask_password('"password" : "aaaaa"')
|
||||||
|
'"password" : "***"'
|
||||||
|
>>> mask_password("'original_password' : 'aaaaa'")
|
||||||
|
"'original_password' : '***'"
|
||||||
|
>>> mask_password("u'original_password' : u'aaaaa'")
|
||||||
|
"u'original_password' : u'***'"
|
||||||
|
"""
|
||||||
|
message = six.text_type(message)
|
||||||
|
|
||||||
|
# NOTE(ldbragst): Check to see if anything in message contains any key
|
||||||
|
# specified in _SANITIZE_KEYS, if not then just return the message since
|
||||||
|
# we don't have to mask any passwords.
|
||||||
|
if not any(key in message for key in _SANITIZE_KEYS):
|
||||||
|
return message
|
||||||
|
|
||||||
|
secret = r'\g<1>' + secret + r'\g<2>'
|
||||||
|
for pattern in _SANITIZE_PATTERNS:
|
||||||
|
message = re.sub(pattern, secret, message)
|
||||||
|
return message
|
||||||
|
|
||||||
|
|
||||||
|
class BaseLoggerAdapter(logging.LoggerAdapter):
|
||||||
|
|
||||||
|
def audit(self, msg, *args, **kwargs):
|
||||||
|
self.log(logging.AUDIT, msg, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class LazyAdapter(BaseLoggerAdapter):
|
||||||
|
def __init__(self, name='unknown', version='unknown'):
|
||||||
|
self._logger = None
|
||||||
|
self.extra = {}
|
||||||
|
self.name = name
|
||||||
|
self.version = version
|
||||||
|
|
||||||
|
@property
|
||||||
|
def logger(self):
|
||||||
|
if not self._logger:
|
||||||
|
self._logger = getLogger(self.name, self.version)
|
||||||
|
return self._logger
|
||||||
|
|
||||||
|
|
||||||
|
class ContextAdapter(BaseLoggerAdapter):
|
||||||
warn = logging.LoggerAdapter.warning
|
warn = logging.LoggerAdapter.warning
|
||||||
|
|
||||||
def __init__(self, logger, project_name, version_string):
|
def __init__(self, logger, project_name, version_string):
|
||||||
@ -218,8 +293,9 @@ class ContextAdapter(logging.LoggerAdapter):
|
|||||||
self.project = project_name
|
self.project = project_name
|
||||||
self.version = version_string
|
self.version = version_string
|
||||||
|
|
||||||
def audit(self, msg, *args, **kwargs):
|
@property
|
||||||
self.log(logging.AUDIT, msg, *args, **kwargs)
|
def handlers(self):
|
||||||
|
return self.logger.handlers
|
||||||
|
|
||||||
def deprecated(self, msg, *args, **kwargs):
|
def deprecated(self, msg, *args, **kwargs):
|
||||||
stdmsg = _("Deprecated: %s") % msg
|
stdmsg = _("Deprecated: %s") % msg
|
||||||
@ -230,6 +306,13 @@ class ContextAdapter(logging.LoggerAdapter):
|
|||||||
self.warn(stdmsg, *args, **kwargs)
|
self.warn(stdmsg, *args, **kwargs)
|
||||||
|
|
||||||
def process(self, msg, kwargs):
|
def process(self, msg, kwargs):
|
||||||
|
# NOTE(mrodden): catch any Message/other object and
|
||||||
|
# coerce to unicode before they can get
|
||||||
|
# to the python logging and possibly
|
||||||
|
# cause string encoding trouble
|
||||||
|
if not isinstance(msg, six.string_types):
|
||||||
|
msg = six.text_type(msg)
|
||||||
|
|
||||||
if 'extra' not in kwargs:
|
if 'extra' not in kwargs:
|
||||||
kwargs['extra'] = {}
|
kwargs['extra'] = {}
|
||||||
extra = kwargs['extra']
|
extra = kwargs['extra']
|
||||||
@ -241,18 +324,20 @@ class ContextAdapter(logging.LoggerAdapter):
|
|||||||
extra.update(_dictify_context(context))
|
extra.update(_dictify_context(context))
|
||||||
|
|
||||||
instance = kwargs.pop('instance', None)
|
instance = kwargs.pop('instance', None)
|
||||||
|
instance_uuid = (extra.get('instance_uuid', None) or
|
||||||
|
kwargs.pop('instance_uuid', None))
|
||||||
instance_extra = ''
|
instance_extra = ''
|
||||||
if instance:
|
if instance:
|
||||||
instance_extra = CONF.instance_format % instance
|
instance_extra = CONF.instance_format % instance
|
||||||
else:
|
elif instance_uuid:
|
||||||
instance_uuid = kwargs.pop('instance_uuid', None)
|
instance_extra = (CONF.instance_uuid_format
|
||||||
if instance_uuid:
|
% {'uuid': instance_uuid})
|
||||||
instance_extra = (CONF.instance_uuid_format
|
extra['instance'] = instance_extra
|
||||||
% {'uuid': instance_uuid})
|
|
||||||
extra.update({'instance': instance_extra})
|
|
||||||
|
|
||||||
extra.update({"project": self.project})
|
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
|
||||||
extra.update({"version": self.version})
|
|
||||||
|
extra['project'] = self.project
|
||||||
|
extra['version'] = self.version
|
||||||
extra['extra'] = extra.copy()
|
extra['extra'] = extra.copy()
|
||||||
return msg, kwargs
|
return msg, kwargs
|
||||||
|
|
||||||
@ -266,7 +351,7 @@ class JSONFormatter(logging.Formatter):
|
|||||||
def formatException(self, ei, strip_newlines=True):
|
def formatException(self, ei, strip_newlines=True):
|
||||||
lines = traceback.format_exception(*ei)
|
lines = traceback.format_exception(*ei)
|
||||||
if strip_newlines:
|
if strip_newlines:
|
||||||
lines = [itertools.ifilter(
|
lines = [moves.filter(
|
||||||
lambda x: x,
|
lambda x: x,
|
||||||
line.rstrip().splitlines()) for line in lines]
|
line.rstrip().splitlines()) for line in lines]
|
||||||
lines = list(itertools.chain(*lines))
|
lines = list(itertools.chain(*lines))
|
||||||
@ -303,30 +388,40 @@ class JSONFormatter(logging.Formatter):
|
|||||||
return jsonutils.dumps(message)
|
return jsonutils.dumps(message)
|
||||||
|
|
||||||
|
|
||||||
class PublishErrorsHandler(logging.Handler):
|
|
||||||
def emit(self, record):
|
|
||||||
if ('designate.openstack.common.notifier.log_notifier' in
|
|
||||||
CONF.notification_driver):
|
|
||||||
return
|
|
||||||
notifier.api.notify(None, 'error.publisher',
|
|
||||||
'error_notification',
|
|
||||||
notifier.api.ERROR,
|
|
||||||
dict(error=record.msg))
|
|
||||||
|
|
||||||
|
|
||||||
def _create_logging_excepthook(product_name):
|
def _create_logging_excepthook(product_name):
|
||||||
def logging_excepthook(type, value, tb):
|
def logging_excepthook(exc_type, value, tb):
|
||||||
extra = {}
|
extra = {}
|
||||||
if CONF.verbose:
|
if CONF.verbose or CONF.debug:
|
||||||
extra['exc_info'] = (type, value, tb)
|
extra['exc_info'] = (exc_type, value, tb)
|
||||||
getLogger(product_name).critical(str(value), **extra)
|
getLogger(product_name).critical(str(value), **extra)
|
||||||
return logging_excepthook
|
return logging_excepthook
|
||||||
|
|
||||||
|
|
||||||
|
class LogConfigError(Exception):
|
||||||
|
|
||||||
|
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
|
||||||
|
|
||||||
|
def __init__(self, log_config, err_msg):
|
||||||
|
self.log_config = log_config
|
||||||
|
self.err_msg = err_msg
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return self.message % dict(log_config=self.log_config,
|
||||||
|
err_msg=self.err_msg)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_log_config(log_config_append):
|
||||||
|
try:
|
||||||
|
logging.config.fileConfig(log_config_append,
|
||||||
|
disable_existing_loggers=False)
|
||||||
|
except moves.configparser.Error as exc:
|
||||||
|
raise LogConfigError(log_config_append, str(exc))
|
||||||
|
|
||||||
|
|
||||||
def setup(product_name):
|
def setup(product_name):
|
||||||
"""Setup logging."""
|
"""Setup logging."""
|
||||||
if CONF.log_config:
|
if CONF.log_config_append:
|
||||||
logging.config.fileConfig(CONF.log_config)
|
_load_log_config(CONF.log_config_append)
|
||||||
else:
|
else:
|
||||||
_setup_logging_from_conf()
|
_setup_logging_from_conf()
|
||||||
sys.excepthook = _create_logging_excepthook(product_name)
|
sys.excepthook = _create_logging_excepthook(product_name)
|
||||||
@ -378,31 +473,33 @@ def _setup_logging_from_conf():
|
|||||||
filelog = logging.handlers.WatchedFileHandler(logpath)
|
filelog = logging.handlers.WatchedFileHandler(logpath)
|
||||||
log_root.addHandler(filelog)
|
log_root.addHandler(filelog)
|
||||||
|
|
||||||
mode = int(CONF.logfile_mode, 8)
|
|
||||||
st = os.stat(logpath)
|
|
||||||
if st.st_mode != (stat.S_IFREG | mode):
|
|
||||||
os.chmod(logpath, mode)
|
|
||||||
|
|
||||||
if CONF.use_stderr:
|
if CONF.use_stderr:
|
||||||
streamlog = ColorHandler()
|
streamlog = ColorHandler()
|
||||||
log_root.addHandler(streamlog)
|
log_root.addHandler(streamlog)
|
||||||
|
|
||||||
elif not CONF.log_file:
|
elif not logpath:
|
||||||
# pass sys.stdout as a positional argument
|
# pass sys.stdout as a positional argument
|
||||||
# python2.6 calls the argument strm, in 2.7 it's stream
|
# python2.6 calls the argument strm, in 2.7 it's stream
|
||||||
streamlog = logging.StreamHandler(sys.stdout)
|
streamlog = logging.StreamHandler(sys.stdout)
|
||||||
log_root.addHandler(streamlog)
|
log_root.addHandler(streamlog)
|
||||||
|
|
||||||
if CONF.publish_errors:
|
if CONF.publish_errors:
|
||||||
log_root.addHandler(PublishErrorsHandler(logging.ERROR))
|
handler = importutils.import_object(
|
||||||
|
"designate.openstack.common.log_handler.PublishErrorsHandler",
|
||||||
|
logging.ERROR)
|
||||||
|
log_root.addHandler(handler)
|
||||||
|
|
||||||
|
datefmt = CONF.log_date_format
|
||||||
for handler in log_root.handlers:
|
for handler in log_root.handlers:
|
||||||
datefmt = CONF.log_date_format
|
# NOTE(alaski): CONF.log_format overrides everything currently. This
|
||||||
|
# should be deprecated in favor of context aware formatting.
|
||||||
if CONF.log_format:
|
if CONF.log_format:
|
||||||
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
|
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
|
||||||
datefmt=datefmt))
|
datefmt=datefmt))
|
||||||
|
log_root.info('Deprecated: log_format is now deprecated and will '
|
||||||
|
'be removed in the next release')
|
||||||
else:
|
else:
|
||||||
handler.setFormatter(LegacyFormatter(datefmt=datefmt))
|
handler.setFormatter(ContextFormatter(datefmt=datefmt))
|
||||||
|
|
||||||
if CONF.debug:
|
if CONF.debug:
|
||||||
log_root.setLevel(logging.DEBUG)
|
log_root.setLevel(logging.DEBUG)
|
||||||
@ -411,14 +508,11 @@ def _setup_logging_from_conf():
|
|||||||
else:
|
else:
|
||||||
log_root.setLevel(logging.WARNING)
|
log_root.setLevel(logging.WARNING)
|
||||||
|
|
||||||
level = logging.NOTSET
|
|
||||||
for pair in CONF.default_log_levels:
|
for pair in CONF.default_log_levels:
|
||||||
mod, _sep, level_name = pair.partition('=')
|
mod, _sep, level_name = pair.partition('=')
|
||||||
level = logging.getLevelName(level_name)
|
level = logging.getLevelName(level_name)
|
||||||
logger = logging.getLogger(mod)
|
logger = logging.getLogger(mod)
|
||||||
logger.setLevel(level)
|
logger.setLevel(level)
|
||||||
for handler in log_root.handlers:
|
|
||||||
logger.addHandler(handler)
|
|
||||||
|
|
||||||
_loggers = {}
|
_loggers = {}
|
||||||
|
|
||||||
@ -431,6 +525,16 @@ def getLogger(name='unknown', version='unknown'):
|
|||||||
return _loggers[name]
|
return _loggers[name]
|
||||||
|
|
||||||
|
|
||||||
|
def getLazyLogger(name='unknown', version='unknown'):
|
||||||
|
"""Returns lazy logger.
|
||||||
|
|
||||||
|
Creates a pass-through logger that does not create the real logger
|
||||||
|
until it is really needed and delegates all calls to the real logger
|
||||||
|
once it is created.
|
||||||
|
"""
|
||||||
|
return LazyAdapter(name, version)
|
||||||
|
|
||||||
|
|
||||||
class WritableLogger(object):
|
class WritableLogger(object):
|
||||||
"""A thin wrapper that responds to `write` and logs."""
|
"""A thin wrapper that responds to `write` and logs."""
|
||||||
|
|
||||||
@ -442,7 +546,7 @@ class WritableLogger(object):
|
|||||||
self.logger.log(self.level, msg)
|
self.logger.log(self.level, msg)
|
||||||
|
|
||||||
|
|
||||||
class LegacyFormatter(logging.Formatter):
|
class ContextFormatter(logging.Formatter):
|
||||||
"""A context.RequestContext aware formatter configured through flags.
|
"""A context.RequestContext aware formatter configured through flags.
|
||||||
|
|
||||||
The flags used to set format strings are: logging_context_format_string
|
The flags used to set format strings are: logging_context_format_string
|
||||||
@ -483,7 +587,7 @@ class LegacyFormatter(logging.Formatter):
|
|||||||
if not record:
|
if not record:
|
||||||
return logging.Formatter.formatException(self, exc_info)
|
return logging.Formatter.formatException(self, exc_info)
|
||||||
|
|
||||||
stringbuffer = cStringIO.StringIO()
|
stringbuffer = moves.StringIO()
|
||||||
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
|
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
|
||||||
None, stringbuffer)
|
None, stringbuffer)
|
||||||
lines = stringbuffer.getvalue().split('\n')
|
lines = stringbuffer.getvalue().split('\n')
|
||||||
|
@ -1,5 +1,3 @@
|
|||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2010 United States Government as represented by the
|
# Copyright 2010 United States Government as represented by the
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
# Copyright 2011 Justin Santa Barbara
|
# Copyright 2011 Justin Santa Barbara
|
||||||
@ -46,12 +44,23 @@ class LoopingCallDone(Exception):
|
|||||||
self.retvalue = retvalue
|
self.retvalue = retvalue
|
||||||
|
|
||||||
|
|
||||||
class LoopingCall(object):
|
class LoopingCallBase(object):
|
||||||
def __init__(self, f=None, *args, **kw):
|
def __init__(self, f=None, *args, **kw):
|
||||||
self.args = args
|
self.args = args
|
||||||
self.kw = kw
|
self.kw = kw
|
||||||
self.f = f
|
self.f = f
|
||||||
self._running = False
|
self._running = False
|
||||||
|
self.done = None
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
self._running = False
|
||||||
|
|
||||||
|
def wait(self):
|
||||||
|
return self.done.wait()
|
||||||
|
|
||||||
|
|
||||||
|
class FixedIntervalLoopingCall(LoopingCallBase):
|
||||||
|
"""A fixed interval looping call."""
|
||||||
|
|
||||||
def start(self, interval, initial_delay=None):
|
def start(self, interval, initial_delay=None):
|
||||||
self._running = True
|
self._running = True
|
||||||
@ -73,11 +82,11 @@ class LoopingCall(object):
|
|||||||
LOG.warn(_('task run outlasted interval by %s sec') %
|
LOG.warn(_('task run outlasted interval by %s sec') %
|
||||||
-delay)
|
-delay)
|
||||||
greenthread.sleep(delay if delay > 0 else 0)
|
greenthread.sleep(delay if delay > 0 else 0)
|
||||||
except LoopingCallDone, e:
|
except LoopingCallDone as e:
|
||||||
self.stop()
|
self.stop()
|
||||||
done.send(e.retvalue)
|
done.send(e.retvalue)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_('in looping call'))
|
LOG.exception(_('in fixed duration looping call'))
|
||||||
done.send_exception(*sys.exc_info())
|
done.send_exception(*sys.exc_info())
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
@ -88,8 +97,49 @@ class LoopingCall(object):
|
|||||||
greenthread.spawn_n(_inner)
|
greenthread.spawn_n(_inner)
|
||||||
return self.done
|
return self.done
|
||||||
|
|
||||||
def stop(self):
|
|
||||||
self._running = False
|
|
||||||
|
|
||||||
def wait(self):
|
# TODO(mikal): this class name is deprecated in Havana and should be removed
|
||||||
return self.done.wait()
|
# in the I release
|
||||||
|
LoopingCall = FixedIntervalLoopingCall
|
||||||
|
|
||||||
|
|
||||||
|
class DynamicLoopingCall(LoopingCallBase):
|
||||||
|
"""A looping call which sleeps until the next known event.
|
||||||
|
|
||||||
|
The function called should return how long to sleep for before being
|
||||||
|
called again.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def start(self, initial_delay=None, periodic_interval_max=None):
|
||||||
|
self._running = True
|
||||||
|
done = event.Event()
|
||||||
|
|
||||||
|
def _inner():
|
||||||
|
if initial_delay:
|
||||||
|
greenthread.sleep(initial_delay)
|
||||||
|
|
||||||
|
try:
|
||||||
|
while self._running:
|
||||||
|
idle = self.f(*self.args, **self.kw)
|
||||||
|
if not self._running:
|
||||||
|
break
|
||||||
|
|
||||||
|
if periodic_interval_max is not None:
|
||||||
|
idle = min(idle, periodic_interval_max)
|
||||||
|
LOG.debug(_('Dynamic looping call sleeping for %.02f '
|
||||||
|
'seconds'), idle)
|
||||||
|
greenthread.sleep(idle)
|
||||||
|
except LoopingCallDone as e:
|
||||||
|
self.stop()
|
||||||
|
done.send(e.retvalue)
|
||||||
|
except Exception:
|
||||||
|
LOG.exception(_('in dynamic looping call'))
|
||||||
|
done.send_exception(*sys.exc_info())
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
done.send(True)
|
||||||
|
|
||||||
|
self.done = done
|
||||||
|
|
||||||
|
greenthread.spawn(_inner)
|
||||||
|
return self.done
|
||||||
|
@ -1,5 +1,3 @@
|
|||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2012 OpenStack Foundation.
|
# Copyright 2012 OpenStack Foundation.
|
||||||
# All Rights Reserved.
|
# All Rights Reserved.
|
||||||
#
|
#
|
||||||
@ -19,14 +17,12 @@
|
|||||||
Network-related utilities and helper functions.
|
Network-related utilities and helper functions.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import logging
|
from designate.openstack.common.py3kcompat import urlutils
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def parse_host_port(address, default_port=None):
|
def parse_host_port(address, default_port=None):
|
||||||
"""
|
"""Interpret a string as a host:port pair.
|
||||||
Interpret a string as a host:port pair.
|
|
||||||
An IPv6 address MUST be escaped if accompanied by a port,
|
An IPv6 address MUST be escaped if accompanied by a port,
|
||||||
because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
|
because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
|
||||||
means both [2001:db8:85a3::8a2e:370:7334] and
|
means both [2001:db8:85a3::8a2e:370:7334] and
|
||||||
@ -66,3 +62,18 @@ def parse_host_port(address, default_port=None):
|
|||||||
port = default_port
|
port = default_port
|
||||||
|
|
||||||
return (host, None if port is None else int(port))
|
return (host, None if port is None else int(port))
|
||||||
|
|
||||||
|
|
||||||
|
def urlsplit(url, scheme='', allow_fragments=True):
|
||||||
|
"""Parse a URL using urlparse.urlsplit(), splitting query and fragments.
|
||||||
|
This function papers over Python issue9374 when needed.
|
||||||
|
|
||||||
|
The parameters are the same as urlparse.urlsplit.
|
||||||
|
"""
|
||||||
|
scheme, netloc, path, query, fragment = urlutils.urlsplit(
|
||||||
|
url, scheme, allow_fragments)
|
||||||
|
if allow_fragments and '#' in path:
|
||||||
|
path, fragment = path.split('#', 1)
|
||||||
|
if '?' in path:
|
||||||
|
path, query = path.split('?', 1)
|
||||||
|
return urlutils.SplitResult(scheme, netloc, path, query, fragment)
|
||||||
|
@ -1,14 +0,0 @@
|
|||||||
# Copyright 2011 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
@ -13,6 +13,7 @@
|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
|
import socket
|
||||||
import uuid
|
import uuid
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
@ -30,13 +31,12 @@ LOG = logging.getLogger(__name__)
|
|||||||
notifier_opts = [
|
notifier_opts = [
|
||||||
cfg.MultiStrOpt('notification_driver',
|
cfg.MultiStrOpt('notification_driver',
|
||||||
default=[],
|
default=[],
|
||||||
deprecated_name='list_notifier_drivers',
|
|
||||||
help='Driver or drivers to handle sending notifications'),
|
help='Driver or drivers to handle sending notifications'),
|
||||||
cfg.StrOpt('default_notification_level',
|
cfg.StrOpt('default_notification_level',
|
||||||
default='INFO',
|
default='INFO',
|
||||||
help='Default notification level for outgoing notifications'),
|
help='Default notification level for outgoing notifications'),
|
||||||
cfg.StrOpt('default_publisher_id',
|
cfg.StrOpt('default_publisher_id',
|
||||||
default='$host',
|
default=None,
|
||||||
help='Default publisher_id for outgoing notifications'),
|
help='Default publisher_id for outgoing notifications'),
|
||||||
]
|
]
|
||||||
|
|
||||||
@ -57,7 +57,7 @@ class BadPriorityException(Exception):
|
|||||||
|
|
||||||
|
|
||||||
def notify_decorator(name, fn):
|
def notify_decorator(name, fn):
|
||||||
""" decorator for notify which is used from utils.monkey_patch()
|
"""Decorator for notify which is used from utils.monkey_patch().
|
||||||
|
|
||||||
:param name: name of the function
|
:param name: name of the function
|
||||||
:param function: - object of the function
|
:param function: - object of the function
|
||||||
@ -75,7 +75,7 @@ def notify_decorator(name, fn):
|
|||||||
|
|
||||||
ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
|
ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
|
||||||
notify(ctxt,
|
notify(ctxt,
|
||||||
CONF.default_publisher_id,
|
CONF.default_publisher_id or socket.gethostname(),
|
||||||
name,
|
name,
|
||||||
CONF.default_notification_level,
|
CONF.default_notification_level,
|
||||||
body)
|
body)
|
||||||
@ -85,7 +85,10 @@ def notify_decorator(name, fn):
|
|||||||
|
|
||||||
def publisher_id(service, host=None):
|
def publisher_id(service, host=None):
|
||||||
if not host:
|
if not host:
|
||||||
host = CONF.host
|
try:
|
||||||
|
host = CONF.host
|
||||||
|
except AttributeError:
|
||||||
|
host = CONF.default_publisher_id or socket.gethostname()
|
||||||
return "%s.%s" % (service, host)
|
return "%s.%s" % (service, host)
|
||||||
|
|
||||||
|
|
||||||
@ -154,29 +157,16 @@ def _get_drivers():
|
|||||||
if _drivers is None:
|
if _drivers is None:
|
||||||
_drivers = {}
|
_drivers = {}
|
||||||
for notification_driver in CONF.notification_driver:
|
for notification_driver in CONF.notification_driver:
|
||||||
add_driver(notification_driver)
|
try:
|
||||||
|
driver = importutils.import_module(notification_driver)
|
||||||
|
_drivers[notification_driver] = driver
|
||||||
|
except ImportError:
|
||||||
|
LOG.exception(_("Failed to load notifier %s. "
|
||||||
|
"These notifications will not be sent.") %
|
||||||
|
notification_driver)
|
||||||
return _drivers.values()
|
return _drivers.values()
|
||||||
|
|
||||||
|
|
||||||
def add_driver(notification_driver):
|
|
||||||
"""Add a notification driver at runtime."""
|
|
||||||
# Make sure the driver list is initialized.
|
|
||||||
_get_drivers()
|
|
||||||
if isinstance(notification_driver, basestring):
|
|
||||||
# Load and add
|
|
||||||
try:
|
|
||||||
driver = importutils.import_module(notification_driver)
|
|
||||||
_drivers[notification_driver] = driver
|
|
||||||
except ImportError:
|
|
||||||
LOG.exception(_("Failed to load notifier %s. "
|
|
||||||
"These notifications will not be sent.") %
|
|
||||||
notification_driver)
|
|
||||||
else:
|
|
||||||
# Driver is already loaded; just add the object.
|
|
||||||
_drivers[notification_driver] = notification_driver
|
|
||||||
|
|
||||||
|
|
||||||
def _reset_drivers():
|
def _reset_drivers():
|
||||||
"""Used by unit tests to reset the drivers."""
|
"""Used by unit tests to reset the drivers."""
|
||||||
global _drivers
|
global _drivers
|
||||||
|
@ -24,7 +24,9 @@ CONF = cfg.CONF
|
|||||||
|
|
||||||
def notify(_context, message):
|
def notify(_context, message):
|
||||||
"""Notifies the recipient of the desired event given the model.
|
"""Notifies the recipient of the desired event given the model.
|
||||||
Log notifications using openstack's default logging system"""
|
|
||||||
|
Log notifications using OpenStack's default logging system.
|
||||||
|
"""
|
||||||
|
|
||||||
priority = message.get('priority',
|
priority = message.get('priority',
|
||||||
CONF.default_notification_level)
|
CONF.default_notification_level)
|
||||||
|
@ -15,5 +15,5 @@
|
|||||||
|
|
||||||
|
|
||||||
def notify(_context, message):
|
def notify(_context, message):
|
||||||
"""Notifies the recipient of the desired event given the model"""
|
"""Notifies the recipient of the desired event given the model."""
|
||||||
pass
|
pass
|
||||||
|
79
designate/openstack/common/notifier/proxy.py
Normal file
79
designate/openstack/common/notifier/proxy.py
Normal file
@ -0,0 +1,79 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2013 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
A temporary helper which emulates designate.messaging.Notifier.
|
||||||
|
|
||||||
|
This helper method allows us to do the tedious porting to the new Notifier API
|
||||||
|
as a standalone commit so that the commit which switches us to designate.messaging
|
||||||
|
is smaller and easier to review. This file will be removed as part of that
|
||||||
|
commit.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from oslo.config import cfg
|
||||||
|
|
||||||
|
from designate.openstack.common.notifier import api as notifier_api
|
||||||
|
|
||||||
|
CONF = cfg.CONF
|
||||||
|
|
||||||
|
|
||||||
|
class Notifier(object):
|
||||||
|
|
||||||
|
def __init__(self, publisher_id):
|
||||||
|
super(Notifier, self).__init__()
|
||||||
|
self.publisher_id = publisher_id
|
||||||
|
|
||||||
|
_marker = object()
|
||||||
|
|
||||||
|
def prepare(self, publisher_id=_marker):
|
||||||
|
ret = self.__class__(self.publisher_id)
|
||||||
|
if publisher_id is not self._marker:
|
||||||
|
ret.publisher_id = publisher_id
|
||||||
|
return ret
|
||||||
|
|
||||||
|
def _notify(self, ctxt, event_type, payload, priority):
|
||||||
|
notifier_api.notify(ctxt,
|
||||||
|
self.publisher_id,
|
||||||
|
event_type,
|
||||||
|
priority,
|
||||||
|
payload)
|
||||||
|
|
||||||
|
def audit(self, ctxt, event_type, payload):
|
||||||
|
# No audit in old notifier.
|
||||||
|
self._notify(ctxt, event_type, payload, 'INFO')
|
||||||
|
|
||||||
|
def debug(self, ctxt, event_type, payload):
|
||||||
|
self._notify(ctxt, event_type, payload, 'DEBUG')
|
||||||
|
|
||||||
|
def info(self, ctxt, event_type, payload):
|
||||||
|
self._notify(ctxt, event_type, payload, 'INFO')
|
||||||
|
|
||||||
|
def warn(self, ctxt, event_type, payload):
|
||||||
|
self._notify(ctxt, event_type, payload, 'WARN')
|
||||||
|
|
||||||
|
warning = warn
|
||||||
|
|
||||||
|
def error(self, ctxt, event_type, payload):
|
||||||
|
self._notify(ctxt, event_type, payload, 'ERROR')
|
||||||
|
|
||||||
|
def critical(self, ctxt, event_type, payload):
|
||||||
|
self._notify(ctxt, event_type, payload, 'CRITICAL')
|
||||||
|
|
||||||
|
|
||||||
|
def get_notifier(service=None, host=None, publisher_id=None):
|
||||||
|
if not publisher_id:
|
||||||
|
publisher_id = "%s.%s" % (service, host or CONF.host)
|
||||||
|
return Notifier(publisher_id)
|
@ -24,14 +24,14 @@ LOG = logging.getLogger(__name__)
|
|||||||
|
|
||||||
notification_topic_opt = cfg.ListOpt(
|
notification_topic_opt = cfg.ListOpt(
|
||||||
'notification_topics', default=['notifications', ],
|
'notification_topics', default=['notifications', ],
|
||||||
help='AMQP topic used for openstack notifications')
|
help='AMQP topic used for OpenStack notifications')
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
CONF.register_opt(notification_topic_opt)
|
CONF.register_opt(notification_topic_opt)
|
||||||
|
|
||||||
|
|
||||||
def notify(context, message):
|
def notify(context, message):
|
||||||
"""Sends a notification via RPC"""
|
"""Sends a notification via RPC."""
|
||||||
if not context:
|
if not context:
|
||||||
context = req_context.get_admin_context()
|
context = req_context.get_admin_context()
|
||||||
priority = message.get('priority',
|
priority = message.get('priority',
|
||||||
@ -43,4 +43,5 @@ def notify(context, message):
|
|||||||
rpc.notify(context, topic, message)
|
rpc.notify(context, topic, message)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_("Could not send notification to %(topic)s. "
|
LOG.exception(_("Could not send notification to %(topic)s. "
|
||||||
"Payload=%(message)s"), locals())
|
"Payload=%(message)s"),
|
||||||
|
{"topic": topic, "message": message})
|
||||||
|
@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__)
|
|||||||
|
|
||||||
notification_topic_opt = cfg.ListOpt(
|
notification_topic_opt = cfg.ListOpt(
|
||||||
'topics', default=['notifications', ],
|
'topics', default=['notifications', ],
|
||||||
help='AMQP topic(s) used for openstack notifications')
|
help='AMQP topic(s) used for OpenStack notifications')
|
||||||
|
|
||||||
opt_group = cfg.OptGroup(name='rpc_notifier2',
|
opt_group = cfg.OptGroup(name='rpc_notifier2',
|
||||||
title='Options for rpc_notifier2')
|
title='Options for rpc_notifier2')
|
||||||
@ -37,7 +37,7 @@ CONF.register_opt(notification_topic_opt, opt_group)
|
|||||||
|
|
||||||
|
|
||||||
def notify(context, message):
|
def notify(context, message):
|
||||||
"""Sends a notification via RPC"""
|
"""Sends a notification via RPC."""
|
||||||
if not context:
|
if not context:
|
||||||
context = req_context.get_admin_context()
|
context = req_context.get_admin_context()
|
||||||
priority = message.get('priority',
|
priority = message.get('priority',
|
||||||
@ -49,4 +49,5 @@ def notify(context, message):
|
|||||||
rpc.notify(context, topic, message, envelope=True)
|
rpc.notify(context, topic, message, envelope=True)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_("Could not send notification to %(topic)s. "
|
LOG.exception(_("Could not send notification to %(topic)s. "
|
||||||
"Payload=%(message)s"), locals())
|
"Payload=%(message)s"),
|
||||||
|
{"topic": topic, "message": message})
|
||||||
|
@ -1,115 +0,0 @@
|
|||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
from designate.openstack.common.gettextutils import _
|
|
||||||
from designate.openstack.common import log as logging
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def periodic_task(*args, **kwargs):
|
|
||||||
"""Decorator to indicate that a method is a periodic task.
|
|
||||||
|
|
||||||
This decorator can be used in two ways:
|
|
||||||
|
|
||||||
1. Without arguments '@periodic_task', this will be run on every tick
|
|
||||||
of the periodic scheduler.
|
|
||||||
|
|
||||||
2. With arguments, @periodic_task(ticks_between_runs=N), this will be
|
|
||||||
run on every N ticks of the periodic scheduler.
|
|
||||||
"""
|
|
||||||
def decorator(f):
|
|
||||||
f._periodic_task = True
|
|
||||||
f._ticks_between_runs = kwargs.pop('ticks_between_runs', 0)
|
|
||||||
return f
|
|
||||||
|
|
||||||
# NOTE(sirp): The `if` is necessary to allow the decorator to be used with
|
|
||||||
# and without parens.
|
|
||||||
#
|
|
||||||
# In the 'with-parens' case (with kwargs present), this function needs to
|
|
||||||
# return a decorator function since the interpreter will invoke it like:
|
|
||||||
#
|
|
||||||
# periodic_task(*args, **kwargs)(f)
|
|
||||||
#
|
|
||||||
# In the 'without-parens' case, the original function will be passed
|
|
||||||
# in as the first argument, like:
|
|
||||||
#
|
|
||||||
# periodic_task(f)
|
|
||||||
if kwargs:
|
|
||||||
return decorator
|
|
||||||
else:
|
|
||||||
return decorator(args[0])
|
|
||||||
|
|
||||||
|
|
||||||
class _PeriodicTasksMeta(type):
|
|
||||||
def __init__(cls, names, bases, dict_):
|
|
||||||
"""Metaclass that allows us to collect decorated periodic tasks."""
|
|
||||||
super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
|
|
||||||
|
|
||||||
# NOTE(sirp): if the attribute is not present then we must be the base
|
|
||||||
# class, so, go ahead and initialize it. If the attribute is present,
|
|
||||||
# then we're a subclass so make a copy of it so we don't step on our
|
|
||||||
# parent's toes.
|
|
||||||
try:
|
|
||||||
cls._periodic_tasks = cls._periodic_tasks[:]
|
|
||||||
except AttributeError:
|
|
||||||
cls._periodic_tasks = []
|
|
||||||
|
|
||||||
try:
|
|
||||||
cls._ticks_to_skip = cls._ticks_to_skip.copy()
|
|
||||||
except AttributeError:
|
|
||||||
cls._ticks_to_skip = {}
|
|
||||||
|
|
||||||
# This uses __dict__ instead of
|
|
||||||
# inspect.getmembers(cls, inspect.ismethod) so only the methods of the
|
|
||||||
# current class are added when this class is scanned, and base classes
|
|
||||||
# are not added redundantly.
|
|
||||||
for value in cls.__dict__.values():
|
|
||||||
if getattr(value, '_periodic_task', False):
|
|
||||||
task = value
|
|
||||||
name = task.__name__
|
|
||||||
cls._periodic_tasks.append((name, task))
|
|
||||||
cls._ticks_to_skip[name] = task._ticks_between_runs
|
|
||||||
|
|
||||||
|
|
||||||
class PeriodicTasks(object):
|
|
||||||
__metaclass__ = _PeriodicTasksMeta
|
|
||||||
|
|
||||||
def run_periodic_tasks(self, context, raise_on_error=False):
|
|
||||||
"""Tasks to be run at a periodic interval."""
|
|
||||||
for task_name, task in self._periodic_tasks:
|
|
||||||
full_task_name = '.'.join([self.__class__.__name__, task_name])
|
|
||||||
|
|
||||||
ticks_to_skip = self._ticks_to_skip[task_name]
|
|
||||||
if ticks_to_skip > 0:
|
|
||||||
LOG.debug(_("Skipping %(full_task_name)s, %(ticks_to_skip)s"
|
|
||||||
" ticks left until next run"),
|
|
||||||
dict(full_task_name=full_task_name,
|
|
||||||
ticks_to_skip=ticks_to_skip))
|
|
||||||
self._ticks_to_skip[task_name] -= 1
|
|
||||||
continue
|
|
||||||
|
|
||||||
self._ticks_to_skip[task_name] = task._ticks_between_runs
|
|
||||||
LOG.debug(_("Running periodic task %(full_task_name)s"),
|
|
||||||
dict(full_task_name=full_task_name))
|
|
||||||
|
|
||||||
try:
|
|
||||||
task(self, context)
|
|
||||||
except Exception as e:
|
|
||||||
if raise_on_error:
|
|
||||||
raise
|
|
||||||
LOG.exception(_("Error during %(full_task_name)s:"
|
|
||||||
" %(e)s"),
|
|
||||||
dict(e=e, full_task_name=full_task_name))
|
|
@ -1,5 +1,3 @@
|
|||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2011 OpenStack Foundation.
|
# Copyright 2011 OpenStack Foundation.
|
||||||
# All Rights Reserved.
|
# All Rights Reserved.
|
||||||
#
|
#
|
||||||
@ -19,19 +17,27 @@
|
|||||||
System-level utilities and helper functions.
|
System-level utilities and helper functions.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import logging
|
import logging as stdlib_logging
|
||||||
|
import os
|
||||||
import random
|
import random
|
||||||
import shlex
|
import shlex
|
||||||
|
import signal
|
||||||
|
|
||||||
from eventlet.green import subprocess
|
from eventlet.green import subprocess
|
||||||
from eventlet import greenthread
|
from eventlet import greenthread
|
||||||
|
|
||||||
from designate.openstack.common.gettextutils import _
|
from designate.openstack.common.gettextutils import _
|
||||||
|
from designate.openstack.common import log as logging
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class InvalidArgumentError(Exception):
|
||||||
|
def __init__(self, message=None):
|
||||||
|
super(InvalidArgumentError, self).__init__(message)
|
||||||
|
|
||||||
|
|
||||||
class UnknownArgumentError(Exception):
|
class UnknownArgumentError(Exception):
|
||||||
def __init__(self, message=None):
|
def __init__(self, message=None):
|
||||||
super(UnknownArgumentError, self).__init__(message)
|
super(UnknownArgumentError, self).__init__(message)
|
||||||
@ -40,29 +46,53 @@ class UnknownArgumentError(Exception):
|
|||||||
class ProcessExecutionError(Exception):
|
class ProcessExecutionError(Exception):
|
||||||
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
|
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
|
||||||
description=None):
|
description=None):
|
||||||
|
self.exit_code = exit_code
|
||||||
|
self.stderr = stderr
|
||||||
|
self.stdout = stdout
|
||||||
|
self.cmd = cmd
|
||||||
|
self.description = description
|
||||||
|
|
||||||
if description is None:
|
if description is None:
|
||||||
description = "Unexpected error while running command."
|
description = _("Unexpected error while running command.")
|
||||||
if exit_code is None:
|
if exit_code is None:
|
||||||
exit_code = '-'
|
exit_code = '-'
|
||||||
message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
|
message = _('%(description)s\n'
|
||||||
% (description, cmd, exit_code, stdout, stderr))
|
'Command: %(cmd)s\n'
|
||||||
|
'Exit code: %(exit_code)s\n'
|
||||||
|
'Stdout: %(stdout)r\n'
|
||||||
|
'Stderr: %(stderr)r') % {'description': description,
|
||||||
|
'cmd': cmd,
|
||||||
|
'exit_code': exit_code,
|
||||||
|
'stdout': stdout,
|
||||||
|
'stderr': stderr}
|
||||||
super(ProcessExecutionError, self).__init__(message)
|
super(ProcessExecutionError, self).__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
class NoRootWrapSpecified(Exception):
|
||||||
|
def __init__(self, message=None):
|
||||||
|
super(NoRootWrapSpecified, self).__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
def _subprocess_setup():
|
||||||
|
# Python installs a SIGPIPE handler by default. This is usually not what
|
||||||
|
# non-Python subprocesses expect.
|
||||||
|
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
|
||||||
|
|
||||||
|
|
||||||
def execute(*cmd, **kwargs):
|
def execute(*cmd, **kwargs):
|
||||||
"""
|
"""Helper method to shell out and execute a command through subprocess.
|
||||||
Helper method to shell out and execute a command through subprocess with
|
|
||||||
optional retry.
|
Allows optional retry.
|
||||||
|
|
||||||
:param cmd: Passed to subprocess.Popen.
|
:param cmd: Passed to subprocess.Popen.
|
||||||
:type cmd: string
|
:type cmd: string
|
||||||
:param process_input: Send to opened process.
|
:param process_input: Send to opened process.
|
||||||
:type proces_input: string
|
:type process_input: string
|
||||||
:param check_exit_code: Defaults to 0. Will raise
|
:param check_exit_code: Single bool, int, or list of allowed exit
|
||||||
:class:`ProcessExecutionError`
|
codes. Defaults to [0]. Raise
|
||||||
if the command exits without returning this value
|
:class:`ProcessExecutionError` unless
|
||||||
as a returncode
|
program exits with one of these code.
|
||||||
:type check_exit_code: int
|
:type check_exit_code: boolean, int, or [int]
|
||||||
:param delay_on_retry: True | False. Defaults to True. If set to True,
|
:param delay_on_retry: True | False. Defaults to True. If set to True,
|
||||||
wait a short amount of time before retrying.
|
wait a short amount of time before retrying.
|
||||||
:type delay_on_retry: boolean
|
:type delay_on_retry: boolean
|
||||||
@ -72,8 +102,15 @@ def execute(*cmd, **kwargs):
|
|||||||
the command is prefixed by the command specified
|
the command is prefixed by the command specified
|
||||||
in the root_helper kwarg.
|
in the root_helper kwarg.
|
||||||
:type run_as_root: boolean
|
:type run_as_root: boolean
|
||||||
:param root_helper: command to prefix all cmd's with
|
:param root_helper: command to prefix to commands called with
|
||||||
|
run_as_root=True
|
||||||
:type root_helper: string
|
:type root_helper: string
|
||||||
|
:param shell: whether or not there should be a shell used to
|
||||||
|
execute this command. Defaults to false.
|
||||||
|
:type shell: boolean
|
||||||
|
:param loglevel: log level for execute commands.
|
||||||
|
:type loglevel: int. (Should be stdlib_logging.DEBUG or
|
||||||
|
stdlib_logging.INFO)
|
||||||
:returns: (stdout, stderr) from process execution
|
:returns: (stdout, stderr) from process execution
|
||||||
:raises: :class:`UnknownArgumentError` on
|
:raises: :class:`UnknownArgumentError` on
|
||||||
receiving unknown arguments
|
receiving unknown arguments
|
||||||
@ -81,28 +118,54 @@ def execute(*cmd, **kwargs):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
process_input = kwargs.pop('process_input', None)
|
process_input = kwargs.pop('process_input', None)
|
||||||
check_exit_code = kwargs.pop('check_exit_code', 0)
|
check_exit_code = kwargs.pop('check_exit_code', [0])
|
||||||
|
ignore_exit_code = False
|
||||||
delay_on_retry = kwargs.pop('delay_on_retry', True)
|
delay_on_retry = kwargs.pop('delay_on_retry', True)
|
||||||
attempts = kwargs.pop('attempts', 1)
|
attempts = kwargs.pop('attempts', 1)
|
||||||
run_as_root = kwargs.pop('run_as_root', False)
|
run_as_root = kwargs.pop('run_as_root', False)
|
||||||
root_helper = kwargs.pop('root_helper', '')
|
root_helper = kwargs.pop('root_helper', '')
|
||||||
if len(kwargs):
|
shell = kwargs.pop('shell', False)
|
||||||
|
loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)
|
||||||
|
|
||||||
|
if isinstance(check_exit_code, bool):
|
||||||
|
ignore_exit_code = not check_exit_code
|
||||||
|
check_exit_code = [0]
|
||||||
|
elif isinstance(check_exit_code, int):
|
||||||
|
check_exit_code = [check_exit_code]
|
||||||
|
|
||||||
|
if kwargs:
|
||||||
raise UnknownArgumentError(_('Got unknown keyword args '
|
raise UnknownArgumentError(_('Got unknown keyword args '
|
||||||
'to utils.execute: %r') % kwargs)
|
'to utils.execute: %r') % kwargs)
|
||||||
if run_as_root:
|
|
||||||
|
if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
|
||||||
|
if not root_helper:
|
||||||
|
raise NoRootWrapSpecified(
|
||||||
|
message=_('Command requested root, but did not '
|
||||||
|
'specify a root helper.'))
|
||||||
cmd = shlex.split(root_helper) + list(cmd)
|
cmd = shlex.split(root_helper) + list(cmd)
|
||||||
|
|
||||||
cmd = map(str, cmd)
|
cmd = map(str, cmd)
|
||||||
|
|
||||||
while attempts > 0:
|
while attempts > 0:
|
||||||
attempts -= 1
|
attempts -= 1
|
||||||
try:
|
try:
|
||||||
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
|
LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd))
|
||||||
_PIPE = subprocess.PIPE # pylint: disable=E1101
|
_PIPE = subprocess.PIPE # pylint: disable=E1101
|
||||||
|
|
||||||
|
if os.name == 'nt':
|
||||||
|
preexec_fn = None
|
||||||
|
close_fds = False
|
||||||
|
else:
|
||||||
|
preexec_fn = _subprocess_setup
|
||||||
|
close_fds = True
|
||||||
|
|
||||||
obj = subprocess.Popen(cmd,
|
obj = subprocess.Popen(cmd,
|
||||||
stdin=_PIPE,
|
stdin=_PIPE,
|
||||||
stdout=_PIPE,
|
stdout=_PIPE,
|
||||||
stderr=_PIPE,
|
stderr=_PIPE,
|
||||||
close_fds=True)
|
close_fds=close_fds,
|
||||||
|
preexec_fn=preexec_fn,
|
||||||
|
shell=shell)
|
||||||
result = None
|
result = None
|
||||||
if process_input is not None:
|
if process_input is not None:
|
||||||
result = obj.communicate(process_input)
|
result = obj.communicate(process_input)
|
||||||
@ -110,22 +173,19 @@ def execute(*cmd, **kwargs):
|
|||||||
result = obj.communicate()
|
result = obj.communicate()
|
||||||
obj.stdin.close() # pylint: disable=E1101
|
obj.stdin.close() # pylint: disable=E1101
|
||||||
_returncode = obj.returncode # pylint: disable=E1101
|
_returncode = obj.returncode # pylint: disable=E1101
|
||||||
if _returncode:
|
-            LOG.debug(_('Result was %s') % _returncode)
+            LOG.log(loglevel, _('Result was %s') % _returncode)
-            if (isinstance(check_exit_code, int) and
-                    not isinstance(check_exit_code, bool) and
-                    _returncode != check_exit_code):
+            if not ignore_exit_code and _returncode not in check_exit_code:
                 (stdout, stderr) = result
                 raise ProcessExecutionError(exit_code=_returncode,
                                             stdout=stdout,
                                             stderr=stderr,
                                             cmd=' '.join(cmd))
             return result
         except ProcessExecutionError:
             if not attempts:
                 raise
             else:
-                LOG.debug(_('%r failed. Retrying.'), cmd)
+                LOG.log(loglevel, _('%r failed. Retrying.'), cmd)
                 if delay_on_retry:
                     greenthread.sleep(random.randint(20, 200) / 100.0)
         finally:
@@ -133,3 +193,63 @@ def execute(*cmd, **kwargs):
             # call clean something up in between calls, without
             # it two execute calls in a row hangs the second one
             greenthread.sleep(0)
+
+
+def trycmd(*args, **kwargs):
+    """A wrapper around execute() to more easily handle warnings and errors.
+
+    Returns an (out, err) tuple of strings containing the output of
+    the command's stdout and stderr.  If 'err' is not empty then the
+    command can be considered to have failed.
+
+    :discard_warnings   True | False. Defaults to False. If set to True,
+                        then for succeeding commands, stderr is cleared
+
+    """
+    discard_warnings = kwargs.pop('discard_warnings', False)
+
+    try:
+        out, err = execute(*args, **kwargs)
+        failed = False
+    except ProcessExecutionError as exn:
+        out, err = '', str(exn)
+        failed = True
+
+    if not failed and discard_warnings and err:
+        # Handle commands that output to stderr but otherwise succeed
+        err = ''
+
+    return out, err
+
+
+def ssh_execute(ssh, cmd, process_input=None,
+                addl_env=None, check_exit_code=True):
+    LOG.debug(_('Running cmd (SSH): %s'), cmd)
+    if addl_env:
+        raise InvalidArgumentError(_('Environment not supported over SSH'))
+
+    if process_input:
+        # This is (probably) fixable if we need it...
+        raise InvalidArgumentError(_('process_input not supported over SSH'))
+
+    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
+    channel = stdout_stream.channel
+
+    # NOTE(justinsb): This seems suspicious...
+    # ...other SSH clients have buffering issues with this approach
+    stdout = stdout_stream.read()
+    stderr = stderr_stream.read()
+    stdin_stream.close()
+
+    exit_status = channel.recv_exit_status()
+
+    # exit_status == -1 if no exit code was returned
+    if exit_status != -1:
+        LOG.debug(_('Result was %s') % exit_status)
+        if check_exit_code and exit_status != 0:
+            raise ProcessExecutionError(exit_code=exit_status,
+                                        stdout=stdout,
+                                        stderr=stderr,
+                                        cmd=cmd)
+
+    return (stdout, stderr)
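For reference, a minimal usage sketch of the helpers added above (not part of the changeset; the designate import path is assumed):

    from designate.openstack.common import processutils

    # trycmd() folds failures into the (out, err) tuple instead of raising,
    # so a non-empty err means the command failed.
    out, err = processutils.trycmd('ls', '/tmp', discard_warnings=True)
    if err:
        print('command failed: %s' % err)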
designate/openstack/common/py3kcompat/__init__.py (new file, 0 lines)

designate/openstack/common/py3kcompat/urlutils.py (new file, 65 lines)
@@ -0,0 +1,65 @@
+#
+# Copyright 2013 Canonical Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Python2/Python3 compatibility layer for OpenStack
+"""
+
+import six
+
+if six.PY3:
+    # python3
+    import urllib.error
+    import urllib.parse
+    import urllib.request
+
+    urlencode = urllib.parse.urlencode
+    urljoin = urllib.parse.urljoin
+    quote = urllib.parse.quote
+    parse_qsl = urllib.parse.parse_qsl
+    unquote = urllib.parse.unquote
+    unquote_plus = urllib.parse.unquote_plus
+    urlparse = urllib.parse.urlparse
+    urlsplit = urllib.parse.urlsplit
+    urlunsplit = urllib.parse.urlunsplit
+    SplitResult = urllib.parse.SplitResult
+
+    urlopen = urllib.request.urlopen
+    URLError = urllib.error.URLError
+    pathname2url = urllib.request.pathname2url
+else:
+    # python2
+    import urllib
+    import urllib2
+    import urlparse
+
+    urlencode = urllib.urlencode
+    quote = urllib.quote
+    unquote = urllib.unquote
+    unquote_plus = urllib.unquote_plus
+
+    parse = urlparse
+    parse_qsl = parse.parse_qsl
+    urljoin = parse.urljoin
+    urlparse = parse.urlparse
+    urlsplit = parse.urlsplit
+    urlunsplit = parse.urlunsplit
+    SplitResult = parse.SplitResult
+
+    urlopen = urllib2.urlopen
+    URLError = urllib2.URLError
+    pathname2url = urllib.pathname2url
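A quick sketch of how the new compatibility shim is consumed (illustrative only; the URL values are made up):

    from designate.openstack.common.py3kcompat import urlutils

    # The same names resolve to urllib/urllib2/urlparse on Python 2 and to
    # urllib.parse/urllib.request/urllib.error on Python 3.
    url = urlutils.urljoin('http://example.com/v1/', 'zones')
    query = urlutils.urlencode({'name': 'example.org.'})
    parts = urlutils.urlparse('%s?%s' % (url, query))
    print(parts.netloc, parts.query)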
@@ -1,16 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
designate/openstack/common/rootwrap/cmd.py (new file, 136 lines)
@@ -0,0 +1,136 @@
+# Copyright (c) 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Root wrapper for OpenStack services
+
+   Filters which commands a service is allowed to run as another user.
+
+   To use this with designate, you should set the following in
+   designate.conf:
+   rootwrap_config=/etc/designate/rootwrap.conf
+
+   You also need to let the designate user run designate-rootwrap
+   as root in sudoers:
+   designate ALL = (root) NOPASSWD: /usr/bin/designate-rootwrap
+                                    /etc/designate/rootwrap.conf *
+
+   Service packaging should deploy .filters files only on nodes where
+   they are needed, to avoid allowing more than is necessary.
+"""
+
+from __future__ import print_function
+
+import ConfigParser
+import logging
+import os
+import pwd
+import signal
+import subprocess
+import sys
+
+
+RC_UNAUTHORIZED = 99
+RC_NOCOMMAND = 98
+RC_BADCONFIG = 97
+RC_NOEXECFOUND = 96
+
+
+def _subprocess_setup():
+    # Python installs a SIGPIPE handler by default. This is usually not what
+    # non-Python subprocesses expect.
+    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+
+def _exit_error(execname, message, errorcode, log=True):
+    print("%s: %s" % (execname, message), file=sys.stderr)
+    if log:
+        logging.error(message)
+    sys.exit(errorcode)
+
+
+def _getlogin():
+    try:
+        return os.getlogin()
+    except OSError:
+        return (os.getenv('USER') or
+                os.getenv('USERNAME') or
+                os.getenv('LOGNAME'))
+
+
+def main():
+    # Split arguments, require at least a command
+    execname = sys.argv.pop(0)
+    if len(sys.argv) < 2:
+        _exit_error(execname, "No command specified", RC_NOCOMMAND, log=False)
+
+    configfile = sys.argv.pop(0)
+    userargs = sys.argv[:]
+
+    # Add ../ to sys.path to allow running from branch
+    possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname),
+                                                    os.pardir, os.pardir))
+    if os.path.exists(os.path.join(possible_topdir, "designate", "__init__.py")):
+        sys.path.insert(0, possible_topdir)
+
+    from designate.openstack.common.rootwrap import wrapper
+
+    # Load configuration
+    try:
+        rawconfig = ConfigParser.RawConfigParser()
+        rawconfig.read(configfile)
+        config = wrapper.RootwrapConfig(rawconfig)
+    except ValueError as exc:
+        msg = "Incorrect value in %s: %s" % (configfile, exc.message)
+        _exit_error(execname, msg, RC_BADCONFIG, log=False)
+    except ConfigParser.Error:
+        _exit_error(execname, "Incorrect configuration file: %s" % configfile,
+                    RC_BADCONFIG, log=False)
+
+    if config.use_syslog:
+        wrapper.setup_syslog(execname,
+                             config.syslog_log_facility,
+                             config.syslog_log_level)
+
+    # Execute command if it matches any of the loaded filters
+    filters = wrapper.load_filters(config.filters_path)
+    try:
+        filtermatch = wrapper.match_filter(filters, userargs,
+                                           exec_dirs=config.exec_dirs)
+        if filtermatch:
+            command = filtermatch.get_command(userargs,
+                                              exec_dirs=config.exec_dirs)
+            if config.use_syslog:
+                logging.info("(%s > %s) Executing %s (filter match = %s)" % (
+                    _getlogin(), pwd.getpwuid(os.getuid())[0],
+                    command, filtermatch.name))
+
+            obj = subprocess.Popen(command,
+                                   stdin=sys.stdin,
+                                   stdout=sys.stdout,
+                                   stderr=sys.stderr,
+                                   preexec_fn=_subprocess_setup,
+                                   env=filtermatch.get_environment(userargs))
+            obj.wait()
+            sys.exit(obj.returncode)
+
+    except wrapper.FilterMatchNotExecutable as exc:
+        msg = ("Executable not found: %s (filter match = %s)"
+               % (exc.match.exec_path, exc.match.name))
+        _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog)
+
+    except wrapper.NoFilterMatched:
+        msg = ("Unauthorized command: %s (no filter matched)"
+               % ' '.join(userargs))
+        _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog)
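To make the wiring concrete, a hypothetical configuration pair that main() above would read (the filter entry and paths are illustrative, not shipped by this changeset; the section and option names match what RootwrapConfig() and load_filters() parse):

    # /etc/designate/rootwrap.conf -- parsed by wrapper.RootwrapConfig()
    [DEFAULT]
    filters_path=/etc/designate/rootwrap.d
    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
    use_syslog=False

    # /etc/designate/rootwrap.d/dns.filters -- parsed by wrapper.load_filters()
    [Filters]
    # name: FilterClass, constructor args (user, executable, signals...)
    kill_named: KillFilter, root, /usr/sbin/named, -9, -HUP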
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -20,7 +18,7 @@ import re
 
 
 class CommandFilter(object):
-    """Command filter only checking that the 1st argument matches exec_path"""
+    """Command filter only checking that the 1st argument matches exec_path."""
 
     def __init__(self, exec_path, run_as, *args):
         self.name = ''
@@ -30,11 +28,11 @@ class CommandFilter(object):
         self.real_exec = None
 
     def get_exec(self, exec_dirs=[]):
-        """Returns existing executable, or empty string if none found"""
+        """Returns existing executable, or empty string if none found."""
         if self.real_exec is not None:
             return self.real_exec
         self.real_exec = ""
-        if self.exec_path.startswith('/'):
+        if os.path.isabs(self.exec_path):
             if os.access(self.exec_path, os.X_OK):
                 self.real_exec = self.exec_path
         else:
@@ -46,10 +44,8 @@ class CommandFilter(object):
         return self.real_exec
 
     def match(self, userargs):
-        """Only check that the first argument (command) matches exec_path"""
-        if (os.path.basename(self.exec_path) == userargs[0]):
-            return True
-        return False
+        """Only check that the first argument (command) matches exec_path."""
+        return userargs and os.path.basename(self.exec_path) == userargs[0]
 
     def get_command(self, userargs, exec_dirs=[]):
         """Returns command to execute (with sudo -u if run_as != root)."""
@@ -60,16 +56,16 @@ class CommandFilter(object):
         return [to_exec] + userargs[1:]
 
     def get_environment(self, userargs):
-        """Returns specific environment to set, None if none"""
+        """Returns specific environment to set, None if none."""
         return None
 
 
 class RegExpFilter(CommandFilter):
-    """Command filter doing regexp matching for every argument"""
+    """Command filter doing regexp matching for every argument."""
 
     def match(self, userargs):
         # Early skip if command or number of args don't match
-        if (len(self.args) != len(userargs)):
+        if (not userargs or len(self.args) != len(userargs)):
             # DENY: argument numbers don't match
             return False
         # Compare each arg (anchoring pattern explicitly at end of string)
@@ -88,40 +84,61 @@ class RegExpFilter(CommandFilter):
         return False
 
 
-class DnsmasqFilter(CommandFilter):
-    """Specific filter for the dnsmasq call (which includes env)"""
-
-    CONFIG_FILE_ARG = 'CONFIG_FILE'
-
-    def match(self, userargs):
-        if (userargs[0] == 'env' and
-                userargs[1].startswith(self.CONFIG_FILE_ARG) and
-                userargs[2].startswith('NETWORK_ID=') and
-                userargs[3] == 'dnsmasq'):
-            return True
-        return False
-
-    def get_command(self, userargs, exec_dirs=[]):
-        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
-        dnsmasq_pos = userargs.index('dnsmasq')
-        return [to_exec] + userargs[dnsmasq_pos + 1:]
-
-    def get_environment(self, userargs):
-        env = os.environ.copy()
-        env[self.CONFIG_FILE_ARG] = userargs[1].split('=')[-1]
-        env['NETWORK_ID'] = userargs[2].split('=')[-1]
-        return env
-
-
-class DeprecatedDnsmasqFilter(DnsmasqFilter):
-    """Variant of dnsmasq filter to support old-style FLAGFILE"""
-    CONFIG_FILE_ARG = 'FLAGFILE'
+class PathFilter(CommandFilter):
+    """Command filter checking that path arguments are within given dirs
+
+    One can specify the following constraints for command arguments:
+        1) pass     - pass an argument as is to the resulting command
+        2) some_str - check if an argument is equal to the given string
+        3) abs path - check if a path argument is within the given base dir
+
+    A typical rootwrapper filter entry looks like this:
+        # cmdname: filter name, raw command, user, arg_i_constraint [, ...]
+        chown: PathFilter, /bin/chown, root, nova, /var/lib/images
+
+    """
+
+    def match(self, userargs):
+        if not userargs or len(userargs) < 2:
+            return False
+
+        command, arguments = userargs[0], userargs[1:]
+
+        equal_args_num = len(self.args) == len(arguments)
+        exec_is_valid = super(PathFilter, self).match(userargs)
+        args_equal_or_pass = all(
+            arg == 'pass' or arg == value
+            for arg, value in zip(self.args, arguments)
+            if not os.path.isabs(arg)  # arguments not specifying abs paths
+        )
+        paths_are_within_base_dirs = all(
+            os.path.commonprefix([arg, os.path.realpath(value)]) == arg
+            for arg, value in zip(self.args, arguments)
+            if os.path.isabs(arg)  # arguments specifying abs paths
+        )
+
+        return (equal_args_num and
+                exec_is_valid and
+                args_equal_or_pass and
+                paths_are_within_base_dirs)
+
+    def get_command(self, userargs, exec_dirs=[]):
+        command, arguments = userargs[0], userargs[1:]
+
+        # convert path values to canonical ones; copy other args as is
+        args = [os.path.realpath(value) if os.path.isabs(arg) else value
+                for arg, value in zip(self.args, arguments)]
+
+        return super(PathFilter, self).get_command([command] + args,
+                                                   exec_dirs)
 
 
 class KillFilter(CommandFilter):
     """Specific filter for the kill calls.
 
     1st argument is the user to run /bin/kill under
     2nd argument is the location of the affected executable
+        if the argument is not absolute, it is checked against $PATH
     Subsequent arguments list the accepted signals (if any)
 
     This filter relies on /proc to accurately determine affected
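A sketch of how the new PathFilter constrains arguments, mirroring the chown example in its docstring (illustrative call, not from the diff):

    from designate.openstack.common.rootwrap import filters

    # 'nova' must match exactly; the last argument must resolve to a path
    # under /var/lib/images.
    f = filters.PathFilter('/bin/chown', 'root', 'nova', '/var/lib/images')
    print(f.match(['chown', 'nova', '/var/lib/images/inst-1/disk']))  # True
    print(f.match(['chown', 'nova', '/etc/passwd']))                  # False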
@@ -132,7 +149,7 @@ class KillFilter(CommandFilter):
         super(KillFilter, self).__init__("/bin/kill", *args)
 
     def match(self, userargs):
-        if userargs[0] != "kill":
+        if not userargs or userargs[0] != "kill":
             return False
         args = list(userargs)
         if len(args) == 3:
@@ -150,31 +167,150 @@ class KillFilter(CommandFilter):
                 return False
         try:
             command = os.readlink("/proc/%d/exe" % int(args[1]))
-            # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on
-            # the end if an executable is updated or deleted
-            if command.endswith(" (deleted)"):
-                command = command[:command.rindex(" ")]
-            if command != self.args[0]:
-                # Affected executable does not match
-                return False
         except (ValueError, OSError):
             # Incorrect PID
             return False
-        return True
+
+        # NOTE(yufang521247): /proc/PID/exe may have '\0' on the
+        # end, because python doen't stop at '\0' when read the
+        # target path.
+        command = command.partition('\0')[0]
+
+        # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on
+        # the end if an executable is updated or deleted
+        if command.endswith(" (deleted)"):
+            command = command[:-len(" (deleted)")]
+
+        kill_command = self.args[0]
+
+        if os.path.isabs(kill_command):
+            return kill_command == command
+
+        return (os.path.isabs(command) and
+                kill_command == os.path.basename(command) and
+                os.path.dirname(command) in os.environ.get('PATH', ''
+                                                           ).split(':'))
 
 
 class ReadFileFilter(CommandFilter):
-    """Specific filter for the utils.read_file_as_root call"""
+    """Specific filter for the utils.read_file_as_root call."""
 
     def __init__(self, file_path, *args):
         self.file_path = file_path
         super(ReadFileFilter, self).__init__("/bin/cat", "root", *args)
 
     def match(self, userargs):
-        if userargs[0] != 'cat':
-            return False
-        if userargs[1] != self.file_path:
-            return False
-        if len(userargs) != 2:
-            return False
-        return True
+        return (userargs == ['cat', self.file_path])
+
+
+class IpFilter(CommandFilter):
+    """Specific filter for the ip utility to that does not match exec."""
+
+    def match(self, userargs):
+        if userargs[0] == 'ip':
+            if userargs[1] == 'netns':
+                return (userargs[2] in ('list', 'add', 'delete'))
+            else:
+                return True
+
+
+class EnvFilter(CommandFilter):
+    """Specific filter for the env utility.
+
+    Behaves like CommandFilter, except that it handles
+    leading env A=B.. strings appropriately.
+    """
+
+    def _extract_env(self, arglist):
+        """Extract all leading NAME=VALUE arguments from arglist."""
+
+        envs = set()
+        for arg in arglist:
+            if '=' not in arg:
+                break
+            envs.add(arg.partition('=')[0])
+        return envs
+
+    def __init__(self, exec_path, run_as, *args):
+        super(EnvFilter, self).__init__(exec_path, run_as, *args)
+
+        env_list = self._extract_env(self.args)
+        # Set exec_path to X when args are in the form of
+        # env A=a B=b C=c X Y Z
+        if "env" in exec_path and len(env_list) < len(self.args):
+            self.exec_path = self.args[len(env_list)]
+
+    def match(self, userargs):
+        # ignore leading 'env'
+        if userargs[0] == 'env':
+            userargs.pop(0)
+
+        # require one additional argument after configured ones
+        if len(userargs) < len(self.args):
+            return False
+
+        # extract all env args
+        user_envs = self._extract_env(userargs)
+        filter_envs = self._extract_env(self.args)
+        user_command = userargs[len(user_envs):len(user_envs) + 1]
+
+        # match first non-env argument with CommandFilter
+        return (super(EnvFilter, self).match(user_command)
+                and len(filter_envs) and user_envs == filter_envs)
+
+    def exec_args(self, userargs):
+        args = userargs[:]
+
+        # ignore leading 'env'
+        if args[0] == 'env':
+            args.pop(0)
+
+        # Throw away leading NAME=VALUE arguments
+        while args and '=' in args[0]:
+            args.pop(0)
+
+        return args
+
+    def get_command(self, userargs, exec_dirs=[]):
+        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
+        return [to_exec] + self.exec_args(userargs)[1:]
+
+    def get_environment(self, userargs):
+        env = os.environ.copy()
+
+        # ignore leading 'env'
+        if userargs[0] == 'env':
+            userargs.pop(0)
+
+        # Handle leading NAME=VALUE pairs
+        for a in userargs:
+            env_name, equals, env_value = a.partition('=')
+            if not equals:
+                break
+            if env_name and env_value:
+                env[env_name] = env_value
+
+        return env
+
+
+class ChainingFilter(CommandFilter):
+    def exec_args(self, userargs):
+        return []
+
+
+class IpNetnsExecFilter(ChainingFilter):
+    """Specific filter for the ip utility to that does match exec."""
+
+    def match(self, userargs):
+        # Network namespaces currently require root
+        # require <ns> argument
+        if self.run_as != "root" or len(userargs) < 4:
+            return False
+
+        return (userargs[:3] == ['ip', 'netns', 'exec'])
+
+    def exec_args(self, userargs):
+        args = userargs[4:]
+        if args:
+            args[0] = os.path.basename(args[0])
+        return args
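A sketch of the EnvFilter behavior added above (illustrative arguments):

    from designate.openstack.common.rootwrap import filters

    # Leading NAME=VALUE pairs must use the configured names, and the first
    # non-env argument is matched like a regular CommandFilter command.
    f = filters.EnvFilter('env', 'root', 'CONFIG_FILE=', 'NETWORK_ID=',
                          'dnsmasq')
    args = ['env', 'CONFIG_FILE=/etc/dnsmasq.conf', 'NETWORK_ID=42',
            'dnsmasq', '--no-hosts']
    print(f.match(list(args)))                           # True
    print(f.get_environment(list(args))['NETWORK_ID'])   # '42'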
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -16,12 +14,13 @@
 # under the License.
 
-import ConfigParser
 import logging
 import logging.handlers
 import os
 import string
 
+from six import moves
+
 from designate.openstack.common.rootwrap import filters
 
 
@@ -31,10 +30,7 @@ class NoFilterMatched(Exception):
 
 
 class FilterMatchNotExecutable(Exception):
-    """
-    This exception is raised when a filter matched but no executable was
-    found.
-    """
+    """Raised when a filter matched but no executable was found."""
     def __init__(self, match=None, **kwargs):
         self.match = match
 
@@ -49,8 +45,10 @@ class RootwrapConfig(object):
         if config.has_option("DEFAULT", "exec_dirs"):
             self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",")
         else:
+            self.exec_dirs = []
             # Use system PATH if exec_dirs is not specified
-            self.exec_dirs = os.environ["PATH"].split(':')
+            if "PATH" in os.environ:
+                self.exec_dirs = os.environ['PATH'].split(':')
 
         # syslog_log_facility
         if config.has_option("DEFAULT", "syslog_log_facility"):
@@ -93,7 +91,7 @@ def setup_syslog(execname, facility, level):
 
 
 def build_filter(class_name, *args):
-    """Returns a filter object of class class_name"""
+    """Returns a filter object of class class_name."""
     if not hasattr(filters, class_name):
         logging.warning("Skipping unknown filter class (%s) specified "
                         "in filter definitions" % class_name)
@@ -103,13 +101,14 @@ def build_filter(class_name, *args):
 
 
 def load_filters(filters_path):
-    """Load filters from a list of directories"""
+    """Load filters from a list of directories."""
     filterlist = []
     for filterdir in filters_path:
         if not os.path.isdir(filterdir):
             continue
-        for filterfile in os.listdir(filterdir):
-            filterconfig = ConfigParser.RawConfigParser()
+        for filterfile in filter(lambda f: not f.startswith('.'),
+                                 os.listdir(filterdir)):
+            filterconfig = moves.configparser.RawConfigParser()
             filterconfig.read(os.path.join(filterdir, filterfile))
             for (name, value) in filterconfig.items("Filters"):
                 filterdefinition = [string.strip(s) for s in value.split(',')]
@@ -121,18 +120,33 @@ def load_filters(filters_path):
     return filterlist
 
 
-def match_filter(filters, userargs, exec_dirs=[]):
-    """
-    Checks user command and arguments through command filters and
-    returns the first matching filter.
+def match_filter(filter_list, userargs, exec_dirs=[]):
+    """Checks user command and arguments through command filters.
+
+    Returns the first matching filter.
+
     Raises NoFilterMatched if no filter matched.
     Raises FilterMatchNotExecutable if no executable was found for the
     best filter match.
     """
     first_not_executable_filter = None
 
-    for f in filters:
+    for f in filter_list:
         if f.match(userargs):
+            if isinstance(f, filters.ChainingFilter):
+                # This command calls exec verify that remaining args
+                # matches another filter.
+                def non_chain_filter(fltr):
+                    return (fltr.run_as == f.run_as
+                            and not isinstance(fltr, filters.ChainingFilter))
+
+                leaf_filters = [fltr for fltr in filter_list
+                                if non_chain_filter(fltr)]
+                args = f.exec_args(userargs)
+                if (not args or not match_filter(leaf_filters,
+                                                 args, exec_dirs=exec_dirs)):
+                    continue
+
             # Try other filters if executable is absent
             if not f.get_exec(exec_dirs=exec_dirs):
                 if not first_not_executable_filter:
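The chaining logic above only accepts a ChainingFilter match when the wrapped command also matches a non-chaining filter with the same run_as. A deterministic sketch of the two halves (illustrative namespace and inner command):

    from designate.openstack.common.rootwrap import filters

    f = filters.IpNetnsExecFilter('ip', 'root')
    cmd = ['ip', 'netns', 'exec', 'qdhcp-42', 'dig', 'example.org']
    print(f.match(cmd))      # True: 'ip netns exec <ns> ...', run as root
    print(f.exec_args(cmd))  # ['dig', 'example.org'] -- the inner command
                             # that match_filter() re-checks against the
                             # leaf filters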
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -26,13 +24,13 @@ For some wrappers that add message versioning to rpc, see:
 """
 
 import inspect
-import logging
 
 from oslo.config import cfg
 
 from designate.openstack.common.gettextutils import _
 from designate.openstack.common import importutils
 from designate.openstack.common import local
+from designate.openstack.common import log as logging
 
 
 LOG = logging.getLogger(__name__)
@@ -56,13 +54,12 @@ rpc_opts = [
                help='Seconds to wait before a cast expires (TTL). '
                     'Only supported by impl_zmq.'),
     cfg.ListOpt('allowed_rpc_exception_modules',
-                default=['designate.openstack.common.exception',
-                         'nova.exception',
+                default=['nova.exception',
                          'cinder.exception',
                          'exceptions',
                          ],
                 help='Modules of exceptions that are permitted to be recreated'
-                     'upon receiving exception data from an rpc call.'),
+                     ' upon receiving exception data from an rpc call.'),
     cfg.BoolOpt('fake_rabbit',
                 default=False,
                 help='If passed, use a fake RabbitMQ provider'),
@@ -228,7 +225,7 @@ def notify(context, topic, msg, envelope=False):
 
 
 def cleanup():
-    """Clean up resoruces in use by implementation.
+    """Clean up resources in use by implementation.
 
     Clean up any resources that have been allocated by the RPC implementation.
     This is typically open connections to a messaging service. This function
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -20,9 +18,9 @@
 """
 Shared code between AMQP based openstack.common.rpc implementations.
 
-The code in this module is shared between the rpc implemenations based on AMQP.
-Specifically, this includes impl_kombu and impl_qpid.  impl_carrot also uses
-AMQP, but is deprecated and predates this code.
+The code in this module is shared between the rpc implementations based on
+AMQP. Specifically, this includes impl_kombu and impl_qpid.  impl_carrot also
+uses AMQP, but is deprecated and predates this code.
 """
 
 import collections
@@ -34,10 +32,9 @@ from eventlet import greenpool
 from eventlet import pools
 from eventlet import queue
 from eventlet import semaphore
-# TODO(pekowsk): Remove import cfg and below comment in Havana.
-# This import should no longer be needed when the amqp_rpc_single_reply_queue
-# option is removed.
 from oslo.config import cfg
+import six
 
 
 from designate.openstack.common import excutils
 from designate.openstack.common.gettextutils import _
@@ -46,12 +43,15 @@ from designate.openstack.common import log as logging
 from designate.openstack.common.rpc import common as rpc_common
 
 
-# TODO(pekowski): Remove this option in Havana.
 amqp_opts = [
-    cfg.BoolOpt('amqp_rpc_single_reply_queue',
+    cfg.BoolOpt('amqp_durable_queues',
                 default=False,
-                help='Enable a fast single reply queue if using AMQP based '
-                'RPC like RabbitMQ or Qpid.'),
+                deprecated_name='rabbit_durable_queues',
+                deprecated_group='DEFAULT',
+                help='Use durable queues in amqp.'),
+    cfg.BoolOpt('amqp_auto_delete',
+                default=False,
+                help='Auto-delete queues in amqp.'),
 ]
 
 cfg.CONF.register_opts(amqp_opts)
@@ -83,7 +83,7 @@ class Pool(pools.Pool):
        # is the above "while loop" gets all the cached connections from the
        # pool and closes them, but never returns them to the pool, a pool
       # leak.  The unit tests hang waiting for an item to be returned to the
-       # pool.  The unit tests get here via the teatDown() method.  In the run
+       # pool.  The unit tests get here via the tearDown() method.  In the run
        # time code, it gets here via cleanup() and only appears in service.py
        # just before doing a sys.exit(), so cleanup() only happens once and
        # the leakage is not a problem.
@@ -102,19 +102,19 @@ def get_connection_pool(conf, connection_cls):
 
 
 class ConnectionContext(rpc_common.Connection):
-    """The class that is actually returned to the caller of
-    create_connection().  This is essentially a wrapper around
-    Connection that supports 'with'.  It can also return a new
-    Connection, or one from a pool.  The function will also catch
-    when an instance of this class is to be deleted.  With that
-    we can return Connections to the pool on exceptions and so
-    forth without making the caller be responsible for catching
-    them.  If possible the function makes sure to return a
-    connection to the pool.
+    """The class that is actually returned to the create_connection() caller.
+
+    This is essentially a wrapper around Connection that supports 'with'.
+    It can also return a new Connection, or one from a pool.
+
+    The function will also catch when an instance of this class is to be
+    deleted.  With that we can return Connections to the pool on exceptions
+    and so forth without making the caller be responsible for catching them.
+    If possible the function makes sure to return a connection to the pool.
     """
 
     def __init__(self, conf, connection_pool, pooled=True, server_params=None):
-        """Create a new connection, or get one from the pool"""
+        """Create a new connection, or get one from the pool."""
         self.connection = None
         self.conf = conf
         self.connection_pool = connection_pool
@@ -127,7 +127,7 @@ class ConnectionContext(rpc_common.Connection):
         self.pooled = pooled
 
     def __enter__(self):
-        """When with ConnectionContext() is used, return self"""
+        """When with ConnectionContext() is used, return self."""
         return self
 
     def _done(self):
@@ -165,17 +165,19 @@ class ConnectionContext(rpc_common.Connection):
     def create_worker(self, topic, proxy, pool_name):
         self.connection.create_worker(topic, proxy, pool_name)
 
-    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
+    def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
+                           ack_on_error=True):
         self.connection.join_consumer_pool(callback,
                                            pool_name,
                                            topic,
-                                           exchange_name)
+                                           exchange_name,
+                                           ack_on_error)
 
     def consume_in_thread(self):
         self.connection.consume_in_thread()
 
     def __getattr__(self, key):
-        """Proxy all other calls to the Connection instance"""
+        """Proxy all other calls to the Connection instance."""
        if self.connection:
             return getattr(self.connection, key)
         else:
@@ -183,11 +185,11 @@ class ConnectionContext(rpc_common.Connection):
 
 
 class ReplyProxy(ConnectionContext):
-    """ Connection class for RPC replies / callbacks """
+    """Connection class for RPC replies / callbacks."""
     def __init__(self, conf, connection_pool):
         self._call_waiters = {}
         self._num_call_waiters = 0
-        self._num_call_waiters_wrn_threshhold = 10
+        self._num_call_waiters_wrn_threshold = 10
         self._reply_q = 'reply_' + uuid.uuid4().hex
         super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
         self.declare_direct_consumer(self._reply_q, self._process_data)
@@ -197,18 +199,20 @@ class ReplyProxy(ConnectionContext):
         msg_id = message_data.pop('_msg_id', None)
         waiter = self._call_waiters.get(msg_id)
         if not waiter:
-            LOG.warn(_('no calling threads waiting for msg_id : %s'
-                       ', message : %s') % (msg_id, message_data))
+            LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
+                       ', message : %(data)s'), {'msg_id': msg_id,
+                                                 'data': message_data})
+            LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
         else:
             waiter.put(message_data)
 
     def add_call_waiter(self, waiter, msg_id):
         self._num_call_waiters += 1
-        if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
+        if self._num_call_waiters > self._num_call_waiters_wrn_threshold:
             LOG.warn(_('Number of call waiters is greater than warning '
-                       'threshhold: %d. There could be a MulticallProxyWaiter '
-                       'leak.') % self._num_call_waiters_wrn_threshhold)
-            self._num_call_waiters_wrn_threshhold *= 2
+                       'threshold: %d. There could be a MulticallProxyWaiter '
+                       'leak.') % self._num_call_waiters_wrn_threshold)
+            self._num_call_waiters_wrn_threshold *= 2
         self._call_waiters[msg_id] = waiter
 
     def del_call_waiter(self, msg_id):
@@ -231,18 +235,13 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
             failure = rpc_common.serialize_remote_exception(failure,
                                                             log_failure)
 
-        try:
-            msg = {'result': reply, 'failure': failure}
-        except TypeError:
-            msg = {'result': dict((k, repr(v))
-                   for k, v in reply.__dict__.iteritems()),
-                   'failure': failure}
+        msg = {'result': reply, 'failure': failure}
         if ending:
             msg['ending'] = True
         _add_unique_id(msg)
         # If a reply_q exists, add the msg_id to the reply and pass the
         # reply_q to direct_send() to use it as the response queue.
-        # Otherwise use the msg_id for backward compatibilty.
+        # Otherwise use the msg_id for backward compatibility.
         if reply_q:
             msg['_msg_id'] = msg_id
             conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
@@ -251,7 +250,7 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
 
 
 class RpcContext(rpc_common.CommonRpcContext):
-    """Context that supports replying to a rpc.call"""
+    """Context that supports replying to a rpc.call."""
     def __init__(self, **kwargs):
         self.msg_id = kwargs.pop('msg_id', None)
         self.reply_q = kwargs.pop('reply_q', None)
@@ -301,8 +300,14 @@ def pack_context(msg, context):
     for args at some point.
 
     """
-    context_d = dict([('_context_%s' % key, value)
-                      for (key, value) in context.to_dict().iteritems()])
+    if isinstance(context, dict):
+        context_d = dict([('_context_%s' % key, value)
+                          for (key, value) in six.iteritems(context)])
+    else:
+        context_d = dict([('_context_%s' % key, value)
+                          for (key, value) in
+                          six.iteritems(context.to_dict())])
+
     msg.update(context_d)
|
|||||||
|
|
||||||
|
|
||||||
class _ThreadPoolWithWait(object):
|
class _ThreadPoolWithWait(object):
|
||||||
"""Base class for a delayed invocation manager used by
|
"""Base class for a delayed invocation manager.
|
||||||
the Connection class to start up green threads
|
|
||||||
|
Used by the Connection class to start up green threads
|
||||||
to handle incoming messages.
|
to handle incoming messages.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@ -354,25 +360,48 @@ class _ThreadPoolWithWait(object):
|
|||||||
|
|
||||||
|
|
||||||
class CallbackWrapper(_ThreadPoolWithWait):
|
class CallbackWrapper(_ThreadPoolWithWait):
|
||||||
"""Wraps a straight callback to allow it to be invoked in a green
|
"""Wraps a straight callback.
|
||||||
thread.
|
|
||||||
|
Allows it to be invoked in a green thread.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, conf, callback, connection_pool):
|
def __init__(self, conf, callback, connection_pool,
|
||||||
"""
|
wait_for_consumers=False):
|
||||||
|
"""Initiates CallbackWrapper object.
|
||||||
|
|
||||||
:param conf: cfg.CONF instance
|
:param conf: cfg.CONF instance
|
||||||
:param callback: a callable (probably a function)
|
:param callback: a callable (probably a function)
|
||||||
:param connection_pool: connection pool as returned by
|
:param connection_pool: connection pool as returned by
|
||||||
get_connection_pool()
|
get_connection_pool()
|
||||||
|
:param wait_for_consumers: wait for all green threads to
|
||||||
|
complete and raise the last
|
||||||
|
caught exception, if any.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
super(CallbackWrapper, self).__init__(
|
super(CallbackWrapper, self).__init__(
|
||||||
conf=conf,
|
conf=conf,
|
||||||
connection_pool=connection_pool,
|
connection_pool=connection_pool,
|
||||||
)
|
)
|
||||||
self.callback = callback
|
self.callback = callback
|
||||||
|
self.wait_for_consumers = wait_for_consumers
|
||||||
|
self.exc_info = None
|
||||||
|
|
||||||
|
def _wrap(self, message_data, **kwargs):
|
||||||
|
"""Wrap the callback invocation to catch exceptions.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
self.callback(message_data, **kwargs)
|
||||||
|
except Exception:
|
||||||
|
self.exc_info = sys.exc_info()
|
||||||
|
|
||||||
def __call__(self, message_data):
|
def __call__(self, message_data):
|
||||||
self.pool.spawn_n(self.callback, message_data)
|
self.exc_info = None
|
||||||
|
self.pool.spawn_n(self._wrap, message_data)
|
||||||
|
|
||||||
|
if self.wait_for_consumers:
|
||||||
|
self.pool.waitall()
|
||||||
|
if self.exc_info:
|
||||||
|
six.reraise(self.exc_info[1], None, self.exc_info[2])
|
||||||
|
|
||||||
|
|
||||||
class ProxyCallback(_ThreadPoolWithWait):
|
class ProxyCallback(_ThreadPoolWithWait):
|
||||||
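A minimal sketch of the new wait_for_consumers flag (hypothetical callback; conf and connection_pool are assumed to come from cfg.CONF and get_connection_pool()):

    def on_notification(message_data):
        raise ValueError(message_data)  # no longer silently swallowed

    # __call__ still spawns the callback in a green thread, but with
    # wait_for_consumers=True it blocks on pool.waitall() and re-raises
    # the last exception caught inside _wrap().
    wrapped = CallbackWrapper(conf=conf,
                              callback=on_notification,
                              connection_pool=connection_pool,
                              wait_for_consumers=True)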
@@ -408,15 +437,17 @@ class ProxyCallback(_ThreadPoolWithWait):
         ctxt = unpack_context(self.conf, message_data)
         method = message_data.get('method')
         args = message_data.get('args', {})
-        version = message_data.get('version', None)
+        version = message_data.get('version')
+        namespace = message_data.get('namespace')
         if not method:
             LOG.warn(_('no method for message: %s') % message_data)
             ctxt.reply(_('No method for message: %s') % message_data,
                        connection_pool=self.connection_pool)
             return
-        self.pool.spawn_n(self._process_data, ctxt, version, method, args)
+        self.pool.spawn_n(self._process_data, ctxt, version, method,
+                          namespace, args)
 
-    def _process_data(self, ctxt, version, method, args):
+    def _process_data(self, ctxt, version, method, namespace, args):
         """Process a message in a new thread.
 
         If the proxy object we have has a dispatch method
@@ -427,7 +458,8 @@ class ProxyCallback(_ThreadPoolWithWait):
         """
         ctxt.update_store()
         try:
-            rval = self.proxy.dispatch(ctxt, version, method, **args)
+            rval = self.proxy.dispatch(ctxt, version, method, namespace,
+                                       **args)
             # Check if the result was a generator
             if inspect.isgenerator(rval):
                 for x in rval:
@@ -487,7 +519,7 @@ class MulticallProxyWaiter(object):
         return result
 
     def __iter__(self):
-        """Return a result until we get a reply with an 'ending" flag"""
+        """Return a result until we get a reply with an 'ending' flag."""
         if self._done:
             raise StopIteration
         while True:
@@ -509,61 +541,8 @@ class MulticallProxyWaiter(object):
             yield result
 
 
-#TODO(pekowski): Remove MulticallWaiter() in Havana.
-class MulticallWaiter(object):
-    def __init__(self, conf, connection, timeout):
-        self._connection = connection
-        self._iterator = connection.iterconsume(timeout=timeout or
-                                                conf.rpc_response_timeout)
-        self._result = None
-        self._done = False
-        self._got_ending = False
-        self._conf = conf
-        self.msg_id_cache = _MsgIdCache()
-
-    def done(self):
-        if self._done:
-            return
-        self._done = True
-        self._iterator.close()
-        self._iterator = None
-        self._connection.close()
-
-    def __call__(self, data):
-        """The consume() callback will call this.  Store the result."""
-        self.msg_id_cache.check_duplicate_message(data)
-        if data['failure']:
-            failure = data['failure']
-            self._result = rpc_common.deserialize_remote_exception(self._conf,
-                                                                   failure)
-
-        elif data.get('ending', False):
-            self._got_ending = True
-        else:
-            self._result = data['result']
-
-    def __iter__(self):
-        """Return a result until we get a 'None' response from consumer"""
-        if self._done:
-            raise StopIteration
-        while True:
-            try:
-                self._iterator.next()
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    self.done()
-            if self._got_ending:
-                self.done()
-                raise StopIteration
-            result = self._result
-            if isinstance(result, Exception):
-                self.done()
-                raise result
-            yield result
-
-
 def create_connection(conf, new, connection_pool):
-    """Create a connection"""
+    """Create a connection."""
     return ConnectionContext(conf, connection_pool, pooled=not new)
 
 
@@ -572,14 +551,6 @@ _reply_proxy_create_sem = semaphore.Semaphore()
 
 def multicall(conf, context, topic, msg, timeout, connection_pool):
     """Make a call that returns multiple times."""
-    # TODO(pekowski): Remove all these comments in Havana.
-    # For amqp_rpc_single_reply_queue = False,
-    # Can't use 'with' for multicall, as it returns an iterator
-    # that will continue to use the connection.  When it's done,
-    # connection.close() will get called which will put it back into
-    # the pool
-    # For amqp_rpc_single_reply_queue = True,
-    # The 'with' statement is mandatory for closing the connection
     LOG.debug(_('Making synchronous call on %s ...'), topic)
     msg_id = uuid.uuid4().hex
     msg.update({'_msg_id': msg_id})
@@ -587,21 +558,13 @@ def multicall(conf, context, topic, msg, timeout, connection_pool):
     _add_unique_id(msg)
     pack_context(msg, context)
 
-    # TODO(pekowski): Remove this flag and the code under the if clause
-    # in Havana.
-    if not conf.amqp_rpc_single_reply_queue:
-        conn = ConnectionContext(conf, connection_pool)
-        wait_msg = MulticallWaiter(conf, conn, timeout)
-        conn.declare_direct_consumer(msg_id, wait_msg)
-        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
-    else:
-        with _reply_proxy_create_sem:
-            if not connection_pool.reply_proxy:
-                connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
-        msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
-        wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
-        with ConnectionContext(conf, connection_pool) as conn:
-            conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
+    with _reply_proxy_create_sem:
+        if not connection_pool.reply_proxy:
+            connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
+    msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
+    wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
+    with ConnectionContext(conf, connection_pool) as conn:
+        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
     return wait_msg
 
 
@@ -661,7 +624,7 @@ def notify(conf, context, topic, msg, connection_pool, envelope):
     pack_context(msg, context)
     with ConnectionContext(conf, connection_pool) as conn:
         if envelope:
-            msg = rpc_common.serialize_msg(msg, force_envelope=True)
+            msg = rpc_common.serialize_msg(msg)
         conn.notify_send(topic, msg)
@ -1,5 +1,3 @@
|
|||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2010 United States Government as represented by the
|
# Copyright 2010 United States Government as represented by the
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
# All Rights Reserved.
|
# All Rights Reserved.
|
||||||
@ -22,18 +20,21 @@ import sys
|
|||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
import six
|
||||||
|
|
||||||
from designate.openstack.common.gettextutils import _
|
from designate.openstack.common.gettextutils import _
|
||||||
from designate.openstack.common import importutils
|
from designate.openstack.common import importutils
|
||||||
from designate.openstack.common import jsonutils
|
from designate.openstack.common import jsonutils
|
||||||
from designate.openstack.common import local
|
from designate.openstack.common import local
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
|
from designate.openstack.common import versionutils
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
_RPC_ENVELOPE_VERSION = '2.0'
|
||||||
'''RPC Envelope Version.
|
'''RPC Envelope Version.
|
||||||
|
|
||||||
This version number applies to the top level structure of messages sent out.
|
This version number applies to the top level structure of messages sent out.
|
||||||
@@ -46,7 +47,7 @@ This version number applies to the message envelope that is used in the
 serialization done inside the rpc layer. See serialize_msg() and
 deserialize_msg().
 
-The current message format (version 2.0) is very simple. It is:
+The current message format (version 2.0) is very simple. It is::
 
     {
         'oslo.version': <RPC Envelope Version as a String>,
@@ -64,34 +65,31 @@ We will JSON encode the application message payload. The message envelope,
 which includes the JSON encoded application message body, will be passed down
 to the messaging libraries as a dict.
 '''
-_RPC_ENVELOPE_VERSION = '2.0'
 
 _VERSION_KEY = 'oslo.version'
 _MESSAGE_KEY = 'oslo.message'
 
-# TODO(russellb) Turn this on after Grizzly.
-_SEND_RPC_ENVELOPE = False
+_REMOTE_POSTFIX = '_Remote'
 
 
 class RPCException(Exception):
-    message = _("An unknown RPC related exception occurred.")
+    msg_fmt = _("An unknown RPC related exception occurred.")
 
     def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
 
        if not message:
            try:
-                message = self.message % kwargs
+                message = self.msg_fmt % kwargs
 
            except Exception:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
-                for name, value in kwargs.iteritems():
+                for name, value in six.iteritems(kwargs):
                    LOG.error("%s: %s" % (name, value))
                # at least get the core message out if something happened
-                message = self.message
+                message = self.msg_fmt
 
        super(RPCException, self).__init__(message)
 
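The message -> msg_fmt rename keeps the format string under its own name, so it is never confused with the formatted message that __init__ builds (and it sidesteps the long-deprecated BaseException.message attribute). A runnable reduction of the pattern, including the fallback when kwargs do not match:

    class RPCException(Exception):
        msg_fmt = "An unknown RPC related exception occurred."

        def __init__(self, message=None, **kwargs):
            self.kwargs = kwargs
            if not message:
                try:
                    message = self.msg_fmt % kwargs
                except Exception:
                    message = self.msg_fmt  # kwargs didn't match; fall back
            super(RPCException, self).__init__(message)

    class UnsupportedRpcVersion(RPCException):
        msg_fmt = "Specified RPC version, %(version)s, not supported by this endpoint."

    print(UnsupportedRpcVersion(version='2.0'))  # formatted message
    print(UnsupportedRpcVersion(bogus=1))        # falls back to the raw msg_fmt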
@@ -105,7 +103,7 @@ class RemoteError(RPCException):
     contains all of the relevant info.
 
     """
-    message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
+    msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
 
     def __init__(self, exc_type=None, value=None, traceback=None):
         self.exc_type = exc_type
@@ -122,27 +120,50 @@ class Timeout(RPCException):
     This exception is raised if the rpc_response_timeout is reached while
     waiting for a response from the remote side.
     """
-    message = _("Timeout while waiting on RPC response.")
+    msg_fmt = _('Timeout while waiting on RPC response - '
+                'topic: "%(topic)s", RPC method: "%(method)s" '
+                'info: "%(info)s"')
+
+    def __init__(self, info=None, topic=None, method=None):
+        """Initiates Timeout object.
+
+        :param info: Extra info to convey to the user
+        :param topic: The topic that the rpc call was sent to
+        :param rpc_method_name: The name of the rpc method being
+                                called
+        """
+        self.info = info
+        self.topic = topic
+        self.method = method
+        super(Timeout, self).__init__(
+            None,
+            info=info or _('<unknown>'),
+            topic=topic or _('<unknown>'),
+            method=method or _('<unknown>'))
 
 
 class DuplicateMessageError(RPCException):
-    message = _("Found duplicate message(%(msg_id)s). Skipping it.")
+    msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
 
 
 class InvalidRPCConnectionReuse(RPCException):
-    message = _("Invalid reuse of an RPC connection.")
+    msg_fmt = _("Invalid reuse of an RPC connection.")
 
 
 class UnsupportedRpcVersion(RPCException):
-    message = _("Specified RPC version, %(version)s, not supported by "
+    msg_fmt = _("Specified RPC version, %(version)s, not supported by "
                 "this endpoint.")
 
 
 class UnsupportedRpcEnvelopeVersion(RPCException):
-    message = _("Specified RPC envelope version, %(version)s, "
+    msg_fmt = _("Specified RPC envelope version, %(version)s, "
                 "not supported by this endpoint.")
 
 
+class RpcVersionCapError(RPCException):
+    msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
+
+
 class Connection(object):
     """A connection, returned by rpc.create_connection().
 
@@ -202,9 +223,9 @@ class Connection(object):
         raise NotImplementedError()
 
     def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
-        """Register as a member of a group of consumers for a given topic from
-        the specified exchange.
+        """Register as a member of a group of consumers.
 
+        Uses given topic from the specified exchange.
         Exactly one member of a given pool will receive each message.
 
         A message will be delivered to multiple pools, if more than
@@ -239,41 +260,20 @@ class Connection(object):
 
 def _safe_log(log_func, msg, msg_data):
     """Sanitizes the msg_data field before logging."""
-    SANITIZE = {'set_admin_password': [('args', 'new_pass')],
-                'run_instance': [('args', 'admin_password')],
-                'route_message': [('args', 'message', 'args', 'method_info',
-                                   'method_kwargs', 'password'),
-                                  ('args', 'message', 'args', 'method_info',
-                                   'method_kwargs', 'admin_password')]}
-
-    has_method = 'method' in msg_data and msg_data['method'] in SANITIZE
-    has_context_token = '_context_auth_token' in msg_data
-    has_token = 'auth_token' in msg_data
+    SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']
 
-    if not any([has_method, has_context_token, has_token]):
-        return log_func(msg, msg_data)
-
-    msg_data = copy.deepcopy(msg_data)
-
-    if has_method:
-        for arg in SANITIZE.get(msg_data['method'], []):
-            try:
-                d = msg_data
-                for elem in arg[:-1]:
-                    d = d[elem]
-                d[arg[-1]] = '<SANITIZED>'
-            except KeyError, e:
-                LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'),
-                         {'item': arg,
-                          'err': e})
-
-    if has_context_token:
-        msg_data['_context_auth_token'] = '<SANITIZED>'
-
-    if has_token:
-        msg_data['auth_token'] = '<SANITIZED>'
-
-    return log_func(msg, msg_data)
+    def _fix_passwords(d):
+        """Sanitizes the password fields in the dictionary."""
+        for k in six.iterkeys(d):
+            if k.lower().find('password') != -1:
+                d[k] = '<SANITIZED>'
+            elif k.lower() in SANITIZE:
+                d[k] = '<SANITIZED>'
+            elif isinstance(d[k], dict):
+                _fix_passwords(d[k])
+        return d
+
+    return log_func(msg, _fix_passwords(copy.deepcopy(msg_data)))
 
 
 def serialize_remote_exception(failure_info, log_failure=True):
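A runnable reduction of the new sanitizer: the per-method path table is replaced by a recursive, key-based scrub, so any nested dict entry whose key contains 'password' (or appears in SANITIZE) is masked. The sample payload is invented:

    import copy

    SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']

    def _fix_passwords(d):
        for k in list(d):
            if 'password' in k.lower() or k.lower() in SANITIZE:
                d[k] = '<SANITIZED>'
            elif isinstance(d[k], dict):
                _fix_passwords(d[k])
        return d

    msg = {'method': 'run_instance',
           'args': {'admin_password': 'hunter2',
                    'context': {'auth_token': 'abc123', 'user': 'alice'}}}
    print(_fix_passwords(copy.deepcopy(msg)))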
@@ -285,17 +285,27 @@ def serialize_remote_exception(failure_info, log_failure=True):
     tb = traceback.format_exception(*failure_info)
     failure = failure_info[1]
     if log_failure:
-        LOG.error(_("Returning exception %s to caller"), unicode(failure))
+        LOG.error(_("Returning exception %s to caller"),
+                  six.text_type(failure))
         LOG.error(tb)
 
     kwargs = {}
     if hasattr(failure, 'kwargs'):
         kwargs = failure.kwargs
 
+    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
+    # exceptions. Lets turn it back into the original exception type.
+    cls_name = str(failure.__class__.__name__)
+    mod_name = str(failure.__class__.__module__)
+    if (cls_name.endswith(_REMOTE_POSTFIX) and
+            mod_name.endswith(_REMOTE_POSTFIX)):
+        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
+        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
+
     data = {
-        'class': str(failure.__class__.__name__),
-        'module': str(failure.__class__.__module__),
-        'message': unicode(failure),
+        'class': cls_name,
+        'module': mod_name,
+        'message': six.text_type(failure),
         'tb': tb,
         'args': failure.args,
         'kwargs': kwargs
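The new cls_name/mod_name handling exists so that an exception which is already a re-hydrated remote type (see deserialize_remote_exception below) can cross a second RPC boundary without accumulating suffixes. The mechanics in isolation, with hypothetical names:

    _REMOTE_POSTFIX = '_Remote'

    cls_name = 'DomainNotFound' + _REMOTE_POSTFIX
    mod_name = 'designate.exceptions' + _REMOTE_POSTFIX
    if cls_name.endswith(_REMOTE_POSTFIX) and mod_name.endswith(_REMOTE_POSTFIX):
        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
    print(cls_name, mod_name)   # DomainNotFound designate.exceptions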
@@ -325,14 +335,15 @@ def deserialize_remote_exception(conf, data):
         if not issubclass(klass, Exception):
             raise TypeError("Can only deserialize Exceptions")
 
-        failure = klass(**failure.get('kwargs', {}))
+        failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
     except (AttributeError, TypeError, ImportError):
         return RemoteError(name, failure.get('message'), trace)
 
     ex_type = type(failure)
     str_override = lambda self: message
-    new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
+    new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
                        {'__str__': str_override, '__unicode__': str_override})
+    new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
     try:
         # NOTE(ameade): Dynamically create a new exception type and swap it in
         # as the new type for the exception. This only works on user defined
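What the dynamic type() call produces, in standalone form: a subclass of the original exception whose name and module carry the _Remote marker, with __str__ overridden so the server-side traceback text is preserved. The exception and message below are invented:

    _REMOTE_POSTFIX = '_Remote'
    message = 'domain not found\n<server-side traceback here>'

    failure = ValueError('domain not found')
    str_override = lambda self: message
    new_ex_type = type(type(failure).__name__ + _REMOTE_POSTFIX,
                       (type(failure),),
                       {'__str__': str_override, '__unicode__': str_override})
    new_ex_type.__module__ = 'builtins' + _REMOTE_POSTFIX
    failure.__class__ = new_ex_type
    print(type(failure).__name__)   # ValueError_Remote
    print(failure)                  # includes the remote traceback text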
@@ -394,10 +405,11 @@ class CommonRpcContext(object):
 
 
 class ClientException(Exception):
-    """This encapsulates some actual exception that is expected to be
-    hit by an RPC proxy object. Merely instantiating it records the
-    current exception information, which will be passed back to the
-    RPC client without exceptional logging."""
+    """Encapsulates actual exception expected to be hit by a RPC proxy object.
+
+    Merely instantiating it records the current exception information, which
+    will be passed back to the RPC client without exceptional logging.
+    """
     def __init__(self):
         self._exc_info = sys.exc_info()
 
@@ -405,7 +417,7 @@ class ClientException(Exception):
 def catch_client_exception(exceptions, func, *args, **kwargs):
     try:
         return func(*args, **kwargs)
-    except Exception, e:
+    except Exception as e:
         if type(e) in exceptions:
             raise ClientException()
         else:
@@ -414,11 +426,13 @@ def catch_client_exception(exceptions, func, *args, **kwargs):
 
 def client_exceptions(*exceptions):
     """Decorator for manager methods that raise expected exceptions.
 
     Marking a Manager method with this decorator allows the declaration
     of expected exceptions that the RPC layer should not consider fatal,
     and not log as if they were generated in a real error scenario. Note
     that this will cause listed exceptions to be wrapped in a
-    ClientException, which is used internally by the RPC layer."""
+    ClientException, which is used internally by the RPC layer.
+    """
     def outer(func):
         def inner(*args, **kwargs):
             return catch_client_exception(exceptions, func, *args, **kwargs)
@@ -426,25 +440,18 @@ def client_exceptions(*exceptions):
     return outer
 
 
+# TODO(sirp): we should deprecate this in favor of
+# using `versionutils.is_compatible` directly
 def version_is_compatible(imp_version, version):
     """Determine whether versions are compatible.
 
     :param imp_version: The version implemented
     :param version: The version requested by an incoming message.
     """
-    version_parts = version.split('.')
-    imp_version_parts = imp_version.split('.')
-    if int(version_parts[0]) != int(imp_version_parts[0]):  # Major
-        return False
-    if int(version_parts[1]) > int(imp_version_parts[1]):  # Minor
-        return False
-    return True
+    return versionutils.is_compatible(version, imp_version)
 
 
-def serialize_msg(raw_msg, force_envelope=False):
-    if not _SEND_RPC_ENVELOPE and not force_envelope:
-        return raw_msg
-
+def serialize_msg(raw_msg):
     # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
     # information about this format.
     msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
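versionutils.is_compatible keeps the contract the removed code implemented by hand: the major versions must match exactly, and the requested minor version must not exceed the one implemented. A standalone equivalent for reference:

    def is_compatible(requested, current):
        req_major, req_minor = (int(p) for p in requested.split('.')[:2])
        cur_major, cur_minor = (int(p) for p in current.split('.')[:2])
        return req_major == cur_major and req_minor <= cur_minor

    print(is_compatible('1.3', '1.5'))  # True: older client, newer server
    print(is_compatible('1.7', '1.5'))  # False: method newer than the server
    print(is_compatible('2.0', '1.5'))  # False: major version mismatch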
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -83,7 +81,10 @@ On the client side, the same changes should be made as in example 1. The
 minimum version that supports the new parameter should be specified.
 """
 
+import six
+
 from designate.openstack.common.rpc import common as rpc_common
+from designate.openstack.common.rpc import serializer as rpc_serializer
 
 
 class RpcDispatcher(object):
@@ -93,23 +94,48 @@ class RpcDispatcher(object):
     contains a list of underlying managers that have an API_VERSION attribute.
     """
 
-    def __init__(self, callbacks):
+    def __init__(self, callbacks, serializer=None):
         """Initialize the rpc dispatcher.
 
         :param callbacks: List of proxy objects that are an instance
                           of a class with rpc methods exposed. Each proxy
                           object should have an RPC_API_VERSION attribute.
+        :param serializer: The Serializer object that will be used to
+                           deserialize arguments before the method call and
+                           to serialize the result after it returns.
         """
         self.callbacks = callbacks
+        if serializer is None:
+            serializer = rpc_serializer.NoOpSerializer()
+        self.serializer = serializer
         super(RpcDispatcher, self).__init__()
 
-    def dispatch(self, ctxt, version, method, **kwargs):
+    def _deserialize_args(self, context, kwargs):
+        """Helper method called to deserialize args before dispatch.
+
+        This calls our serializer on each argument, returning a new set of
+        args that have been deserialized.
+
+        :param context: The request context
+        :param kwargs: The arguments to be deserialized
+        :returns: A new set of deserialized args
+        """
+        new_kwargs = dict()
+        for argname, arg in six.iteritems(kwargs):
+            new_kwargs[argname] = self.serializer.deserialize_entity(context,
+                                                                     arg)
+        return new_kwargs
+
+    def dispatch(self, ctxt, version, method, namespace, **kwargs):
         """Dispatch a message based on a requested version.
 
         :param ctxt: The request context
         :param version: The requested API version from the incoming message
         :param method: The method requested to be called by the incoming
                        message.
+        :param namespace: The namespace for the requested method. If None,
+                          the dispatcher will look for a method on a callback
+                          object with no namespace set.
         :param kwargs: A dict of keyword arguments to be passed to the method.
 
         :returns: Whatever is returned by the underlying method that gets
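A sketch of the serializer seam this adds. NoOpSerializer mirrors the default used when no serializer is passed; a custom serializer can inflate primitive arguments into richer objects before the manager method runs (UpperSerializer is a toy stand-in):

    class NoOpSerializer(object):
        def serialize_entity(self, context, entity):
            return entity

        def deserialize_entity(self, context, entity):
            return entity

    class UpperSerializer(NoOpSerializer):
        def deserialize_entity(self, context, entity):
            return entity.upper() if isinstance(entity, str) else entity

    def deserialize_args(serializer, context, kwargs):
        return dict((k, serializer.deserialize_entity(context, v))
                    for k, v in kwargs.items())

    print(deserialize_args(UpperSerializer(), None, {'name': 'example.org.'}))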
@@ -120,17 +146,31 @@ class RpcDispatcher(object):
 
         had_compatible = False
         for proxyobj in self.callbacks:
-            if hasattr(proxyobj, 'RPC_API_VERSION'):
+            # Check for namespace compatibility
+            try:
+                cb_namespace = proxyobj.RPC_API_NAMESPACE
+            except AttributeError:
+                cb_namespace = None
+
+            if namespace != cb_namespace:
+                continue
+
+            # Check for version compatibility
+            try:
                 rpc_api_version = proxyobj.RPC_API_VERSION
-            else:
+            except AttributeError:
                 rpc_api_version = '1.0'
 
             is_compatible = rpc_common.version_is_compatible(rpc_api_version,
                                                              version)
             had_compatible = had_compatible or is_compatible
 
             if not hasattr(proxyobj, method):
                 continue
             if is_compatible:
-                return getattr(proxyobj, method)(ctxt, **kwargs)
+                kwargs = self._deserialize_args(ctxt, kwargs)
+                result = getattr(proxyobj, method)(ctxt, **kwargs)
+                return self.serializer.serialize_entity(ctxt, result)
 
         if had_compatible:
             raise AttributeError("No such RPC function '%s'" % method)
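A runnable reduction of the namespace check added to dispatch(): a callback is considered only when its RPC_API_NAMESPACE matches the namespace carried by the message, with None matching callbacks that declare no namespace:

    class BaseAPI(object):
        def ping(self, ctxt):
            return 'base pong'

    class AgentAPI(object):
        RPC_API_NAMESPACE = 'agent'

        def ping(self, ctxt):
            return 'agent pong'

    def dispatch(callbacks, namespace, method, ctxt=None):
        for proxyobj in callbacks:
            if namespace != getattr(proxyobj, 'RPC_API_NAMESPACE', None):
                continue
            return getattr(proxyobj, method)(ctxt)
        raise AttributeError("No such RPC function '%s'" % method)

    callbacks = [BaseAPI(), AgentAPI()]
    print(dispatch(callbacks, None, 'ping'))     # base pong
    print(dispatch(callbacks, 'agent', 'ping'))  # agent pong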
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -26,6 +24,7 @@ import json
 import time
 
 import eventlet
+import six
 
 from designate.openstack.common.rpc import common as rpc_common
 
@@ -57,18 +56,19 @@ class Consumer(object):
         self.topic = topic
         self.proxy = proxy
 
-    def call(self, context, version, method, args, timeout):
+    def call(self, context, version, method, namespace, args, timeout):
         done = eventlet.event.Event()
 
         def _inner():
             ctxt = RpcContext.from_dict(context.to_dict())
             try:
-                rval = self.proxy.dispatch(context, version, method, **args)
+                rval = self.proxy.dispatch(context, version, method,
+                                           namespace, **args)
                 res = []
                 # Caller might have called ctxt.reply() manually
                 for (reply, failure) in ctxt._response:
                     if failure:
-                        raise failure[0], failure[1], failure[2]
+                        six.reraise(failure[0], failure[1], failure[2])
                     res.append(reply)
                 # if ending not 'sent'...we might have more data to
                 # return from the function itself
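The three-argument raise being replaced is Python-2-only syntax; six.reraise re-raises a stored (type, value, traceback) triple on either interpreter while preserving the original traceback:

    import sys
    import traceback
    import six

    def fail():
        raise ValueError('boom')

    try:
        fail()
    except ValueError:
        exc_info = sys.exc_info()

    try:
        six.reraise(*exc_info)
    except ValueError:
        traceback.print_exc()   # the traceback still points back into fail()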
@@ -121,7 +121,7 @@ class Connection(object):
 
 
 def create_connection(conf, new=True):
-    """Create a connection"""
+    """Create a connection."""
     return Connection()
 
 
@@ -140,13 +140,15 @@ def multicall(conf, context, topic, msg, timeout=None):
         return
     args = msg.get('args', {})
     version = msg.get('version', None)
+    namespace = msg.get('namespace', None)
 
     try:
         consumer = CONSUMERS[topic][0]
     except (KeyError, IndexError):
-        return iter([None])
+        raise rpc_common.Timeout("No consumers available")
     else:
-        return consumer.call(context, version, method, args, timeout)
+        return consumer.call(context, version, method, namespace, args,
+                             timeout)
 
 
 def call(conf, context, topic, msg, timeout=None):
@@ -176,16 +178,17 @@ def cleanup():
 
 
 def fanout_cast(conf, context, topic, msg):
-    """Cast to all consumers of a topic"""
+    """Cast to all consumers of a topic."""
     check_serialize(msg)
     method = msg.get('method')
     if not method:
         return
     args = msg.get('args', {})
     version = msg.get('version', None)
+    namespace = msg.get('namespace', None)
 
     for consumer in CONSUMERS.get(topic, []):
         try:
-            consumer.call(context, version, method, args, None)
+            consumer.call(context, version, method, namespace, args, None)
         except Exception:
             pass
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -18,7 +16,6 @@ import functools
 import itertools
 import socket
 import ssl
-import sys
 import time
 import uuid
 
@@ -29,16 +26,22 @@ import kombu.connection
 import kombu.entity
 import kombu.messaging
 from oslo.config import cfg
+import six
 
+from designate.openstack.common import excutils
 from designate.openstack.common.gettextutils import _
 from designate.openstack.common import network_utils
 from designate.openstack.common.rpc import amqp as rpc_amqp
 from designate.openstack.common.rpc import common as rpc_common
+from designate.openstack.common import sslutils
 
 kombu_opts = [
     cfg.StrOpt('kombu_ssl_version',
                default='',
-               help='SSL version to use (valid only if SSL enabled)'),
+               help='SSL version to use (valid only if SSL enabled). '
+                    'valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may '
+                    'be available on some distributions'
+               ),
     cfg.StrOpt('kombu_ssl_keyfile',
                default='',
                help='SSL key file (valid only if SSL enabled)'),
@@ -82,9 +85,6 @@ kombu_opts = [
                default=0,
                help='maximum retries with trying to connect to RabbitMQ '
                     '(the default of 0 implies an infinite retry count)'),
-    cfg.BoolOpt('rabbit_durable_queues',
-                default=False,
-                help='use durable queues in RabbitMQ'),
     cfg.BoolOpt('rabbit_ha_queues',
                 default=False,
                 help='use H/A queues in RabbitMQ (x-ha-policy: all).'
@@ -129,15 +129,40 @@ class ConsumerBase(object):
         self.tag = str(tag)
         self.kwargs = kwargs
         self.queue = None
+        self.ack_on_error = kwargs.get('ack_on_error', True)
         self.reconnect(channel)
 
     def reconnect(self, channel):
-        """Re-declare the queue after a rabbit reconnect"""
+        """Re-declare the queue after a rabbit reconnect."""
         self.channel = channel
         self.kwargs['channel'] = channel
         self.queue = kombu.entity.Queue(**self.kwargs)
         self.queue.declare()
 
+    def _callback_handler(self, message, callback):
+        """Call callback with deserialized message.
+
+        Messages that are processed without exception are ack'ed.
+
+        If the message processing generates an exception, it will be
+        ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
+        """
+
+        try:
+            msg = rpc_common.deserialize_msg(message.payload)
+            callback(msg)
+        except Exception:
+            if self.ack_on_error:
+                LOG.exception(_("Failed to process message"
+                                " ... skipping it."))
+                message.ack()
+            else:
+                LOG.exception(_("Failed to process message"
+                                " ... will requeue."))
+                message.requeue()
+        else:
+            message.ack()
+
     def consume(self, *args, **kwargs):
         """Actually declare the consumer on the amqp channel. This will
         start the flow of messages from the queue. Using the
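The control flow of the new handler, reduced to a standalone sketch; FakeMessage stands in for a kombu message object:

    from __future__ import print_function

    class FakeMessage(object):
        def __init__(self, payload):
            self.payload = payload
            self.state = 'pending'

        def ack(self):
            self.state = 'acked'

        def requeue(self):
            self.state = 'requeued'

    def handle(message, callback, ack_on_error=True):
        try:
            callback(message.payload)
        except Exception:
            if ack_on_error:
                message.ack()      # drop the poison message
            else:
                message.requeue()  # let another consumer retry it
        else:
            message.ack()

    def bad_callback(payload):
        raise RuntimeError('bad payload')

    m1, m2 = FakeMessage({'a': 1}), FakeMessage({'a': 2})
    handle(m1, bad_callback, ack_on_error=False)
    handle(m2, lambda payload: None)
    print(m1.state, m2.state)   # requeued acked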
@@ -150,8 +175,6 @@ class ConsumerBase(object):
         If kwargs['nowait'] is True, then this call will block until
         a message is read.
 
-        Messages will automatically be acked if the callback doesn't
-        raise an exception
         """
 
         options = {'consumer_tag': self.tag}
@@ -162,21 +185,15 @@ class ConsumerBase(object):
 
         def _callback(raw_message):
             message = self.channel.message_to_python(raw_message)
-            try:
-                msg = rpc_common.deserialize_msg(message.payload)
-                callback(msg)
-            except Exception:
-                LOG.exception(_("Failed to process message... skipping it."))
-            finally:
-                message.ack()
+            self._callback_handler(message, callback)
 
         self.queue.consume(*args, callback=_callback, **options)
 
     def cancel(self):
-        """Cancel the consuming from the queue, if it has started"""
+        """Cancel the consuming from the queue, if it has started."""
         try:
             self.queue.cancel(self.tag)
-        except KeyError, e:
+        except KeyError as e:
             # NOTE(comstud): Kludge to get around a amqplib bug
             if str(e) != "u'%s'" % self.tag:
                 raise
@@ -184,7 +201,7 @@ class ConsumerBase(object):
 
 
 class DirectConsumer(ConsumerBase):
-    """Queue/consumer class for 'direct'"""
+    """Queue/consumer class for 'direct'."""
 
     def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
         """Init a 'direct' queue.
@@ -216,7 +233,7 @@ class DirectConsumer(ConsumerBase):
 
 
 class TopicConsumer(ConsumerBase):
-    """Consumer class for 'topic'"""
+    """Consumer class for 'topic'."""
 
     def __init__(self, conf, channel, topic, callback, tag, name=None,
                  exchange_name=None, **kwargs):
@@ -233,9 +250,9 @@ class TopicConsumer(ConsumerBase):
         Other kombu options may be passed as keyword arguments
         """
         # Default options
-        options = {'durable': conf.rabbit_durable_queues,
+        options = {'durable': conf.amqp_durable_queues,
                    'queue_arguments': _get_queue_arguments(conf),
-                   'auto_delete': False,
+                   'auto_delete': conf.amqp_auto_delete,
                    'exclusive': False}
         options.update(kwargs)
         exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
@@ -253,7 +270,7 @@ class TopicConsumer(ConsumerBase):
 
 
 class FanoutConsumer(ConsumerBase):
-    """Consumer class for 'fanout'"""
+    """Consumer class for 'fanout'."""
 
     def __init__(self, conf, channel, topic, callback, tag, **kwargs):
         """Init a 'fanout' queue.
@@ -286,7 +303,7 @@ class FanoutConsumer(ConsumerBase):
 
 
 class Publisher(object):
-    """Base Publisher class"""
+    """Base Publisher class."""
 
     def __init__(self, channel, exchange_name, routing_key, **kwargs):
         """Init the Publisher class with the exchange_name, routing_key,
@@ -298,7 +315,7 @@ class Publisher(object):
         self.reconnect(channel)
 
     def reconnect(self, channel):
-        """Re-establish the Producer after a rabbit reconnection"""
+        """Re-establish the Producer after a rabbit reconnection."""
         self.exchange = kombu.entity.Exchange(name=self.exchange_name,
                                               **self.kwargs)
         self.producer = kombu.messaging.Producer(exchange=self.exchange,
@@ -306,7 +323,7 @@ class Publisher(object):
                                                  routing_key=self.routing_key)
 
     def send(self, msg, timeout=None):
-        """Send a message"""
+        """Send a message."""
         if timeout:
             #
             # AMQP TTL is in milliseconds when set in the header.
@@ -317,7 +334,7 @@ class Publisher(object):
 
 
 class DirectPublisher(Publisher):
-    """Publisher class for 'direct'"""
+    """Publisher class for 'direct'."""
     def __init__(self, conf, channel, msg_id, **kwargs):
         """init a 'direct' publisher.
 
@@ -333,14 +350,14 @@ class DirectPublisher(Publisher):
 
 
 class TopicPublisher(Publisher):
-    """Publisher class for 'topic'"""
+    """Publisher class for 'topic'."""
     def __init__(self, conf, channel, topic, **kwargs):
         """init a 'topic' publisher.
 
         Kombu options may be passed as keyword args to override defaults
         """
-        options = {'durable': conf.rabbit_durable_queues,
-                   'auto_delete': False,
+        options = {'durable': conf.amqp_durable_queues,
+                   'auto_delete': conf.amqp_auto_delete,
                    'exclusive': False}
         options.update(kwargs)
         exchange_name = rpc_amqp.get_control_exchange(conf)
@@ -352,7 +369,7 @@ class TopicPublisher(Publisher):
 
 
 class FanoutPublisher(Publisher):
-    """Publisher class for 'fanout'"""
+    """Publisher class for 'fanout'."""
     def __init__(self, conf, channel, topic, **kwargs):
         """init a 'fanout' publisher.
 
@@ -367,10 +384,10 @@ class FanoutPublisher(Publisher):
 
 
 class NotifyPublisher(TopicPublisher):
-    """Publisher class for 'notify'"""
+    """Publisher class for 'notify'."""
 
     def __init__(self, conf, channel, topic, **kwargs):
-        self.durable = kwargs.pop('durable', conf.rabbit_durable_queues)
+        self.durable = kwargs.pop('durable', conf.amqp_durable_queues)
         self.queue_arguments = _get_queue_arguments(conf)
         super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
 
@@ -428,7 +445,7 @@ class Connection(object):
             'virtual_host': self.conf.rabbit_virtual_host,
         }
 
-        for sp_key, value in server_params.iteritems():
+        for sp_key, value in six.iteritems(server_params):
             p_key = server_params_to_kombu_params.get(sp_key, sp_key)
             params[p_key] = value
 
@@ -447,13 +464,15 @@ class Connection(object):
         self.reconnect()
 
     def _fetch_ssl_params(self):
-        """Handles fetching what ssl params
-        should be used for the connection (if any)"""
+        """Handles fetching what ssl params should be used for the connection
+        (if any).
+        """
         ssl_params = dict()
 
         # http://docs.python.org/library/ssl.html - ssl.wrap_socket
         if self.conf.kombu_ssl_version:
-            ssl_params['ssl_version'] = self.conf.kombu_ssl_version
+            ssl_params['ssl_version'] = sslutils.validate_ssl_version(
+                self.conf.kombu_ssl_version)
         if self.conf.kombu_ssl_keyfile:
             ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
         if self.conf.kombu_ssl_certfile:
@@ -464,12 +483,8 @@ class Connection(object):
             # future with this?
             ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
 
-        if not ssl_params:
-            # Just have the default behavior
-            return True
-        else:
-            # Return the extended behavior
-            return ssl_params
+        # Return the extended behavior or just have the default behavior
+        return ssl_params or True
 
     def _connect(self, params):
         """Connect to rabbit. Re-establish any queues that may have
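The collapsed return relies on dict truthiness: an empty dict means no explicit SSL options were configured, and the caller expects plain True in that case to get default SSL behaviour:

    def fetch_ssl_params(ssl_params):
        return ssl_params or True

    print(fetch_ssl_params({}))                       # True (default behaviour)
    print(fetch_ssl_params({'ca_certs': 'ca.pem'}))   # explicit options win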
@@ -520,7 +535,7 @@ class Connection(object):
                 return
             except (IOError, self.connection_errors) as e:
                 pass
-            except Exception, e:
+            except Exception as e:
                 # NOTE(comstud): Unfortunately it's possible for amqplib
                 # to return an error not covered by its transport
                 # connection_errors in the case of a timeout waiting for
@@ -536,13 +551,11 @@ class Connection(object):
             log_info.update(params)
 
             if self.max_retries and attempt == self.max_retries:
-                LOG.error(_('Unable to connect to AMQP server on '
-                            '%(hostname)s:%(port)d after %(max_retries)d '
-                            'tries: %(err_str)s') % log_info)
-                # NOTE(comstud): Copied from original code. There's
-                # really no better recourse because if this was a queue we
-                # need to consume on, we have no way to consume anymore.
-                sys.exit(1)
+                msg = _('Unable to connect to AMQP server on '
+                        '%(hostname)s:%(port)d after %(max_retries)d '
+                        'tries: %(err_str)s') % log_info
+                LOG.error(msg)
+                raise rpc_common.RPCException(msg)
 
             if attempt == 1:
                 sleep_time = self.interval_start or 1
@@ -561,10 +574,10 @@ class Connection(object):
         while True:
             try:
                 return method(*args, **kwargs)
-            except (self.connection_errors, socket.timeout, IOError), e:
+            except (self.connection_errors, socket.timeout, IOError) as e:
                 if error_callback:
                     error_callback(e)
-            except Exception, e:
+            except Exception as e:
                 # NOTE(comstud): Unfortunately it's possible for amqplib
                 # to return an error not covered by its transport
                 # connection_errors in the case of a timeout waiting for
@@ -578,18 +591,18 @@ class Connection(object):
             self.reconnect()
 
     def get_channel(self):
-        """Convenience call for bin/clear_rabbit_queues"""
+        """Convenience call for bin/clear_rabbit_queues."""
         return self.channel
 
     def close(self):
-        """Close/release this connection"""
+        """Close/release this connection."""
         self.cancel_consumer_thread()
         self.wait_on_proxy_callbacks()
         self.connection.release()
         self.connection = None
 
     def reset(self):
-        """Reset a connection so it can be used again"""
+        """Reset a connection so it can be used again."""
         self.cancel_consumer_thread()
         self.wait_on_proxy_callbacks()
         self.channel.close()
@@ -611,14 +624,14 @@ class Connection(object):
 
         def _declare_consumer():
             consumer = consumer_cls(self.conf, self.channel, topic, callback,
-                                    self.consumer_num.next())
+                                    six.next(self.consumer_num))
             self.consumers.append(consumer)
             return consumer
 
         return self.ensure(_connect_error, _declare_consumer)
 
     def iterconsume(self, limit=None, timeout=None):
-        """Return an iterator that will consume from all queues/consumers"""
+        """Return an iterator that will consume from all queues/consumers."""
 
         info = {'do_consume': True}
 
@@ -634,8 +647,8 @@ class Connection(object):
 
         def _consume():
             if info['do_consume']:
-                queues_head = self.consumers[:-1]
-                queues_tail = self.consumers[-1]
+                queues_head = self.consumers[:-1]  # not fanout.
+                queues_tail = self.consumers[-1]  # fanout
                 for queue in queues_head:
                     queue.consume(nowait=True)
                 queues_tail.consume(nowait=False)
@@ -648,7 +661,7 @@ class Connection(object):
             yield self.ensure(_error_callback, _consume)
 
     def cancel_consumer_thread(self):
-        """Cancel a consumer thread"""
+        """Cancel a consumer thread."""
         if self.consumer_thread is not None:
             self.consumer_thread.kill()
             try:
@@ -663,7 +676,7 @@ class Connection(object):
             proxy_cb.wait()
 
     def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
-        """Send to a publisher based on the publisher class"""
+        """Send to a publisher based on the publisher class."""
 
         def _error_callback(exc):
             log_info = {'topic': topic, 'err_str': str(exc)}
@@ -684,45 +697,47 @@ class Connection(object):
         self.declare_consumer(DirectConsumer, topic, callback)
 
     def declare_topic_consumer(self, topic, callback=None, queue_name=None,
-                               exchange_name=None):
+                               exchange_name=None, ack_on_error=True):
         """Create a 'topic' consumer."""
         self.declare_consumer(functools.partial(TopicConsumer,
                                                 name=queue_name,
                                                 exchange_name=exchange_name,
+                                                ack_on_error=ack_on_error,
                                                 ),
                               topic, callback)
 
     def declare_fanout_consumer(self, topic, callback):
-        """Create a 'fanout' consumer"""
+        """Create a 'fanout' consumer."""
         self.declare_consumer(FanoutConsumer, topic, callback)
 
     def direct_send(self, msg_id, msg):
-        """Send a 'direct' message"""
+        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)
 
     def topic_send(self, topic, msg, timeout=None):
-        """Send a 'topic' message"""
+        """Send a 'topic' message."""
         self.publisher_send(TopicPublisher, topic, msg, timeout)
 
     def fanout_send(self, topic, msg):
-        """Send a 'fanout' message"""
+        """Send a 'fanout' message."""
         self.publisher_send(FanoutPublisher, topic, msg)
 
     def notify_send(self, topic, msg, **kwargs):
-        """Send a notify message on a topic"""
+        """Send a notify message on a topic."""
         self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)
 
     def consume(self, limit=None):
-        """Consume from all queues/consumers"""
+        """Consume from all queues/consumers."""
         it = self.iterconsume(limit=limit)
         while True:
             try:
-                it.next()
+                six.next(it)
             except StopIteration:
                 return
 
     def consume_in_thread(self):
-        """Consumer from all queues/consumers in a greenthread"""
+        """Consumer from all queues/consumers in a greenthread."""
+        @excutils.forever_retry_uncaught_exceptions
         def _consumer_thread():
             try:
                 self.consume()
@@ -733,7 +748,7 @@ class Connection(object):
         return self.consumer_thread
 
     def create_consumer(self, topic, proxy, fanout=False):
-        """Create a consumer that calls a method in a proxy object"""
+        """Create a consumer that calls a method in a proxy object."""
         proxy_cb = rpc_amqp.ProxyCallback(
             self.conf, proxy,
             rpc_amqp.get_connection_pool(self.conf, Connection))
@@ -745,7 +760,7 @@ class Connection(object):
             self.declare_topic_consumer(topic, proxy_cb)
 
     def create_worker(self, topic, proxy, pool_name):
-        """Create a worker that calls a method in a proxy object"""
+        """Create a worker that calls a method in a proxy object."""
         proxy_cb = rpc_amqp.ProxyCallback(
             self.conf, proxy,
             rpc_amqp.get_connection_pool(self.conf, Connection))
@@ -753,7 +768,7 @@ class Connection(object):
         self.declare_topic_consumer(topic, proxy_cb, pool_name)
 
     def join_consumer_pool(self, callback, pool_name, topic,
-                           exchange_name=None):
+                           exchange_name=None, ack_on_error=True):
         """Register as a member of a group of consumers for a given topic from
         the specified exchange.
 
@@ -767,6 +782,7 @@ class Connection(object):
             callback=callback,
             connection_pool=rpc_amqp.get_connection_pool(self.conf,
                                                          Connection),
+            wait_for_consumers=not ack_on_error
         )
         self.proxy_callbacks.append(callback_wrapper)
         self.declare_topic_consumer(
@@ -774,11 +790,12 @@ class Connection(object):
             topic=topic,
             exchange_name=exchange_name,
             callback=callback_wrapper,
+            ack_on_error=ack_on_error,
         )
 
 
 def create_connection(conf, new=True):
-    """Create a connection"""
+    """Create a connection."""
     return rpc_amqp.create_connection(
         conf, new,
         rpc_amqp.get_connection_pool(conf, Connection))
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation
 # Copyright 2011 - 2012, Red Hat, Inc.
 #
@@ -18,12 +16,13 @@
 import functools
 import itertools
 import time
-import uuid
 
 import eventlet
 import greenlet
 from oslo.config import cfg
+import six
 
+from designate.openstack.common import excutils
 from designate.openstack.common.gettextutils import _
 from designate.openstack.common import importutils
 from designate.openstack.common import jsonutils
@@ -31,6 +30,7 @@ from designate.openstack.common import log as logging
 from designate.openstack.common.rpc import amqp as rpc_amqp
 from designate.openstack.common.rpc import common as rpc_common
 
+qpid_codec = importutils.try_import("qpid.codec010")
 qpid_messaging = importutils.try_import("qpid.messaging")
 qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
 
@@ -65,6 +65,17 @@ qpid_opts = [
     cfg.BoolOpt('qpid_tcp_nodelay',
                 default=True,
                 help='Disable Nagle algorithm'),
+    # NOTE(russellb) If any additional versions are added (beyond 1 and 2),
+    # this file could probably use some additional refactoring so that the
+    # differences between each version are split into different classes.
+    cfg.IntOpt('qpid_topology_version',
+               default=1,
+               help="The qpid topology version to use. Version 1 is what "
+                    "was originally used by impl_qpid. Version 2 includes "
+                    "some backwards-incompatible changes that allow broker "
+                    "federation to work. Users should update to version 2 "
+                    "when they are able to take everything down, as it "
+                    "requires a clean break."),
 ]
 
 cfg.CONF.register_opts(qpid_opts)
@@ -72,10 +83,17 @@ cfg.CONF.register_opts(qpid_opts)
 JSON_CONTENT_TYPE = 'application/json; charset=utf8'
 
 
+def raise_invalid_topology_version(conf):
+    msg = (_("Invalid value for qpid_topology_version: %d") %
+           conf.qpid_topology_version)
+    LOG.error(msg)
+    raise Exception(msg)
+
+
 class ConsumerBase(object):
     """Consumer base class."""
 
-    def __init__(self, session, callback, node_name, node_opts,
+    def __init__(self, conf, session, callback, node_name, node_opts,
                  link_name, link_opts):
         """Declare a queue on an amqp session.
 
@@ -93,34 +111,55 @@ class ConsumerBase(object):
         self.receiver = None
         self.session = None
 
-        addr_opts = {
-            "create": "always",
-            "node": {
-                "type": "topic",
-                "x-declare": {
-                    "durable": True,
-                    "auto-delete": True,
-                },
-            },
-            "link": {
-                "name": link_name,
-                "durable": True,
-                "x-declare": {
-                    "durable": False,
-                    "auto-delete": True,
-                    "exclusive": False,
-                },
-            },
-        }
-        addr_opts["node"]["x-declare"].update(node_opts)
+        if conf.qpid_topology_version == 1:
+            addr_opts = {
+                "create": "always",
+                "node": {
+                    "type": "topic",
+                    "x-declare": {
+                        "durable": True,
+                        "auto-delete": True,
+                    },
+                },
+                "link": {
+                    "durable": True,
+                    "x-declare": {
+                        "durable": False,
+                        "auto-delete": True,
+                        "exclusive": False,
+                    },
+                },
+            }
+            addr_opts["node"]["x-declare"].update(node_opts)
+        elif conf.qpid_topology_version == 2:
+            addr_opts = {
+                "link": {
+                    "x-declare": {
+                        "auto-delete": True,
+                        "exclusive": False,
+                    },
+                },
+            }
+        else:
+            raise_invalid_topology_version()
+
         addr_opts["link"]["x-declare"].update(link_opts)
+        if link_name:
+            addr_opts["link"]["name"] = link_name
 
         self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
 
-        self.reconnect(session)
+        self.connect(session)
+
+    def connect(self, session):
+        """Declare the receiver on connect."""
+        self._declare_receiver(session)
 
     def reconnect(self, session):
-        """Re-declare the receiver after a qpid reconnect"""
+        """Re-declare the receiver after a qpid reconnect."""
+        self._declare_receiver(session)
+
+    def _declare_receiver(self, session):
         self.session = session
         self.receiver = session.receiver(self.address)
         self.receiver.capacity = 1
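
The practical effect of the branch above is the qpid address string the consumer declares. A toy illustration of the two formats, with example exchange and topic names; the real code builds addr_opts from node_opts and link_opts and serializes with jsonutils:

    import json

    exchange, topic = 'openstack', 'central'

    # Version 1: the client declares the node itself; options follow the ';'
    addr_opts = {"create": "always",
                 "node": {"type": "topic", "x-declare": {"durable": True,
                                                         "auto-delete": True}}}
    v1_address = "%s/%s ; %s" % (exchange, topic, json.dumps(addr_opts))

    # Version 2: the broker's standard amq.topic exchange, which is what
    # allows broker federation to work
    v2_address = "amq.topic/topic/%s/%s" % (exchange, topic)

    print(v1_address)
    print(v2_address)
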
@@ -142,7 +181,7 @@ class ConsumerBase(object):
             msg.content_type = 'amqp/map'
 
     def consume(self):
-        """Fetch the message and pass it to the callback object"""
+        """Fetch the message and pass it to the callback object."""
         message = self.receiver.fetch()
         try:
             self._unpack_json_msg(message)
@@ -151,14 +190,18 @@ class ConsumerBase(object):
         except Exception:
             LOG.exception(_("Failed to process message... skipping it."))
         finally:
+            # TODO(sandy): Need support for optional ack_on_error.
             self.session.acknowledge(message)
 
     def get_receiver(self):
         return self.receiver
 
+    def get_node_name(self):
+        return self.address.split(';')[0]
+
 
 class DirectConsumer(ConsumerBase):
-    """Queue/consumer class for 'direct'"""
+    """Queue/consumer class for 'direct'."""
 
     def __init__(self, conf, session, msg_id, callback):
         """Init a 'direct' queue.
@@ -168,15 +211,30 @@ class DirectConsumer(ConsumerBase):
         'callback' is the callback to call when messages are received
         """
 
-        super(DirectConsumer, self).__init__(session, callback,
-                                             "%s/%s" % (msg_id, msg_id),
-                                             {"type": "direct"},
-                                             msg_id,
-                                             {"exclusive": True})
+        link_opts = {
+            "auto-delete": conf.amqp_auto_delete,
+            "exclusive": True,
+            "durable": conf.amqp_durable_queues,
+        }
+
+        if conf.qpid_topology_version == 1:
+            node_name = "%s/%s" % (msg_id, msg_id)
+            node_opts = {"type": "direct"}
+            link_name = msg_id
+        elif conf.qpid_topology_version == 2:
+            node_name = "amq.direct/%s" % msg_id
+            node_opts = {}
+            link_name = None
+        else:
+            raise_invalid_topology_version()
+
+        super(DirectConsumer, self).__init__(conf, session, callback,
+                                             node_name, node_opts, link_name,
+                                             link_opts)
 
 
 class TopicConsumer(ConsumerBase):
-    """Consumer class for 'topic'"""
+    """Consumer class for 'topic'."""
 
     def __init__(self, conf, session, topic, callback, name=None,
                  exchange_name=None):
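
Note that the link options are now sourced from the shared amqp_auto_delete and amqp_durable_queues options rather than hard-coded literals. A trivial sketch of the resulting link declaration for a direct queue (the conf dict stands in for the oslo.config namespace; the values shown are only illustrative):

    conf = {'amqp_auto_delete': False, 'amqp_durable_queues': False}

    link_opts = {
        "auto-delete": conf['amqp_auto_delete'],
        "exclusive": True,                      # direct queues stay exclusive
        "durable": conf['amqp_durable_queues'],
    }
    print(link_opts)
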
@@ -190,13 +248,24 @@ class TopicConsumer(ConsumerBase):
         """
 
         exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
-        super(TopicConsumer, self).__init__(session, callback,
-                                            "%s/%s" % (exchange_name, topic),
-                                            {}, name or topic, {})
+        link_opts = {
+            "auto-delete": conf.amqp_auto_delete,
+            "durable": conf.amqp_durable_queues,
+        }
+
+        if conf.qpid_topology_version == 1:
+            node_name = "%s/%s" % (exchange_name, topic)
+        elif conf.qpid_topology_version == 2:
+            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
+        else:
+            raise_invalid_topology_version()
+
+        super(TopicConsumer, self).__init__(conf, session, callback, node_name,
+                                            {}, name or topic, link_opts)
 
 
 class FanoutConsumer(ConsumerBase):
-    """Consumer class for 'fanout'"""
+    """Consumer class for 'fanout'."""
 
     def __init__(self, conf, session, topic, callback):
         """Init a 'fanout' queue.
@@ -205,90 +274,165 @@ class FanoutConsumer(ConsumerBase):
         'topic' is the topic to listen on
         'callback' is the callback to call when messages are received
         """
+        self.conf = conf
 
-        super(FanoutConsumer, self).__init__(
-            session, callback,
-            "%s_fanout" % topic,
-            {"durable": False, "type": "fanout"},
-            "%s_fanout_%s" % (topic, uuid.uuid4().hex),
-            {"exclusive": True})
+        link_opts = {"exclusive": True}
+
+        if conf.qpid_topology_version == 1:
+            node_name = "%s_fanout" % topic
+            node_opts = {"durable": False, "type": "fanout"}
+        elif conf.qpid_topology_version == 2:
+            node_name = "amq.topic/fanout/%s" % topic
+            node_opts = {}
+        else:
+            raise_invalid_topology_version()
+
+        super(FanoutConsumer, self).__init__(conf, session, callback,
+                                             node_name, node_opts, None,
+                                             link_opts)
 
 
 class Publisher(object):
-    """Base Publisher class"""
+    """Base Publisher class."""
 
-    def __init__(self, session, node_name, node_opts=None):
+    def __init__(self, conf, session, node_name, node_opts=None):
         """Init the Publisher class with the exchange_name, routing_key,
         and other options
         """
         self.sender = None
         self.session = session
 
-        addr_opts = {
-            "create": "always",
-            "node": {
-                "type": "topic",
-                "x-declare": {
-                    "durable": False,
-                    # auto-delete isn't implemented for exchanges in qpid,
-                    # but put in here anyway
-                    "auto-delete": True,
-                },
-            },
-        }
-        if node_opts:
-            addr_opts["node"]["x-declare"].update(node_opts)
+        if conf.qpid_topology_version == 1:
+            addr_opts = {
+                "create": "always",
+                "node": {
+                    "type": "topic",
+                    "x-declare": {
+                        "durable": False,
+                        # auto-delete isn't implemented for exchanges in qpid,
+                        # but put in here anyway
+                        "auto-delete": True,
+                    },
+                },
+            }
+            if node_opts:
+                addr_opts["node"]["x-declare"].update(node_opts)
 
-        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
+            self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
+        elif conf.qpid_topology_version == 2:
+            self.address = node_name
+        else:
+            raise_invalid_topology_version()
 
         self.reconnect(session)
 
     def reconnect(self, session):
-        """Re-establish the Sender after a reconnection"""
+        """Re-establish the Sender after a reconnection."""
         self.sender = session.sender(self.address)
 
+    def _pack_json_msg(self, msg):
+        """Qpid cannot serialize dicts containing strings longer than 65535
+           characters. This function dumps the message content to a JSON
+           string, which Qpid is able to handle.
+
+        :param msg: May be either a Qpid Message object or a bare dict.
+        :returns: A Qpid Message with its content field JSON encoded.
+        """
+        try:
+            msg.content = jsonutils.dumps(msg.content)
+        except AttributeError:
+            # Need to have a Qpid message so we can set the content_type.
+            msg = qpid_messaging.Message(jsonutils.dumps(msg))
+        msg.content_type = JSON_CONTENT_TYPE
+        return msg
+
     def send(self, msg):
-        """Send a message"""
+        """Send a message."""
+        try:
+            # Check if Qpid can encode the message
+            check_msg = msg
+            if not hasattr(check_msg, 'content_type'):
+                check_msg = qpid_messaging.Message(msg)
+            content_type = check_msg.content_type
+            enc, dec = qpid_messaging.message.get_codec(content_type)
+            enc(check_msg.content)
+        except qpid_codec.CodecException:
+            # This means the message couldn't be serialized as a dict.
+            msg = self._pack_json_msg(msg)
         self.sender.send(msg)
 
 
 class DirectPublisher(Publisher):
-    """Publisher class for 'direct'"""
+    """Publisher class for 'direct'."""
     def __init__(self, conf, session, msg_id):
         """Init a 'direct' publisher."""
-        super(DirectPublisher, self).__init__(session, msg_id,
-                                              {"type": "Direct"})
+
+        if conf.qpid_topology_version == 1:
+            node_name = msg_id
+            node_opts = {"type": "direct"}
+        elif conf.qpid_topology_version == 2:
+            node_name = "amq.direct/%s" % msg_id
+            node_opts = {}
+        else:
+            raise_invalid_topology_version()
+
+        super(DirectPublisher, self).__init__(conf, session, node_name,
+                                              node_opts)
 
 
 class TopicPublisher(Publisher):
-    """Publisher class for 'topic'"""
+    """Publisher class for 'topic'."""
     def __init__(self, conf, session, topic):
-        """init a 'topic' publisher.
+        """Init a 'topic' publisher.
         """
         exchange_name = rpc_amqp.get_control_exchange(conf)
-        super(TopicPublisher, self).__init__(session,
-                                             "%s/%s" % (exchange_name, topic))
+
+        if conf.qpid_topology_version == 1:
+            node_name = "%s/%s" % (exchange_name, topic)
+        elif conf.qpid_topology_version == 2:
+            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
+        else:
+            raise_invalid_topology_version()
+
+        super(TopicPublisher, self).__init__(conf, session, node_name)
 
 
 class FanoutPublisher(Publisher):
-    """Publisher class for 'fanout'"""
+    """Publisher class for 'fanout'."""
     def __init__(self, conf, session, topic):
-        """init a 'fanout' publisher.
+        """Init a 'fanout' publisher.
         """
-        super(FanoutPublisher, self).__init__(
-            session,
-            "%s_fanout" % topic, {"type": "fanout"})
+
+        if conf.qpid_topology_version == 1:
+            node_name = "%s_fanout" % topic
+            node_opts = {"type": "fanout"}
+        elif conf.qpid_topology_version == 2:
+            node_name = "amq.topic/fanout/%s" % topic
+            node_opts = {}
+        else:
+            raise_invalid_topology_version()
+
+        super(FanoutPublisher, self).__init__(conf, session, node_name,
+                                              node_opts)
 
 
 class NotifyPublisher(Publisher):
-    """Publisher class for notifications"""
+    """Publisher class for notifications."""
     def __init__(self, conf, session, topic):
-        """init a 'topic' publisher.
+        """Init a 'topic' publisher.
         """
         exchange_name = rpc_amqp.get_control_exchange(conf)
-        super(NotifyPublisher, self).__init__(session,
-                                              "%s/%s" % (exchange_name, topic),
-                                              {"durable": True})
+        node_opts = {"durable": True}
+
+        if conf.qpid_topology_version == 1:
+            node_name = "%s/%s" % (exchange_name, topic)
+        elif conf.qpid_topology_version == 2:
+            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
+        else:
+            raise_invalid_topology_version()
+
+        super(NotifyPublisher, self).__init__(conf, session, node_name,
+                                              node_opts)
 
 
 class Connection(object):
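
Publisher.send() now probes whether Qpid's amqp/map codec can encode the payload and falls back to _pack_json_msg() when it cannot; the underlying limit is that map-encoded strings top out at 65535 characters. A self-contained sketch of the same decision without Qpid installed (the helper below is illustrative, not the driver's code):

    import json

    QPID_MAP_STR_LIMIT = 65535
    JSON_CONTENT_TYPE = 'application/json; charset=utf8'

    def pack_if_needed(content):
        # Stand-in for the enc()/CodecException probe in send(): any string
        # field too long for amqp/map forces the JSON path.
        def too_long(value):
            return isinstance(value, str) and len(value) > QPID_MAP_STR_LIMIT
        if isinstance(content, dict) and any(too_long(v)
                                             for v in content.values()):
            return json.dumps(content), JSON_CONTENT_TYPE
        return content, 'amqp/map'

    payload, content_type = pack_if_needed({'data': 'x' * 70000})
    assert content_type == JSON_CONTENT_TYPE
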
@@ -339,7 +483,7 @@ class Connection(object):
         # Reconnection is done by self.reconnect()
         self.connection.reconnect = False
         self.connection.heartbeat = self.conf.qpid_heartbeat
-        self.connection.protocol = self.conf.qpid_protocol
+        self.connection.transport = self.conf.qpid_protocol
         self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
 
     def _register_consumer(self, consumer):
@@ -349,7 +493,7 @@ class Connection(object):
         return self.consumers[str(receiver)]
 
     def reconnect(self):
-        """Handles reconnecting and re-establishing sessions and queues"""
+        """Handles reconnecting and re-establishing sessions and queues."""
         attempt = 0
         delay = 1
         while True:
@@ -366,7 +510,7 @@ class Connection(object):
             try:
                 self.connection_create(broker)
                 self.connection.open()
-            except qpid_exceptions.ConnectionError, e:
+            except qpid_exceptions.ConnectionError as e:
                 msg_dict = dict(e=e, delay=delay)
                 msg = _("Unable to connect to AMQP server: %(e)s. "
                         "Sleeping %(delay)s seconds") % msg_dict
@@ -383,7 +527,7 @@ class Connection(object):
         consumers = self.consumers
         self.consumers = {}
 
-        for consumer in consumers.itervalues():
+        for consumer in six.itervalues(consumers):
             consumer.reconnect(self.session)
             self._register_consumer(consumer)
 
@@ -394,20 +538,26 @@ class Connection(object):
             try:
                 return method(*args, **kwargs)
             except (qpid_exceptions.Empty,
-                    qpid_exceptions.ConnectionError), e:
+                    qpid_exceptions.ConnectionError) as e:
                 if error_callback:
                     error_callback(e)
                 self.reconnect()
 
     def close(self):
-        """Close/release this connection"""
+        """Close/release this connection."""
         self.cancel_consumer_thread()
         self.wait_on_proxy_callbacks()
-        self.connection.close()
+        try:
+            self.connection.close()
+        except Exception:
+            # NOTE(dripton) Logging exceptions that happen during cleanup just
+            # causes confusion; there's really nothing useful we can do with
+            # them.
+            pass
         self.connection = None
 
     def reset(self):
-        """Reset a connection so it can be used again"""
+        """Reset a connection so it can be used again."""
         self.cancel_consumer_thread()
         self.wait_on_proxy_callbacks()
         self.session.close()
@@ -431,7 +581,7 @@ class Connection(object):
         return self.ensure(_connect_error, _declare_consumer)
 
     def iterconsume(self, limit=None, timeout=None):
-        """Return an iterator that will consume from all queues/consumers"""
+        """Return an iterator that will consume from all queues/consumers."""
 
         def _error_callback(exc):
             if isinstance(exc, qpid_exceptions.Empty):
@@ -455,7 +605,7 @@ class Connection(object):
             yield self.ensure(_error_callback, _consume)
 
     def cancel_consumer_thread(self):
-        """Cancel a consumer thread"""
+        """Cancel a consumer thread."""
         if self.consumer_thread is not None:
             self.consumer_thread.kill()
             try:
@@ -470,7 +620,7 @@ class Connection(object):
             proxy_cb.wait()
 
     def publisher_send(self, cls, topic, msg):
-        """Send to a publisher based on the publisher class"""
+        """Send to a publisher based on the publisher class."""
 
         def _connect_error(exc):
             log_info = {'topic': topic, 'err_str': str(exc)}
@@ -500,15 +650,15 @@ class Connection(object):
                                    topic, callback)
 
     def declare_fanout_consumer(self, topic, callback):
-        """Create a 'fanout' consumer"""
+        """Create a 'fanout' consumer."""
         self.declare_consumer(FanoutConsumer, topic, callback)
 
     def direct_send(self, msg_id, msg):
-        """Send a 'direct' message"""
+        """Send a 'direct' message."""
         self.publisher_send(DirectPublisher, msg_id, msg)
 
     def topic_send(self, topic, msg, timeout=None):
-        """Send a 'topic' message"""
+        """Send a 'topic' message."""
         #
         # We want to create a message with attributes, e.g. a TTL. We
         # don't really need to keep 'msg' in its JSON format any longer
@@ -523,24 +673,25 @@ class Connection(object):
         self.publisher_send(TopicPublisher, topic, qpid_message)
 
     def fanout_send(self, topic, msg):
-        """Send a 'fanout' message"""
+        """Send a 'fanout' message."""
         self.publisher_send(FanoutPublisher, topic, msg)
 
     def notify_send(self, topic, msg, **kwargs):
-        """Send a notify message on a topic"""
+        """Send a notify message on a topic."""
         self.publisher_send(NotifyPublisher, topic, msg)
 
     def consume(self, limit=None):
-        """Consume from all queues/consumers"""
+        """Consume from all queues/consumers."""
         it = self.iterconsume(limit=limit)
         while True:
             try:
-                it.next()
+                six.next(it)
             except StopIteration:
                 return
 
     def consume_in_thread(self):
-        """Consumer from all queues/consumers in a greenthread"""
+        """Consumer from all queues/consumers in a greenthread."""
+        @excutils.forever_retry_uncaught_exceptions
         def _consumer_thread():
             try:
                 self.consume()
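
The consumer green thread is now wrapped in excutils.forever_retry_uncaught_exceptions, so a stray exception no longer kills it silently. Roughly what such a decorator provides, as a sketch (the real oslo helper is more careful about log flooding):

    import functools
    import logging
    import time

    def forever_retry_uncaught_exceptions(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            while True:
                try:
                    return fn(*args, **kwargs)
                except Exception:
                    # Log and keep the thread alive rather than dying silently.
                    logging.exception('Unexpected exception occurred; retrying')
                    time.sleep(1)
        return wrapper
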
@@ -551,7 +702,7 @@ class Connection(object):
         return self.consumer_thread
 
     def create_consumer(self, topic, proxy, fanout=False):
-        """Create a consumer that calls a method in a proxy object"""
+        """Create a consumer that calls a method in a proxy object."""
         proxy_cb = rpc_amqp.ProxyCallback(
             self.conf, proxy,
             rpc_amqp.get_connection_pool(self.conf, Connection))
@@ -567,7 +718,7 @@ class Connection(object):
         return consumer
 
     def create_worker(self, topic, proxy, pool_name):
-        """Create a worker that calls a method in a proxy object"""
+        """Create a worker that calls a method in a proxy object."""
         proxy_cb = rpc_amqp.ProxyCallback(
             self.conf, proxy,
             rpc_amqp.get_connection_pool(self.conf, Connection))
@@ -581,7 +732,7 @@ class Connection(object):
         return consumer
 
     def join_consumer_pool(self, callback, pool_name, topic,
-                           exchange_name=None):
+                           exchange_name=None, ack_on_error=True):
         """Register as a member of a group of consumers for a given topic from
         the specified exchange.
 
@@ -595,6 +746,7 @@ class Connection(object):
             callback=callback,
             connection_pool=rpc_amqp.get_connection_pool(self.conf,
                                                          Connection),
+            wait_for_consumers=not ack_on_error
         )
         self.proxy_callbacks.append(callback_wrapper)
 
@@ -610,7 +762,7 @@ class Connection(object):
 
 
 def create_connection(conf, new=True):
-    """Create a connection"""
+    """Create a connection."""
     return rpc_amqp.create_connection(
         conf, new,
         rpc_amqp.get_connection_pool(conf, Connection))
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 Cloudscaling Group, Inc
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -25,12 +23,13 @@ import uuid
 import eventlet
 import greenlet
 from oslo.config import cfg
+import six
+from six import moves
 
 from designate.openstack.common import excutils
 from designate.openstack.common.gettextutils import _
 from designate.openstack.common import importutils
 from designate.openstack.common import jsonutils
-from designate.openstack.common import processutils as utils
 from designate.openstack.common.rpc import common as rpc_common
 
 zmq = importutils.try_import('eventlet.green.zmq')
@@ -85,8 +84,8 @@ matchmaker = None  # memoized matchmaker object
 
 
 def _serialize(data):
-    """
-    Serialization wrapper
+    """Serialization wrapper.
+
     We prefer using JSON, but it cannot encode all types.
     Error if a developer passes us bad data.
     """
@@ -98,18 +97,15 @@ def _serialize(data):
 
 
 def _deserialize(data):
-    """
-    Deserialization wrapper
-    """
+    """Deserialization wrapper."""
     LOG.debug(_("Deserializing: %s"), data)
     return jsonutils.loads(data)
 
 
 class ZmqSocket(object):
-    """
-    A tiny wrapper around ZeroMQ to simplify the send/recv protocol
-    and connection management.
+    """A tiny wrapper around ZeroMQ.
 
+    Simplifies the send/recv protocol and connection management.
     Can be used as a Context (supports the 'with' statement).
     """
 
@@ -180,7 +176,7 @@ class ZmqSocket(object):
             return
 
         # We must unsubscribe, or we'll leak descriptors.
-        if len(self.subscriptions) > 0:
+        if self.subscriptions:
             for f in self.subscriptions:
                 try:
                     self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
@@ -196,38 +192,36 @@ class ZmqSocket(object):
             # it would be much worse if some of the code calling this
             # were to fail. For now, lets log, and later evaluate
             # if we can safely raise here.
-            LOG.error("ZeroMQ socket could not be closed.")
+            LOG.error(_("ZeroMQ socket could not be closed."))
         self.sock = None
 
-    def recv(self):
+    def recv(self, **kwargs):
         if not self.can_recv:
             raise RPCException(_("You cannot recv on this socket."))
-        return self.sock.recv_multipart()
+        return self.sock.recv_multipart(**kwargs)
 
-    def send(self, data):
+    def send(self, data, **kwargs):
         if not self.can_send:
             raise RPCException(_("You cannot send on this socket."))
-        self.sock.send_multipart(data)
+        self.sock.send_multipart(data, **kwargs)
 
 
 class ZmqClient(object):
     """Client for ZMQ sockets."""
 
-    def __init__(self, addr, socket_type=None, bind=False):
-        if socket_type is None:
-            socket_type = zmq.PUSH
-        self.outq = ZmqSocket(addr, socket_type, bind=bind)
+    def __init__(self, addr):
+        self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)
 
-    def cast(self, msg_id, topic, data, envelope=False):
+    def cast(self, msg_id, topic, data, envelope):
         msg_id = msg_id or 0
 
-        if not (envelope or rpc_common._SEND_RPC_ENVELOPE):
+        if not envelope:
             self.outq.send(map(bytes,
                            (msg_id, topic, 'cast', _serialize(data))))
             return
 
         rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
-        zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
+        zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items())
         self.outq.send(map(bytes,
                        (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
 
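
cast() flattens the serialized envelope dict into alternating key/value frames for the multipart send; moves.reduce is the six spelling that exists on both Python 2 and 3. A tiny illustration with example envelope keys:

    from six import moves

    rpc_envelope = {'oslo.version': '2.0', 'oslo.message': '{"method": "ping"}'}
    zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items())
    # One flat tuple of alternating keys and values:
    # ('oslo.version', '2.0', 'oslo.message', '{"method": "ping"}')
    print(zmq_msg)
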
@@ -276,12 +270,13 @@ class InternalContext(object):
 
         try:
             result = proxy.dispatch(
-                ctx, data['version'], data['method'], **data['args'])
+                ctx, data['version'], data['method'],
+                data.get('namespace'), **data['args'])
             return ConsumerBase.normalize_reply(result, ctx.replies)
         except greenlet.GreenletExit:
             # ignore these since they are just from shutdowns
             pass
-        except rpc_common.ClientException, e:
+        except rpc_common.ClientException as e:
             LOG.debug(_("Expected exception during message handling (%s)") %
                       e._exc_info[1])
             return {'exc':
@@ -295,11 +290,16 @@ class InternalContext(object):
     def reply(self, ctx, proxy,
               msg_id=None, context=None, topic=None, msg=None):
         """Reply to a casted call."""
-        # Our real method is curried into msg['args']
+        # NOTE(ewindisch): context kwarg exists for Grizzly compat.
+        #                  this may be able to be removed earlier than
+        #                  'I' if ConsumerBase.process were refactored.
+        if type(msg) is list:
+            payload = msg[-1]
+        else:
+            payload = msg
 
-        child_ctx = RpcContext.unmarshal(msg[0])
         response = ConsumerBase.normalize_reply(
-            self._get_response(child_ctx, proxy, topic, msg[1]),
+            self._get_response(ctx, proxy, topic, payload),
             ctx.replies)
 
         LOG.debug(_("Sending reply"))
@@ -346,20 +346,18 @@ class ConsumerBase(object):
             return
 
         proxy.dispatch(ctx, data['version'],
-                       data['method'], **data['args'])
+                       data['method'], data.get('namespace'), **data['args'])
 
 
 class ZmqBaseReactor(ConsumerBase):
-    """
-    A consumer class implementing a
-    centralized casting broker (PULL-PUSH)
-    for RoundRobin requests.
+    """A consumer class implementing a centralized casting broker (PULL-PUSH).
+
+    Used for RoundRobin requests.
     """
 
     def __init__(self, conf):
         super(ZmqBaseReactor, self).__init__()
 
-        self.mapping = {}
         self.proxies = {}
         self.threads = []
         self.sockets = []
@@ -367,9 +365,8 @@ class ZmqBaseReactor(ConsumerBase):
 
         self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
 
-    def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
-                 zmq_type_out=None, in_bind=True, out_bind=True,
-                 subscribe=None):
+    def register(self, proxy, in_addr, zmq_type_in,
+                 in_bind=True, subscribe=None):
 
         LOG.info(_("Registering reactor"))
 
@@ -385,22 +382,8 @@ class ZmqBaseReactor(ConsumerBase):
 
         LOG.info(_("In reactor registered"))
 
-        if not out_addr:
-            return
-
-        if zmq_type_out not in (zmq.PUSH, zmq.PUB):
-            raise RPCException("Bad output socktype")
-
-        # Items push out.
-        outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)
-
-        self.mapping[inq] = outq
-        self.mapping[outq] = inq
-        self.sockets.append(outq)
-
-        LOG.info(_("Out reactor registered"))
-
     def consume_in_thread(self):
+        @excutils.forever_retry_uncaught_exceptions
         def _consume(sock):
             LOG.info(_("Consuming socket"))
             while True:
@@ -424,10 +407,9 @@ class ZmqBaseReactor(ConsumerBase):
 
 
 class ZmqProxy(ZmqBaseReactor):
-    """
-    A consumer class implementing a
-    topic-based proxy, forwarding to
-    IPC sockets.
+    """A consumer class implementing a topic-based proxy.
+
+    Forwards to IPC sockets.
     """
 
    def __init__(self, conf):
@@ -440,11 +422,8 @@ class ZmqProxy(ZmqBaseReactor):
     def consume(self, sock):
         ipc_dir = CONF.rpc_zmq_ipc_dir
 
-        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
-        data = sock.recv()
-        topic = data[1]
-
-        LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
+        data = sock.recv(copy=False)
+        topic = data[1].bytes
 
         if topic.startswith('fanout~'):
             sock_type = zmq.PUB
@@ -486,9 +465,7 @@ class ZmqProxy(ZmqBaseReactor):
 
                 while(True):
                     data = self.topic_proxy[topic].get()
-                    out_sock.send(data)
-                    LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
-                              {'data': data})
+                    out_sock.send(data, copy=False)
 
             wait_sock_creation = eventlet.event.Event()
             eventlet.spawn(publisher, wait_sock_creation)
@@ -501,37 +478,34 @@ class ZmqProxy(ZmqBaseReactor):
 
         try:
             self.topic_proxy[topic].put_nowait(data)
-            LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
-                      {'data': data})
         except eventlet.queue.Full:
             LOG.error(_("Local per-topic backlog buffer full for topic "
                         "%(topic)s. Dropping message.") % {'topic': topic})
 
     def consume_in_thread(self):
-        """Runs the ZmqProxy service"""
+        """Runs the ZmqProxy service."""
         ipc_dir = CONF.rpc_zmq_ipc_dir
         consume_in = "tcp://%s:%s" % \
             (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
         consumption_proxy = InternalContext(None)
 
-        if not os.path.isdir(ipc_dir):
-            try:
-                utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
-                utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
-                              ipc_dir, run_as_root=True)
-                utils.execute('chmod', '750', ipc_dir, run_as_root=True)
-            except utils.ProcessExecutionError:
+        try:
+            os.makedirs(ipc_dir)
+        except os.error:
+            if not os.path.isdir(ipc_dir):
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_("Could not create IPC directory %s") %
-                              (ipc_dir, ))
+                    LOG.error(_("Required IPC directory does not exist at"
+                                " %s") % (ipc_dir, ))
 
         try:
             self.register(consumption_proxy,
                           consume_in,
-                          zmq.PULL,
-                          out_bind=True)
+                          zmq.PULL)
         except zmq.ZMQError:
+            if os.access(ipc_dir, os.X_OK):
+                with excutils.save_and_reraise_exception():
+                    LOG.error(_("Permission denied to IPC directory at"
+                                " %s") % (ipc_dir, ))
             with excutils.save_and_reraise_exception():
                 LOG.error(_("Could not create ZeroMQ receiver daemon. "
                             "Socket may already be in use."))
@@ -541,24 +515,24 @@ class ZmqProxy(ZmqBaseReactor):
 
 def unflatten_envelope(packenv):
     """Unflattens the RPC envelope.
-       Takes a list and returns a dictionary.
-       i.e. [1,2,3,4] => {1: 2, 3: 4}
+
+    Takes a list and returns a dictionary.
+    i.e. [1,2,3,4] => {1: 2, 3: 4}
     """
     i = iter(packenv)
     h = {}
     try:
         while True:
-            k = i.next()
-            h[k] = i.next()
+            k = six.next(i)
+            h[k] = six.next(i)
     except StopIteration:
         return h
 
 
 class ZmqReactor(ZmqBaseReactor):
-    """
-    A consumer class implementing a
-    consumer for messages. Can also be
-    used as a 1:1 proxy
+    """A consumer class implementing a consumer for messages.
+
+    Can also be used as a 1:1 proxy
    """
 
     def __init__(self, conf):
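
As the restyled docstring says, the inverse operation turns [1, 2, 3, 4] into {1: 2, 3: 4}. An equivalent compact form for comparison (a sketch only; the driver keeps its explicit loop):

    def unflatten(packenv):
        it = iter(packenv)
        # zip() pulls from the same iterator twice, pairing each key with
        # the value that follows it.
        return dict(zip(it, it))

    assert unflatten([1, 2, 3, 4]) == {1: 2, 3: 4}
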
@@ -568,11 +542,6 @@ class ZmqReactor(ZmqBaseReactor):
         #TODO(ewindisch): use zero-copy (i.e. references, not copying)
         data = sock.recv()
         LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
-        if sock in self.mapping:
-            LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
-                'data': data})
-            self.mapping[sock].send(data)
-            return
 
         proxy = self.proxies[sock]
 
@@ -685,8 +654,8 @@ def _call(addr, context, topic, msg, timeout=None,
         'method': '-reply',
         'args': {
             'msg_id': msg_id,
-            'context': mcontext,
             'topic': reply_topic,
+            # TODO(ewindisch): safe to remove mcontext in I.
             'msg': [mcontext, msg]
         }
     }
@@ -745,10 +714,9 @@ def _call(addr, context, topic, msg, timeout=None,
 
 def _multi_send(method, context, topic, msg, timeout=None,
                 envelope=False, _msg_id=None):
-    """
-    Wraps the sending of messages,
-    dispatches to the matchmaker and sends
-    message to all relevant hosts.
+    """Wraps the sending of messages.
+
+    Dispatches to the matchmaker and sends message to all relevant hosts.
     """
     conf = CONF
     LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
@@ -757,7 +725,7 @@ def _multi_send(method, context, topic, msg, timeout=None,
     LOG.debug(_("Sending message(s) to: %s"), queues)
 
     # Don't stack if we have no matchmaker results
-    if len(queues) == 0:
+    if not queues:
         LOG.warn(_("No matchmaker results. Not casting."))
         # While not strictly a timeout, callers know how to handle
         # this exception and a timeout isn't too big a lie.
@@ -805,8 +773,8 @@ def fanout_cast(conf, context, topic, msg, **kwargs):
 
 
 def notify(conf, context, topic, msg, envelope):
-    """
-    Send notification event.
+    """Send notification event.
+
     Notifications are sent to topic-priority.
     This differs from the AMQP drivers which send to topic.priority.
     """
@@ -840,6 +808,11 @@ def _get_ctxt():
 def _get_matchmaker(*args, **kwargs):
     global matchmaker
     if not matchmaker:
-        matchmaker = importutils.import_object(
-            CONF.rpc_zmq_matchmaker, *args, **kwargs)
+        mm = CONF.rpc_zmq_matchmaker
+        if mm.endswith('matchmaker.MatchMakerRing'):
+            mm.replace('matchmaker', 'matchmaker_ring')
+            LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
+                       ' %(new)s instead') % dict(
+                     orig=CONF.rpc_zmq_matchmaker, new=mm))
+        matchmaker = importutils.import_object(mm, *args, **kwargs)
     return matchmaker
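
One caveat in the _get_matchmaker() hunk above: str.replace() returns a new string and its result is discarded, so mm still holds the old path when it reaches import_object() and only the deprecation warning actually takes effect. What the rename presumably intends, with an illustrative class path:

    mm = 'designate.openstack.common.rpc.matchmaker.MatchMakerRing'
    if mm.endswith('matchmaker.MatchMakerRing'):
        # Assignment is the missing piece in the synced code.
        mm = mm.replace('matchmaker.MatchMakerRing',
                        'matchmaker_ring.MatchMakerRing')
    print(mm)  # ...rpc.matchmaker_ring.MatchMakerRing
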
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 Cloudscaling Group, Inc
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,8 +17,6 @@ return keys for direct exchanges, per (approximate) AMQP parlance.
 """
 
 import contextlib
-import itertools
-import json
 
 import eventlet
 from oslo.config import cfg
@@ -30,10 +26,6 @@ from designate.openstack.common import log as logging
 
 
 matchmaker_opts = [
-    # Matchmaker ring file
-    cfg.StrOpt('matchmaker_ringfile',
-               default='/etc/nova/matchmaker_ring.json',
-               help='Matchmaker ring file (JSON)'),
     cfg.IntOpt('matchmaker_heartbeat_freq',
                default=300,
                help='Heartbeat frequency'),
@@ -54,8 +46,8 @@ class MatchMakerException(Exception):
 
 
 class Exchange(object):
-    """
-    Implements lookups.
+    """Implements lookups.
+
     Subclass this to support hashtables, dns, etc.
     """
     def __init__(self):
@@ -66,9 +58,7 @@ class Exchange(object):
 
 
 class Binding(object):
-    """
-    A binding on which to perform a lookup.
-    """
+    """A binding on which to perform a lookup."""
     def __init__(self):
         pass
 
@@ -77,10 +67,10 @@ class Binding(object):
 
 
 class MatchMakerBase(object):
-    """
-    Match Maker Base Class.
-    Build off HeartbeatMatchMakerBase if building a
-    heartbeat-capable MatchMaker.
+    """Match Maker Base Class.
+
+    Build off HeartbeatMatchMakerBase if building a heartbeat-capable
+    MatchMaker.
     """
     def __init__(self):
         # Array of tuples. Index [2] toggles negation, [3] is last-if-true
@@ -90,58 +80,47 @@ class MatchMakerBase(object):
                 'registration or heartbeat.')
 
     def register(self, key, host):
-        """
-        Register a host on a backend.
+        """Register a host on a backend.
+
         Heartbeats, if applicable, may keepalive registration.
         """
         pass
 
     def ack_alive(self, key, host):
-        """
-        Acknowledge that a key.host is alive.
-        Used internally for updating heartbeats,
-        but may also be used publically to acknowledge
-        a system is alive (i.e. rpc message successfully
-        sent to host)
+        """Acknowledge that a key.host is alive.
+
+        Used internally for updating heartbeats, but may also be used
+        publicly to acknowledge a system is alive (i.e. rpc message
+        successfully sent to host)
         """
         pass
 
     def is_alive(self, topic, host):
-        """
-        Checks if a host is alive.
-        """
+        """Checks if a host is alive."""
         pass
 
     def expire(self, topic, host):
-        """
-        Explicitly expire a host's registration.
-        """
+        """Explicitly expire a host's registration."""
         pass
 
     def send_heartbeats(self):
-        """
-        Send all heartbeats.
+        """Send all heartbeats.
+
         Use start_heartbeat to spawn a heartbeat greenthread,
         which loops this method.
         """
         pass
 
     def unregister(self, key, host):
-        """
-        Unregister a topic.
-        """
+        """Unregister a topic."""
         pass
 
     def start_heartbeat(self):
-        """
-        Spawn heartbeat greenthread.
-        """
+        """Spawn heartbeat greenthread."""
        pass
 
     def stop_heartbeat(self):
-        """
-        Destroys the heartbeat greenthread.
-        """
+        """Destroys the heartbeat greenthread."""
         pass
 
     def add_binding(self, binding, rule, last=True):
@@ -168,10 +147,10 @@ class MatchMakerBase(object):
 
 
 class HeartbeatMatchMakerBase(MatchMakerBase):
-    """
-    Base for a heart-beat capable MatchMaker.
-    Provides common methods for registering,
-    unregistering, and maintaining heartbeats.
+    """Base for a heart-beat capable MatchMaker.
+
+    Provides common methods for registering, unregistering, and maintaining
+    heartbeats.
     """
     def __init__(self):
         self.hosts = set()
@@ -181,8 +160,8 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
         super(HeartbeatMatchMakerBase, self).__init__()
 
     def send_heartbeats(self):
-        """
-        Send all heartbeats.
+        """Send all heartbeats.
+
         Use start_heartbeat to spawn a heartbeat greenthread,
         which loops this method.
         """
@@ -190,32 +169,31 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
             self.ack_alive(key, host)
 
     def ack_alive(self, key, host):
-        """
-        Acknowledge that a host.topic is alive.
-        Used internally for updating heartbeats,
-        but may also be used publically to acknowledge
-        a system is alive (i.e. rpc message successfully
-        sent to host)
+        """Acknowledge that a host.topic is alive.
+
+        Used internally for updating heartbeats, but may also be used
+        publicly to acknowledge a system is alive (i.e. rpc message
+        successfully sent to host)
         """
         raise NotImplementedError("Must implement ack_alive")
 
     def backend_register(self, key, host):
-        """
-        Implements registration logic.
+        """Implements registration logic.
+
         Called by register(self,key,host)
         """
         raise NotImplementedError("Must implement backend_register")
 
     def backend_unregister(self, key, key_host):
-        """
-        Implements de-registration logic.
+        """Implements de-registration logic.
+
         Called by unregister(self,key,host)
         """
         raise NotImplementedError("Must implement backend_unregister")
 
     def register(self, key, host):
-        """
-        Register a host on a backend.
+        """Register a host on a backend.
+
         Heartbeats, if applicable, may keepalive registration.
         """
         self.hosts.add(host)
@@ -227,25 +205,24 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
         self.ack_alive(key, host)
 
     def unregister(self, key, host):
-        """
-        Unregister a topic.
-        """
+        """Unregister a topic."""
         if (key, host) in self.host_topic:
             del self.host_topic[(key, host)]
 
         self.hosts.discard(host)
         self.backend_unregister(key, '.'.join((key, host)))
 
-        LOG.info(_("Matchmaker unregistered: %s, %s" % (key, host)))
+        LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
+                 {'key': key, 'host': host})
 
     def start_heartbeat(self):
-        """
-        Implementation of MatchMakerBase.start_heartbeat
+        """Implementation of MatchMakerBase.start_heartbeat.
+
         Launches greenthread looping send_heartbeats(),
         yielding for CONF.matchmaker_heartbeat_freq seconds
         between iterations.
         """
-        if len(self.hosts) == 0:
+        if not self.hosts:
             raise MatchMakerException(
                 _("Register before starting heartbeat."))
 
@@ -257,45 +234,37 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
         self._heart = eventlet.spawn(do_heartbeat)
 
     def stop_heartbeat(self):
-        """
-        Destroys the heartbeat greenthread.
-        """
+        """Destroys the heartbeat greenthread."""
         if self._heart:
             self._heart.kill()
 
 
 class DirectBinding(Binding):
-    """
-    Specifies a host in the key via a '.' character
+    """Specifies a host in the key via a '.' character.
+
     Although dots are used in the key, the behavior here is
     that it maps directly to a host, thus direct.
     """
     def test(self, key):
-        if '.' in key:
-            return True
-        return False
+        return '.' in key
 
 
 class TopicBinding(Binding):
-    """
-    Where a 'bare' key without dots.
+    """Where a 'bare' key without dots.
+
     AMQP generally considers topic exchanges to be those *with* dots,
     but we deviate here in terminology as the behavior here matches
     that of a topic exchange (whereas where there are dots, behavior
     matches that of a direct exchange.
     """
     def test(self, key):
-        if '.' not in key:
-            return True
-        return False
+        return '.' not in key
 
 
 class FanoutBinding(Binding):
     """Match on fanout keys, where key starts with 'fanout.' string."""
     def test(self, key):
-        if key.startswith('fanout~'):
-            return True
-        return False
+        return key.startswith('fanout~')
 
 
 class StubExchange(Exchange):
@ -304,67 +273,6 @@ class StubExchange(Exchange):
|
|||||||
return [(key, None)]
|
return [(key, None)]
|
||||||
|
|
||||||
|
|
||||||
class RingExchange(Exchange):
|
|
||||||
"""
|
|
||||||
Match Maker where hosts are loaded from a static file containing
|
|
||||||
a hashmap (JSON formatted).
|
|
||||||
|
|
||||||
__init__ takes optional ring dictionary argument, otherwise
|
|
||||||
loads the ringfile from CONF.mathcmaker_ringfile.
|
|
||||||
"""
|
|
||||||
def __init__(self, ring=None):
|
|
||||||
super(RingExchange, self).__init__()
|
|
||||||
|
|
||||||
if ring:
|
|
||||||
self.ring = ring
|
|
||||||
else:
|
|
||||||
fh = open(CONF.matchmaker_ringfile, 'r')
|
|
||||||
self.ring = json.load(fh)
|
|
||||||
fh.close()
|
|
||||||
|
|
||||||
self.ring0 = {}
|
|
||||||
for k in self.ring.keys():
|
|
||||||
self.ring0[k] = itertools.cycle(self.ring[k])
|
|
||||||
|
|
||||||
def _ring_has(self, key):
|
|
||||||
if key in self.ring0:
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
class RoundRobinRingExchange(RingExchange):
|
|
||||||
"""A Topic Exchange based on a hashmap."""
|
|
||||||
def __init__(self, ring=None):
|
|
||||||
super(RoundRobinRingExchange, self).__init__(ring)
|
|
||||||
|
|
||||||
def run(self, key):
|
|
||||||
if not self._ring_has(key):
|
|
||||||
LOG.warn(
|
|
||||||
_("No key defining hosts for topic '%s', "
|
|
||||||
"see ringfile") % (key, )
|
|
||||||
)
|
|
||||||
return []
|
|
||||||
host = next(self.ring0[key])
|
|
||||||
return [(key + '.' + host, host)]
|
|
||||||
|
|
||||||
|
|
||||||
class FanoutRingExchange(RingExchange):
|
|
||||||
"""Fanout Exchange based on a hashmap."""
|
|
||||||
def __init__(self, ring=None):
|
|
||||||
super(FanoutRingExchange, self).__init__(ring)
|
|
||||||
|
|
||||||
def run(self, key):
|
|
||||||
# Assume starts with "fanout~", strip it for lookup.
|
|
||||||
nkey = key.split('fanout~')[1:][0]
|
|
||||||
if not self._ring_has(nkey):
|
|
||||||
LOG.warn(
|
|
||||||
_("No key defining hosts for topic '%s', "
|
|
||||||
"see ringfile") % (nkey, )
|
|
||||||
)
|
|
||||||
return []
|
|
||||||
return map(lambda x: (key + '.' + x, x), self.ring[nkey])
|
|
||||||
|
|
||||||
|
|
||||||
class LocalhostExchange(Exchange):
|
class LocalhostExchange(Exchange):
|
||||||
"""Exchange where all direct topics are local."""
|
"""Exchange where all direct topics are local."""
|
||||||
def __init__(self, host='localhost'):
|
def __init__(self, host='localhost'):
|
||||||
@ -376,8 +284,8 @@ class LocalhostExchange(Exchange):
|
|||||||
|
|
||||||
|
|
||||||
class DirectExchange(Exchange):
|
class DirectExchange(Exchange):
|
||||||
"""
|
"""Exchange where all topic keys are split, sending to second half.
|
||||||
Exchange where all topic keys are split, sending to second half.
|
|
||||||
i.e. "compute.host" sends a message to "compute.host" running on "host"
|
i.e. "compute.host" sends a message to "compute.host" running on "host"
|
||||||
"""
|
"""
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
@ -388,20 +296,9 @@ class DirectExchange(Exchange):
|
|||||||
return [(key, e)]
|
return [(key, e)]
|
||||||
|
|
||||||
|
|
||||||
class MatchMakerRing(MatchMakerBase):
|
|
||||||
"""
|
|
||||||
Match Maker where hosts are loaded from a static hashmap.
|
|
||||||
"""
|
|
||||||
def __init__(self, ring=None):
|
|
||||||
super(MatchMakerRing, self).__init__()
|
|
||||||
self.add_binding(FanoutBinding(), FanoutRingExchange(ring))
|
|
||||||
self.add_binding(DirectBinding(), DirectExchange())
|
|
||||||
self.add_binding(TopicBinding(), RoundRobinRingExchange(ring))
|
|
||||||
|
|
||||||
|
|
||||||
class MatchMakerLocalhost(MatchMakerBase):
|
class MatchMakerLocalhost(MatchMakerBase):
|
||||||
"""
|
"""Match Maker where all bare topics resolve to localhost.
|
||||||
Match Maker where all bare topics resolve to localhost.
|
|
||||||
Useful for testing.
|
Useful for testing.
|
||||||
"""
|
"""
|
||||||
def __init__(self, host='localhost'):
|
def __init__(self, host='localhost'):
|
||||||
@ -412,13 +309,13 @@ class MatchMakerLocalhost(MatchMakerBase):
|
|||||||
|
|
||||||
|
|
||||||
class MatchMakerStub(MatchMakerBase):
|
class MatchMakerStub(MatchMakerBase):
|
||||||
"""
|
"""Match Maker where topics are untouched.
|
||||||
Match Maker where topics are untouched.
|
|
||||||
Useful for testing, or for AMQP/brokered queues.
|
Useful for testing, or for AMQP/brokered queues.
|
||||||
Will not work where knowledge of hosts is known (i.e. zeromq)
|
Will not work where knowledge of hosts is known (i.e. zeromq)
|
||||||
"""
|
"""
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(MatchMakerLocalhost, self).__init__()
|
super(MatchMakerStub, self).__init__()
|
||||||
|
|
||||||
self.add_binding(FanoutBinding(), StubExchange())
|
self.add_binding(FanoutBinding(), StubExchange())
|
||||||
self.add_binding(DirectBinding(), StubExchange())
|
self.add_binding(DirectBinding(), StubExchange())
|
||||||
|
@ -1,5 +1,3 @@
|
|||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Cloudscaling Group, Inc
|
# Copyright 2013 Cloudscaling Group, Inc
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
@ -55,8 +53,8 @@ class RedisExchange(mm_common.Exchange):
|
|||||||
|
|
||||||
|
|
||||||
class RedisTopicExchange(RedisExchange):
|
class RedisTopicExchange(RedisExchange):
|
||||||
"""
|
"""Exchange where all topic keys are split, sending to second half.
|
||||||
Exchange where all topic keys are split, sending to second half.
|
|
||||||
i.e. "compute.host" sends a message to "compute" running on "host"
|
i.e. "compute.host" sends a message to "compute" running on "host"
|
||||||
"""
|
"""
|
||||||
def run(self, topic):
|
def run(self, topic):
|
||||||
@ -77,9 +75,7 @@ class RedisTopicExchange(RedisExchange):
|
|||||||
|
|
||||||
|
|
||||||
class RedisFanoutExchange(RedisExchange):
|
class RedisFanoutExchange(RedisExchange):
|
||||||
"""
|
"""Return a list of all hosts."""
|
||||||
Return a list of all hosts.
|
|
||||||
"""
|
|
||||||
def run(self, topic):
|
def run(self, topic):
|
||||||
topic = topic.split('~', 1)[1]
|
topic = topic.split('~', 1)[1]
|
||||||
hosts = self.redis.smembers(topic)
|
hosts = self.redis.smembers(topic)
|
||||||
@ -90,16 +86,14 @@ class RedisFanoutExchange(RedisExchange):
|
|||||||
|
|
||||||
|
|
||||||
class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
|
class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
|
||||||
"""
|
"""MatchMaker registering and looking-up hosts with a Redis server."""
|
||||||
MatchMaker registering and looking-up hosts with a Redis server.
|
|
||||||
"""
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(MatchMakerRedis, self).__init__()
|
super(MatchMakerRedis, self).__init__()
|
||||||
|
|
||||||
if not redis:
|
if not redis:
|
||||||
raise ImportError("Failed to import module redis.")
|
raise ImportError("Failed to import module redis.")
|
||||||
|
|
||||||
self.redis = redis.StrictRedis(
|
self.redis = redis.Redis(
|
||||||
host=CONF.matchmaker_redis.host,
|
host=CONF.matchmaker_redis.host,
|
||||||
port=CONF.matchmaker_redis.port,
|
port=CONF.matchmaker_redis.port,
|
||||||
password=CONF.matchmaker_redis.password)
|
password=CONF.matchmaker_redis.password)
|
||||||
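
The Binding changes above collapse each test() into a single boolean expression. A standalone sketch of the resulting routing predicates (re-implemented here for illustration; nothing is imported from the module itself):

class DirectBinding(object):
    def test(self, key):
        # A dotted key maps directly to a single host.
        return '.' in key


class TopicBinding(object):
    def test(self, key):
        # A bare key (no dots) behaves like a topic exchange.
        return '.' not in key


class FanoutBinding(object):
    def test(self, key):
        # Fanout keys carry a 'fanout~' prefix.
        return key.startswith('fanout~')


assert DirectBinding().test('compute.host1')
assert TopicBinding().test('compute')
assert FanoutBinding().test('fanout~compute')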
designate/openstack/common/rpc/matchmaker_ring.py (new file)
@@ -0,0 +1,106 @@
+# Copyright 2011-2013 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+The MatchMaker classes should except a Topic or Fanout exchange key and
+return keys for direct exchanges, per (approximate) AMQP parlance.
+"""
+
+import itertools
+import json
+
+from oslo.config import cfg
+
+from designate.openstack.common.gettextutils import _
+from designate.openstack.common import log as logging
+from designate.openstack.common.rpc import matchmaker as mm
+
+
+matchmaker_opts = [
+    # Matchmaker ring file
+    cfg.StrOpt('ringfile',
+               deprecated_name='matchmaker_ringfile',
+               deprecated_group='DEFAULT',
+               default='/etc/oslo/matchmaker_ring.json',
+               help='Matchmaker ring file (JSON)'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
+LOG = logging.getLogger(__name__)
+
+
+class RingExchange(mm.Exchange):
+    """Match Maker where hosts are loaded from a static JSON formatted file.
+
+    __init__ takes optional ring dictionary argument, otherwise
+    loads the ringfile from CONF.mathcmaker_ringfile.
+    """
+    def __init__(self, ring=None):
+        super(RingExchange, self).__init__()
+
+        if ring:
+            self.ring = ring
+        else:
+            fh = open(CONF.matchmaker_ring.ringfile, 'r')
+            self.ring = json.load(fh)
+            fh.close()
+
+        self.ring0 = {}
+        for k in self.ring.keys():
+            self.ring0[k] = itertools.cycle(self.ring[k])
+
+    def _ring_has(self, key):
+        return key in self.ring0
+
+
+class RoundRobinRingExchange(RingExchange):
+    """A Topic Exchange based on a hashmap."""
+    def __init__(self, ring=None):
+        super(RoundRobinRingExchange, self).__init__(ring)
+
+    def run(self, key):
+        if not self._ring_has(key):
+            LOG.warn(
+                _("No key defining hosts for topic '%s', "
+                  "see ringfile") % (key, )
+            )
+            return []
+        host = next(self.ring0[key])
+        return [(key + '.' + host, host)]
+
+
+class FanoutRingExchange(RingExchange):
+    """Fanout Exchange based on a hashmap."""
+    def __init__(self, ring=None):
+        super(FanoutRingExchange, self).__init__(ring)
+
+    def run(self, key):
+        # Assume starts with "fanout~", strip it for lookup.
+        nkey = key.split('fanout~')[1:][0]
+        if not self._ring_has(nkey):
+            LOG.warn(
+                _("No key defining hosts for topic '%s', "
+                  "see ringfile") % (nkey, )
+            )
+            return []
+        return map(lambda x: (key + '.' + x, x), self.ring[nkey])
+
+
+class MatchMakerRing(mm.MatchMakerBase):
+    """Match Maker where hosts are loaded from a static hashmap."""
+    def __init__(self, ring=None):
+        super(MatchMakerRing, self).__init__()
+        self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring))
+        self.add_binding(mm.DirectBinding(), mm.DirectExchange())
+        self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring))
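
The new module preserves the round-robin behavior removed from matchmaker.py, now configured via the [matchmaker_ring] group. A minimal sketch of the itertools.cycle-based lookup, using an in-memory ring instead of the JSON ringfile:

import itertools

ring = {'compute': ['host1', 'host2']}
ring0 = dict((k, itertools.cycle(v)) for k, v in ring.items())

def run(key):
    # Mirrors RoundRobinRingExchange.run(): unknown topics yield no hosts,
    # known topics rotate through their host list.
    if key not in ring0:
        return []
    host = next(ring0[key])
    return [(key + '.' + host, host)]

assert run('compute') == [('compute.host1', 'host1')]
assert run('compute') == [('compute.host2', 'host2')]
assert run('network') == []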
designate/openstack/common/rpc/proxy.py
@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Red Hat, Inc.
+# Copyright 2012-2013 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -21,8 +19,11 @@ For more information about rpc API version numbers, see:
     rpc/dispatcher.py
 """
 
+import six
+
 from designate.openstack.common import rpc
+from designate.openstack.common.rpc import common as rpc_common
+from designate.openstack.common.rpc import serializer as rpc_serializer
 
 
 class RpcProxy(object):
@@ -34,16 +35,28 @@ class RpcProxy(object):
     rpc API.
     """
 
-    def __init__(self, topic, default_version):
+    # The default namespace, which can be overridden in a subclass.
+    RPC_API_NAMESPACE = None
+
+    def __init__(self, topic, default_version, version_cap=None,
+                 serializer=None):
         """Initialize an RpcProxy.
 
         :param topic: The topic to use for all messages.
         :param default_version: The default API version to request in all
                outgoing messages.  This can be overridden on a per-message
               basis.
+        :param version_cap: Optionally cap the maximum version used for sent
+               messages.
+        :param serializer: Optionaly (de-)serialize entities with a
+               provided helper.
         """
         self.topic = topic
         self.default_version = default_version
+        self.version_cap = version_cap
+        if serializer is None:
+            serializer = rpc_serializer.NoOpSerializer()
+        self.serializer = serializer
         super(RpcProxy, self).__init__()
 
     def _set_version(self, msg, vers):
@@ -52,15 +65,44 @@ class RpcProxy(object):
         :param msg: The message having a version added to it.
         :param vers: The version number to add to the message.
         """
-        msg['version'] = vers if vers else self.default_version
+        v = vers if vers else self.default_version
+        if (self.version_cap and not
+                rpc_common.version_is_compatible(self.version_cap, v)):
+            raise rpc_common.RpcVersionCapError(version_cap=self.version_cap)
+        msg['version'] = v
 
     def _get_topic(self, topic):
         """Return the topic to use for a message."""
         return topic if topic else self.topic
 
+    def can_send_version(self, version):
+        """Check to see if a version is compatible with the version cap."""
+        return (not self.version_cap or
+                rpc_common.version_is_compatible(self.version_cap, version))
+
     @staticmethod
-    def make_msg(method, **kwargs):
-        return {'method': method, 'args': kwargs}
+    def make_namespaced_msg(method, namespace, **kwargs):
+        return {'method': method, 'namespace': namespace, 'args': kwargs}
+
+    def make_msg(self, method, **kwargs):
+        return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
+                                        **kwargs)
+
+    def _serialize_msg_args(self, context, kwargs):
+        """Helper method called to serialize message arguments.
+
+        This calls our serializer on each argument, returning a new
+        set of args that have been serialized.
+
+        :param context: The request context
+        :param kwargs: The arguments to serialize
+        :returns: A new set of serialized arguments
+        """
+        new_kwargs = dict()
+        for argname, arg in six.iteritems(kwargs):
+            new_kwargs[argname] = self.serializer.serialize_entity(context,
+                                                                   arg)
+        return new_kwargs
+
     def call(self, context, msg, topic=None, version=None, timeout=None):
         """rpc.call() a remote method.
@@ -68,16 +110,23 @@ class RpcProxy(object):
         :param context: The request context
         :param msg: The message to send, including the method and args.
         :param topic: Override the topic for this message.
+        :param version: (Optional) Override the requested API version in this
+               message.
         :param timeout: (Optional) A timeout to use when waiting for the
               response.  If no timeout is specified, a default timeout will be
               used that is usually sufficient.
-        :param version: (Optional) Override the requested API version in this
-               message.
 
         :returns: The return value from the remote method.
         """
         self._set_version(msg, version)
-        return rpc.call(context, self._get_topic(topic), msg, timeout)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
+        real_topic = self._get_topic(topic)
+        try:
+            result = rpc.call(context, real_topic, msg, timeout)
+            return self.serializer.deserialize_entity(context, result)
+        except rpc.common.Timeout as exc:
+            raise rpc.common.Timeout(
+                exc.info, real_topic, msg.get('method'))
 
     def multicall(self, context, msg, topic=None, version=None, timeout=None):
         """rpc.multicall() a remote method.
@@ -85,17 +134,24 @@ class RpcProxy(object):
         :param context: The request context
         :param msg: The message to send, including the method and args.
         :param topic: Override the topic for this message.
+        :param version: (Optional) Override the requested API version in this
+               message.
         :param timeout: (Optional) A timeout to use when waiting for the
               response.  If no timeout is specified, a default timeout will be
              used that is usually sufficient.
-        :param version: (Optional) Override the requested API version in this
-               message.
 
         :returns: An iterator that lets you process each of the returned values
                  from the remote method as they arrive.
         """
         self._set_version(msg, version)
-        return rpc.multicall(context, self._get_topic(topic), msg, timeout)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
+        real_topic = self._get_topic(topic)
+        try:
+            result = rpc.multicall(context, real_topic, msg, timeout)
+            return self.serializer.deserialize_entity(context, result)
+        except rpc.common.Timeout as exc:
+            raise rpc.common.Timeout(
+                exc.info, real_topic, msg.get('method'))
 
     def cast(self, context, msg, topic=None, version=None):
         """rpc.cast() a remote method.
@@ -110,6 +166,7 @@ class RpcProxy(object):
                remote method.
         """
         self._set_version(msg, version)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
         rpc.cast(context, self._get_topic(topic), msg)
 
     def fanout_cast(self, context, msg, topic=None, version=None):
@@ -125,6 +182,7 @@ class RpcProxy(object):
                from the remote method.
         """
         self._set_version(msg, version)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
         rpc.fanout_cast(context, self._get_topic(topic), msg)
 
     def cast_to_server(self, context, server_params, msg, topic=None,
@@ -143,6 +201,7 @@ class RpcProxy(object):
                return values.
         """
         self._set_version(msg, version)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
         rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)
 
     def fanout_cast_to_server(self, context, server_params, msg, topic=None,
@@ -161,5 +220,6 @@ class RpcProxy(object):
                return values.
         """
         self._set_version(msg, version)
+        msg['args'] = self._serialize_msg_args(context, msg['args'])
         rpc.fanout_cast_to_server(context, server_params,
                                   self._get_topic(topic), msg)
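
The version_cap added to RpcProxy refuses to send any message whose requested version exceeds the cap. A sketch of that check, assuming the usual major.minor semantics behind rpc_common.version_is_compatible():

def version_is_compatible(imp_version, version):
    # Same major version, and the requested minor must not exceed
    # the implemented (capped) minor.
    imp_major, imp_minor = map(int, imp_version.split('.'))
    major, minor = map(int, version.split('.'))
    return major == imp_major and minor <= imp_minor

version_cap = '1.3'
assert version_is_compatible(version_cap, '1.2')       # sent normally
assert not version_is_compatible(version_cap, '1.4')   # RpcVersionCapError
assert not version_is_compatible(version_cap, '2.0')   # major mismatch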
designate/openstack/common/rpc/serializer.py (new file)
@@ -0,0 +1,54 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Provides the definition of an RPC serialization handler"""
+
+import abc
+
+import six
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Serializer(object):
+    """Generic (de-)serialization definition base class."""
+
+    @abc.abstractmethod
+    def serialize_entity(self, context, entity):
+        """Serialize something to primitive form.
+
+        :param context: Security context
+        :param entity: Entity to be serialized
+        :returns: Serialized form of entity
+        """
+        pass
+
+    @abc.abstractmethod
+    def deserialize_entity(self, context, entity):
+        """Deserialize something from primitive form.
+
+        :param context: Security context
+        :param entity: Primitive to be deserialized
+        :returns: Deserialized form of entity
+        """
+        pass
+
+
+class NoOpSerializer(Serializer):
+    """A serializer that does nothing."""
+
+    def serialize_entity(self, context, entity):
+        return entity
+
+    def deserialize_entity(self, context, entity):
+        return entity
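
Custom serializers subclass the abstract base above; NoOpSerializer is what RpcProxy falls back to when none is given. A hypothetical JSON-backed subclass, purely for illustration:

import json

# 'object' stands in for the Serializer ABC above so the sketch runs alone.
class JsonSerializer(object):
    def serialize_entity(self, context, entity):
        return json.dumps(entity)

    def deserialize_entity(self, context, entity):
        return json.loads(entity)

s = JsonSerializer()
wire = s.serialize_entity(None, {'id': 1})
assert s.deserialize_entity(None, wire) == {'id': 1}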
designate/openstack/common/rpc/service.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -30,11 +28,13 @@ LOG = logging.getLogger(__name__)
 class Service(service.Service):
     """Service object for binaries running on hosts.
 
-    A service enables rpc by listening to queues based on topic and host."""
-    def __init__(self, host, topic, manager=None):
+    A service enables rpc by listening to queues based on topic and host.
+    """
+    def __init__(self, host, topic, manager=None, serializer=None):
         super(Service, self).__init__()
         self.host = host
         self.topic = topic
+        self.serializer = serializer
         if manager is None:
             self.manager = self
         else:
@@ -47,7 +47,8 @@ class Service(service.Service):
         LOG.debug(_("Creating Consumer connection for Service %s") %
                   self.topic)
 
-        dispatcher = rpc_dispatcher.RpcDispatcher([self.manager])
+        dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
+                                                  self.serializer)
 
         # Share this same connection for these Consumers
         self.conn.create_consumer(self.topic, dispatcher, fanout=False)
@@ -1,5 +1,4 @@
-# Copyright 2012 Red Hat, Inc.
-# All Rights Reserved.
+# Copyright 2011 OpenStack Foundation
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -13,17 +12,27 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from designate.openstack.common.gettextutils import _
+import eventlet
+eventlet.monkey_patch()
+
+import contextlib
+import sys
+
+from oslo.config import cfg
+
 from designate.openstack.common import log as logging
-from designate.openstack.common.notifier import rpc_notifier
+from designate.openstack.common import rpc
+from designate.openstack.common.rpc import impl_zmq
 
-LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+CONF.register_opts(rpc.rpc_opts)
+CONF.register_opts(impl_zmq.zmq_opts)
 
 
-def notify(context, message):
-    """Deprecated in Grizzly. Please use rpc_notifier instead."""
+def main():
+    CONF(sys.argv[1:], project='oslo')
+    logging.setup("oslo")
 
-    LOG.deprecated(_("The rabbit_notifier is now deprecated."
-                     " Please use rpc_notifier instead."))
-    rpc_notifier.notify(context, message)
+    with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
+        reactor.consume_in_thread()
+        reactor.wait()
designate/openstack/common/service.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2011 Justin Santa Barbara
@@ -20,14 +18,23 @@
 """Generic Node base class for all workers that run on hosts."""
 
 import errno
+import logging as std_logging
 import os
 import random
 import signal
 import sys
+import threading
 import time
 
+try:
+    # Importing just the symbol here because the io module does not
+    # exist in Python 2.6.
+    from io import UnsupportedOperation  # noqa
+except ImportError:
+    # Python 2.6
+    UnsupportedOperation = None
+
 import eventlet
-import logging as std_logging
 from oslo.config import cfg
 
 from designate.openstack.common import eventlet_backdoor
@@ -42,6 +49,53 @@ CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
 
+def _sighup_supported():
+    return hasattr(signal, 'SIGHUP')
+
+
+def _is_daemon():
+    # The process group for a foreground process will match the
+    # process group of the controlling terminal. If those values do
+    # not match, or ioctl() fails on the stdout file handle, we assume
+    # the process is running in the background as a daemon.
+    # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
+    try:
+        is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
+    except OSError as err:
+        if err.errno == errno.ENOTTY:
+            # Assume we are a daemon because there is no terminal.
+            is_daemon = True
+        else:
+            raise
+    except UnsupportedOperation:
+        # Could not get the fileno for stdout, so we must be a daemon.
+        is_daemon = True
+    return is_daemon
+
+
+def _is_sighup_and_daemon(signo):
+    if not (_sighup_supported() and signo == signal.SIGHUP):
+        # Avoid checking if we are a daemon, because the signal isn't
+        # SIGHUP.
+        return False
+    return _is_daemon()
+
+
+def _signo_to_signame(signo):
+    signals = {signal.SIGTERM: 'SIGTERM',
+               signal.SIGINT: 'SIGINT'}
+    if _sighup_supported():
+        signals[signal.SIGHUP] = 'SIGHUP'
+    return signals[signo]
+
+
+def _set_signals_handler(handler):
+    signal.signal(signal.SIGTERM, handler)
+    signal.signal(signal.SIGINT, handler)
+    if _sighup_supported():
+        signal.signal(signal.SIGHUP, handler)
+
+
 class Launcher(object):
     """Launch one or more services and wait for them to complete."""
 
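
The helpers above gate the new SIGHUP-restart behavior: only a daemonized process treats SIGHUP as "reload"; everything else exits. A standalone sketch of that gate, with the terminal probe stubbed out so it runs anywhere:

import signal

def _sighup_supported():
    return hasattr(signal, 'SIGHUP')

def _is_sighup_and_daemon(signo, is_daemon):
    # is_daemon replaces the terminal probe done by _is_daemon() above.
    if not (_sighup_supported() and signo == signal.SIGHUP):
        return False
    return is_daemon

if _sighup_supported():
    assert _is_sighup_and_daemon(signal.SIGHUP, is_daemon=True)
    assert not _is_sighup_and_daemon(signal.SIGHUP, is_daemon=False)
assert not _is_sighup_and_daemon(signal.SIGTERM, is_daemon=True)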
@@ -51,19 +105,8 @@ class Launcher(object):
         :returns: None
 
         """
-        self._services = threadgroup.ThreadGroup()
-        eventlet_backdoor.initialize_if_enabled()
-
-    @staticmethod
-    def run_service(service):
-        """Start and wait for a service to finish.
-
-        :param service: service to run and wait for.
-        :returns: None
-
-        """
-        service.start()
-        service.wait()
+        self.services = Services()
+        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
 
     def launch_service(self, service):
         """Load and start the given service.
@@ -72,7 +115,8 @@ class Launcher(object):
         :returns: None
 
         """
-        self._services.add_thread(self.run_service, service)
+        service.backdoor_port = self.backdoor_port
+        self.services.add(service)
 
     def stop(self):
         """Stop all services which are currently running.
@@ -80,7 +124,7 @@ class Launcher(object):
         :returns: None
 
         """
-        self._services.stop()
+        self.services.stop()
 
     def wait(self):
         """Waits until all services have been stopped, and then returns.
@@ -88,7 +132,16 @@ class Launcher(object):
         :returns: None
 
         """
-        self._services.wait()
+        self.services.wait()
+
+    def restart(self):
+        """Reload config files and restart service.
+
+        :returns: None
+
+        """
+        cfg.CONF.reload_config_files()
+        self.services.restart()
 
 
 class SignalExit(SystemExit):
@@ -100,33 +153,48 @@ class SignalExit(SystemExit):
 class ServiceLauncher(Launcher):
     def _handle_signal(self, signo, frame):
         # Allow the process to be killed again and die from natural causes
-        signal.signal(signal.SIGTERM, signal.SIG_DFL)
-        signal.signal(signal.SIGINT, signal.SIG_DFL)
+        _set_signals_handler(signal.SIG_DFL)
 
         raise SignalExit(signo)
 
-    def wait(self):
-        signal.signal(signal.SIGTERM, self._handle_signal)
-        signal.signal(signal.SIGINT, self._handle_signal)
+    def handle_signal(self):
+        _set_signals_handler(self._handle_signal)
+
+    def _wait_for_exit_or_signal(self, ready_callback=None):
+        status = None
+        signo = 0
 
         LOG.debug(_('Full set of CONF:'))
         CONF.log_opt_values(LOG, std_logging.DEBUG)
 
-        status = None
         try:
+            if ready_callback:
+                ready_callback()
             super(ServiceLauncher, self).wait()
         except SignalExit as exc:
-            signame = {signal.SIGTERM: 'SIGTERM',
-                       signal.SIGINT: 'SIGINT'}[exc.signo]
+            signame = _signo_to_signame(exc.signo)
             LOG.info(_('Caught %s, exiting'), signame)
             status = exc.code
+            signo = exc.signo
         except SystemExit as exc:
             status = exc.code
         finally:
-            if rpc:
-                rpc.cleanup()
             self.stop()
-        return status
+            if rpc:
+                try:
+                    rpc.cleanup()
+                except Exception:
+                    # We're shutting down, so it doesn't matter at this point.
+                    LOG.exception(_('Exception during rpc cleanup.'))
+
+        return status, signo
+
+    def wait(self, ready_callback=None):
+        while True:
+            self.handle_signal()
+            status, signo = self._wait_for_exit_or_signal(ready_callback)
+            if not _is_sighup_and_daemon(signo):
+                return status
+            self.restart()
 
 
 class ServiceWrapper(object):
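
ServiceLauncher.wait() now loops instead of returning on the first signal: a SIGHUP in a daemon triggers restart(), anything else returns the exit status. A toy model of that loop:

SIGHUP = 1  # stand-in; the real code uses the signal module

class FakeLauncher(object):
    def __init__(self, events):
        self.events = list(events)  # (status, signo) per iteration
        self.restarts = 0

    def _wait_for_exit_or_signal(self):
        return self.events.pop(0)

    def restart(self):
        self.restarts += 1

    def wait(self):
        while True:
            status, signo = self._wait_for_exit_or_signal()
            if signo != SIGHUP:  # _is_sighup_and_daemon(), simplified
                return status
            self.restart()

launcher = FakeLauncher([(None, SIGHUP), (0, 0)])
assert launcher.wait() == 0
assert launcher.restarts == 1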
@@ -138,23 +206,29 @@ class ServiceWrapper(object):
 
 
 class ProcessLauncher(object):
-    def __init__(self):
+    def __init__(self, wait_interval=0.01):
+        """Constructor.
+
+        :param wait_interval: The interval to sleep for between checks
+                              of child process exit.
+        """
         self.children = {}
         self.sigcaught = None
         self.running = True
+        self.wait_interval = wait_interval
         rfd, self.writepipe = os.pipe()
         self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
+        self.handle_signal()
 
-        signal.signal(signal.SIGTERM, self._handle_signal)
-        signal.signal(signal.SIGINT, self._handle_signal)
+    def handle_signal(self):
+        _set_signals_handler(self._handle_signal)
 
     def _handle_signal(self, signo, frame):
         self.sigcaught = signo
         self.running = False
 
         # Allow the process to be killed again and die from natural causes
-        signal.signal(signal.SIGTERM, signal.SIG_DFL)
-        signal.signal(signal.SIGINT, signal.SIG_DFL)
+        _set_signals_handler(signal.SIG_DFL)
 
     def _pipe_watcher(self):
         # This will block until the write end is closed when the parent
@@ -165,16 +239,49 @@ class ProcessLauncher(object):
 
         sys.exit(1)
 
-    def _child_process(self, service):
+    def _child_process_handle_signal(self):
         # Setup child signal handlers differently
         def _sigterm(*args):
             signal.signal(signal.SIGTERM, signal.SIG_DFL)
             raise SignalExit(signal.SIGTERM)
 
+        def _sighup(*args):
+            signal.signal(signal.SIGHUP, signal.SIG_DFL)
+            raise SignalExit(signal.SIGHUP)
+
         signal.signal(signal.SIGTERM, _sigterm)
+        if _sighup_supported():
+            signal.signal(signal.SIGHUP, _sighup)
         # Block SIGINT and let the parent send us a SIGTERM
         signal.signal(signal.SIGINT, signal.SIG_IGN)
 
+    def _child_wait_for_exit_or_signal(self, launcher):
+        status = 0
+        signo = 0
+
+        # NOTE(johannes): All exceptions are caught to ensure this
+        # doesn't fallback into the loop spawning children. It would
+        # be bad for a child to spawn more children.
+        try:
+            launcher.wait()
+        except SignalExit as exc:
+            signame = _signo_to_signame(exc.signo)
+            LOG.info(_('Caught %s, exiting'), signame)
+            status = exc.code
+            signo = exc.signo
+        except SystemExit as exc:
+            status = exc.code
+        except BaseException:
+            LOG.exception(_('Unhandled exception'))
+            status = 2
+        finally:
+            launcher.stop()
+
+        return status, signo
+
+    def _child_process(self, service):
+        self._child_process_handle_signal()
+
         # Reopen the eventlet hub to make sure we don't share an epoll
         # fd with parent and/or siblings, which would be bad
         eventlet.hubs.use_hub()
@@ -188,7 +295,8 @@ class ProcessLauncher(object):
         random.seed()
 
         launcher = Launcher()
-        launcher.run_service(service)
+        launcher.launch_service(service)
+        return launcher
 
     def _start_child(self, wrap):
         if len(wrap.forktimes) > wrap.workers:
@@ -206,24 +314,13 @@ class ProcessLauncher(object):
 
         pid = os.fork()
         if pid == 0:
-            # NOTE(johannes): All exceptions are caught to ensure this
-            # doesn't fallback into the loop spawning children. It would
-            # be bad for a child to spawn more children.
-            status = 0
-            try:
-                self._child_process(wrap.service)
-            except SignalExit as exc:
-                signame = {signal.SIGTERM: 'SIGTERM',
-                           signal.SIGINT: 'SIGINT'}[exc.signo]
-                LOG.info(_('Caught %s, exiting'), signame)
-                status = exc.code
-            except SystemExit as exc:
-                status = exc.code
-            except BaseException:
-                LOG.exception(_('Unhandled exception'))
-                status = 2
-            finally:
-                wrap.service.stop()
+            launcher = self._child_process(wrap.service)
+            while True:
+                self._child_process_handle_signal()
+                status, signo = self._child_wait_for_exit_or_signal(launcher)
+                if not _is_sighup_and_daemon(signo):
+                    break
+                launcher.restart()
 
             os._exit(status)
@@ -269,28 +366,37 @@ class ProcessLauncher(object):
             wrap.children.remove(pid)
             return wrap
 
-    def wait(self):
-        """Loop waiting on children to die and respawning as necessary"""
-
-        LOG.debug(_('Full set of CONF:'))
-        CONF.log_opt_values(LOG, std_logging.DEBUG)
-
+    def _respawn_children(self):
         while self.running:
             wrap = self._wait_child()
             if not wrap:
                 # Yield to other threads if no children have exited
                 # Sleep for a short time to avoid excessive CPU usage
                 # (see bug #1095346)
-                eventlet.greenthread.sleep(.01)
+                eventlet.greenthread.sleep(self.wait_interval)
                 continue
 
             while self.running and len(wrap.children) < wrap.workers:
                 self._start_child(wrap)
 
-        if self.sigcaught:
-            signame = {signal.SIGTERM: 'SIGTERM',
-                       signal.SIGINT: 'SIGINT'}[self.sigcaught]
-            LOG.info(_('Caught %s, stopping children'), signame)
+    def wait(self):
+        """Loop waiting on children to die and respawning as necessary."""
+
+        LOG.debug(_('Full set of CONF:'))
+        CONF.log_opt_values(LOG, std_logging.DEBUG)
+
+        while True:
+            self.handle_signal()
+            self._respawn_children()
+            if self.sigcaught:
+                signame = _signo_to_signame(self.sigcaught)
+                LOG.info(_('Caught %s, stopping children'), signame)
+            if not _is_sighup_and_daemon(self.sigcaught):
+                break
+
+            for pid in self.children:
+                os.kill(pid, signal.SIGHUP)
+            self.running = True
+            self.sigcaught = None
 
         for pid in self.children:
             try:
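
The respawn loop itself is unchanged apart from the configurable sleep; a compact sketch of what _respawn_children() does, with the launcher machinery stubbed out:

import time

def respawn_children(wait_child, start_child, running, wait_interval=0.01):
    while running():
        wrap = wait_child()
        if not wrap:
            # No child exited; sleep briefly instead of busy-waiting
            # (the hard-coded .01 is now the wait_interval parameter).
            time.sleep(wait_interval)
            continue
        while running() and len(wrap['children']) < wrap['workers']:
            start_child(wrap)

spawned = []
wrap = {'children': [], 'workers': 2}
respawn_children(
    wait_child=lambda: wrap,
    start_child=lambda w: (w['children'].append(object()), spawned.append(1)),
    running=lambda: len(spawned) < 2,
)
assert len(wrap['children']) == 2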
@@ -312,15 +418,71 @@ class Service(object):
     def __init__(self, threads=1000):
         self.tg = threadgroup.ThreadGroup(threads)
 
+        # signal that the service is done shutting itself down:
+        self._done = threading.Event()
+
+    def reset(self):
+        self._done = threading.Event()
+
     def start(self):
         pass
 
     def stop(self):
         self.tg.stop()
+        self.tg.wait()
+        # Signal that service cleanup is done:
+        self._done.set()
+
+    def wait(self):
+        self._done.wait()
+
+
+class Services(object):
+
+    def __init__(self):
+        self.services = []
+        self.tg = threadgroup.ThreadGroup()
+        self.done = threading.Event()
+
+    def add(self, service):
+        self.services.append(service)
+        self.tg.add_thread(self.run_service, service, self.done)
+
+    def stop(self):
+        # wait for graceful shutdown of services:
+        for service in self.services:
+            service.stop()
+            service.wait()
+
+        # Each service has performed cleanup, now signal that the run_service
+        # wrapper threads can now die:
+        self.done.set()
+
+        # reap threads:
+        self.tg.stop()
 
     def wait(self):
         self.tg.wait()
 
+    def restart(self):
+        self.stop()
+        self.done = threading.Event()
+        for restart_service in self.services:
+            restart_service.reset()
+            self.tg.add_thread(self.run_service, restart_service, self.done)
+
+    @staticmethod
+    def run_service(service, done):
+        """Service start wrapper.
+
+        :param service: service to run
+        :param done: event to wait on until a shutdown is triggered
+        :returns: None
+
+        """
+        service.start()
+        done.wait()
+
+
 def launch(service, workers=None):
     if workers:
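
The new Services container decouples "the service has shut down" from "the wrapper thread may exit": stop() waits for each service's cleanup, then releases the run_service threads via the shared Event. A minimal standalone model of that handshake:

import threading

class TinyService(object):
    def __init__(self):
        self.started = self.stopped = False

    def start(self):
        self.started = True

    def stop(self):
        self.stopped = True

    def wait(self):
        pass

def run_service(service, done):
    # Same shape as Services.run_service() above.
    service.start()
    done.wait()

done = threading.Event()
svc = TinyService()
t = threading.Thread(target=run_service, args=(svc, done))
t.start()
# Shutdown mirrors Services.stop(): graceful stop first, then release.
svc.stop()
svc.wait()
done.set()
t.join()
assert svc.started and svc.stopped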
designate/openstack/common/sslutils.py
@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2013 IBM
+# Copyright 2013 IBM Corp.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -78,3 +76,23 @@ def wrap(sock):
         ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
 
     return ssl.wrap_socket(sock, **ssl_kwargs)
+
+
+_SSL_PROTOCOLS = {
+    "tlsv1": ssl.PROTOCOL_TLSv1,
+    "sslv23": ssl.PROTOCOL_SSLv23,
+    "sslv3": ssl.PROTOCOL_SSLv3
+}
+
+try:
+    _SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
+except AttributeError:
+    pass
+
+
+def validate_ssl_version(version):
+    key = version.lower()
+    try:
+        return _SSL_PROTOCOLS[key]
+    except KeyError:
+        raise RuntimeError(_("Invalid SSL version : %s") % version)
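
validate_ssl_version() is a simple table lookup. A trimmed, standalone version (the SSLv2/SSLv3 constants are omitted here because their availability depends on the local OpenSSL build, which is why the real code wraps SSLv2 in try/except):

import ssl

_SSL_PROTOCOLS = {
    "tlsv1": ssl.PROTOCOL_TLSv1,
    "sslv23": ssl.PROTOCOL_SSLv23,
}

def validate_ssl_version(version):
    try:
        return _SSL_PROTOCOLS[version.lower()]
    except KeyError:
        raise RuntimeError("Invalid SSL version : %s" % version)

assert validate_ssl_version("TLSv1") == ssl.PROTOCOL_TLSv1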
designate/openstack/common/strutils.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -19,15 +17,35 @@
 System-level utilities and helper functions.
 """
 
-import logging
+import re
 import sys
+import unicodedata
 
-LOG = logging.getLogger(__name__)
+import six
+
+from designate.openstack.common.gettextutils import _
+
+
+# Used for looking up extensions of text
+# to their 'multiplied' byte amount
+BYTE_MULTIPLIERS = {
+    '': 1,
+    't': 1024 ** 4,
+    'g': 1024 ** 3,
+    'm': 1024 ** 2,
+    'k': 1024,
+}
+BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)')
+
+TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
+FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
+
+SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
+SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
 
 
 def int_from_bool_as_string(subject):
-    """
-    Interpret a string as a boolean and return either 1 or 0.
+    """Interpret a string as a boolean and return either 1 or 0.
 
     Any string value in:
 
@@ -40,42 +58,53 @@ def int_from_bool_as_string(subject):
     return bool_from_string(subject) and 1 or 0
 
 
-def bool_from_string(subject):
-    """
-    Interpret a string as a boolean.
-
-    Any string value in:
-
-    ('True', 'true', 'On', 'on', 'Yes', 'yes', '1')
-
-    is interpreted as a boolean True.
-
-    Useful for JSON-decoded stuff and config file parsing
-    """
-    if isinstance(subject, bool):
-        return subject
-    if isinstance(subject, basestring):
-        if subject.strip().lower() in ('true', 'on', 'yes', '1'):
-            return True
-    return False
+def bool_from_string(subject, strict=False):
+    """Interpret a string as a boolean.
+
+    A case-insensitive match is performed such that strings matching 't',
+    'true', 'on', 'y', 'yes', or '1' are considered True and, when
+    `strict=False`, anything else is considered False.
+
+    Useful for JSON-decoded stuff and config file parsing.
+
+    If `strict=True`, unrecognized values, including None, will raise a
+    ValueError which is useful when parsing values passed in from an API call.
+    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
+    """
+    if not isinstance(subject, six.string_types):
+        subject = str(subject)
+
+    lowered = subject.strip().lower()
+
+    if lowered in TRUE_STRINGS:
+        return True
+    elif lowered in FALSE_STRINGS:
+        return False
+    elif strict:
+        acceptable = ', '.join(
+            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
+        msg = _("Unrecognized value '%(val)s', acceptable values are:"
+                " %(acceptable)s") % {'val': subject,
+                                      'acceptable': acceptable}
+        raise ValueError(msg)
+    else:
+        return False
 
 
 def safe_decode(text, incoming=None, errors='strict'):
-    """
-    Decodes incoming str using `incoming` if they're
-    not already unicode.
+    """Decodes incoming str using `incoming` if they're not already unicode.
 
     :param incoming: Text's current encoding
     :param errors: Errors handling policy. See here for valid
         values http://docs.python.org/2/library/codecs.html
     :returns: text or a unicode `incoming` encoded
                representation of it.
-    :raises TypeError: If text is not an isntance of basestring
+    :raises TypeError: If text is not an instance of str
     """
-    if not isinstance(text, basestring):
+    if not isinstance(text, six.string_types):
         raise TypeError("%s can't be decoded" % type(text))
 
-    if isinstance(text, unicode):
+    if isinstance(text, six.text_type):
         return text
 
     if not incoming:
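
The rewritten bool_from_string() recognizes a fixed vocabulary in both directions and only raises when asked to. A Python 3 sketch of the same semantics (the real code uses six.string_types for Python 2 compatibility):

TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')

def bool_from_string(subject, strict=False):
    if not isinstance(subject, str):
        subject = str(subject)
    lowered = subject.strip().lower()
    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        raise ValueError("Unrecognized value %r" % subject)
    return False

assert bool_from_string('YES')
assert not bool_from_string('banana')            # lenient by default
try:
    bool_from_string('banana', strict=True)      # strict mode raises
except ValueError:
    pass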
@ -102,11 +131,10 @@ def safe_decode(text, incoming=None, errors='strict'):
|
|||||||
|
|
||||||
def safe_encode(text, incoming=None,
|
def safe_encode(text, incoming=None,
|
||||||
encoding='utf-8', errors='strict'):
|
encoding='utf-8', errors='strict'):
|
||||||
"""
|
"""Encodes incoming str/unicode using `encoding`.
|
||||||
Encodes incoming str/unicode using `encoding`. If
|
|
||||||
incoming is not specified, text is expected to
|
If incoming is not specified, text is expected to be encoded with
|
||||||
be encoded with current python's default encoding.
|
current python's default encoding. (`sys.getdefaultencoding`)
|
||||||
(`sys.getdefaultencoding`)
|
|
||||||
|
|
||||||
:param incoming: Text's current encoding
|
:param incoming: Text's current encoding
|
||||||
:param encoding: Expected encoding for text (Default UTF-8)
|
:param encoding: Expected encoding for text (Default UTF-8)
|
||||||
@ -114,20 +142,81 @@ def safe_encode(text, incoming=None,
|
|||||||
values http://docs.python.org/2/library/codecs.html
|
values http://docs.python.org/2/library/codecs.html
|
||||||
:returns: text or a bytestring `encoding` encoded
|
:returns: text or a bytestring `encoding` encoded
|
||||||
representation of it.
|
representation of it.
|
||||||
:raises TypeError: If text is not an isntance of basestring
|
:raises TypeError: If text is not an instance of str
|
||||||
"""
|
"""
|
||||||
if not isinstance(text, basestring):
|
if not isinstance(text, six.string_types):
|
||||||
raise TypeError("%s can't be encoded" % type(text))
|
raise TypeError("%s can't be encoded" % type(text))
|
||||||
|
|
||||||
if not incoming:
|
if not incoming:
|
||||||
incoming = (sys.stdin.encoding or
|
incoming = (sys.stdin.encoding or
|
||||||
sys.getdefaultencoding())
|
sys.getdefaultencoding())
|
||||||
|
|
||||||
if isinstance(text, unicode):
|
if isinstance(text, six.text_type):
|
||||||
return text.encode(encoding, errors)
|
if six.PY3:
|
||||||
|
return text.encode(encoding, errors).decode(incoming)
|
||||||
|
else:
|
||||||
|
return text.encode(encoding, errors)
|
||||||
elif text and encoding != incoming:
|
elif text and encoding != incoming:
|
||||||
# Decode text before encoding it with `encoding`
|
# Decode text before encoding it with `encoding`
|
||||||
text = safe_decode(text, incoming, errors)
|
text = safe_decode(text, incoming, errors)
|
||||||
return text.encode(encoding, errors)
|
if six.PY3:
|
||||||
|
return text.encode(encoding, errors).decode(incoming)
|
||||||
|
else:
|
||||||
|
return text.encode(encoding, errors)
|
||||||
|
|
||||||
return text
|
return text
|
||||||
|
|
||||||
|
|
||||||
|
def to_bytes(text, default=0):
|
||||||
|
"""Converts a string into an integer of bytes.
|
||||||
|
|
||||||
|
Looks at the last characters of the text to determine
|
||||||
|
what conversion is needed to turn the input text into a byte number.
|
||||||
|
Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)
|
||||||
|
|
||||||
|
:param text: String input for bytes size conversion.
|
||||||
|
:param default: Default return value when text is blank.
|
||||||
|
|
||||||
|
"""
|
||||||
|
match = BYTE_REGEX.search(text)
|
||||||
|
if match:
|
||||||
|
magnitude = int(match.group(1))
|
||||||
|
mult_key_org = match.group(2)
|
||||||
|
if not mult_key_org:
|
||||||
|
return magnitude
|
||||||
|
elif text:
|
||||||
|
msg = _('Invalid string format: %s') % text
|
||||||
|
raise TypeError(msg)
|
||||||
|
else:
|
||||||
|
return default
|
||||||
|
mult_key = mult_key_org.lower().replace('b', '', 1)
|
||||||
|
multiplier = BYTE_MULTIPLIERS.get(mult_key)
|
||||||
|
if multiplier is None:
|
||||||
|
msg = _('Unknown byte multiplier: %s') % mult_key_org
|
||||||
|
raise TypeError(msg)
|
||||||
|
return magnitude * multiplier
|
||||||
|
|
||||||
|
|
||||||
|
def to_slug(value, incoming=None, errors="strict"):
|
||||||
|
"""Normalize string.
|
||||||
|
|
||||||
|
Convert to lowercase, remove non-word characters, and convert spaces
|
||||||
|
to hyphens.
|
||||||
|
|
||||||
|
Inspired by Django's `slugify` filter.
|
||||||
|
|
||||||
|
:param value: Text to slugify
|
||||||
|
:param incoming: Text's current encoding
|
||||||
|
:param errors: Errors handling policy. See here for valid
|
||||||
|
values http://docs.python.org/2/library/codecs.html
|
||||||
|
:returns: slugified unicode representation of `value`
|
||||||
|
:raises TypeError: If text is not an instance of str
|
||||||
|
"""
|
||||||
|
value = safe_decode(value, incoming, errors)
|
||||||
|
# NOTE(aababilov): no need to use safe_(encode|decode) here:
|
||||||
|
# encodings are always "ascii", error handling is always "ignore"
|
||||||
|
# and types are always known (first: unicode; second: str)
|
||||||
|
value = unicodedata.normalize("NFKD", value).encode(
|
||||||
|
"ascii", "ignore").decode("ascii")
|
||||||
|
value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
|
||||||
|
return SLUGIFY_HYPHENATE_RE.sub("-", value)
|
||||||
|
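For illustration, a minimal usage sketch of the strutils helpers synced above (not part of the commit; assumes a checkout where the module is importable):

```python
# Illustrative only; exercises the six-based helpers shown in the diff above.
from designate.openstack.common import strutils

strutils.safe_decode(b'caf\xc3\xa9', incoming='utf-8')  # -> u'caf\xe9'
strutils.to_bytes('10KB')          # -> 10240: 'KB' maps to the 1024 multiplier
strutils.to_slug(u'Hello World!')  # -> 'hello-world'
```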
designate/openstack/common/test.py

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
 # All Rights Reserved.
 #
@@ -26,11 +24,12 @@ _TRUE_VALUES = ('True', 'true', '1', 'yes')


 class BaseTestCase(testtools.TestCase):

     def setUp(self):
         super(BaseTestCase, self).setUp()
         self._set_timeout()
         self._fake_output()
-        self.useFixture(fixtures.FakeLogger('designate.openstack.common'))
+        self.useFixture(fixtures.FakeLogger())
         self.useFixture(fixtures.NestedTempfile())
         self.useFixture(fixtures.TempHomeDir())
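Worth noting: `fixtures.FakeLogger()` without a name argument captures the root logger, so tests now swallow log output process-wide instead of only from `designate.openstack.common`. A hypothetical test built on the synced base class:

```python
# Sketch only: a Designate unit test inheriting the synced BaseTestCase.
from designate.openstack.common import test


class TestExample(test.BaseTestCase):
    def test_simple(self):
        # setUp() has already installed the timeout, fake-output, logging,
        # tempfile and home-dir fixtures shown in the diff above.
        self.assertEqual('a', 'a')
```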
designate/openstack/common/threadgroup.py

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -14,7 +12,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from eventlet import greenlet
+import eventlet
 from eventlet import greenpool
 from eventlet import greenthread

@@ -26,7 +24,7 @@ LOG = logging.getLogger(__name__)


 def _thread_done(gt, *args, **kwargs):
-    """ Callback function to be passed to GreenThread.link() when we spawn()
+    """Callback function to be passed to GreenThread.link() when we spawn()
     Calls the :class:`ThreadGroup` to notify if.

     """
@@ -34,7 +32,7 @@ def _thread_done(gt, *args, **kwargs):


 class Thread(object):
-    """ Wrapper around a greenthread, that holds a reference to the
+    """Wrapper around a greenthread, that holds a reference to the
     :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
     it has done so it can be removed from the threads list.
     """
@@ -48,9 +46,12 @@ class Thread(object):
     def wait(self):
         return self.thread.wait()

+    def link(self, func, *args, **kwargs):
+        self.thread.link(func, *args, **kwargs)
+

 class ThreadGroup(object):
-    """ The point of the ThreadGroup classis to:
+    """The point of the ThreadGroup classis to:

     * keep track of timers and greenthreads (making it easier to stop them
       when need be).
@@ -61,9 +62,16 @@ class ThreadGroup(object):
         self.threads = []
         self.timers = []

+    def add_dynamic_timer(self, callback, initial_delay=None,
+                          periodic_interval_max=None, *args, **kwargs):
+        timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
+        timer.start(initial_delay=initial_delay,
+                    periodic_interval_max=periodic_interval_max)
+        self.timers.append(timer)
+
     def add_timer(self, interval, callback, initial_delay=None,
                   *args, **kwargs):
-        pulse = loopingcall.LoopingCall(callback, *args, **kwargs)
+        pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
         pulse.start(interval=interval,
                     initial_delay=initial_delay)
         self.timers.append(pulse)
@@ -72,13 +80,17 @@ class ThreadGroup(object):
         gt = self.pool.spawn(callback, *args, **kwargs)
         th = Thread(gt, self)
         self.threads.append(th)
+        return th

     def thread_done(self, thread):
         self.threads.remove(thread)

     def stop(self):
         current = greenthread.getcurrent()
-        for x in self.threads:
+        # Iterate over a copy of self.threads so thread_done doesn't
+        # modify the list while we're iterating
+        for x in self.threads[:]:
             if x is current:
                 # don't kill the current thread.
                 continue
@@ -98,17 +110,20 @@ class ThreadGroup(object):
         for x in self.timers:
             try:
                 x.wait()
-            except greenlet.GreenletExit:
+            except eventlet.greenlet.GreenletExit:
                 pass
             except Exception as ex:
                 LOG.exception(ex)
         current = greenthread.getcurrent()
-        for x in self.threads:
+        # Iterate over a copy of self.threads so thread_done doesn't
+        # modify the list while we're iterating
+        for x in self.threads[:]:
             if x is current:
                 continue
             try:
                 x.wait()
-            except greenlet.GreenletExit:
+            except eventlet.greenlet.GreenletExit:
                 pass
             except Exception as ex:
                 LOG.exception(ex)
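Two API changes above matter to callers: `add_thread()` now returns the `Thread` wrapper (which gains a `link()` passthrough), and fixed-interval timers are backed by `FixedIntervalLoopingCall`. A hedged usage sketch, inside an eventlet-based service (the worker functions below are hypothetical):

```python
# Illustrative only; not part of the commit.
from designate.openstack.common import threadgroup


def worker():
    pass  # some green-thread workload


def on_done(gt):
    pass  # invoked with the greenthread when worker() finishes


tg = threadgroup.ThreadGroup()
th = tg.add_thread(worker)  # returns the Thread wrapper as of this sync
th.link(on_done)            # link() is new in this sync
tg.add_timer(60, worker)    # now a FixedIntervalLoopingCall under the hood
tg.stop()
```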
designate/openstack/common/timeutils.py

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -21,8 +19,10 @@ Time related utilities and helper functions.

 import calendar
 import datetime
+import time

 import iso8601
+import six


 # ISO 8601 extended time format with microseconds
@@ -32,7 +32,7 @@ PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND


 def isotime(at=None, subsecond=False):
-    """Stringify time in ISO 8601 format"""
+    """Stringify time in ISO 8601 format."""
     if not at:
         at = utcnow()
     st = at.strftime(_ISO8601_TIME_FORMAT
@@ -44,13 +44,13 @@ def isotime(at=None, subsecond=False):


 def parse_isotime(timestr):
-    """Parse time from ISO 8601 format"""
+    """Parse time from ISO 8601 format."""
     try:
         return iso8601.parse_date(timestr)
     except iso8601.ParseError as e:
-        raise ValueError(e.message)
+        raise ValueError(six.text_type(e))
     except TypeError as e:
-        raise ValueError(e.message)
+        raise ValueError(six.text_type(e))


 def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
@@ -66,7 +66,7 @@ def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):


 def normalize_time(timestamp):
-    """Normalize time in arbitrary timezone to UTC naive object"""
+    """Normalize time in arbitrary timezone to UTC naive object."""
     offset = timestamp.utcoffset()
     if offset is None:
         return timestamp
@@ -75,20 +75,31 @@ def normalize_time(timestamp):

 def is_older_than(before, seconds):
     """Return True if before is older than seconds."""
-    if isinstance(before, basestring):
+    if isinstance(before, six.string_types):
         before = parse_strtime(before).replace(tzinfo=None)
+    else:
+        before = before.replace(tzinfo=None)
+
     return utcnow() - before > datetime.timedelta(seconds=seconds)


 def is_newer_than(after, seconds):
     """Return True if after is newer than seconds."""
-    if isinstance(after, basestring):
+    if isinstance(after, six.string_types):
         after = parse_strtime(after).replace(tzinfo=None)
+    else:
+        after = after.replace(tzinfo=None)
+
     return after - utcnow() > datetime.timedelta(seconds=seconds)


 def utcnow_ts():
     """Timestamp version of our utcnow function."""
+    if utcnow.override_time is None:
+        # NOTE(kgriffs): This is several times faster
+        # than going through calendar.timegm(...)
+        return int(time.time())
+
     return calendar.timegm(utcnow().timetuple())


@@ -103,19 +114,22 @@ def utcnow():


 def iso8601_from_timestamp(timestamp):
-    """Returns a iso8601 formated date from timestamp"""
+    """Returns a iso8601 formated date from timestamp."""
     return isotime(datetime.datetime.utcfromtimestamp(timestamp))


 utcnow.override_time = None


-def set_time_override(override_time=datetime.datetime.utcnow()):
-    """
-    Override utils.utcnow to return a constant time or a list thereof,
-    one at a time.
-    """
-    utcnow.override_time = override_time
+def set_time_override(override_time=None):
+    """Overrides utils.utcnow.
+
+    Make it return a constant time or a list thereof, one at a time.
+
+    :param override_time: datetime instance or list thereof. If not
+                          given, defaults to the current UTC time.
+    """
+    utcnow.override_time = override_time or datetime.datetime.utcnow()


 def advance_time_delta(timedelta):
@@ -141,7 +155,8 @@ def clear_time_override():
 def marshall_now(now=None):
     """Make an rpc-safe datetime with microseconds.

-    Note: tzinfo is stripped, but not required for relative times."""
+    Note: tzinfo is stripped, but not required for relative times.
+    """
     if not now:
         now = utcnow()
     return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
@@ -161,11 +176,21 @@ def unmarshall_time(tyme):


 def delta_seconds(before, after):
-    """
+    """Return the difference between two timing objects.
+
     Compute the difference in seconds between two date, time, or
     datetime objects (as a float, to microsecond resolution).
     """
     delta = after - before
+    return total_seconds(delta)
+
+
+def total_seconds(delta):
+    """Return the total seconds of datetime.timedelta object.
+
+    Compute total seconds of datetime.timedelta, datetime.timedelta
+    doesn't have method total_seconds in Python2.6, calculate it manually.
+    """
     try:
         return delta.total_seconds()
     except AttributeError:
@@ -174,11 +199,10 @@ def delta_seconds(before, after):


 def is_soon(dt, window):
-    """
-    Determines if time is going to happen in the next window seconds.
+    """Determines if time is going to happen in the next window seconds.

-    :params dt: the time
-    :params window: minimum seconds to remain to consider the time not soon
+    :param dt: the time
+    :param window: minimum seconds to remain to consider the time not soon

     :return: True if expiration is within the given duration
     """
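The override helpers above are what make `utcnow()` testable; note that `set_time_override()` now freezes time at call time instead of at module import. A minimal sketch of freezing and advancing time in a test, using only the synced module:

```python
# Illustrative only; not part of the commit.
import datetime

from designate.openstack.common import timeutils

timeutils.set_time_override()  # freezes "now"; defaults to the current UTC time
frozen = timeutils.utcnow()
timeutils.advance_time_delta(datetime.timedelta(seconds=30))
assert timeutils.delta_seconds(frozen, timeutils.utcnow()) == 30.0
timeutils.clear_time_override()
```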
designate/openstack/common/uuidutils.py

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2012 Intel Corporation.
 # All Rights Reserved.
 #
designate/openstack/common/versionutils.py (new file, +148)

@@ -0,0 +1,148 @@
+# Copyright (c) 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helpers for comparing version strings.
+"""
+
+import functools
+import pkg_resources
+
+from designate.openstack.common.gettextutils import _
+from designate.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class deprecated(object):
+    """A decorator to mark callables as deprecated.
+
+    This decorator logs a deprecation message when the callable it decorates is
+    used. The message will include the release where the callable was
+    deprecated, the release where it may be removed and possibly an optional
+    replacement.
+
+    Examples:
+
+    1. Specifying the required deprecated release
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE)
+    ... def a(): pass
+
+    2. Specifying a replacement:
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
+    ... def b(): pass
+
+    3. Specifying the release where the functionality may be removed:
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
+    ... def c(): pass
+
+    """
+
+    FOLSOM = 'F'
+    GRIZZLY = 'G'
+    HAVANA = 'H'
+    ICEHOUSE = 'I'
+
+    _RELEASES = {
+        'F': 'Folsom',
+        'G': 'Grizzly',
+        'H': 'Havana',
+        'I': 'Icehouse',
+    }
+
+    _deprecated_msg_with_alternative = _(
+        '%(what)s is deprecated as of %(as_of)s in favor of '
+        '%(in_favor_of)s and may be removed in %(remove_in)s.')
+
+    _deprecated_msg_no_alternative = _(
+        '%(what)s is deprecated as of %(as_of)s and may be '
+        'removed in %(remove_in)s. It will not be superseded.')
+
+    def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
+        """Initialize decorator
+
+        :param as_of: the release deprecating the callable. Constants
+            are define in this class for convenience.
+        :param in_favor_of: the replacement for the callable (optional)
+        :param remove_in: an integer specifying how many releases to wait
+            before removing (default: 2)
+        :param what: name of the thing being deprecated (default: the
+            callable's name)
+
+        """
+        self.as_of = as_of
+        self.in_favor_of = in_favor_of
+        self.remove_in = remove_in
+        self.what = what
+
+    def __call__(self, func):
+        if not self.what:
+            self.what = func.__name__ + '()'
+
+        @functools.wraps(func)
+        def wrapped(*args, **kwargs):
+            msg, details = self._build_message()
+            LOG.deprecated(msg, details)
+            return func(*args, **kwargs)
+        return wrapped
+
+    def _get_safe_to_remove_release(self, release):
+        # TODO(dstanek): this method will have to be reimplemented once
+        # when we get to the X release because once we get to the Y
+        # release, what is Y+2?
+        new_release = chr(ord(release) + self.remove_in)
+        if new_release in self._RELEASES:
+            return self._RELEASES[new_release]
+        else:
+            return new_release
+
+    def _build_message(self):
+        details = dict(what=self.what,
+                       as_of=self._RELEASES[self.as_of],
+                       remove_in=self._get_safe_to_remove_release(self.as_of))
+
+        if self.in_favor_of:
+            details['in_favor_of'] = self.in_favor_of
+            msg = self._deprecated_msg_with_alternative
+        else:
+            msg = self._deprecated_msg_no_alternative
+        return msg, details
+
+
+def is_compatible(requested_version, current_version, same_major=True):
+    """Determine whether `requested_version` is satisfied by
+    `current_version`; in other words, `current_version` is >=
+    `requested_version`.
+
+    :param requested_version: version to check for compatibility
+    :param current_version: version to check against
+    :param same_major: if True, the major version must be identical between
+        `requested_version` and `current_version`. This is used when a
+        major-version difference indicates incompatibility between the two
+        versions. Since this is the common-case in practice, the default is
+        True.
+    :returns: True if compatible, False if not
+    """
+    requested_parts = pkg_resources.parse_version(requested_version)
+    current_parts = pkg_resources.parse_version(current_version)
+
+    if same_major and (requested_parts[0] != current_parts[0]):
+        return False
+
+    return current_parts >= requested_parts
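A quick sketch of how the new module is used (the decorated name below is hypothetical):

```python
# Illustrative only; not part of the commit.
from designate.openstack.common import versionutils


@versionutils.deprecated(as_of=versionutils.deprecated.HAVANA,
                         in_favor_of='new_helper()')
def old_helper():  # hypothetical callable
    pass


old_helper()  # logs a deprecation message naming Havana and the removal release

versionutils.is_compatible('1.0', '1.2')  # True: same major and 1.2 >= 1.0
versionutils.is_compatible('2.0', '1.9')  # False: major versions differ
```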
designate/openstack/common/xmlutils.py

@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2013 IBM
+# Copyright 2013 IBM Corp.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
designate/openstack/deprecated/__init__.py (new, empty file)

designate/openstack/deprecated/wsgi.py

@@ -35,13 +35,13 @@ import webob.exc
 from xml.dom import minidom
 from xml.parsers import expat

-from designate.openstack.common import exception
 from designate.openstack.common.gettextutils import _
 from designate.openstack.common import jsonutils
 from designate.openstack.common import log as logging
 from designate.openstack.common import service
 from designate.openstack.common import sslutils
 from designate.openstack.common import xmlutils
+from designate.openstack.deprecated import exception

 socket_opts = [
     cfg.IntOpt('backlog',

@@ -61,10 +61,6 @@ class Service(service.Service):
         self._setup_subscriptions()
         self.rpc_conn.consume_in_thread()

-    def wait(self):
-        super(Service, self).wait()
-        self.rpc_conn.consumer_thread.wait()
-
     def stop(self):
         # Try to shut the connection down, but if we get any sort of
         # errors, go ahead and ignore them.. as we're shutting down anyway
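Per the hunk above, the subclass no longer overrides `wait()` to join `self.rpc_conn.consumer_thread`; the base class's `wait()` is used as-is. A hedged sketch of the resulting subclass shape (details elided to what the diff shows):

```python
# Sketch only; mirrors the pattern in the hunk above.
from designate.openstack.common import service


class Service(service.Service):
    def start(self):
        super(Service, self).start()
        # ... set up subscriptions and start consuming, per the diff ...

    def stop(self):
        # ... shut the RPC connection down first, per the diff ...
        super(Service, self).stop()
    # note: no wait() override any more
```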
@@ -13,7 +13,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-from designate.openstack.common import wsgi
+from designate.openstack.deprecated import wsgi


 class Middleware(wsgi.Middleware):
@@ -15,7 +15,7 @@ debug = False
 #logdir = /var/log/designate

 # Driver used for issuing notifications
-#notification_driver = designate.openstack.common.notifier.rabbit_notifier
+#notification_driver = designate.openstack.common.notifier.rpc_notifier

 # Use "sudo designate-rootwrap /etc/designate/rootwrap.conf" to use the real
 # root filter facility.
openstack-common.conf

@@ -2,7 +2,10 @@

 # The list of modules to copy from oslo-incubator.git
 module=context
+module=excutils
+module=fixture
 module=gettextutils
+module=importutils
 module=jsonutils
 module=local
 module=log
@@ -12,21 +15,12 @@ module=processutils
 module=rootwrap
 module=rpc
 module=service
+module=strutils
 module=test
 module=timeutils
 module=uuidutils
-module=wsgi

-# transitive dependencies
-module=eventlet_backdoor
-module=exception
-module=excutils
-module=importutils
-module=loopingcall
-module=network_utils
-module=periodic_task
-module=sslutils
-module=threadgroup
+# Modules needed for the deprecated oslo.wsgi we're still using
 module=xmlutils

 # The base module to hold the copy of openstack.common
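For context: this list drives oslo-incubator's update tool, and a re-sync is typically run from an oslo-incubator checkout as `python update.py ../designate`, which reads the destination's openstack-common.conf and copies each listed module into designate/openstack/common. (Invocation cited from memory of the oslo-incubator workflow; verify against its README.)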
tox.ini

@@ -31,4 +31,4 @@ commands = {posargs}
 [flake8]
 ignore = H302,H306,H401,H402,H404
 builtins = _
-exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools
+exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*openstack/deprecated*,*lib/python*,*egg,build,tools