Merge remote-tracking branch 'origin/master' into merge-branch

Change-Id: Iaf35c4bab146e2070cca59a8fd6520b556850233
Gerrit change ref: refs/changes/16/325916/1
Kenneth Giusti 6 years ago
commit 1c7ff9d60a
 .gitignore | 1
 doc/source/AMQP1.0.rst | 2
 oslo_messaging/_drivers/amqp1_driver/__init__.py | 0
 oslo_messaging/_drivers/amqp1_driver/controller.py | 7
 oslo_messaging/_drivers/amqp1_driver/drivertasks.py | 5
 oslo_messaging/_drivers/amqp1_driver/eventloop.py | 0
 oslo_messaging/_drivers/amqp1_driver/opts.py | 0
 oslo_messaging/_drivers/amqpdriver.py | 10
 oslo_messaging/_drivers/base.py | 2
 oslo_messaging/_drivers/common.py | 7
 oslo_messaging/_drivers/impl_amqp1.py | 8
 oslo_messaging/_drivers/impl_fake.py | 2
 oslo_messaging/_drivers/impl_kafka.py | 31
 oslo_messaging/_drivers/impl_rabbit.py | 28
 oslo_messaging/_drivers/impl_zmq.py | 12
 oslo_messaging/_drivers/pika_driver/pika_message.py | 4
 oslo_messaging/_drivers/zmq_driver/broker/zmq_queue_proxy.py | 2
 oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_proxy.py | 20
 oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_publisher_base.py | 27
 oslo_messaging/_drivers/zmq_driver/client/zmq_client.py | 104
 oslo_messaging/_drivers/zmq_driver/client/zmq_response.py | 11
 oslo_messaging/_drivers/zmq_driver/matchmaker/matchmaker_redis.py | 2
 oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_dealer_consumer.py | 10
 oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_pull_consumer.py | 2
 oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_router_consumer.py | 2
 oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_sub_consumer.py | 2
 oslo_messaging/_drivers/zmq_driver/server/zmq_incoming_message.py | 8
 oslo_messaging/_drivers/zmq_driver/zmq_names.py | 1
 oslo_messaging/conffixture.py | 2
 oslo_messaging/opts.py | 2
 oslo_messaging/rpc/client.py | 6
 oslo_messaging/rpc/server.py | 9
 oslo_messaging/server.py | 13
 oslo_messaging/tests/drivers/test_amqp_driver.py | 54
 oslo_messaging/tests/drivers/test_impl_kafka.py | 15
 oslo_messaging/tests/drivers/test_impl_rabbit.py | 61
 oslo_messaging/tests/rpc/test_server.py | 37
 oslo_messaging/tests/test_exception_serialization.py | 27
 oslo_messaging/tests/test_transport.py | 10
 oslo_messaging/transport.py | 26
 oslo_messaging/version.py | 18
 releasenotes/notes/add_reno-3b4ae0789e9c45b4.yaml | 3
 releasenotes/source/_static/.placeholder | 0
 releasenotes/source/_templates/.placeholder | 0
 releasenotes/source/conf.py | 276
 releasenotes/source/index.rst | 8
 releasenotes/source/unreleased.rst | 5
 requirements.txt | 6
 setup-test-env-zmq.sh | 1
 setup.cfg | 2
 test-requirements.txt | 3
 tox.ini | 3
 52 files changed

.gitignore

@@ -14,3 +14,4 @@ doc/build/
doc/source/api/
dist/
.testrepository/
releasenotes/build

doc/source/AMQP1.0.rst

@@ -120,7 +120,7 @@ The new driver is selected by specifying **amqp** as the transport
name. For example::
from oslo import messaging
from olso.config import cfg
from oslo.config import cfg
amqp_transport = messaging.get_transport(cfg.CONF,
"amqp://me:passwd@host:5672")

oslo_messaging/_drivers/amqp1_driver/controller.py

@@ -26,6 +26,7 @@ functions scheduled by the Controller.
import abc
import logging
import random
import threading
import uuid
@@ -34,8 +35,8 @@ import proton
import pyngus
from six import moves
from oslo_messaging._drivers.protocols.amqp import eventloop
from oslo_messaging._drivers.protocols.amqp import opts
from oslo_messaging._drivers.amqp1_driver import eventloop
from oslo_messaging._drivers.amqp1_driver import opts
from oslo_messaging._i18n import _LE, _LI, _LW
from oslo_messaging import exceptions
from oslo_messaging import transport
@@ -298,7 +299,7 @@ class Hosts(object):
entry.port = entry.port or 5672
entry.username = entry.username or default_username
entry.password = entry.password or default_password
self._current = 0
self._current = random.randint(0, len(self._entries) - 1)
@property
def current(self):
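
The change above alters where failover starts: instead of always beginning at the first broker, the host list is seeded at a random offset so a fleet of clients spreads its initial connections across all brokers. A minimal standalone sketch of that rotation (simplified names, not the driver's exact class)::

    import random

    class Hosts(object):
        """Round-robin broker list seeded at a random offset."""

        def __init__(self, entries):
            self._entries = entries
            # Random start point: concurrent clients no longer all
            # pick entry 0, so initial load spreads across brokers.
            self._current = random.randint(0, len(self._entries) - 1)

        @property
        def current(self):
            return self._entries[self._current]

        def next(self):
            # Advance on failover, wrapping around the list.
            self._current = (self._current + 1) % len(self._entries)
            return self.current

    hosts = Hosts(['amqp1:5672', 'amqp2:5672', 'amqp3:5672'])
    print(hosts.current)  # a random starting broker
    print(hosts.next())   # the next candidate after a failover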

oslo_messaging/_drivers/amqp1_driver/drivertasks.py

@@ -16,7 +16,7 @@ import logging
import threading
import time
from oslo_messaging._drivers.protocols.amqp import controller
from oslo_messaging._drivers.amqp1_driver import controller
from oslo_messaging._i18n import _LW
from oslo_messaging import exceptions
@@ -94,11 +94,10 @@ class ListenTask(controller.Task):
class ReplyTask(controller.Task):
"""A task that sends 'response' message to 'address'.
"""
def __init__(self, address, response, log_failure):
def __init__(self, address, response):
super(ReplyTask, self).__init__()
self._address = address
self._response = response
self._log_failure = log_failure
self._wakeup = threading.Event()
def wait(self):

oslo_messaging/_drivers/amqpdriver.py

@@ -50,14 +50,13 @@ class AMQPIncomingMessage(base.RpcIncomingMessage):
self.stopwatch = timeutils.StopWatch()
self.stopwatch.start()
def _send_reply(self, conn, reply=None, failure=None, log_failure=True):
def _send_reply(self, conn, reply=None, failure=None):
if not self._obsolete_reply_queues.reply_q_valid(self.reply_q,
self.msg_id):
return
if failure:
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
failure = rpc_common.serialize_remote_exception(failure)
# NOTE(sileht): ending can be removed in N*, see Listener.wait()
# for more detail.
msg = {'result': reply, 'failure': failure, 'ending': True,
@@ -74,7 +73,7 @@ class AMQPIncomingMessage(base.RpcIncomingMessage):
'elapsed': self.stopwatch.elapsed()})
conn.direct_send(self.reply_q, rpc_common.serialize_msg(msg))
def reply(self, reply=None, failure=None, log_failure=True):
def reply(self, reply=None, failure=None):
if not self.msg_id:
# NOTE(Alexei_987) not sending reply, if msg_id is empty
# because reply should not be expected by caller side
@@ -96,8 +95,7 @@ class AMQPIncomingMessage(base.RpcIncomingMessage):
try:
with self.listener.driver._get_connection(
rpc_common.PURPOSE_SEND) as conn:
self._send_reply(conn, reply, failure,
log_failure=log_failure)
self._send_reply(conn, reply, failure)
return
except rpc_amqp.AMQPDestinationNotFound:
if timer.check_return() > 0:

oslo_messaging/_drivers/base.py

@@ -92,7 +92,7 @@ class IncomingMessage(object):
class RpcIncomingMessage(IncomingMessage):
@abc.abstractmethod
def reply(self, reply=None, failure=None, log_failure=True):
def reply(self, reply=None, failure=None):
"""Send a reply or failure back to the client."""

oslo_messaging/_drivers/common.py

@@ -162,18 +162,15 @@ class Connection(object):
raise NotImplementedError()
def serialize_remote_exception(failure_info, log_failure=True):
def serialize_remote_exception(failure_info):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_LE("Returning exception %s to caller"),
six.text_type(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
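
With the log_failure flag gone, serialize_remote_exception() no longer logs anything; callers decide what to log. A rough standalone sketch of the remaining serialization step (field names illustrative, not oslo's exact wire format)::

    import sys
    import traceback

    def serialize_remote_exception(failure_info):
        """Pack a sys.exc_info() tuple into a JSON-friendly dict.

        Logging is now the caller's responsibility; this function
        only serializes.
        """
        tb = traceback.format_exception(*failure_info)
        failure = failure_info[1]
        return {
            'class': failure.__class__.__name__,
            'module': failure.__class__.__module__,
            'message': str(failure),
            'tb': tb,
            'args': failure.args,
        }

    try:
        raise ValueError('boom')
    except ValueError:
        data = serialize_remote_exception(sys.exc_info())
        print(data['class'], data['message'])  # ValueError boom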

oslo_messaging/_drivers/impl_amqp1.py

@@ -39,10 +39,10 @@ from oslo_messaging import target as messaging_target
proton = importutils.try_import('proton')
controller = importutils.try_import(
'oslo_messaging._drivers.protocols.amqp.controller'
'oslo_messaging._drivers.amqp1_driver.controller'
)
drivertasks = importutils.try_import(
'oslo_messaging._drivers.protocols.amqp.drivertasks'
'oslo_messaging._drivers.amqp1_driver.drivertasks'
)
LOG = logging.getLogger(__name__)
@@ -98,13 +98,13 @@ class ProtonIncomingMessage(base.RpcIncomingMessage):
self._reply_to = message.reply_to
self._correlation_id = message.id
def reply(self, reply=None, failure=None, log_failure=True):
def reply(self, reply=None, failure=None):
"""Schedule a ReplyTask to send the reply."""
if self._reply_to:
response = marshal_response(reply=reply, failure=failure)
response.correlation_id = self._correlation_id
LOG.debug("Replying to %s", self._correlation_id)
task = drivertasks.ReplyTask(self._reply_to, response, log_failure)
task = drivertasks.ReplyTask(self._reply_to, response)
self.listener.driver._ctrl.add_task(task)
else:
LOG.debug("Ignoring reply as no reply address available")

oslo_messaging/_drivers/impl_fake.py

@@ -30,7 +30,7 @@ class FakeIncomingMessage(base.RpcIncomingMessage):
self.requeue_callback = requeue
self._reply_q = reply_q
def reply(self, reply=None, failure=None, log_failure=True):
def reply(self, reply=None, failure=None):
if self._reply_q:
failure = failure[1] if failure else None
self._reply_q.put((reply, failure))

oslo_messaging/_drivers/impl_kafka.py

@@ -33,9 +33,13 @@ PURPOSE_LISTEN = 'listen'
kafka_opts = [
cfg.StrOpt('kafka_default_host', default='localhost',
deprecated_for_removal=True,
deprecated_reason="Replaced by [DEFAULT]/transport_url",
help='Default Kafka broker Host'),
cfg.PortOpt('kafka_default_port', default=9092,
deprecated_for_removal=True,
deprecated_reason="Replaced by [DEFAULT]/transport_url",
help='Default Kafka broker Port'),
cfg.IntOpt('kafka_max_fetch_bytes', default=1024 * 1024,
@@ -97,21 +101,18 @@ class Connection(object):
def _parse_url(self):
driver_conf = self.conf.oslo_messaging_kafka
try:
self.host = self.url.hosts[0].hostname
except (NameError, IndexError):
self.host = driver_conf.kafka_default_host
try:
self.port = self.url.hosts[0].port
except (NameError, IndexError):
self.port = driver_conf.kafka_default_port
self.hostaddrs = []
if self.host is None:
self.host = driver_conf.kafka_default_host
for host in self.url.hosts:
if host.hostname:
self.hostaddrs.append("%s:%s" % (
host.hostname,
host.port or driver_conf.kafka_default_port))
if self.port is None:
self.port = driver_conf.kafka_default_port
if not self.hostaddrs:
self.hostaddrs.append("%s:%s" % (driver_conf.kafka_default_host,
driver_conf.kafka_default_port))
def notify_send(self, topic, ctxt, msg, retry):
"""Send messages to Kafka broker.
@@ -215,7 +216,7 @@ class Connection(object):
return
try:
self.kafka_client = kafka.KafkaClient(
"%s:%s" % (self.host, str(self.port)))
self.hostaddrs)
self.producer = kafka.SimpleProducer(self.kafka_client)
except KafkaError as e:
LOG.exception(_LE("Kafka Connection is not available: %s"), e)
@@ -227,7 +228,7 @@
self.kafka_client.ensure_topic_exists(topic)
self.consumer = kafka.KafkaConsumer(
*topics, group_id=group,
bootstrap_servers=["%s:%s" % (self.host, str(self.port))],
bootstrap_servers=self.hostaddrs,
fetch_message_max_bytes=self.fetch_messages_max_bytes)
self._consume_loop_stopped = False
@@ -240,7 +241,7 @@ class OsloKafkaMessage(base.RpcIncomingMessage):
def requeue(self):
LOG.warning(_LW("requeue is not supported"))
def reply(self, reply=None, failure=None, log_failure=True):
def reply(self, reply=None, failure=None):
LOG.warning(_LW("reply is not supported"))
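
Net effect of the kafka changes above: every host in the transport URL is now used, not just the first, with the deprecated kafka_default_* options as a last resort. A standalone sketch of the parsing rule (plain tuples stand in for the driver's URL host objects)::

    DEFAULT_HOST = 'localhost'   # kafka_default_host
    DEFAULT_PORT = 9092          # kafka_default_port

    def parse_hostaddrs(hosts):
        """Turn transport-URL host entries into 'host:port' strings."""
        hostaddrs = []
        for hostname, port in hosts:
            if hostname:
                hostaddrs.append("%s:%s" % (hostname,
                                            port or DEFAULT_PORT))
        if not hostaddrs:
            # No usable hosts in the URL: fall back to the defaults.
            hostaddrs.append("%s:%s" % (DEFAULT_HOST, DEFAULT_PORT))
        return hostaddrs

    # kafka://localhost:1234,localhost2:1234 -> both brokers are used
    print(parse_hostaddrs([('localhost', 1234), ('localhost2', 1234)]))
    # kafka:/// -> ['localhost:9092']
    print(parse_hostaddrs([]))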

oslo_messaging/_drivers/impl_rabbit.py

@@ -261,7 +261,7 @@ class Consumer(object):
def __init__(self, exchange_name, queue_name, routing_key, type, durable,
exchange_auto_delete, queue_auto_delete, callback,
nowait=True, rabbit_ha_queues=None, rabbit_queue_ttl=0):
nowait=False, rabbit_ha_queues=None, rabbit_queue_ttl=0):
"""Init the Publisher class with the exchange_name, routing_key,
type, durable auto_delete
"""
@@ -1024,11 +1024,31 @@ class Connection(object):
if not self.connection.connected:
raise self.connection.recoverable_connection_errors[0]
if self._new_tags:
consume_max_retries = 2
while self._new_tags and consume_max_retries:
for consumer, tag in self._consumers.items():
if tag in self._new_tags:
consumer.consume(tag=tag)
self._new_tags.remove(tag)
try:
consumer.consume(tag=tag)
self._new_tags.remove(tag)
except self.connection.channel_errors as exc:
# NOTE(kbespalov): during the interval between
# a queue declaration and consumer declaration
# the queue can disappear. In this case
# we must redeclare queue and try to re-consume.
# More details is here:
# bugs.launchpad.net/oslo.messaging/+bug/1581148
if exc.code == 404 and consume_max_retries:
consumer.declare(self)
# NOTE(kbespalov): the broker closes a channel
# at any channel error. The py-amqp catches
# this situation and re-open a new channel.
# So, we must re-declare all consumers again.
self._new_tags = set(self._consumers.values())
consume_max_retries -= 1
break
else:
raise
poll_timeout = (self._poll_timeout if timeout is None
else min(timeout, self._poll_timeout))
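
The retry loop above guards a real race: a TTL'd or auto-delete queue can vanish between queue declaration and consumer attach, and the broker then kills the channel with a 404. A simplified, self-contained sketch of the redeclare-and-retry shape (stub classes, not kombu's API)::

    class ChannelError(Exception):
        def __init__(self, code):
            super(ChannelError, self).__init__(code)
            self.code = code

    class Consumer(object):
        """Stub consumer whose queue vanished before the consume."""

        def __init__(self):
            self.declared = 0
            self._missing = True

        def declare(self):
            self.declared += 1
            self._missing = False

        def consume(self):
            if self._missing:
                raise ChannelError(404)  # queue not found

    def start_consuming(consumers, max_retries=2):
        pending = set(consumers)
        while pending and max_retries:
            for consumer in list(pending):
                try:
                    consumer.consume()
                    pending.discard(consumer)
                except ChannelError as exc:
                    if exc.code == 404 and max_retries:
                        # The broker closes the channel on any channel
                        # error, so redeclare and retry the whole set.
                        consumer.declare()
                        pending = set(consumers)
                        max_retries -= 1
                        break
                    raise

    c = Consumer()
    start_consuming([c])
    print(c.declared)  # 1: redeclared once, then consumed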

oslo_messaging/_drivers/impl_zmq.py

@@ -87,7 +87,7 @@ zmq_opts = [
'PUB/SUB always uses proxy.'),
cfg.BoolOpt('use_router_proxy', default=True,
help='Use ROUTER remote proxy for direct methods.'),
help='Use ROUTER remote proxy.'),
cfg.PortOpt('rpc_zmq_min_port',
default=49153,
@@ -182,12 +182,18 @@ class ZmqDriver(base.BaseDriver):
self.get_matchmaker_backend(url),
).driver(self.conf, url=url)
client_cls = zmq_client.ZmqClientProxy
if conf.use_pub_sub and not conf.use_router_proxy:
client_cls = zmq_client.ZmqClientMixDirectPubSub
elif not conf.use_pub_sub and not conf.use_router_proxy:
client_cls = zmq_client.ZmqClientDirect
self.client = LazyDriverItem(
zmq_client.ZmqClient, self.conf, self.matchmaker,
client_cls, self.conf, self.matchmaker,
self.allowed_remote_exmods)
self.notifier = LazyDriverItem(
zmq_client.ZmqClient, self.conf, self.matchmaker,
client_cls, self.conf, self.matchmaker,
self.allowed_remote_exmods)
super(ZmqDriver, self).__init__(conf, url, default_exchange,
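
The driver now chooses among three client classes based on the two proxy options; the proxy client is the default, and the direct variants are picked only when use_router_proxy is off. A tiny sketch of the selection table (class names as strings, for illustration only)::

    def pick_client_cls(use_pub_sub, use_router_proxy):
        """Mirror of the driver's client-class choice."""
        if use_router_proxy:
            return 'ZmqClientProxy'            # everything via proxy
        if use_pub_sub:
            return 'ZmqClientMixDirectPubSub'  # direct + fanout proxy
        return 'ZmqClientDirect'               # direct connections only

    assert pick_client_cls(True, True) == 'ZmqClientProxy'
    assert pick_client_cls(True, False) == 'ZmqClientMixDirectPubSub'
    assert pick_client_cls(False, False) == 'ZmqClientDirect'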

oslo_messaging/_drivers/pika_driver/pika_message.py

@@ -175,13 +175,11 @@ class RpcPikaIncomingMessage(PikaIncomingMessage, base.RpcIncomingMessage):
self.reply_q = properties.reply_to
self.msg_id = properties.correlation_id
def reply(self, reply=None, failure=None, log_failure=True):
def reply(self, reply=None, failure=None):
"""Send back reply to the RPC client
:param reply: Dictionary, reply. In case of exception should be None
:param failure: Tuple, should be a sys.exc_info() tuple.
Should be None if RPC request was successfully processed.
:param log_failure: Boolean, not used in this implementation.
It present here to be compatible with driver API
:return RpcReplyPikaIncomingMessage, message with reply
"""

oslo_messaging/_drivers/zmq_driver/broker/zmq_queue_proxy.py

@@ -73,7 +73,7 @@ class UniversalQueueProxy(object):
if self.conf.use_pub_sub and msg_type in (zmq_names.CAST_FANOUT_TYPE,
zmq_names.NOTIFY_TYPE):
self.pub_publisher.send_request(message)
elif msg_type in zmq_names.DIRECT_TYPES:
else:
self._redirect_message(self.be_router_socket
if socket is self.fe_router_socket
else self.fe_router_socket, message)

oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_proxy.py

@@ -35,6 +35,7 @@ class DealerPublisherProxy(object):
"""Used when publishing to a proxy. """
def __init__(self, conf, matchmaker, socket_to_proxy):
self.conf = conf
self.sockets_manager = zmq_publisher_base.SocketsManager(
conf, matchmaker, zmq.ROUTER, zmq.DEALER)
self.socket = socket_to_proxy
@@ -45,10 +46,17 @@
raise zmq_publisher_base.UnsupportedSendPattern(
request.msg_type)
routing_key = self.routing_table.get_routable_host(request.target) \
if request.msg_type in zmq_names.DIRECT_TYPES else \
zmq_address.target_to_subscribe_filter(request.target)
if self.conf.use_pub_sub:
routing_key = self.routing_table.get_routable_host(request.target) \
if request.msg_type in zmq_names.DIRECT_TYPES else \
zmq_address.target_to_subscribe_filter(request.target)
self._do_send_request(request, routing_key)
else:
routing_keys = self.routing_table.get_all_hosts(request.target)
for routing_key in routing_keys:
self._do_send_request(request, routing_key)
def _do_send_request(self, request, routing_key):
self.socket.send(b'', zmq.SNDMORE)
self.socket.send(six.b(str(request.msg_type)), zmq.SNDMORE)
self.socket.send(six.b(routing_key), zmq.SNDMORE)
@@ -132,6 +140,10 @@ class RoutingTable(object):
self.routing_table = {}
self.routable_hosts = {}
def get_all_hosts(self, target):
self._update_routing_table(target)
return list(self.routable_hosts.get(str(target)) or [])
def get_routable_host(self, target):
self._update_routing_table(target)
hosts_for_target = self.routable_hosts[str(target)]
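
Without pub/sub there is no PUB socket to fan a message out, so the proxy publisher now expands the target into all matched hosts and unicasts one copy per host. A self-contained sketch of the two branches (toy routing table, not the driver's classes)::

    class RoutingTable(object):
        """Toy routing table keyed by target name."""

        def __init__(self, hosts_by_target):
            self._hosts = hosts_by_target

        def get_routable_host(self, target):
            return self._hosts[target][0]

        def get_all_hosts(self, target):
            return list(self._hosts.get(target, []))

    def publish(request, table, use_pub_sub, send):
        if use_pub_sub:
            # Single send: the proxy's PUB socket fans it out.
            key = (table.get_routable_host(request['target'])
                   if request['msg_type'] == 'direct'
                   else 'subscribe/' + request['target'])
            send(request, key)
        else:
            # No PUB/SUB: emulate fanout by unicasting to every host.
            for key in table.get_all_hosts(request['target']):
                send(request, key)

    table = RoutingTable({'svc.topic': ['host-a:1', 'host-b:1']})
    sent = []
    publish({'msg_type': 'fanout', 'target': 'svc.topic'},
            table, use_pub_sub=False, send=lambda r, k: sent.append(k))
    print(sent)  # ['host-a:1', 'host-b:1']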

oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_publisher_base.py

@@ -112,30 +112,37 @@ class SocketsManager(object):
self.socket_to_publishers = None
self.socket_to_routers = None
def _track_socket(self, socket, target):
self.outbound_sockets[str(target)] = (socket, time.time())
def get_hosts(self, target):
return self.matchmaker.get_hosts(
target, zmq_names.socket_type_str(self.listener_type))
@staticmethod
def _key_from_target(target):
return target.topic if target.fanout else str(target)
def _get_hosts_and_connect(self, socket, target):
hosts = self.get_hosts(target)
self._connect_to_hosts(socket, target, hosts)
def _track_socket(self, socket, target):
key = self._key_from_target(target)
self.outbound_sockets[key] = (socket, time.time())
def _connect_to_hosts(self, socket, target, hosts):
for host in hosts:
socket.connect_to_host(host)
self._track_socket(socket, target)
def _check_for_new_hosts(self, target):
socket, tm = self.outbound_sockets[str(target)]
key = self._key_from_target(target)
socket, tm = self.outbound_sockets[key]
if 0 <= self.conf.zmq_target_expire <= time.time() - tm:
self._get_hosts_and_connect(socket, target)
return socket
def get_socket(self, target):
if str(target) in self.outbound_sockets:
key = self._key_from_target(target)
if key in self.outbound_sockets:
socket = self._check_for_new_hosts(target)
else:
socket = zmq_socket.ZmqSocket(self.conf, self.zmq_context,
@@ -143,16 +150,6 @@
self._get_hosts_and_connect(socket, target)
return socket
def get_socket_to_hosts(self, target, hosts):
key = str(target)
if key in self.outbound_sockets:
socket, tm = self.outbound_sockets[key]
else:
socket = zmq_socket.ZmqSocket(self.conf, self.zmq_context,
self.socket_type)
self._connect_to_hosts(socket, target, hosts)
return socket
def get_socket_to_publishers(self):
if self.socket_to_publishers is not None:
return self.socket_to_publishers
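
Keying the socket cache by str(target) gave every fanout variant of a topic its own socket; keying fanout targets by bare topic lets them share one. A small sketch of the new keying rule, with a namedtuple standing in for Target::

    import collections

    Target = collections.namedtuple('Target', 'topic server fanout')

    def key_from_target(target):
        """Fanout targets share one cache entry per topic."""
        return target.topic if target.fanout else str(target)

    cache = {}
    for t in (Target('svc', 'node-1', True),
              Target('svc', 'node-2', True),    # same fanout key 'svc'
              Target('svc', 'node-1', False)):  # distinct direct key
        cache.setdefault(key_from_target(t), object())

    print(len(cache))  # 2: one shared fanout socket, one direct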

oslo_messaging/_drivers/zmq_driver/client/zmq_client.py

@@ -13,6 +13,7 @@
# under the License.
from oslo_messaging._drivers import common
from oslo_messaging._drivers.zmq_driver.client.publishers.dealer \
import zmq_dealer_call_publisher
from oslo_messaging._drivers.zmq_driver.client.publishers.dealer \
@@ -28,45 +29,102 @@ from oslo_messaging._drivers.zmq_driver import zmq_names
zmq = zmq_async.import_zmq()
class ZmqClient(zmq_client_base.ZmqClientBase):
class WrongClientException(common.RPCException):
"""Raised if client type doesn't match configuration"""
class ZmqClientMixDirectPubSub(zmq_client_base.ZmqClientBase):
"""Client for using with direct connections and fanout over proxy:
use_pub_sub = true
use_router_proxy = false
"""
def __init__(self, conf, matchmaker=None, allowed_remote_exmods=None):
if conf.use_router_proxy or not conf.use_pub_sub:
raise WrongClientException()
self.sockets_manager = zmq_publisher_base.SocketsManager(
conf, matchmaker, zmq.ROUTER, zmq.DEALER)
default_publisher = zmq_dealer_publisher.DealerPublisher(
conf, matchmaker)
publisher_to_proxy = zmq_dealer_publisher_proxy.DealerPublisherProxy(
fanout_publisher = zmq_dealer_publisher_proxy.DealerPublisherProxy(
conf, matchmaker, self.sockets_manager.get_socket_to_publishers())
call_publisher = zmq_dealer_publisher_proxy.DealerCallPublisherProxy(
conf, matchmaker, self.sockets_manager) if conf.use_router_proxy \
else zmq_dealer_call_publisher.DealerCallPublisher(
conf, matchmaker, self.sockets_manager)
super(ZmqClientMixDirectPubSub, self).__init__(
conf, matchmaker, allowed_remote_exmods,
publishers={
zmq_names.CALL_TYPE:
zmq_dealer_call_publisher.DealerCallPublisher(
conf, matchmaker, self.sockets_manager),
zmq_names.CAST_FANOUT_TYPE: fanout_publisher,
zmq_names.NOTIFY_TYPE: fanout_publisher,
"default": zmq_dealer_publisher.DealerPublisherAsync(
conf, matchmaker)
}
)
class ZmqClientDirect(zmq_client_base.ZmqClientBase):
"""This kind of client (publishers combination) is to be used for
direct connections only:
use_pub_sub = false
use_router_proxy = false
"""
cast_publisher = publisher_to_proxy if conf.use_router_proxy \
else zmq_dealer_publisher.DealerPublisherAsync(
conf, matchmaker)
def __init__(self, conf, matchmaker=None, allowed_remote_exmods=None):
fanout_publisher = publisher_to_proxy \
if conf.use_pub_sub else default_publisher
if conf.use_pub_sub or conf.use_router_proxy:
raise WrongClientException()
super(ZmqClient, self).__init__(
self.sockets_manager = zmq_publisher_base.SocketsManager(
conf, matchmaker, zmq.ROUTER, zmq.DEALER)
super(ZmqClientDirect, self).__init__(
conf, matchmaker, allowed_remote_exmods,
publishers={
zmq_names.CALL_TYPE: call_publisher,
zmq_names.CALL_TYPE:
zmq_dealer_call_publisher.DealerCallPublisher(
conf, matchmaker, self.sockets_manager),
zmq_names.CAST_TYPE: cast_publisher,
"default": zmq_dealer_publisher.DealerPublisher(
conf, matchmaker)
}
)
# Here use DealerPublisherLight for sending request to proxy
# which finally uses PubPublisher to send fanout in case of
# 'use_pub_sub' option configured.
zmq_names.CAST_FANOUT_TYPE: fanout_publisher,
zmq_names.NOTIFY_TYPE: fanout_publisher,
class ZmqClientProxy(zmq_client_base.ZmqClientBase):
"""Client for using with proxy:
use_pub_sub = true
use_router_proxy = true
or
use_pub_sub = false
use_router_proxy = true
"""
def __init__(self, conf, matchmaker=None, allowed_remote_exmods=None):
if not conf.use_router_proxy:
raise WrongClientException()
self.sockets_manager = zmq_publisher_base.SocketsManager(
conf, matchmaker, zmq.ROUTER, zmq.DEALER)
super(ZmqClientProxy, self).__init__(
conf, matchmaker, allowed_remote_exmods,
publishers={
zmq_names.CALL_TYPE:
zmq_dealer_publisher_proxy.DealerCallPublisherProxy(
conf, matchmaker, self.sockets_manager),
"default": default_publisher
"default": zmq_dealer_publisher_proxy.DealerPublisherProxy(
conf, matchmaker,
self.sockets_manager.get_socket_to_publishers())
}
)
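
All three client classes above are, in essence, different publisher tables handed to the shared base, which dispatches each request by message type and falls back to the "default" entry. A minimal stand-in sketch (illustrative names, not the real classes)::

    class Publisher(object):
        def __init__(self, name):
            self.name = name

        def send(self, request):
            return '%s sent %s' % (self.name, request['msg_type'])

    class ClientBase(object):
        """Route each request to the publisher for its type."""

        def __init__(self, publishers):
            self._publishers = publishers

        def send(self, request):
            pub = self._publishers.get(request['msg_type'],
                                       self._publishers['default'])
            return pub.send(request)

    client = ClientBase({'call': Publisher('call-publisher'),
                         'default': Publisher('dealer-publisher')})
    print(client.send({'msg_type': 'call'}))  # call-publisher sent call
    print(client.send({'msg_type': 'cast'}))  # dealer-publisher sent cast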

oslo_messaging/_drivers/zmq_driver/client/zmq_response.py

@@ -18,8 +18,7 @@ from oslo_messaging._drivers.zmq_driver import zmq_names
class Response(object):
def __init__(self, id=None, type=None, message_id=None,
reply_id=None, reply_body=None,
failure=None, log_failure=None):
reply_id=None, reply_body=None, failure=None):
self._id = id
self._type = type
@@ -27,7 +26,6 @@
self._reply_id = reply_id
self._reply_body = reply_body
self._failure = failure
self._log_failure = log_failure
@property
def id_(self):
@@ -53,18 +51,13 @@ class Response(object):
def failure(self):
return self._failure
@property
def log_failure(self):
return self._log_failure
def to_dict(self):
return {zmq_names.FIELD_ID: self._id,
zmq_names.FIELD_TYPE: self._type,
zmq_names.FIELD_MSG_ID: self._message_id,
zmq_names.FIELD_REPLY_ID: self._reply_id,
zmq_names.FIELD_REPLY: self._reply_body,
zmq_names.FIELD_FAILURE: self._failure,
zmq_names.FIELD_LOG_FAILURE: self._log_failure}
zmq_names.FIELD_FAILURE: self._failure}
def __str__(self):
return str(self.to_dict())

oslo_messaging/_drivers/zmq_driver/matchmaker/matchmaker_redis.py

@@ -190,7 +190,7 @@ class RedisMatchMaker(base.MatchMakerBase):
key = zmq_address.target_to_key(target, listener_type)
hosts.extend(self._get_hosts_by_key(key))
if not hosts and target.topic and target.server:
if (not hosts or target.fanout) and target.topic and target.server:
key = zmq_address.prefix_str(target.topic, listener_type)
hosts.extend(self._get_hosts_by_key(key))
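
The widened condition means a fanout target now always also matches the bare-topic key, so fanout reaches every server on the topic rather than only the named one. A toy lookup mirroring that rule (dict in place of redis, duplicates collapsed for readability)::

    def get_hosts(directory, target):
        hosts = list(directory.get((target['topic'],
                                    target['server']), []))
        if ((not hosts or target['fanout'])
                and target['topic'] and target['server']):
            for host in directory.get((target['topic'], None), []):
                if host not in hosts:
                    hosts.append(host)
        return hosts

    directory = {('svc', 'node-1'): ['node-1:5555'],
                 ('svc', None): ['node-1:5555', 'node-2:5555']}

    print(get_hosts(directory, {'topic': 'svc', 'server': 'node-1',
                                'fanout': False}))  # ['node-1:5555']
    print(get_hosts(directory, {'topic': 'svc', 'server': 'node-1',
                                'fanout': True}))   # both hosts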

oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_dealer_consumer.py

@@ -37,7 +37,7 @@ class DealerIncomingMessage(base.RpcIncomingMessage):
def __init__(self, context, message):
super(DealerIncomingMessage, self).__init__(context, message)
def reply(self, reply=None, failure=None, log_failure=True):
def reply(self, reply=None, failure=None):
"""Reply is not needed for non-call messages"""
def acknowledge(self):
@@ -55,16 +55,14 @@ class DealerIncomingRequest(base.RpcIncomingMessage):
self.reply_id = reply_id
self.message_id = message_id
def reply(self, reply=None, failure=None, log_failure=True):
def reply(self, reply=None, failure=None):
if failure is not None:
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
failure = rpc_common.serialize_remote_exception(failure)
response = zmq_response.Response(type=zmq_names.REPLY_TYPE,
message_id=self.message_id,
reply_id=self.reply_id,
reply_body=reply,
failure=failure,
log_failure=log_failure)
failure=failure)
LOG.debug("Replying %s", self.message_id)

oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_pull_consumer.py

@@ -31,7 +31,7 @@ class PullIncomingMessage(base.RpcIncomingMessage):
def __init__(self, context, message):
super(PullIncomingMessage, self).__init__(context, message)
def reply(self, reply=None, failure=None, log_failure=True):
def reply(self, reply=None, failure=None):
"""Reply is not needed for non-call messages."""
def acknowledge(self):

oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_router_consumer.py

@@ -37,7 +37,7 @@ class RouterIncomingMessage(base.RpcIncomingMessage):
self.msg_id = msg_id
self.message = message
def reply(self, reply=None, failure=None, log_failure=True):
def reply(self, reply=None, failure=None):
"""Reply is not needed for non-call messages"""
def acknowledge(self):

oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_sub_consumer.py

@@ -34,7 +34,7 @@ class SubIncomingMessage(base.RpcIncomingMessage):
def __init__(self, context, message):
super(SubIncomingMessage, self).__init__(context, message)
def reply(self, reply=None, failure=None, log_failure=True):
def reply(self, reply=None, failure=None):
"""Reply is not needed for non-call messages."""
def acknowledge(self):

oslo_messaging/_drivers/zmq_driver/server/zmq_incoming_message.py

@@ -39,16 +39,14 @@ class ZmqIncomingRequest(base.RpcIncomingMessage):
self.received = None
self.poller = poller
def reply(self, reply=None, failure=None, log_failure=True):
def reply(self, reply=None, failure=None):
if failure is not None:
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
failure = rpc_common.serialize_remote_exception(failure)
response = zmq_response.Response(type=zmq_names.REPLY_TYPE,
message_id=self.request.message_id,
reply_id=self.reply_id,
reply_body=reply,
failure=failure,
log_failure=log_failure)
failure=failure)
LOG.debug("Replying %s", (str(self.request.message_id)))

oslo_messaging/_drivers/zmq_driver/zmq_names.py

@@ -20,7 +20,6 @@ zmq = zmq_async.import_zmq()
FIELD_TYPE = 'type'
FIELD_FAILURE = 'failure'
FIELD_REPLY = 'reply'
FIELD_LOG_FAILURE = 'log_failure'
FIELD_ID = 'id'
FIELD_MSG_ID = 'message_id'
FIELD_MSG_TYPE = 'msg_type'

oslo_messaging/conffixture.py

@@ -55,7 +55,7 @@ class ConfFixture(fixtures.Fixture):
'oslo_messaging._drivers.amqp', 'amqp_opts',
'oslo_messaging_qpid')
_import_opts(self.conf,
'oslo_messaging._drivers.protocols.amqp.opts',
'oslo_messaging._drivers.amqp1_driver.opts',
'amqp1_opts', 'oslo_messaging_amqp')
_import_opts(self.conf,
'oslo_messaging._drivers.impl_zmq', 'zmq_opts')

oslo_messaging/opts.py

@@ -21,12 +21,12 @@ import copy
import itertools
from oslo_messaging._drivers import amqp
from oslo_messaging._drivers.amqp1_driver import opts as amqp_opts
from oslo_messaging._drivers import base as drivers_base
from oslo_messaging._drivers import impl_pika
from oslo_messaging._drivers import impl_rabbit
from oslo_messaging._drivers import impl_zmq
from oslo_messaging._drivers.pika_driver import pika_connection_factory
from oslo_messaging._drivers.protocols.amqp import opts as amqp_opts
from oslo_messaging._drivers.zmq_driver.matchmaker import matchmaker_redis
from oslo_messaging.notify import notifier
from oslo_messaging.rpc import client

oslo_messaging/rpc/client.py

@@ -383,7 +383,7 @@ class RPCClient(_BaseCallContext):
:type kwargs: dict
:raises: MessageDeliveryFailure
"""
super(RPCClient, self).cast(ctxt, method, **kwargs)
self.prepare().cast(ctxt, method, **kwargs)
def call(self, ctxt, method, **kwargs):
"""Invoke a method and wait for a reply.
@@ -425,8 +425,8 @@
:type kwargs: dict
:raises: MessagingTimeout, RemoteError, MessageDeliveryFailure
"""
return super(RPCClient, self).call(ctxt, method, **kwargs)
return self.prepare().call(ctxt, method, **kwargs)
def can_send_version(self, version=_marker):
"""Check to see if a version is compatible with the version cap."""
return super(RPCClient, self).can_send_version(version)
return self.prepare(version=version).can_send_version()
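
Routing the bare cast()/call() through prepare() gives every send a single code path, and can_send_version() now checks the requested version exactly as a prepared call would. A hedged usage sketch against the public API (fake transport; version numbers chosen for illustration)::

    import oslo_messaging
    from oslo_config import cfg

    transport = oslo_messaging.get_transport(cfg.CONF, 'fake:///')
    target = oslo_messaging.Target(topic='test', version='2.0')
    client = oslo_messaging.RPCClient(transport, target,
                                      version_cap='2.0')

    # can_send_version() delegates to prepare(version=...), so the
    # requested version is checked against the cap the same way a
    # real prepared call would be:
    print(client.can_send_version('2.0'))  # True: within the cap
    print(client.can_send_version('2.5'))  # False: above the cap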

oslo_messaging/rpc/server.py

@@ -132,15 +132,14 @@ class RPCServer(msg_server.MessageHandlingServer):
try:
res = self.dispatcher.dispatch(message)
except rpc_dispatcher.ExpectedException as e:
LOG.debug(u'Expected exception during message handling (%s)',
e.exc_info[1])
failure = e.exc_info
except Exception as e:
LOG.debug(u'Expected exception during message handling (%s)', e)
except Exception:
# current sys.exc_info() content can be overriden
# by another exception raise by a log handler during
# by another exception raised by a log handler during
# LOG.exception(). So keep a copy and delete it later.
failure = sys.exc_info()
LOG.exception(_LE('Exception during handling message'))
LOG.exception(_LE('Exception during message handling'))
try:
if failure is None:
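
The corrected comment above is the whole trick: LOG.exception() can itself raise inside a log handler and clobber sys.exc_info(), so the dispatcher snapshots the tuple first. The pattern in isolation::

    import logging
    import sys

    LOG = logging.getLogger(__name__)

    def dispatch(handler):
        failure = None
        try:
            handler()
        except Exception:
            # Snapshot exc_info before logging: a raising log handler
            # would otherwise overwrite the exception context.
            failure = sys.exc_info()
            LOG.exception('Exception during message handling')
        return failure

    def boom():
        raise ValueError('boom')

    failure = dispatch(boom)
    print(failure[0].__name__)  # ValueError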

oslo_messaging/server.py

@@ -38,7 +38,7 @@ import six
from stevedore import driver
from oslo_messaging._drivers import base as driver_base
from oslo_messaging._i18n import _LW
from oslo_messaging._i18n import _LW, _LI
from oslo_messaging import exceptions
LOG = logging.getLogger(__name__)
@@ -313,7 +313,8 @@ class MessageHandlingServer(service.ServiceBase, _OrderedTaskRunner):
The executor parameter controls how incoming messages will be received
and dispatched. By default, the most simple executor is used - the
blocking executor.
blocking executor. It handles only one message at once. It's
recommended to use threading or eventlet.
:param transport: the messaging transport
:type transport: Transport
@@ -330,6 +331,14 @@
self.transport = transport
self.dispatcher = dispatcher
self.executor_type = executor
if self.executor_type == 'blocking':
# NOTE(sileht): We keep blocking as default to not enforce the
# application to use threading or eventlet. Because application
# have to be preprepared accordingly for each one (monkeypatching,
# threadsafe, ...)
LOG.info(_LI("blocking executor handles only one message at "
"once. threading or eventlet executor is "
"recommended."))
self.listener = None
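
The new log message only fires for the default executor; choosing a concurrent one is a constructor argument. A hedged usage sketch (fake transport and a placeholder endpoint; the eventlet executor would additionally need monkey-patching)::

    import oslo_messaging
    from oslo_config import cfg

    class TestEndpoint(object):
        def ping(self, ctxt, arg):
            return arg

    transport = oslo_messaging.get_transport(cfg.CONF, 'fake:///')
    target = oslo_messaging.Target(topic='test', server='server1')

    # 'blocking' stays the default but handles one message at a time;
    # 'threading' (or 'eventlet') dispatches messages concurrently.
    server = oslo_messaging.get_rpc_server(transport, target,
                                           [TestEndpoint()],
                                           executor='threading')
    server.start()
    server.stop()
    server.wait()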

oslo_messaging/tests/drivers/test_amqp_driver.py

@@ -36,7 +36,7 @@ from oslo_messaging.tests import utils as test_utils
# are available in the base repos for all supported platforms.
pyngus = importutils.try_import("pyngus")
if pyngus:
from oslo_messaging._drivers.protocols.amqp import driver as amqp_driver
import oslo_messaging._drivers.impl_amqp1 as amqp_driver
# The Cyrus-based SASL tests can only be run if the installed version of proton
# has been built with Cyrus SASL support.
@@ -512,6 +512,8 @@ class TestFailover(test_utils.BaseTestCase):
def setUp(self):
super(TestFailover, self).setUp()
self._brokers = [FakeBroker(), FakeBroker()]
self._primary = 0
self._backup = 1
hosts = []
for broker in self._brokers:
hosts.append(oslo_messaging.TransportHost(hostname=broker.host,
@@ -526,8 +528,10 @@
if broker.isAlive():
broker.stop()
def _failover(self, fail_brokers):
def _failover(self, fail_broker):
self._brokers[0].start()
self._brokers[1].start()
# self.config(trace=True, group="oslo_messaging_amqp")
driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
@@ -535,12 +539,17 @@
listener = _ListenerThread(
driver.listen(target, None, None)._poll_style_listener, 2)
# wait for listener links to come up
# wait for listener links to come up on either broker
# 4 == 3 links per listener + 1 for the global reply queue
predicate = lambda: self._brokers[0].sender_link_count == 4
predicate = lambda: ((self._brokers[0].sender_link_count == 4) or
(self._brokers[1].sender_link_count == 4))
_wait_until(predicate, 30)
self.assertTrue(predicate())
if self._brokers[1].sender_link_count == 4:
self._primary = 1
self._backup = 0
rc = driver.send(target, {"context": "whatever"},
{"method": "echo", "id": "echo-1"},
wait_for_reply=True,
@@ -549,15 +558,15 @@
self.assertEqual(rc.get('correlation-id'), 'echo-1')
# 1 request msg, 1 response:
self.assertEqual(self._brokers[0].topic_count, 1)
self.assertEqual(self._brokers[0].direct_count, 1)
self.assertEqual(self._brokers[self._primary].topic_count, 1)
self.assertEqual(self._brokers[self._primary].direct_count, 1)
# invoke failover method
fail_brokers(self._brokers[0], self._brokers[1])
fail_broker(self._brokers[self._primary])
# wait for listener links to re-establish on broker 1
# 4 = 3 links per listener + 1 for the global reply queue
predicate = lambda: self._brokers[1].sender_link_count == 4
predicate = lambda: self._brokers[self._backup].sender_link_count == 4
_wait_until(predicate, 30)
self.assertTrue(predicate())
@@ -570,44 +579,41 @@
self.assertEqual(rc.get('correlation-id'), 'echo-2')
# 1 request msg, 1 response:
self.assertEqual(self._brokers[1].topic_count, 1)
self.assertEqual(self._brokers[1].direct_count, 1)
self.assertEqual(self._brokers[self._backup].topic_count, 1)
self.assertEqual(self._brokers[self._backup].direct_count, 1)
listener.join(timeout=30)
self.assertFalse(listener.isAlive())
# note: stopping the broker first tests cleaning up driver without a
# connection active
self._brokers[1].stop()
self._brokers[self._backup].stop()
driver.cleanup()
def test_broker_crash(self):
"""Simulate a failure of one broker."""
def _meth(broker0, broker1):
# fail broker 0 and start broker 1:
broker0.stop()
def _meth(broker):
# fail broker:
broker.stop()
time.sleep(0.5)
broker1.start()
self._failover(_meth)
def test_broker_shutdown(self):
"""Simulate a normal shutdown of a broker."""
def _meth(broker0, broker1):
broker0.stop(clean=True)
def _meth(broker):
broker.stop(clean=True)
time.sleep(0.5)
broker1.start()
self._failover(_meth)
def test_heartbeat_failover(self):
"""Simulate broker heartbeat timeout."""
def _meth(broker0, broker1):
# keep alive heartbeat from broker 0 will stop, which should force
# failover to broker 1 in about two seconds
broker0.pause()
broker1.start()
def _meth(broker):
# keep alive heartbeat from primary broker will stop, which should
# force failover to backup broker in about two seconds
broker.pause()
self.config(idle_timeout=2, group="oslo_messaging_amqp")
self._failover(_meth)
self._brokers[0].stop()
self._brokers[self._primary].stop()
def test_listener_failover(self):
"""Verify that Listeners sharing the same topic are re-established

oslo_messaging/tests/drivers/test_impl_kafka.py

@@ -57,13 +57,17 @@ class TestKafkaTransportURL(test_utils.BaseTestCase):
scenarios = [
('none', dict(url=None,
expected=[dict(host='localhost', port=9092)])),
expected=dict(hostaddrs=['localhost:9092']))),
('empty', dict(url='kafka:///',
expected=[dict(host='localhost', port=9092)])),
expected=dict(hostaddrs=['localhost:9092']))),
('host', dict(url='kafka://127.0.0.1',
expected=[dict(host='127.0.0.1', port=9092)])),
expected=dict(hostaddrs=['127.0.0.1:9092']))),
('port', dict(url='kafka://localhost:1234',
expected=[dict(host='localhost', port=1234)])),
expected=dict(hostaddrs=['localhost:1234']))),
('two', dict(url='kafka://localhost:1234,localhost2:1234',
expected=dict(hostaddrs=['localhost:1234',
'localhost2:1234']))),
]
def setUp(self):
@@ -76,8 +80,7 @@ class TestKafkaTransportURL(test_utils.BaseTestCase):
driver = transport._driver
conn = driver._get_connection(kafka_driver.PURPOSE_SEND)
self.assertEqual(self.expected[0]['host'], conn.host)
self.assertEqual(self.expected[0]['port'], conn.port)
self.assertEqual(self.expected['hostaddrs'], conn.hostaddrs)
class TestKafkaDriver(test_utils.BaseTestCase):

oslo_messaging/tests/drivers/test_impl_rabbit.py

@@ -321,6 +321,51 @@ class TestRabbitConsume(test_utils.BaseTestCase):
self.assertEqual(0, int(deadline - time.time()))
def test_consume_from_missing_queue(self):
transport = oslo_messaging.get_transport(self.conf, 'kombu+memory://')
self.addCleanup(transport.cleanup)
with transport._driver._get_connection(
driver_common.PURPOSE_LISTEN) as conn:
with mock.patch('kombu.Queue.consume') as consume, mock.patch(
'kombu.Queue.declare') as declare:
conn.declare_topic_consumer(exchange_name='test',
topic='test',
callback=lambda msg: True)
import amqp
consume.side_effect = [amqp.NotFound, None]
conn.connection.connection.recoverable_connection_errors = ()
conn.connection.connection.recoverable_channel_errors = ()
self.assertEqual(1, declare.call_count)
conn.connection.connection.transport.drain_events = mock.Mock()
# Ensure that a queue will be re-declared if the consume method
# of kombu.Queue raise amqp.NotFound
conn.consume()
self.assertEqual(2, declare.call_count)
def test_consume_from_missing_queue_with_io_error_on_redeclaration(self):
transport = oslo_messaging.get_transport(self.conf, 'kombu+memory://')
self.addCleanup(transport.cleanup)
with transport._driver._get_connection(
driver_common.PURPOSE_LISTEN) as conn:
with mock.patch('kombu.Queue.consume') as consume, mock.patch(
'kombu.Queue.declare') as declare:
conn.declare_topic_consumer(exchange_name='test',
topic='test',
callback=lambda msg: True)
import amqp
consume.side_effect = [amqp.NotFound, None]
declare.side_effect = [IOError, None]
conn.connection.connection.recoverable_connection_errors = (
IOError,)
conn.connection.connection.recoverable_channel_errors = ()
self.assertEqual(1, declare.call_count)
conn.connection.connection.transport.drain_events = mock.Mock()
# Ensure that a queue will be re-declared after
# 'queue not found' exception despite on connection error.
conn.consume()
self.assertEqual(3, declare.call_count)
def test_connection_ack_have_disconnected_kombu_connection(self):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
@@ -451,14 +496,6 @@ class TestSendReceive(test_utils.BaseTestCase):
senders = []
replies = []
msgs = []
errors = []
def stub_error(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
a = a[0]
errors.append(str(msg) % a)
self.stubs.Set(driver_common.LOG, 'error', stub_error)
def send_and_wait_for_reply(i):
try:
@@ -500,8 +537,7 @@
raise ZeroDivisionError
except Exception:
failure = sys.exc_info()
msgs[i].reply(failure=failure,
log_failure=not self.expected)
msgs[i].reply(failure=failure)
elif self.rx_id:
msgs[i].reply({'rx_id': i})
else:
@@ -519,11 +555,6 @@
else:
self.assertEqual(self.reply, reply)
if not self.timeout and self.failure and not self.expected:
self.assertTrue(len(errors) > 0, errors)
else:
self.assertEqual(0, len(errors), errors)
TestSendReceive.generate_scenarios()

oslo_messaging/tests/rpc/test_server.py

@@ -21,6 +21,7 @@ import testscenarios
import mock
import oslo_messaging
from oslo_messaging.rpc import server as rpc_server_module
from oslo_messaging import server as server_module
from oslo_messaging.tests import utils as test_utils
@@ -326,6 +327,22 @@ class TestRPCServer(test_utils.BaseTestCase, ServerSetupMixin):
def ping(self, ctxt, arg):
raise ValueError(arg)
debugs = []
errors = []
def stub_debug(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):