Port to oslo.messaging

Move from oslo RPC to oslo.messaging.

Implements: blueprint oslo-messaging
Co-Authored-By: sdake@redhat.com
Change-Id: I2d222c248dd2cd405b8ec35c4c8198ed001fb69f

parent 91fddd5c45
commit 9090b988e6
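
For reviewers unfamiliar with oslo.messaging, the overall shape of the port is that the incubator RpcProxy / rpc.call pattern is replaced by Target and RPCClient objects built on a shared transport. A minimal sketch of that pattern, for orientation only (the topic name and the commented call are illustrative, not code from this change):

    import oslo.messaging
    from oslo.config import cfg

    # The transport is built from the rpc_backend/transport_url options.
    transport = oslo.messaging.get_transport(cfg.CONF)

    # A Target names where messages go; the server side registers the same one.
    target = oslo.messaging.Target(topic='engine', version='1.1')
    client = oslo.messaging.RPCClient(transport, target)

    # With a broker available and ctxt a request context:
    # result = client.call(ctxt, 'list_stacks')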
@@ -33,6 +33,7 @@ if os.path.exists(os.path.join(possible_topdir, 'heat', '__init__.py')):
 from oslo.config import cfg

 from heat.common import config
+from heat.common import messaging
 from heat.common import notify
 from heat.common import wsgi
 from heat.openstack.common import gettextutils
@@ -52,6 +53,7 @@ if __name__ == '__main__':
         'eventlet.wsgi.server=WARN',
     ]
     logging.setup('heat')
+    messaging.setup()

     app = config.load_paste_app()

@@ -35,6 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, 'heat', '__init__.py')):
 from oslo.config import cfg

 from heat.common import config
+from heat.common import messaging
 from heat.common import notify
 from heat.common import wsgi
 from heat.openstack.common import gettextutils
@@ -54,6 +55,7 @@ if __name__ == '__main__':
         'eventlet.wsgi.server=WARN',
     ]
     logging.setup('heat')
+    messaging.setup()

     app = config.load_paste_app()

@@ -35,6 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, 'heat', '__init__.py')):
 from oslo.config import cfg

 from heat.common import config
+from heat.common import messaging
 from heat.common import notify
 from heat.common import wsgi
 from heat.openstack.common import gettextutils
@@ -54,6 +55,7 @@ if __name__ == '__main__':
         'eventlet.wsgi.server=WARN',
     ]
     logging.setup('heat')
+    messaging.setup()

     app = config.load_paste_app()

@@ -34,6 +34,7 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'heat', '__init__.py')):

 from oslo.config import cfg

+from heat.common import messaging
 from heat.common import notify
 from heat.openstack.common import gettextutils
 from heat.openstack.common import log as logging
@@ -55,6 +56,7 @@ if __name__ == '__main__':
         'eventlet.wsgi.server=WARN',
     ]
     logging.setup('heat')
+    messaging.setup()

     from heat.engine import service as engine

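All of the entry points above initialise the messaging layer the same way: once per process, right after logging is configured and before the WSGI application or engine service is created. A rough sketch of that startup order, not a verbatim copy of any single binary above:

    from heat.common import config
    from heat.common import messaging
    from heat.openstack.common import log as logging

    logging.setup('heat')
    messaging.setup()   # builds the process-wide transport and notifier
    app = config.load_paste_app()
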
@@ -139,6 +139,200 @@
 #max_json_body_size=1048576


+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call. (list value)
+#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+#rabbit_host=localhost
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+#rabbit_port=5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+#rabbit_userid=guest
+
+# The RabbitMQ password. (string value)
+#rabbit_password=guest
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=heat
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+#rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
 #
 # Options defined in heat.api.middleware.ssl
 #
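
The block above is the sample documentation that the oslo.messaging option definitions generate; operators override the values in heat.conf. As a quick illustration of how the discrete driver options relate to the transport_url alternative documented near the end of the block (host name and credentials below are made-up examples), the same transport can be built either way:

    import oslo.messaging
    from oslo.config import cfg

    # Built from rpc_backend, rabbit_host, rabbit_userid, ... in heat.conf
    transport = oslo.messaging.get_transport(cfg.CONF)

    # Or from a single URL that overrides those options
    transport = oslo.messaging.get_transport(
        cfg.CONF, url='rabbit://heat:secret@10.0.0.5:5672/')
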
@@ -176,6 +370,23 @@
 #cloud_backend=heat.engine.clients.OpenStackClients


+#
+# Options defined in heat.engine.notification
+#
+
+# Default notification level for outgoing notifications
+# (string value)
+#default_notification_level=INFO
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=<None>
+
+# List of drivers to send notifications (DEPRECATED) (multi
+# valued)
+#list_notifier_drivers=<None>
+
+
 #
 # Options defined in heat.engine.resources.loadbalancer
 #
@@ -305,39 +516,6 @@
 #syslog_log_facility=LOG_USER


-#
-# Options defined in heat.openstack.common.notifier.api
-#
-
-# Driver or drivers to handle sending notifications (multi
-# valued)
-#notification_driver=
-
-# Default notification level for outgoing notifications
-# (string value)
-#default_notification_level=INFO
-
-# Default publisher_id for outgoing notifications (string
-# value)
-#default_publisher_id=<None>
-
-
-#
-# Options defined in heat.openstack.common.notifier.list_notifier
-#
-
-# List of drivers to send notifications (multi valued)
-#list_notifier_drivers=heat.openstack.common.notifier.no_op_notifier
-
-
-#
-# Options defined in heat.openstack.common.notifier.rpc_notifier
-#
-
-# AMQP topic used for OpenStack notifications (list value)
-#notification_topics=notifications
-
-
 #
 # Options defined in heat.openstack.common.policy
 #
@@ -350,200 +528,6 @@
 #policy_default_rule=default


-#
-# Options defined in heat.openstack.common.rpc
-#
-
-# The messaging module to use, defaults to kombu. (string
-# value)
-#rpc_backend=heat.openstack.common.rpc.impl_kombu
-
-# Size of RPC thread pool (integer value)
-#rpc_thread_pool_size=64
-
-# Size of RPC connection pool (integer value)
-#rpc_conn_pool_size=30
-
-# Seconds to wait for a response from call or multicall
-# (integer value)
-#rpc_response_timeout=60
-
-# Seconds to wait before a cast expires (TTL). Only supported
-# by impl_zmq. (integer value)
-#rpc_cast_timeout=30
-
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call. (list value)
-#allowed_rpc_exception_modules=nova.exception,cinder.exception,exceptions
-
-# If passed, use a fake RabbitMQ provider (boolean value)
-#fake_rabbit=false
-
-# AMQP exchange to connect to if using RabbitMQ or Qpid
-# (string value)
-#control_exchange=heat
-
-
-#
-# Options defined in heat.openstack.common.rpc.amqp
-#
-
-# Use durable queues in amqp. (boolean value)
-# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
-#amqp_durable_queues=false
-
-# Auto-delete queues in amqp. (boolean value)
-#amqp_auto_delete=false
-
-
-#
-# Options defined in heat.openstack.common.rpc.impl_kombu
-#
-
-# If SSL is enabled, the SSL version to use. Valid values are
-# TLSv1, SSLv23 and SSLv3. SSLv2 might be available on some
-# distributions. (string value)
-#kombu_ssl_version=
-
-# SSL key file (valid only if SSL enabled) (string value)
-#kombu_ssl_keyfile=
-
-# SSL cert file (valid only if SSL enabled) (string value)
-#kombu_ssl_certfile=
-
-# SSL certification authority file (valid only if SSL enabled)
-# (string value)
-#kombu_ssl_ca_certs=
-
-# How long to wait before reconnecting in response to an AMQP
-# consumer cancel notification. (floating point value)
-#kombu_reconnect_delay=1.0
-
-# The RabbitMQ broker address where a single node is used
-# (string value)
-#rabbit_host=localhost
-
-# The RabbitMQ broker port where a single node is used
-# (integer value)
-#rabbit_port=5672
-
-# RabbitMQ HA cluster host:port pairs (list value)
-#rabbit_hosts=$rabbit_host:$rabbit_port
-
-# Connect over SSL for RabbitMQ (boolean value)
-#rabbit_use_ssl=false
-
-# The RabbitMQ userid (string value)
-#rabbit_userid=guest
-
-# The RabbitMQ password (string value)
-#rabbit_password=guest
-
-# The RabbitMQ virtual host (string value)
-#rabbit_virtual_host=/
-
-# How frequently to retry connecting with RabbitMQ (integer
-# value)
-#rabbit_retry_interval=1
-
-# How long to backoff for between retries when connecting to
-# RabbitMQ (integer value)
-#rabbit_retry_backoff=2
-
-# Maximum number of RabbitMQ connection retries. Default is 0
-# (infinite retry count) (integer value)
-#rabbit_max_retries=0
-
-# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
-# this option, you must wipe the RabbitMQ database. (boolean
-# value)
-#rabbit_ha_queues=false
-
-
-#
-# Options defined in heat.openstack.common.rpc.impl_qpid
-#
-
-# Qpid broker hostname (string value)
-#qpid_hostname=localhost
-
-# Qpid broker port (integer value)
-#qpid_port=5672
-
-# Qpid HA cluster host:port pairs (list value)
-#qpid_hosts=$qpid_hostname:$qpid_port
-
-# Username for qpid connection (string value)
-#qpid_username=
-
-# Password for qpid connection (string value)
-#qpid_password=
-
-# Space separated list of SASL mechanisms to use for auth
-# (string value)
-#qpid_sasl_mechanisms=
-
-# Seconds between connection keepalive heartbeats (integer
-# value)
-#qpid_heartbeat=60
-
-# Transport to use, either 'tcp' or 'ssl' (string value)
-#qpid_protocol=tcp
-
-# Disable Nagle algorithm (boolean value)
-#qpid_tcp_nodelay=true
-
-# The qpid topology version to use. Version 1 is what was
-# originally used by impl_qpid. Version 2 includes some
-# backwards-incompatible changes that allow broker federation
-# to work. Users should update to version 2 when they are
-# able to take everything down, as it requires a clean break.
-# (integer value)
-#qpid_topology_version=1
-
-
-#
-# Options defined in heat.openstack.common.rpc.impl_zmq
-#
-
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet
-# interface, or IP. The "host" option should point or resolve
-# to this address. (string value)
-#rpc_zmq_bind_address=*
-
-# MatchMaker driver (string value)
-#rpc_zmq_matchmaker=heat.openstack.common.rpc.matchmaker.MatchMakerLocalhost
-
-# ZeroMQ receiver listening port (integer value)
-#rpc_zmq_port=9501
-
-# Number of ZeroMQ contexts, defaults to 1 (integer value)
-#rpc_zmq_contexts=1
-
-# Maximum number of ingress messages to locally buffer per
-# topic. Default is unlimited. (integer value)
-#rpc_zmq_topic_backlog=<None>
-
-# Directory for holding IPC sockets (string value)
-#rpc_zmq_ipc_dir=/var/run/openstack
-
-# Name of this node. Must be a valid hostname, FQDN, or IP
-# address. Must match "host" option, if running Nova. (string
-# value)
-#rpc_zmq_host=heat
-
-
-#
-# Options defined in heat.openstack.common.rpc.matchmaker
-#
-
-# Heartbeat frequency (integer value)
-#matchmaker_heartbeat_freq=300
-
-# Heartbeat time-to-live. (integer value)
-#matchmaker_heartbeat_ttl=600
-
-
 [auth_password]

 #
@@ -1226,29 +1210,13 @@
 #hash_algorithms=md5


-[matchmaker_redis]
-
-#
-# Options defined in heat.openstack.common.rpc.matchmaker_redis
-#
-
-# Host to locate redis (string value)
-#host=127.0.0.1
-
-# Use this port to connect to redis host. (integer value)
-#port=6379
-
-# Password for Redis server. (optional) (string value)
-#password=<None>
-
-
 [matchmaker_ring]

 #
-# Options defined in heat.openstack.common.rpc.matchmaker_ring
+# Options defined in oslo.messaging
 #

-# Matchmaker ring file (JSON) (string value)
+# Matchmaker ring file (JSON). (string value)
 # Deprecated group/name - [DEFAULT]/matchmaker_ringfile
 #ringfile=/etc/oslo/matchmaker_ring.json

@@ -1279,13 +1247,3 @@
 #heat_revision=unknown


-[rpc_notifier2]
-
-#
-# Options defined in heat.openstack.common.notifier.rpc_notifier2
-#
-
-# AMQP topic(s) used for OpenStack notifications (list value)
-#topics=notifications
-
@@ -21,7 +21,6 @@ import webob.exc

 from heat.common import serializers
 from heat.openstack.common.gettextutils import _
-from heat.openstack.common.rpc import common as rpc_common


 class HeatAPIException(webob.exc.HTTPError):
@@ -262,7 +261,7 @@ class HeatActionInProgressError(HeatAPIException):

 def map_remote_error(ex):
     """
-    Map rpc_common.RemoteError exceptions returned by the engine
+    Map RemoteError exceptions returned by the engine
     to HeatAPIException subclasses which can be used to return
     properly formatted AWS error responses
     """
@@ -290,8 +289,8 @@ def map_remote_error(ex):

     ex_type = ex.__class__.__name__

-    if ex_type.endswith(rpc_common._REMOTE_POSTFIX):
-        ex_type = ex_type[:-len(rpc_common._REMOTE_POSTFIX)]
+    if ex_type.endswith('_Remote'):
+        ex_type = ex_type[:-len('_Remote')]

     if ex_type in inval_param_errors:
         return HeatInvalidParameterValueError(detail=six.text_type(ex))
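
Context for the literal: when an exception crosses the RPC boundary, oslo.messaging recreates it on the client side as a dynamically generated class whose name has '_Remote' appended, which is what the old rpc_common._REMOTE_POSTFIX constant used to spell out. A tiny self-contained illustration (the exception name is a made-up example):

    # The engine raises StackNotFound; after the round-trip the API process
    # sees an exception whose class name carries the '_Remote' suffix.
    ex_type = 'StackNotFound_Remote'
    if ex_type.endswith('_Remote'):
        ex_type = ex_type[:-len('_Remote')]
    assert ex_type == 'StackNotFound'
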
@@ -14,6 +14,8 @@
 """
 endpoint for heat AWS-compatible CloudWatch API
 """
+from oslo import messaging
+
 from heat.api.aws import exception
 from heat.api.aws import utils as api_utils
 from heat.common import exception as heat_exception
@@ -21,7 +23,6 @@ from heat.common import policy
 from heat.common import wsgi
 from heat.openstack.common.gettextutils import _
 from heat.openstack.common import log as logging
-from heat.openstack.common.rpc import common as rpc_common
 from heat.rpc import api as engine_api
 from heat.rpc import client as rpc_client

@@ -137,7 +138,7 @@ class WatchController(object):

         try:
             watch_list = self.rpc_client.show_watch(con, watch_name=name)
-        except rpc_common.RemoteError as ex:
+        except messaging.RemoteError as ex:
             return exception.map_remote_error(ex)

         res = {'MetricAlarms': [format_metric_alarm(a)
@@ -229,7 +230,7 @@ class WatchController(object):
                            'metric_name': None}
             watch_data = self.rpc_client.show_watch_metric(con,
                                                            **null_kwargs)
-        except rpc_common.RemoteError as ex:
+        except messaging.RemoteError as ex:
             return exception.map_remote_error(ex)

         res = {'Metrics': []}
@@ -292,7 +293,7 @@ class WatchController(object):

         try:
             self.rpc_client.create_watch_data(con, watch_name, data)
-        except rpc_common.RemoteError as ex:
+        except messaging.RemoteError as ex:
             return exception.map_remote_error(ex)

         result = {'ResponseMetadata': None}
@@ -329,7 +330,7 @@ class WatchController(object):
         try:
             self.rpc_client.set_watch_state(con, watch_name=name,
                                             state=state_map[state])
-        except rpc_common.RemoteError as ex:
+        except messaging.RemoteError as ex:
             return exception.map_remote_error(ex)

         return api_utils.format_response("SetAlarmState", "")
@@ -25,11 +25,9 @@ import traceback
 from oslo.config import cfg
 import webob


 from heat.common import exception
 from heat.common import serializers
 from heat.common import wsgi
-from heat.openstack.common.rpc import common as rpc_common


 cfg.CONF.import_opt('debug', 'heat.openstack.common.log')
@@ -108,8 +106,8 @@ class FaultWrapper(wsgi.Middleware):

         ex_type = ex.__class__.__name__

-        if ex_type.endswith(rpc_common._REMOTE_POSTFIX):
-            ex_type = ex_type[:-len(rpc_common._REMOTE_POSTFIX)]
+        if ex_type.endswith('_Remote'):
+            ex_type = ex_type[:-len('_Remote')]

         full_message = unicode(ex)
         if full_message.find('\n') > -1:
@@ -22,7 +22,6 @@ from oslo.config import cfg

 from heat.common import wsgi
 from heat.openstack.common import log as logging
-from heat.openstack.common import rpc

 paste_deploy_group = cfg.OptGroup('paste_deploy')
 paste_deploy_opts = [
@@ -214,20 +213,6 @@ cfg.CONF.register_group(revision_group)
 for group, opts in list_opts():
     cfg.CONF.register_opts(opts, group=group)

-rpc.set_defaults(control_exchange='heat')
-
-
-# A bit of history:
-# This was added initially by jianingy, then it got added
-# to oslo by Luis. Then it was receintly removed from the
-# default list again.
-# I am not sure we can (or should) rely on oslo to keep
-# our exceptions class in the defaults list.
-allowed_rpc_exception_modules = cfg.CONF.allowed_rpc_exception_modules
-allowed_rpc_exception_modules.append('heat.common.exception')
-cfg.CONF.set_default(name='allowed_rpc_exception_modules',
-                     default=allowed_rpc_exception_modules)
-

 def _get_deployment_flavor():
     """

heat/common/messaging.py (new file, 118 lines)
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+# Copyright 2013 eNovance <licensing@enovance.com>
+#
+# Author: Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import eventlet
+from oslo.config import cfg
+import oslo.messaging
+
+from heat.common import context
+from heat.openstack.common import jsonutils
+
+
+TRANSPORT = None
+NOTIFIER = None
+
+_ALIASES = {
+    'heat.openstack.common.rpc.impl_kombu': 'rabbit',
+    'heat.openstack.common.rpc.impl_qpid': 'qpid',
+    'heat.openstack.common.rpc.impl_zmq': 'zmq',
+}
+
+
+class RequestContextSerializer(oslo.messaging.Serializer):
+    def __init__(self, base):
+        self._base = base
+
+    def serialize_entity(self, ctxt, entity):
+        if not self._base:
+            return entity
+        return self._base.serialize_entity(ctxt, entity)
+
+    def deserialize_entity(self, ctxt, entity):
+        if not self._base:
+            return entity
+        return self._base.deserialize_entity(ctxt, entity)
+
+    @staticmethod
+    def serialize_context(ctxt):
+        return ctxt.to_dict()
+
+    @staticmethod
+    def deserialize_context(ctxt):
+        return context.RequestContext.from_dict(ctxt)
+
+
+class JsonPayloadSerializer(oslo.messaging.NoOpSerializer):
+    @classmethod
+    def serialize_entity(cls, context, entity):
+        return jsonutils.to_primitive(entity, convert_instances=True)
+
+
+def setup(url=None, optional=False):
+    """Initialise the oslo.messaging layer."""
+    global TRANSPORT, NOTIFIER
+
+    if url and url.startswith("fake://"):
+        # NOTE(sileht): oslo.messaging fake driver uses time.sleep
+        # for task switch, so we need to monkey_patch it
+        eventlet.monkey_patch(time=True)
+
+    if not TRANSPORT:
+        oslo.messaging.set_transport_defaults('heat')
+        exmods = ['heat.common.exception']
+        try:
+            TRANSPORT = oslo.messaging.get_transport(
+                cfg.CONF, url, allowed_remote_exmods=exmods, aliases=_ALIASES)
+        except oslo.messaging.InvalidTransportURL as e:
+            TRANSPORT = None
+            if not optional or e.url:
+                # NOTE(sileht): oslo.messaging is configured but unloadable
+                # so reraise the exception
+                raise
+
+    if not NOTIFIER and TRANSPORT:
+        serializer = RequestContextSerializer(JsonPayloadSerializer())
+        NOTIFIER = oslo.messaging.Notifier(TRANSPORT, serializer=serializer)
+
+
+def cleanup():
+    """Cleanup the oslo.messaging layer."""
+    global TRANSPORT, NOTIFIER
+    if TRANSPORT:
+        TRANSPORT.cleanup()
+        TRANSPORT = NOTIFIER = None
+
+
+def get_rpc_server(target, endpoint):
+    """Return a configured oslo.messaging rpc server."""
+    serializer = RequestContextSerializer(JsonPayloadSerializer())
+    return oslo.messaging.get_rpc_server(TRANSPORT, target, [endpoint],
+                                         executor='eventlet',
+                                         serializer=serializer)
+
+
+def get_rpc_client(**kwargs):
+    """Return a configured oslo.messaging RPCClient."""
+    target = oslo.messaging.Target(**kwargs)
+    serializer = RequestContextSerializer(JsonPayloadSerializer())
+    return oslo.messaging.RPCClient(TRANSPORT, target,
+                                    serializer=serializer)
+
+
+def get_notifier(publisher_id):
+    """Return a configured oslo.messaging notifier."""
+    return NOTIFIER.prepare(publisher_id=publisher_id)
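
A rough usage sketch of the new helpers (the topic, publisher_id and payload values are placeholders; the commented lines additionally need a reachable broker and a heat RequestContext named ctxt):

    from heat.common import messaging

    messaging.setup()   # once per process; builds TRANSPORT and NOTIFIER

    # RPC client side: address a topic and make a blocking call.
    client = messaging.get_rpc_client(topic='engine', version='1.1')
    # result = client.call(ctxt, 'list_stacks')

    # Notification side: emit an event at INFO level.
    notifier = messaging.get_notifier(publisher_id='orchestration.host-1')
    # notifier.info(ctxt, 'orchestration.stack.create.end', {'stack_name': 'demo'})
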
@@ -13,22 +13,31 @@

 from oslo.config import cfg

+from heat.common import messaging
 from heat.openstack.common import log
-from heat.openstack.common.notifier import api as notifier_api

 LOG = log.getLogger(__name__)
 SERVICE = 'orchestration'
+INFO = 'INFO'
+ERROR = 'ERROR'
+
+notifier_opts = [
+    cfg.StrOpt('default_notification_level',
+               default=INFO,
+               help='Default notification level for outgoing notifications'),
+    cfg.StrOpt('default_publisher_id',
+               help='Default publisher_id for outgoing notifications'),
+    cfg.MultiStrOpt('list_notifier_drivers',
+                    help='List of drivers to send notifications (DEPRECATED)')
+]
 CONF = cfg.CONF
-CONF.import_opt('default_notification_level',
-                'heat.openstack.common.notifier.api')
-CONF.import_opt('default_publisher_id',
-                'heat.openstack.common.notifier.api')
+CONF.register_opts(notifier_opts)


 def _get_default_publisher():
     publisher_id = CONF.default_publisher_id
     if publisher_id is None:
-        publisher_id = notifier_api.publisher_id(SERVICE)
+        publisher_id = "%s.%s" % (SERVICE, CONF.host)
     return publisher_id

@@ -37,7 +46,7 @@ def get_default_level():

 def notify(context, event_type, level, body):
-    notifier_api.notify(context, _get_default_publisher(),
-                        "%s.%s" % (SERVICE, event_type),
-                        level, body)
+    client = messaging.get_notifier(_get_default_publisher())
+
+    method = getattr(client, level.lower())
+    method(context, "%s.%s" % (SERVICE, event_type), body)
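
The rewritten notify() maps the textual level onto the matching method of the oslo.messaging Notifier, so 'INFO' dispatches through notifier.info() and 'ERROR' through notifier.error(). A minimal sketch of that dispatch (the event type and body are placeholder values):

    client = messaging.get_notifier(_get_default_publisher())
    level = 'ERROR'
    method = getattr(client, level.lower())   # resolves to client.error
    # method(ctxt, 'orchestration.stack.create.error', {'stack_name': 'demo'})
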
@@ -13,7 +13,6 @@

 from heat.engine import api as engine_api
 from heat.engine import notification
-from heat.openstack.common.notifier import api as notifier_api


 def send(stack,
@@ -37,6 +36,6 @@ def send(stack,

     level = notification.get_default_level()
     if suffix == 'error':
-        level = notifier_api.ERROR
+        level = notification.ERROR

     notification.notify(stack.context, event_type, level, body)
@@ -13,7 +13,6 @@

 from heat.engine import api as engine_api
 from heat.engine import notification
-from heat.openstack.common.notifier import api as notifier_api


 def send(stack):
@@ -30,7 +29,7 @@ def send(stack):
         suffix = 'end'
     else:
         suffix = 'error'
-        level = notifier_api.ERROR
+        level = notification.ERROR

     event_type = '%s.%s.%s' % ('stack',
                                stack.action.lower(),
@@ -17,6 +17,7 @@ import json
 import os

 from oslo.config import cfg
+from oslo import messaging
 import six
 import warnings
 import webob
@@ -24,6 +25,7 @@ import webob
 from heat.common import context
 from heat.common import exception
 from heat.common import identifier
+from heat.common import messaging as rpc_messaging
 from heat.db import api as db_api
 from heat.engine import api
 from heat.engine import attributes
@@ -39,9 +41,7 @@ from heat.engine import stack_lock
 from heat.engine import watchrule
 from heat.openstack.common.gettextutils import _
 from heat.openstack.common import log as logging
-from heat.openstack.common.rpc import common as rpc_common
-from heat.openstack.common.rpc import proxy
-from heat.openstack.common.rpc import service
+from heat.openstack.common import service
 from heat.openstack.common import threadgroup
 from heat.openstack.common import timeutils
 from heat.openstack.common import uuidutils
@@ -62,7 +62,7 @@ def request_context(func):
         try:
             return func(self, ctx, *args, **kwargs)
         except exception.HeatException:
-            raise rpc_common.ClientException()
+            raise messaging.rpc.dispatcher.ExpectedException()
     return wrapped


@@ -259,11 +259,17 @@ class EngineListener(service.Service):
     engines to communicate with each other for multi-engine support.
     '''
     def __init__(self, host, engine_id, thread_group_mgr):
-        super(EngineListener, self).__init__(host, engine_id)
+        super(EngineListener, self).__init__()
         self.thread_group_mgr = thread_group_mgr
         self.engine_id = engine_id

+    def start(self):
+        super(EngineListener, self).start()
+        self.target = messaging.Target(
+            server=cfg.CONF.host, topic=self.engine_id)
+        server = rpc_messaging.get_rpc_server(self.target, self)
+        server.start()
+
     def listening(self, ctxt):
         '''
         Respond affirmatively to confirm that the engine performing the
@@ -291,9 +297,10 @@ class EngineService(service.Service):
     RPC_API_VERSION = '1.1'

     def __init__(self, host, topic, manager=None):
-        super(EngineService, self).__init__(host, topic)
+        super(EngineService, self).__init__()
         resources.initialise()
         self.host = host
+        self.topic = topic

         # The following are initialized here, but assigned in start() which
         # happens after the fork when spawning multiple worker processes
@@ -301,6 +308,7 @@ class EngineService(service.Service):
         self.listener = None
         self.engine_id = None
         self.thread_group_mgr = None
+        self.target = None

         if cfg.CONF.instance_user:
             warnings.warn('The "instance_user" option in heat.conf is '
@@ -322,13 +330,21 @@ class EngineService(service.Service):
                 self.stack_watch.start_watch_task(s.id, admin_context)

     def start(self):
-        self.thread_group_mgr = ThreadGroupManager()
         self.engine_id = stack_lock.StackLock.generate_engine_id()
+        self.thread_group_mgr = ThreadGroupManager()
         self.listener = EngineListener(self.host, self.engine_id,
                                        self.thread_group_mgr)
-        LOG.debug("Starting listener for engine %s pid=%s, ppid=%s" %
-                  (self.engine_id, os.getpid(), os.getppid()))
+        LOG.debug("Starting listener for engine %s" % self.engine_id)
         self.listener.start()
+        target = messaging.Target(
+            version=self.RPC_API_VERSION, server=cfg.CONF.host,
+            topic=self.topic)
+        self.target = target
+        server = rpc_messaging.get_rpc_server(target, self)
+        server.start()
+        self._client = rpc_messaging.get_rpc_client(
+            version=self.RPC_API_VERSION)

         super(EngineService, self).start()

     def stop(self):
@@ -702,12 +718,16 @@ class EngineService(service.Service):
         :param stack_identity: Name of the stack you want to delete.
         """
         def remote_stop(lock_engine_id):
-            rpc = proxy.RpcProxy(lock_engine_id, "1.0")
-            msg = rpc.make_msg("stop_stack", stack_identity=stack_identity)
             timeout = cfg.CONF.engine_life_check_timeout
+            self.cctxt = self._client.prepare(
+                version='1.0',
+                timeout=timeout,
+                topic=lock_engine_id)
             try:
-                rpc.call(cnxt, msg, topic=lock_engine_id, timeout=timeout)
-            except rpc_common.Timeout:
+                self.cctxt.call(cnxt,
+                                'stop_stack',
+                                stack_identity=stack_identity)
+            except messaging.MessagingTimeout:
                 return False

         st = self._get_stack(cnxt, stack_identity)
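
Both the per-engine listener and the main engine service now expose themselves the same way: build a Target that names the server and topic, hand the service object to get_rpc_server() as the single endpoint, and start the eventlet-based server. A condensed sketch of that pattern, with a placeholder endpoint class and topic (not the actual heat classes):

    from oslo import messaging
    from heat.common import messaging as rpc_messaging

    class Endpoint(object):
        def listening(self, ctxt):
            return True

    target = messaging.Target(server='host-1', topic='engine', version='1.1')
    server = rpc_messaging.get_rpc_server(target, Endpoint())
    # server.start() assumes messaging.setup() already created the transport.
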
@@ -15,14 +15,14 @@ import contextlib
 import uuid

 from oslo.config import cfg
+from oslo import messaging

 from heat.common import exception
+from heat.common import messaging as rpc_messaging
 from heat.db import api as db_api
 from heat.openstack.common import excutils
 from heat.openstack.common.gettextutils import _
 from heat.openstack.common import log as logging
-from heat.openstack.common.rpc import common as rpc_common
-from heat.openstack.common.rpc import proxy

 cfg.CONF.import_opt('engine_life_check_timeout', 'heat.common.config')

@@ -38,13 +38,12 @@ class StackLock(object):

     @staticmethod
     def engine_alive(context, engine_id):
-        topic = engine_id
-        rpc = proxy.RpcProxy(topic, "1.0")
-        msg = rpc.make_msg("listening")
+        client = rpc_messaging.get_rpc_client(version='1.0', topic=engine_id)
+        client_context = client.prepare(
+            timeout=cfg.CONF.engine_life_check_timeout)
         try:
-            return rpc.call(context, msg, topic=topic,
-                            timeout=cfg.CONF.engine_life_check_timeout)
-        except rpc_common.Timeout:
+            return client_context.call(context, 'listening')
+        except messaging.MessagingTimeout:
             return False

     @staticmethod
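
The liveness check keeps its old semantics: ask the engine that holds the lock whether it is still listening, and treat a messaging timeout as a dead engine. A reduced sketch of the client-side pattern (the topic is a placeholder engine id; the commented call assumes messaging.setup() has run and ctxt is a heat RequestContext):

    from oslo import messaging
    from heat.common import messaging as rpc_messaging

    client = rpc_messaging.get_rpc_client(version='1.0', topic='engine-uuid')
    cctxt = client.prepare(timeout=2)
    # try:
    #     alive = cctxt.call(ctxt, 'listening')
    # except messaging.MessagingTimeout:
    #     alive = False
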
@ -1,30 +0,0 @@
|
|||||||
# Copyright 2013 IBM Corp.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import logging
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from heat.openstack.common import notifier
|
|
||||||
|
|
||||||
|
|
||||||
class PublishErrorsHandler(logging.Handler):
|
|
||||||
def emit(self, record):
|
|
||||||
if ('heat.openstack.common.notifier.log_notifier' in
|
|
||||||
cfg.CONF.notification_driver):
|
|
||||||
return
|
|
||||||
notifier.api.notify(None, 'error.publisher',
|
|
||||||
'error_notification',
|
|
||||||
notifier.api.ERROR,
|
|
||||||
dict(error=record.getMessage()))
|
|
@ -1,172 +0,0 @@
|
|||||||
# Copyright 2011 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import socket
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from heat.openstack.common import context
|
|
||||||
from heat.openstack.common.gettextutils import _, _LE
|
|
||||||
from heat.openstack.common import importutils
|
|
||||||
from heat.openstack.common import jsonutils
|
|
||||||
from heat.openstack.common import log as logging
|
|
||||||
from heat.openstack.common import timeutils
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
notifier_opts = [
|
|
||||||
cfg.MultiStrOpt('notification_driver',
|
|
||||||
default=[],
|
|
||||||
help='Driver or drivers to handle sending notifications'),
|
|
||||||
cfg.StrOpt('default_notification_level',
|
|
||||||
default='INFO',
|
|
||||||
help='Default notification level for outgoing notifications'),
|
|
||||||
cfg.StrOpt('default_publisher_id',
|
|
||||||
help='Default publisher_id for outgoing notifications'),
|
|
||||||
]
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
CONF.register_opts(notifier_opts)
|
|
||||||
|
|
||||||
WARN = 'WARN'
|
|
||||||
INFO = 'INFO'
|
|
||||||
ERROR = 'ERROR'
|
|
||||||
CRITICAL = 'CRITICAL'
|
|
||||||
DEBUG = 'DEBUG'
|
|
||||||
|
|
||||||
log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
|
|
||||||
|
|
||||||
|
|
||||||
class BadPriorityException(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def notify_decorator(name, fn):
|
|
||||||
"""Decorator for notify which is used from utils.monkey_patch().
|
|
||||||
|
|
||||||
:param name: name of the function
|
|
||||||
:param function: - object of the function
|
|
||||||
:returns: function -- decorated function
|
|
||||||
|
|
||||||
"""
|
|
||||||
def wrapped_func(*args, **kwarg):
|
|
||||||
body = {}
|
|
||||||
body['args'] = []
|
|
||||||
body['kwarg'] = {}
|
|
||||||
for arg in args:
|
|
||||||
body['args'].append(arg)
|
|
||||||
for key in kwarg:
|
|
||||||
body['kwarg'][key] = kwarg[key]
|
|
||||||
|
|
||||||
ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
|
|
||||||
notify(ctxt,
|
|
||||||
CONF.default_publisher_id or socket.gethostname(),
|
|
||||||
name,
|
|
||||||
CONF.default_notification_level,
|
|
||||||
body)
|
|
||||||
return fn(*args, **kwarg)
|
|
||||||
return wrapped_func
|
|
||||||
|
|
||||||
|
|
||||||
def publisher_id(service, host=None):
|
|
||||||
if not host:
|
|
||||||
try:
|
|
||||||
host = CONF.host
|
|
||||||
except AttributeError:
|
|
||||||
host = CONF.default_publisher_id or socket.gethostname()
|
|
||||||
return "%s.%s" % (service, host)
|
|
||||||
|
|
||||||
|
|
||||||
def notify(context, publisher_id, event_type, priority, payload):
|
|
||||||
"""Sends a notification using the specified driver
|
|
||||||
|
|
||||||
:param publisher_id: the source worker_type.host of the message
|
|
||||||
:param event_type: the literal type of event (ex. Instance Creation)
|
|
||||||
:param priority: patterned after the enumeration of Python logging
|
|
||||||
levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
|
|
||||||
:param payload: A python dictionary of attributes
|
|
||||||
|
|
||||||
Outgoing message format includes the above parameters, and appends the
|
|
||||||
following:
|
|
||||||
|
|
||||||
message_id
|
|
||||||
a UUID representing the id for this notification
|
|
||||||
|
|
||||||
timestamp
|
|
||||||
the GMT timestamp the notification was sent at
|
|
||||||
|
|
||||||
The composite message will be constructed as a dictionary of the above
|
|
||||||
attributes, which will then be sent via the transport mechanism defined
|
|
||||||
by the driver.
|
|
||||||
|
|
||||||
Message example::
|
|
||||||
|
|
||||||
{'message_id': str(uuid.uuid4()),
|
|
||||||
'publisher_id': 'compute.host1',
|
|
||||||
'timestamp': timeutils.utcnow(),
|
|
||||||
'priority': 'WARN',
|
|
||||||
'event_type': 'compute.create_instance',
|
|
||||||
'payload': {'instance_id': 12, ... }}
|
|
||||||
|
|
||||||
"""
|
|
||||||
if priority not in log_levels:
|
|
||||||
raise BadPriorityException(
|
|
||||||
_('%s not in valid priorities') % priority)
|
|
||||||
|
|
||||||
# Ensure everything is JSON serializable.
|
|
||||||
payload = jsonutils.to_primitive(payload, convert_instances=True)
|
|
||||||
|
|
||||||
msg = dict(message_id=str(uuid.uuid4()),
|
|
||||||
publisher_id=publisher_id,
|
|
||||||
event_type=event_type,
|
|
||||||
priority=priority,
|
|
||||||
payload=payload,
|
|
||||||
timestamp=str(timeutils.utcnow()))
|
|
||||||
|
|
||||||
for driver in _get_drivers():
|
|
||||||
try:
|
|
||||||
driver.notify(context, msg)
|
|
||||||
except Exception as e:
|
|
||||||
LOG.exception(_LE("Problem '%(e)s' attempting to "
|
|
||||||
"send to notification system. "
|
|
||||||
"Payload=%(payload)s")
|
|
||||||
% dict(e=e, payload=payload))
|
|
||||||
|
|
||||||
|
|
||||||
_drivers = None
|
|
||||||
|
|
||||||
|
|
||||||
def _get_drivers():
|
|
||||||
"""Instantiate, cache, and return drivers based on the CONF."""
|
|
||||||
global _drivers
|
|
||||||
if _drivers is None:
|
|
||||||
_drivers = {}
|
|
||||||
for notification_driver in CONF.notification_driver:
|
|
||||||
try:
|
|
||||||
driver = importutils.import_module(notification_driver)
|
|
||||||
_drivers[notification_driver] = driver
|
|
||||||
except ImportError:
|
|
||||||
LOG.exception(_LE("Failed to load notifier %s. "
|
|
||||||
"These notifications will not be sent.") %
|
|
||||||
notification_driver)
|
|
||||||
return _drivers.values()
|
|
||||||
|
|
||||||
|
|
||||||
def _reset_drivers():
|
|
||||||
"""Used by unit tests to reset the drivers."""
|
|
||||||
global _drivers
|
|
||||||
_drivers = None
|
|
@ -1,120 +0,0 @@
#
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from heat.openstack.common.gettextutils import _
from heat.openstack.common import importutils
from heat.openstack.common import log as logging


list_notifier_drivers_opt = cfg.MultiStrOpt(
    'list_notifier_drivers',
    default=['heat.openstack.common.notifier.no_op_notifier'],
    help='List of drivers to send notifications')

CONF = cfg.CONF
CONF.register_opt(list_notifier_drivers_opt)

LOG = logging.getLogger(__name__)

drivers = None


class ImportFailureNotifier(object):
    """Noisily re-raises some exception over-and-over when notify is called."""

    def __init__(self, exception):
        self.exception = exception

    def notify(self, context, message):
        raise self.exception


def _get_drivers():
    """Instantiates and returns drivers based on the flag values."""
    global drivers
    if drivers is None:
        drivers = []
        for notification_driver in CONF.list_notifier_drivers:
            try:
                drivers.append(importutils.import_module(notification_driver))
            except ImportError as e:
                drivers.append(ImportFailureNotifier(e))
    return drivers


def add_driver(notification_driver):
    """Add a notification driver at runtime."""
    # Make sure the driver list is initialized.
    _get_drivers()
    if isinstance(notification_driver, basestring):
        # Load and add
        try:
            drivers.append(importutils.import_module(notification_driver))
        except ImportError as e:
            drivers.append(ImportFailureNotifier(e))
    else:
        # Driver is already loaded; just add the object.
        drivers.append(notification_driver)


def _object_name(obj):
    name = []
    if hasattr(obj, '__module__'):
        name.append(obj.__module__)
    if hasattr(obj, '__name__'):
        name.append(obj.__name__)
    else:
        name.append(obj.__class__.__name__)
    return '.'.join(name)


def remove_driver(notification_driver):
    """Remove a notification driver at runtime."""
    # Make sure the driver list is initialized.
    _get_drivers()
    removed = False
    if notification_driver in drivers:
        # We're removing an object.  Easy.
        drivers.remove(notification_driver)
        removed = True
    else:
        # We're removing a driver by name.  Search for it.
        for driver in drivers:
            if _object_name(driver) == notification_driver:
                drivers.remove(driver)
                removed = True

    if not removed:
        raise ValueError("Cannot remove; %s is not in list" %
                         notification_driver)


def notify(context, message):
    """Passes notification to multiple notifiers in a list."""
    for driver in _get_drivers():
        try:
            driver.notify(context, message)
        except Exception as e:
            LOG.exception(_("Problem '%(e)s' attempting to send to "
                            "notification driver %(driver)s."), locals())


def _reset_drivers():
    """Used by unit tests to reset the drivers."""
    global drivers
    drivers = None
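Editor's note: a hedged usage sketch of the runtime driver management removed above (the driver module path shown is illustrative; the exact import path depends on the tree at this revision):

    # Sketch only: add a logging driver at runtime, emit, then remove it.
    from heat.openstack.common.notifier import list_notifier

    list_notifier.add_driver('heat.openstack.common.notifier.log_notifier')
    list_notifier.notify(None, {'event_type': 'stack.create.end',
                                'priority': 'INFO'})
    list_notifier.remove_driver('heat.openstack.common.notifier.log_notifier')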
@ -1,37 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from heat.openstack.common import jsonutils
from heat.openstack.common import log as logging


CONF = cfg.CONF


def notify(_context, message):
    """Notifies the recipient of the desired event given the model.

    Log notifications using OpenStack's default logging system.
    """

    priority = message.get('priority',
                           CONF.default_notification_level)
    priority = priority.lower()
    logger = logging.getLogger(
        'heat.openstack.common.notification.%s' %
        message['event_type'])
    getattr(logger, priority)(jsonutils.dumps(message))
@ -1,19 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


def notify(_context, message):
    """Notifies the recipient of the desired event given the model."""
    pass
@ -1,77 +0,0 @@
|
|||||||
# Copyright 2013 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
A temporary helper which emulates oslo.messaging.Notifier.
|
|
||||||
|
|
||||||
This helper method allows us to do the tedious porting to the new Notifier API
|
|
||||||
as a standalone commit so that the commit which switches us to oslo.messaging
|
|
||||||
is smaller and easier to review. This file will be removed as part of that
|
|
||||||
commit.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from heat.openstack.common.notifier import api as notifier_api
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
|
|
||||||
|
|
||||||
class Notifier(object):
|
|
||||||
|
|
||||||
def __init__(self, publisher_id):
|
|
||||||
super(Notifier, self).__init__()
|
|
||||||
self.publisher_id = publisher_id
|
|
||||||
|
|
||||||
_marker = object()
|
|
||||||
|
|
||||||
def prepare(self, publisher_id=_marker):
|
|
||||||
ret = self.__class__(self.publisher_id)
|
|
||||||
if publisher_id is not self._marker:
|
|
||||||
ret.publisher_id = publisher_id
|
|
||||||
return ret
|
|
||||||
|
|
||||||
def _notify(self, ctxt, event_type, payload, priority):
|
|
||||||
notifier_api.notify(ctxt,
|
|
||||||
self.publisher_id,
|
|
||||||
event_type,
|
|
||||||
priority,
|
|
||||||
payload)
|
|
||||||
|
|
||||||
def audit(self, ctxt, event_type, payload):
|
|
||||||
# No audit in old notifier.
|
|
||||||
self._notify(ctxt, event_type, payload, 'INFO')
|
|
||||||
|
|
||||||
def debug(self, ctxt, event_type, payload):
|
|
||||||
self._notify(ctxt, event_type, payload, 'DEBUG')
|
|
||||||
|
|
||||||
def info(self, ctxt, event_type, payload):
|
|
||||||
self._notify(ctxt, event_type, payload, 'INFO')
|
|
||||||
|
|
||||||
def warn(self, ctxt, event_type, payload):
|
|
||||||
self._notify(ctxt, event_type, payload, 'WARN')
|
|
||||||
|
|
||||||
warning = warn
|
|
||||||
|
|
||||||
def error(self, ctxt, event_type, payload):
|
|
||||||
self._notify(ctxt, event_type, payload, 'ERROR')
|
|
||||||
|
|
||||||
def critical(self, ctxt, event_type, payload):
|
|
||||||
self._notify(ctxt, event_type, payload, 'CRITICAL')
|
|
||||||
|
|
||||||
|
|
||||||
def get_notifier(service=None, host=None, publisher_id=None):
|
|
||||||
if not publisher_id:
|
|
||||||
publisher_id = "%s.%s" % (service, host or CONF.host)
|
|
||||||
return Notifier(publisher_id)
|
|
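Editor's note: a hedged sketch of how the temporary shim above was meant to be called while the port was in flight; the service name, host, event type and payload are illustrative placeholders:

    # Sketch only: emit an INFO notification through the shim above.
    notifier = get_notifier(service='orchestration', host='engine-host-1')
    notifier.info({}, 'orchestration.stack.create.end', {'stack_id': '1234'})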
@ -1,30 +0,0 @@
#
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging
from heat.openstack.common.notifier import rpc_notifier

LOG = logging.getLogger(__name__)


def notify(context, message):
    """Deprecated in Grizzly. Please use rpc_notifier instead."""

    LOG.deprecated(_("The rabbit_notifier is now deprecated."
                     " Please use rpc_notifier instead."))
    rpc_notifier.notify(context, message)
@ -1,47 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from heat.openstack.common import context as req_context
from heat.openstack.common.gettextutils import _LE
from heat.openstack.common import log as logging
from heat.openstack.common import rpc

LOG = logging.getLogger(__name__)

notification_topic_opt = cfg.ListOpt(
    'notification_topics', default=['notifications', ],
    help='AMQP topic used for OpenStack notifications')

CONF = cfg.CONF
CONF.register_opt(notification_topic_opt)


def notify(context, message):
    """Sends a notification via RPC."""
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority',
                           CONF.default_notification_level)
    priority = priority.lower()
    for topic in CONF.notification_topics:
        topic = '%s.%s' % (topic, priority)
        try:
            rpc.notify(context, topic, message)
        except Exception:
            LOG.exception(_LE("Could not send notification to %(topic)s. "
                              "Payload=%(message)s"),
                          {"topic": topic, "message": message})
@ -1,53 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

'''messaging based notification driver, with message envelopes'''

from oslo.config import cfg

from heat.openstack.common import context as req_context
from heat.openstack.common.gettextutils import _LE
from heat.openstack.common import log as logging
from heat.openstack.common import rpc

LOG = logging.getLogger(__name__)

notification_topic_opt = cfg.ListOpt(
    'topics', default=['notifications', ],
    help='AMQP topic(s) used for OpenStack notifications')

opt_group = cfg.OptGroup(name='rpc_notifier2',
                         title='Options for rpc_notifier2')

CONF = cfg.CONF
CONF.register_group(opt_group)
CONF.register_opt(notification_topic_opt, opt_group)


def notify(context, message):
    """Sends a notification via RPC."""
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority',
                           CONF.default_notification_level)
    priority = priority.lower()
    for topic in CONF.rpc_notifier2.topics:
        topic = '%s.%s' % (topic, priority)
        try:
            rpc.notify(context, topic, message, envelope=True)
        except Exception:
            LOG.exception(_LE("Could not send notification to %(topic)s. "
                              "Payload=%(message)s"),
                          {"topic": topic, "message": message})
@ -1,21 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

NOTIFICATIONS = []


def notify(_context, message):
    """Test notifier, stores notifications in memory for unittests."""
    NOTIFICATIONS.append(message)
@ -1,275 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A remote procedure call (rpc) abstraction.

For some wrappers that add message versioning to rpc, see:
    rpc.dispatcher
    rpc.proxy
"""

from oslo.config import cfg

from heat.openstack.common import importutils
from heat.openstack.common import log as logging


LOG = logging.getLogger(__name__)


rpc_opts = [
    cfg.StrOpt('rpc_backend',
               default='%s.impl_kombu' % __package__,
               help="The messaging module to use, defaults to kombu."),
    cfg.IntOpt('rpc_thread_pool_size',
               default=64,
               help='Size of RPC thread pool'),
    cfg.IntOpt('rpc_conn_pool_size',
               default=30,
               help='Size of RPC connection pool'),
    cfg.IntOpt('rpc_response_timeout',
               default=60,
               help='Seconds to wait for a response from call or multicall'),
    cfg.IntOpt('rpc_cast_timeout',
               default=30,
               help='Seconds to wait before a cast expires (TTL). '
                    'Only supported by impl_zmq.'),
    cfg.ListOpt('allowed_rpc_exception_modules',
                default=['nova.exception',
                         'cinder.exception',
                         'exceptions',
                         ],
                help='Modules of exceptions that are permitted to be recreated'
                     ' upon receiving exception data from an rpc call.'),
    cfg.BoolOpt('fake_rabbit',
                default=False,
                help='If passed, use a fake RabbitMQ provider'),
    cfg.StrOpt('control_exchange',
               default='openstack',
               help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]

CONF = cfg.CONF
CONF.register_opts(rpc_opts)


def set_defaults(control_exchange):
    cfg.set_defaults(rpc_opts,
                     control_exchange=control_exchange)


def create_connection(new=True):
    """Create a connection to the message bus used for rpc.

    For some example usage of creating a connection and some consumers on that
    connection, see nova.service.

    :param new: Whether or not to create a new connection.  A new connection
                will be created by default.  If new is False, the
                implementation is free to return an existing connection from a
                pool.

    :returns: An instance of openstack.common.rpc.common.Connection
    """
    return _get_impl().create_connection(CONF, new=new)


def call(context, topic, msg, timeout=None):
    """Invoke a remote method that returns something.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }
    :param timeout: int, number of seconds to use for a response timeout.
                    If set, this overrides the rpc_response_timeout option.

    :returns: A dict from the remote method.

    :raises: openstack.common.rpc.common.Timeout if a complete response
             is not received before the timeout is reached.
    """
    return _get_impl().call(CONF, context, topic, msg, timeout)


def cast(context, topic, msg):
    """Invoke a remote method that does not return anything.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().cast(CONF, context, topic, msg)


def fanout_cast(context, topic, msg):
    """Broadcast a remote method invocation with no return.

    This method will get invoked on all consumers that were set up with this
    topic name and fanout=True.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=True.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().fanout_cast(CONF, context, topic, msg)


def multicall(context, topic, msg, timeout=None):
    """Invoke a remote method and get back an iterator.

    In this case, the remote method will be returning multiple values in
    separate messages, so the return values can be processed as the come in via
    an iterator.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to the
                  topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }
    :param timeout: int, number of seconds to use for a response timeout.
                    If set, this overrides the rpc_response_timeout option.

    :returns: An iterator.  The iterator will yield a tuple (N, X) where N is
              an index that starts at 0 and increases by one for each value
              returned and X is the Nth value that was returned by the remote
              method.

    :raises: openstack.common.rpc.common.Timeout if a complete response
             is not received before the timeout is reached.
    """
    return _get_impl().multicall(CONF, context, topic, msg, timeout)


def notify(context, topic, msg, envelope=False):
    """Send notification event.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the notification to.
    :param msg: This is a dict of content of event.
    :param envelope: Set to True to enable message envelope for notifications.

    :returns: None
    """
    return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)


def cleanup():
    """Clean up resources in use by implementation.

    Clean up any resources that have been allocated by the RPC implementation.
    This is typically open connections to a messaging service.  This function
    would get called before an application using this API exits to allow
    connections to get torn down cleanly.

    :returns: None
    """
    return _get_impl().cleanup()


def cast_to_server(context, server_params, topic, msg):
    """Invoke a remote method that does not return anything.

    :param context: Information that identifies the user that has made this
                    request.
    :param server_params: Connection information
    :param topic: The topic to send the notification to.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().cast_to_server(CONF, context, server_params, topic,
                                      msg)


def fanout_cast_to_server(context, server_params, topic, msg):
    """Broadcast to a remote method invocation with no return.

    :param context: Information that identifies the user that has made this
                    request.
    :param server_params: Connection information
    :param topic: The topic to send the notification to.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().fanout_cast_to_server(CONF, context, server_params,
                                             topic, msg)


def queue_get_for(context, topic, host):
    """Get a queue name for a given topic + host.

    This function only works if this naming convention is followed on the
    consumer side, as well.  For example, in nova, every instance of the
    nova-foo service calls create_consumer() for two topics:

        foo
        foo.<host>

    Messages sent to the 'foo' topic are distributed to exactly one instance of
    the nova-foo service.  The services are chosen in a round-robin fashion.
    Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
    <host>.
    """
    return '%s.%s' % (topic, host) if host else topic


_RPCIMPL = None


def _get_impl():
    """Delay import of rpc_backend until configuration is loaded."""
    global _RPCIMPL
    if _RPCIMPL is None:
        try:
            _RPCIMPL = importutils.import_module(CONF.rpc_backend)
        except ImportError:
            # For backwards compatibility with older nova config.
            impl = CONF.rpc_backend.replace('nova.rpc',
                                            'nova.openstack.common.rpc')
            _RPCIMPL = importutils.import_module(impl)
    return _RPCIMPL
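Editor's note: a hedged sketch of the call/cast pattern documented in the module above, i.e. the old oslo-incubator RPC API this commit retires; the topic, method names and context are placeholders, not real Heat engine endpoints:

    # Sketch only: synchronous call and fire-and-forget cast via the old API.
    from heat.openstack.common import rpc

    context = {}  # placeholder request context
    result = rpc.call(context, 'engine',
                      {'method': 'show_stack', 'args': {'stack_name': 'test'}})
    rpc.cast(context, 'engine',
             {'method': 'delete_stack', 'args': {'stack_name': 'test'}})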
@ -1,637 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Shared code between AMQP based openstack.common.rpc implementations.

The code in this module is shared between the rpc implementations based on
AMQP. Specifically, this includes impl_kombu and impl_qpid.  impl_carrot also
uses AMQP, but is deprecated and predates this code.
"""

import collections
import inspect
import sys
import uuid

from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
from oslo.config import cfg
import six


from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _, _LE
from heat.openstack.common import local
from heat.openstack.common import log as logging
from heat.openstack.common.rpc import common as rpc_common


amqp_opts = [
    cfg.BoolOpt('amqp_durable_queues',
                default=False,
                deprecated_name='rabbit_durable_queues',
                deprecated_group='DEFAULT',
                help='Use durable queues in amqp.'),
    cfg.BoolOpt('amqp_auto_delete',
                default=False,
                help='Auto-delete queues in amqp.'),
]

cfg.CONF.register_opts(amqp_opts)

UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)


class Pool(pools.Pool):
    """Class that implements a Pool of Connections."""
    def __init__(self, conf, connection_cls, *args, **kwargs):
        self.connection_cls = connection_cls
        self.conf = conf
        kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
        kwargs.setdefault("order_as_stack", True)
        super(Pool, self).__init__(*args, **kwargs)
        self.reply_proxy = None

    # TODO(comstud): Timeout connections not used in a while
    def create(self):
        LOG.debug('Pool creating new connection')
        return self.connection_cls(self.conf)

    def empty(self):
        while self.free_items:
            self.get().close()
        # Force a new connection pool to be created.
        # Note that this was added due to failing unit test cases. The issue
        # is the above "while loop" gets all the cached connections from the
        # pool and closes them, but never returns them to the pool, a pool
        # leak. The unit tests hang waiting for an item to be returned to the
        # pool. The unit tests get here via the tearDown() method. In the run
        # time code, it gets here via cleanup() and only appears in service.py
        # just before doing a sys.exit(), so cleanup() only happens once and
        # the leakage is not a problem.
        self.connection_cls.pool = None


_pool_create_sem = semaphore.Semaphore()


def get_connection_pool(conf, connection_cls):
    with _pool_create_sem:
        # Make sure only one thread tries to create the connection pool.
        if not connection_cls.pool:
            connection_cls.pool = Pool(conf, connection_cls)
    return connection_cls.pool


class ConnectionContext(rpc_common.Connection):
    """The class that is actually returned to the create_connection() caller.

    This is essentially a wrapper around Connection that supports 'with'.
    It can also return a new Connection, or one from a pool.

    The function will also catch when an instance of this class is to be
    deleted.  With that we can return Connections to the pool on exceptions
    and so forth without making the caller be responsible for catching them.
    If possible the function makes sure to return a connection to the pool.
    """

    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
        """Create a new connection, or get one from the pool."""
        self.connection = None
        self.conf = conf
        self.connection_pool = connection_pool
        if pooled:
            self.connection = connection_pool.get()
        else:
            self.connection = connection_pool.connection_cls(
                conf,
                server_params=server_params)
        self.pooled = pooled

    def __enter__(self):
        """When with ConnectionContext() is used, return self."""
        return self

    def _done(self):
        """If the connection came from a pool, clean it up and put it back.
        If it did not come from a pool, close it.
        """
        if self.connection:
            if self.pooled:
                # Reset the connection so it's ready for the next caller
                # to grab from the pool
                self.connection.reset()
                self.connection_pool.put(self.connection)
            else:
                try:
                    self.connection.close()
                except Exception:
                    pass
            self.connection = None

    def __exit__(self, exc_type, exc_value, tb):
        """End of 'with' statement.  We're done here."""
        self._done()

    def __del__(self):
        """Caller is done with this connection.  Make sure we cleaned up."""
        self._done()

    def close(self):
        """Caller is done with this connection."""
        self._done()

    def create_consumer(self, topic, proxy, fanout=False):
        self.connection.create_consumer(topic, proxy, fanout)

    def create_worker(self, topic, proxy, pool_name):
        self.connection.create_worker(topic, proxy, pool_name)

    def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
                           ack_on_error=True):
        self.connection.join_consumer_pool(callback,
                                           pool_name,
                                           topic,
                                           exchange_name,
                                           ack_on_error)

    def consume_in_thread(self):
        return self.connection.consume_in_thread()

    def __getattr__(self, key):
        """Proxy all other calls to the Connection instance."""
        if self.connection:
            return getattr(self.connection, key)
        else:
            raise rpc_common.InvalidRPCConnectionReuse()


class ReplyProxy(ConnectionContext):
    """Connection class for RPC replies / callbacks."""
    def __init__(self, conf, connection_pool):
        self._call_waiters = {}
        self._num_call_waiters = 0
        self._num_call_waiters_wrn_threshold = 10
        self._reply_q = 'reply_' + uuid.uuid4().hex
        super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
        self.declare_direct_consumer(self._reply_q, self._process_data)
        self.consume_in_thread()

    def _process_data(self, message_data):
        msg_id = message_data.pop('_msg_id', None)
        waiter = self._call_waiters.get(msg_id)
        if not waiter:
            LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
                       ', message : %(data)s'), {'msg_id': msg_id,
                                                 'data': message_data})
            LOG.warn(_('_call_waiters: %s') % self._call_waiters)
        else:
            waiter.put(message_data)

    def add_call_waiter(self, waiter, msg_id):
        self._num_call_waiters += 1
        if self._num_call_waiters > self._num_call_waiters_wrn_threshold:
            LOG.warn(_('Number of call waiters is greater than warning '
                       'threshold: %d. There could be a MulticallProxyWaiter '
                       'leak.') % self._num_call_waiters_wrn_threshold)
            self._num_call_waiters_wrn_threshold *= 2
        self._call_waiters[msg_id] = waiter

    def del_call_waiter(self, msg_id):
        self._num_call_waiters -= 1
        del self._call_waiters[msg_id]

    def get_reply_q(self):
        return self._reply_q


def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
              failure=None, ending=False, log_failure=True):
    """Sends a reply or an error on the channel signified by msg_id.

    Failure should be a sys.exc_info() tuple.

    """
    with ConnectionContext(conf, connection_pool) as conn:
        if failure:
            failure = rpc_common.serialize_remote_exception(failure,
                                                            log_failure)

        msg = {'result': reply, 'failure': failure}
        if ending:
            msg['ending'] = True
        _add_unique_id(msg)
        # If a reply_q exists, add the msg_id to the reply and pass the
        # reply_q to direct_send() to use it as the response queue.
        # Otherwise use the msg_id for backward compatibility.
        if reply_q:
            msg['_msg_id'] = msg_id
            conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
        else:
            conn.direct_send(msg_id, rpc_common.serialize_msg(msg))


class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call."""
    def __init__(self, **kwargs):
        self.msg_id = kwargs.pop('msg_id', None)
        self.reply_q = kwargs.pop('reply_q', None)
        self.conf = kwargs.pop('conf')
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        values = self.to_dict()
        values['conf'] = self.conf
        values['msg_id'] = self.msg_id
        values['reply_q'] = self.reply_q
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False,
              connection_pool=None, log_failure=True):
        if self.msg_id:
            msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
                      reply, failure, ending, log_failure)
            if ending:
                self.msg_id = None


def unpack_context(conf, msg):
    """Unpack context from msg."""
    context_dict = {}
    for key in list(msg.keys()):
        # NOTE(vish): Some versions of python don't like unicode keys
        #             in kwargs.
        key = str(key)
        if key.startswith('_context_'):
            value = msg.pop(key)
            context_dict[key[9:]] = value
    context_dict['msg_id'] = msg.pop('_msg_id', None)
    context_dict['reply_q'] = msg.pop('_reply_q', None)
    context_dict['conf'] = conf
    ctx = RpcContext.from_dict(context_dict)
    rpc_common._safe_log(LOG.debug, 'unpacked context: %s', ctx.to_dict())
    return ctx


def pack_context(msg, context):
    """Pack context into msg.

    Values for message keys need to be less than 255 chars, so we pull
    context out into a bunch of separate keys.  If we want to support
    more arguments in rabbit messages, we may want to do the same
    for args at some point.

    """
    if isinstance(context, dict):
        context_d = dict([('_context_%s' % key, value)
                          for (key, value) in six.iteritems(context)])
    else:
        context_d = dict([('_context_%s' % key, value)
                          for (key, value) in
                          six.iteritems(context.to_dict())])

    msg.update(context_d)


class _MsgIdCache(object):
    """This class checks any duplicate messages."""

    # NOTE: This value is considered can be a configuration item, but
    #       it is not necessary to change its value in most cases,
    #       so let this value as static for now.
    DUP_MSG_CHECK_SIZE = 16

    def __init__(self, **kwargs):
        self.prev_msgids = collections.deque([],
                                             maxlen=self.DUP_MSG_CHECK_SIZE)

    def check_duplicate_message(self, message_data):
        """AMQP consumers may read same message twice when exceptions occur
        before ack is returned. This method prevents doing it.
        """
        if UNIQUE_ID in message_data:
            msg_id = message_data[UNIQUE_ID]
            if msg_id not in self.prev_msgids:
                self.prev_msgids.append(msg_id)
            else:
                raise rpc_common.DuplicateMessageError(msg_id=msg_id)


def _add_unique_id(msg):
    """Add unique_id for checking duplicate messages."""
    unique_id = uuid.uuid4().hex
    msg.update({UNIQUE_ID: unique_id})
    LOG.debug('UNIQUE_ID is %s.' % (unique_id))


class _ThreadPoolWithWait(object):
    """Base class for a delayed invocation manager.

    Used by the Connection class to start up green threads
    to handle incoming messages.
    """

    def __init__(self, conf, connection_pool):
        self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
        self.connection_pool = connection_pool
        self.conf = conf

    def wait(self):
        """Wait for all callback threads to exit."""
        self.pool.waitall()


class CallbackWrapper(_ThreadPoolWithWait):
    """Wraps a straight callback.

    Allows it to be invoked in a green thread.
    """

    def __init__(self, conf, callback, connection_pool,
                 wait_for_consumers=False):
        """Initiates CallbackWrapper object.

        :param conf: cfg.CONF instance
        :param callback: a callable (probably a function)
        :param connection_pool: connection pool as returned by
                                get_connection_pool()
        :param wait_for_consumers: wait for all green threads to
                                   complete and raise the last
                                   caught exception, if any.

        """
        super(CallbackWrapper, self).__init__(
            conf=conf,
            connection_pool=connection_pool,
        )
        self.callback = callback
        self.wait_for_consumers = wait_for_consumers
        self.exc_info = None

    def _wrap(self, message_data, **kwargs):
        """Wrap the callback invocation to catch exceptions.
        """
        try:
            self.callback(message_data, **kwargs)
        except Exception:
            self.exc_info = sys.exc_info()

    def __call__(self, message_data):
        self.exc_info = None
        self.pool.spawn_n(self._wrap, message_data)

        if self.wait_for_consumers:
            self.pool.waitall()
            if self.exc_info:
                six.reraise(self.exc_info[1], None, self.exc_info[2])


class ProxyCallback(_ThreadPoolWithWait):
    """Calls methods on a proxy object based on method and args."""

    def __init__(self, conf, proxy, connection_pool):
        super(ProxyCallback, self).__init__(
            conf=conf,
            connection_pool=connection_pool,
        )
        self.proxy = proxy
        self.msg_id_cache = _MsgIdCache()

    def __call__(self, message_data):
        """Consumer callback to call a method on a proxy object.

        Parses the message for validity and fires off a thread to call the
        proxy object method.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}

        """
        # It is important to clear the context here, because at this point
        # the previous context is stored in local.store.context
        if hasattr(local.store, 'context'):
            del local.store.context
        rpc_common._safe_log(LOG.debug, 'received %s', message_data)
        self.msg_id_cache.check_duplicate_message(message_data)
        ctxt = unpack_context(self.conf, message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        version = message_data.get('version')
        namespace = message_data.get('namespace')
        if not method:
            LOG.warn(_('no method for message: %s') % message_data)
            ctxt.reply(_('No method for message: %s') % message_data,
                       connection_pool=self.connection_pool)
            return
        self.pool.spawn_n(self._process_data, ctxt, version, method,
                          namespace, args)

    def _process_data(self, ctxt, version, method, namespace, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate.  If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, namespace,
                                       **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except rpc_common.ClientException as e:
            LOG.debug('Expected exception during message handling (%s)' %
                      e._exc_info[1])
            ctxt.reply(None, e._exc_info,
                       connection_pool=self.connection_pool,
                       log_failure=False)
        except Exception:
            # sys.exc_info() is deleted by LOG.exception().
            exc_info = sys.exc_info()
            LOG.error(_LE('Exception during message handling'),
                      exc_info=exc_info)
            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)


class MulticallProxyWaiter(object):
    def __init__(self, conf, msg_id, timeout, connection_pool):
        self._msg_id = msg_id
        self._timeout = timeout or conf.rpc_response_timeout
        self._reply_proxy = connection_pool.reply_proxy
        self._done = False
        self._got_ending = False
        self._conf = conf
        self._dataqueue = queue.LightQueue()
        # Add this caller to the reply proxy's call_waiters
        self._reply_proxy.add_call_waiter(self, self._msg_id)
        self.msg_id_cache = _MsgIdCache()

    def put(self, data):
        self._dataqueue.put(data)

    def done(self):
        if self._done:
            return
        self._done = True
        # Remove this caller from reply proxy's call_waiters
        self._reply_proxy.del_call_waiter(self._msg_id)

    def _process_data(self, data):
        result = None
        self.msg_id_cache.check_duplicate_message(data)
        if data['failure']:
            failure = data['failure']
            result = rpc_common.deserialize_remote_exception(self._conf,
                                                             failure)
        elif data.get('ending', False):
            self._got_ending = True
        else:
            result = data['result']
        return result

    def __iter__(self):
        """Return a result until we get a reply with an 'ending' flag."""
        if self._done:
            raise StopIteration
        while True:
            try:
                data = self._dataqueue.get(timeout=self._timeout)
                result = self._process_data(data)
            except queue.Empty:
                self.done()
                raise rpc_common.Timeout()
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.done()
            if self._got_ending:
                self.done()
                raise StopIteration
            if isinstance(result, Exception):
                self.done()
                raise result
            yield result


def create_connection(conf, new, connection_pool):
    """Create a connection."""
    return ConnectionContext(conf, connection_pool, pooled=not new)


_reply_proxy_create_sem = semaphore.Semaphore()


def multicall(conf, context, topic, msg, timeout, connection_pool):
    """Make a call that returns multiple times."""
    LOG.debug('Making synchronous call on %s ...', topic)
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    LOG.debug('MSG_ID is %s' % (msg_id))
    _add_unique_id(msg)
    pack_context(msg, context)

    with _reply_proxy_create_sem:
        if not connection_pool.reply_proxy:
            connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
    msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
    wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    return wait_msg


def call(conf, context, topic, msg, timeout, connection_pool):
    """Sends a message on a topic and wait for a response."""
    rv = multicall(conf, context, topic, msg, timeout, connection_pool)
    # NOTE(vish): return the last result from the multicall
    rv = list(rv)
    if not rv:
        return
    return rv[-1]


def cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a topic without waiting for a response."""
    LOG.debug('Making asynchronous cast on %s...', topic)
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg))


def fanout_cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a fanout exchange without waiting for a response."""
    LOG.debug('Making asynchronous fanout cast...')
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.fanout_send(topic, rpc_common.serialize_msg(msg))


def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
    """Sends a message on a topic to a specific server."""
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg))


def fanout_cast_to_server(conf, context, server_params, topic, msg,
                          connection_pool):
    """Sends a message on a fanout exchange to a specific server."""
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as conn:
        conn.fanout_send(topic, rpc_common.serialize_msg(msg))


def notify(conf, context, topic, msg, connection_pool, envelope):
    """Sends a notification event on a topic."""
    LOG.debug('Sending %(event_type)s on %(topic)s',
              dict(event_type=msg.get('event_type'),
                   topic=topic))
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        if envelope:
            msg = rpc_common.serialize_msg(msg)
        conn.notify_send(topic, msg)


def cleanup(connection_pool):
    if connection_pool:
        connection_pool.empty()


def get_control_exchange(conf):
    return conf.control_exchange
@ -1,508 +0,0 @@
|
|||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
# Copyright 2011 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import copy
|
|
||||||
import sys
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
import six
|
|
||||||
|
|
||||||
from heat.openstack.common.gettextutils import _, _LE
|
|
||||||
from heat.openstack.common import importutils
|
|
||||||
from heat.openstack.common import jsonutils
|
|
||||||
from heat.openstack.common import local
|
|
||||||
from heat.openstack.common import log as logging
|
|
||||||
from heat.openstack.common import versionutils
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
_RPC_ENVELOPE_VERSION = '2.0'
|
|
||||||
'''RPC Envelope Version.
|
|
||||||
|
|
||||||
This version number applies to the top level structure of messages sent out.
|
|
||||||
It does *not* apply to the message payload, which must be versioned
|
|
||||||
independently. For example, when using rpc APIs, a version number is applied
|
|
||||||
for changes to the API being exposed over rpc. This version number is handled
|
|
||||||
in the rpc proxy and dispatcher modules.
|
|
||||||
|
|
||||||
This version number applies to the message envelope that is used in the
|
|
||||||
serialization done inside the rpc layer. See serialize_msg() and
|
|
||||||
deserialize_msg().
|
|
||||||
|
|
||||||
The current message format (version 2.0) is very simple. It is::
|
|
||||||
|
|
||||||
{
|
|
||||||
'oslo.version': <RPC Envelope Version as a String>,
|
|
||||||
'oslo.message': <Application Message Payload, JSON encoded>
|
|
||||||
}
|
|
||||||
|
|
||||||
Message format version '1.0' is just considered to be the messages we sent
|
|
||||||
without a message envelope.
|
|
||||||
|
|
||||||
So, the current message envelope just includes the envelope version. It may
|
|
||||||
eventually contain additional information, such as a signature for the message
|
|
||||||
payload.
|
|
||||||
|
|
||||||
We will JSON encode the application message payload. The message envelope,
|
|
||||||
which includes the JSON encoded application message body, will be passed down
|
|
||||||
to the messaging libraries as a dict.
|
|
||||||
'''
|
|
||||||
|
|
||||||
_VERSION_KEY = 'oslo.version'
|
|
||||||
_MESSAGE_KEY = 'oslo.message'
|
|
||||||
|
|
||||||
_REMOTE_POSTFIX = '_Remote'
|
|
||||||
|
|
||||||
|
|
||||||
class RPCException(Exception):
|
|
||||||
msg_fmt = _("An unknown RPC related exception occurred.")
|
|
||||||
|
|
||||||
def __init__(self, message=None, **kwargs):
|
|
||||||
self.kwargs = kwargs
|
|
||||||
|
|
||||||
if not message:
|
|
||||||
try:
|
|
||||||
message = self.msg_fmt % kwargs
|
|
||||||
|
|
||||||
except Exception:
|
|
||||||
# kwargs doesn't match a variable in the message
|
|
||||||
# log the issue and the kwargs
|
|
||||||
LOG.exception(_LE('Exception in string format operation'))
|
|
||||||
for name, value in six.iteritems(kwargs):
|
|
||||||
LOG.error("%s: %s" % (name, value))
|
|
||||||
# at least get the core message out if something happened
|
|
||||||
message = self.msg_fmt
|
|
||||||
|
|
||||||
super(RPCException, self).__init__(message)
|
|
||||||
|
|
||||||
|
|
||||||
class RemoteError(RPCException):
|
|
||||||
"""Signifies that a remote class has raised an exception.
|
|
||||||
|
|
||||||
Contains a string representation of the type of the original exception,
|
|
||||||
the value of the original exception, and the traceback. These are
|
|
||||||
sent to the parent as a joined string so printing the exception
|
|
||||||
contains all of the relevant info.
|
|
||||||
|
|
||||||
"""
|
|
||||||
msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
|
|
||||||
|
|
||||||
def __init__(self, exc_type=None, value=None, traceback=None):
|
|
||||||
self.exc_type = exc_type
|
|
||||||
self.value = value
|
|
||||||
self.traceback = traceback
|
|
||||||
super(RemoteError, self).__init__(exc_type=exc_type,
|
|
||||||
value=value,
|
|
||||||
traceback=traceback)
|
|
||||||
|
|
||||||
|
|
||||||
class Timeout(RPCException):
|
|
||||||
"""Signifies that a timeout has occurred.
|
|
||||||
|
|
||||||
This exception is raised if the rpc_response_timeout is reached while
|
|
||||||
waiting for a response from the remote side.
|
|
||||||
"""
|
|
||||||
msg_fmt = _('Timeout while waiting on RPC response - '
|
|
||||||
'topic: "%(topic)s", RPC method: "%(method)s" '
|
|
||||||
'info: "%(info)s"')
|
|
||||||
|
|
||||||
def __init__(self, info=None, topic=None, method=None):
|
|
||||||
"""Initiates Timeout object.
|
|
||||||
|
|
||||||
:param info: Extra info to convey to the user
|
|
||||||
:param topic: The topic that the rpc call was sent to
|
|
||||||
:param rpc_method_name: The name of the rpc method being
|
|
||||||
called
|
|
||||||
"""
|
|
||||||
self.info = info
|
|
||||||
self.topic = topic
|
|
||||||
self.method = method
|
|
||||||
super(Timeout, self).__init__(
|
|
||||||
None,
|
|
||||||
info=info or _('<unknown>'),
|
|
||||||
topic=topic or _('<unknown>'),
|
|
||||||
method=method or _('<unknown>'))
|
|
||||||
|
|
||||||
|
|
||||||
class DuplicateMessageError(RPCException):
|
|
||||||
msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
|
|
||||||
|
|
||||||
|
|
||||||
class InvalidRPCConnectionReuse(RPCException):
|
|
||||||
msg_fmt = _("Invalid reuse of an RPC connection.")
|
|
||||||
|
|
||||||
|
|
||||||
class UnsupportedRpcVersion(RPCException):
|
|
||||||
msg_fmt = _("Specified RPC version, %(version)s, not supported by "
|
|
||||||
"this endpoint.")
|
|
||||||
|
|
||||||
|
|
||||||
class UnsupportedRpcEnvelopeVersion(RPCException):
|
|
||||||
msg_fmt = _("Specified RPC envelope version, %(version)s, "
|
|
||||||
"not supported by this endpoint.")
|
|
||||||
|
|
||||||
|
|
||||||
class RpcVersionCapError(RPCException):
|
|
||||||
msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
|
|
||||||
|
|
||||||
|
|
||||||

class Connection(object):
    """A connection, returned by rpc.create_connection().

    This class represents a connection to the message bus used for rpc.
    An instance of this class should never be created by users of the rpc API.
    Use rpc.create_connection() instead.
    """
    def close(self):
        """Close the connection.

        This method must be called when the connection will no longer be used.
        It will ensure that any resources associated with the connection, such
        as a network connection, are cleaned up.
        """
        raise NotImplementedError()

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer on this connection.

        A consumer is associated with a message queue on the backend message
        bus. The consumer will read messages from the queue, unpack them, and
        dispatch them to the proxy object. The contents of the message pulled
        off of the queue will determine which method gets called on the proxy
        object.

        :param topic: This is a name associated with what to consume from.
                      Multiple instances of a service may consume from the same
                      topic. For example, all instances of nova-compute consume
                      from a queue called "compute". In that case, the
                      messages will get distributed amongst the consumers in a
                      round-robin fashion if fanout=False. If fanout=True,
                      every consumer associated with this topic will get a
                      copy of every message.
        :param proxy: The object that will handle all incoming messages.
        :param fanout: Whether or not this is a fanout topic. See the
                       documentation for the topic parameter for some
                       additional comments on this.
        """
        raise NotImplementedError()

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker on this connection.

        A worker is like a regular consumer of messages directed to a
        topic, except that it is part of a set of such consumers (the
        "pool") which may run in parallel. Every pool of workers will
        receive a given message, but only one worker in the pool will
        be asked to process it. Load is distributed across the members
        of the pool in round-robin fashion.

        :param topic: This is a name associated with what to consume from.
                      Multiple instances of a service may consume from the same
                      topic.
        :param proxy: The object that will handle all incoming messages.
        :param pool_name: String containing the name of the pool of workers
        """
        raise NotImplementedError()

    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
        """Register as a member of a group of consumers.

        Uses given topic from the specified exchange.
        Exactly one member of a given pool will receive each message.

        A message will be delivered to multiple pools, if more than
        one is created.

        :param callback: Callable to be invoked for each message.
        :type callback: callable accepting one argument
        :param pool_name: The name of the consumer pool.
        :type pool_name: str
        :param topic: The routing topic for desired messages.
        :type topic: str
        :param exchange_name: The name of the message exchange where
                              the client should attach. Defaults to
                              the configured exchange.
        :type exchange_name: str
        """
        raise NotImplementedError()

    def consume_in_thread(self):
        """Spawn a thread to handle incoming messages.

        Spawn a thread that will be responsible for handling all incoming
        messages for consumers that were set up on this connection.

        Message dispatching inside of this is expected to be implemented in a
        non-blocking manner. An example implementation would be having this
        thread pull messages in for all of the consumers, but utilize a thread
        pool for dispatching the messages to the proxy objects.
        """
        raise NotImplementedError()


def _safe_log(log_func, msg, msg_data):
    """Sanitizes the msg_data field before logging."""
    SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']

    def _fix_passwords(d):
        """Sanitizes the password fields in the dictionary."""
        for k in six.iterkeys(d):
            if k.lower().find('password') != -1:
                d[k] = '<SANITIZED>'
            elif k.lower() in SANITIZE:
                d[k] = '<SANITIZED>'
            elif isinstance(d[k], list):
                for e in d[k]:
                    if isinstance(e, dict):
                        _fix_passwords(e)
            elif isinstance(d[k], dict):
                _fix_passwords(d[k])
        return d

    return log_func(msg, _fix_passwords(copy.deepcopy(msg_data)))
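

# NOTE(editor): illustrative sketch added for this write-up, not part of the
# original module. Any key containing 'password', plus the keys listed in
# SANITIZE above, is replaced with '<SANITIZED>' (recursively) before the
# message data is logged.
def _example_safe_log():
    msg_data = {'method': 'create_stack',
                'args': {'password': 'secret',
                         'auth_token': 'abc123',
                         'stack_name': 'demo'}}
    # Logs: {'method': 'create_stack',
    #        'args': {'password': '<SANITIZED>',
    #                 'auth_token': '<SANITIZED>',
    #                 'stack_name': 'demo'}}
    _safe_log(LOG.debug, 'rpc call payload: %s', msg_data)
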
def serialize_remote_exception(failure_info, log_failure=True):
|
|
||||||
"""Prepares exception data to be sent over rpc.
|
|
||||||
|
|
||||||
Failure_info should be a sys.exc_info() tuple.
|
|
||||||
|
|
||||||
"""
|
|
||||||
tb = traceback.format_exception(*failure_info)
|
|
||||||
failure = failure_info[1]
|
|
||||||
if log_failure:
|
|
||||||
LOG.error(_LE("Returning exception %s to caller"),
|
|
||||||
six.text_type(failure))
|
|
||||||
LOG.error(tb)
|
|
||||||
|
|
||||||
kwargs = {}
|
|
||||||
if hasattr(failure, 'kwargs'):
|
|
||||||
kwargs = failure.kwargs
|
|
||||||
|
|
||||||
# NOTE(matiu): With cells, it's possible to re-raise remote, remote
|
|
||||||
# exceptions. Lets turn it back into the original exception type.
|
|
||||||
cls_name = str(failure.__class__.__name__)
|
|
||||||
mod_name = str(failure.__class__.__module__)
|
|
||||||
if (cls_name.endswith(_REMOTE_POSTFIX) and
|
|
||||||
mod_name.endswith(_REMOTE_POSTFIX)):
|
|
||||||
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
|
|
||||||
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
|
|
||||||
|
|
||||||
data = {
|
|
||||||
'class': cls_name,
|
|
||||||
'module': mod_name,
|
|
||||||
'message': six.text_type(failure),
|
|
||||||
'tb': tb,
|
|
||||||
'args': failure.args,
|
|
||||||
'kwargs': kwargs
|
|
||||||
}
|
|
||||||
|
|
||||||
json_data = jsonutils.dumps(data)
|
|
||||||
|
|
||||||
return json_data
|
|
||||||
|
|
||||||
|
|
||||||
def deserialize_remote_exception(conf, data):
|
|
||||||
failure = jsonutils.loads(str(data))
|
|
||||||
|
|
||||||
trace = failure.get('tb', [])
|
|
||||||
message = failure.get('message', "") + "\n" + "\n".join(trace)
|
|
||||||
name = failure.get('class')
|
|
||||||
module = failure.get('module')
|
|
||||||
|
|
||||||
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
|
|
||||||
# order to prevent arbitrary code execution.
|
|
||||||
if module not in conf.allowed_rpc_exception_modules:
|
|
||||||
return RemoteError(name, failure.get('message'), trace)
|
|
||||||
|
|
||||||
try:
|
|
||||||
mod = importutils.import_module(module)
|
|
||||||
klass = getattr(mod, name)
|
|
||||||
if not issubclass(klass, Exception):
|
|
||||||
raise TypeError("Can only deserialize Exceptions")
|
|
||||||
|
|
||||||
failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
|
|
||||||
except (AttributeError, TypeError, ImportError):
|
|
||||||
return RemoteError(name, failure.get('message'), trace)
|
|
||||||
|
|
||||||
ex_type = type(failure)
|
|
||||||
str_override = lambda self: message
|
|
||||||
new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
|
|
||||||
{'__str__': str_override, '__unicode__': str_override})
|
|
||||||
new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
|
|
||||||
try:
|
|
||||||
# NOTE(ameade): Dynamically create a new exception type and swap it in
|
|
||||||
# as the new type for the exception. This only works on user defined
|
|
||||||
# Exceptions and not core python exceptions. This is important because
|
|
||||||
# we cannot necessarily change an exception message so we must override
|
|
||||||
# the __str__ method.
|
|
||||||
failure.__class__ = new_ex_type
|
|
||||||
except TypeError:
|
|
||||||
# NOTE(ameade): If a core exception then just add the traceback to the
|
|
||||||
# first exception argument.
|
|
||||||
failure.args = (message,) + failure.args[1:]
|
|
||||||
return failure
|
|
||||||
|
|
||||||
|
|
||||||
class CommonRpcContext(object):
|
|
||||||
def __init__(self, **kwargs):
|
|
||||||
self.values = kwargs
|
|
||||||
|
|
||||||
def __getattr__(self, key):
|
|
||||||
try:
|
|
||||||
return self.values[key]
|
|
||||||
except KeyError:
|
|
||||||
raise AttributeError(key)
|
|
||||||
|
|
||||||
def to_dict(self):
|
|
||||||
return copy.deepcopy(self.values)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def from_dict(cls, values):
|
|
||||||
return cls(**values)
|
|
||||||
|
|
||||||
def deepcopy(self):
|
|
||||||
return self.from_dict(self.to_dict())
|
|
||||||
|
|
||||||
def update_store(self):
|
|
||||||
local.store.context = self
|
|
||||||
|
|
||||||
def elevated(self, read_deleted=None, overwrite=False):
|
|
||||||
"""Return a version of this context with admin flag set."""
|
|
||||||
# TODO(russellb) This method is a bit of a nova-ism. It makes
|
|
||||||
# some assumptions about the data in the request context sent
|
|
||||||
# across rpc, while the rest of this class does not. We could get
|
|
||||||
# rid of this if we changed the nova code that uses this to
|
|
||||||
# convert the RpcContext back to its native RequestContext doing
|
|
||||||
# something like nova.context.RequestContext.from_dict(ctxt.to_dict())
|
|
||||||
|
|
||||||
context = self.deepcopy()
|
|
||||||
context.values['is_admin'] = True
|
|
||||||
|
|
||||||
context.values.setdefault('roles', [])
|
|
||||||
|
|
||||||
if 'admin' not in context.values['roles']:
|
|
||||||
context.values['roles'].append('admin')
|
|
||||||
|
|
||||||
if read_deleted is not None:
|
|
||||||
context.values['read_deleted'] = read_deleted
|
|
||||||
|
|
||||||
return context
|
|
||||||
|
|
||||||
|
|
||||||
class ClientException(Exception):
|
|
||||||
"""Encapsulates actual exception expected to be hit by a RPC proxy object.
|
|
||||||
|
|
||||||
Merely instantiating it records the current exception information, which
|
|
||||||
will be passed back to the RPC client without exceptional logging.
|
|
||||||
"""
|
|
||||||
def __init__(self):
|
|
||||||
self._exc_info = sys.exc_info()
|
|
||||||
|
|
||||||
|
|
||||||
def catch_client_exception(exceptions, func, *args, **kwargs):
|
|
||||||
try:
|
|
||||||
return func(*args, **kwargs)
|
|
||||||
except Exception as e:
|
|
||||||
if type(e) in exceptions:
|
|
||||||
raise ClientException()
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def client_exceptions(*exceptions):
|
|
||||||
"""Decorator for manager methods that raise expected exceptions.
|
|
||||||
|
|
||||||
Marking a Manager method with this decorator allows the declaration
|
|
||||||
of expected exceptions that the RPC layer should not consider fatal,
|
|
||||||
and not log as if they were generated in a real error scenario. Note
|
|
||||||
that this will cause listed exceptions to be wrapped in a
|
|
||||||
ClientException, which is used internally by the RPC layer.
|
|
||||||
"""
|
|
||||||
def outer(func):
|
|
||||||
def inner(*args, **kwargs):
|
|
||||||
return catch_client_exception(exceptions, func, *args, **kwargs)
|
|
||||||
return inner
|
|
||||||
return outer
|
|
||||||
|
|
||||||
|
|
||||||
# TODO(sirp): we should deprecate this in favor of
# using `versionutils.is_compatible` directly
def version_is_compatible(imp_version, version):
    """Determine whether versions are compatible.

    :param imp_version: The version implemented
    :param version: The version requested by an incoming message.
    """
    return versionutils.is_compatible(version, imp_version)


def serialize_msg(raw_msg):
    # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
    # information about this format.
    msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
           _MESSAGE_KEY: jsonutils.dumps(raw_msg)}

    return msg


def deserialize_msg(msg):
    # NOTE(russellb): Hang on to your hats, this road is about to
    # get a little bumpy.
    #
    # Robustness Principle:
    #    "Be strict in what you send, liberal in what you accept."
    #
    # At this point we have to do a bit of guessing about what it
    # is we just received. Here is the set of possibilities:
    #
    # 1) We received a dict. This could be 2 things:
    #
    #   a) Inspect it to see if it looks like a standard message envelope.
    #      If so, great!
    #
    #   b) If it doesn't look like a standard message envelope, it could either
    #      be a notification, or a message from before we added a message
    #      envelope (referred to as version 1.0).
    #      Just return the message as-is.
    #
    # 2) It's any other non-dict type. Just return it and hope for the best.
    #    This case covers return values from rpc.call() from before message
    #    envelopes were used. (messages to call a method were always a dict)

    if not isinstance(msg, dict):
        # See #2 above.
        return msg

    base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
    if not all(map(lambda key: key in msg, base_envelope_keys)):
        # See #1.b above.
        return msg

    # At this point we think we have the message envelope
    # format we were expecting. (#1.a above)

    if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
        raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])

    raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])

    return raw_msg
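

# NOTE(editor): illustrative sketch added for this write-up, not part of the
# original module. A payload is wrapped in the version-2.0 envelope by
# serialize_msg() and recovered (or passed through, for pre-envelope messages)
# by deserialize_msg().
def _example_envelope_round_trip():
    payload = {'method': 'ping', 'args': {'data': 'hello'}}
    envelope = serialize_msg(payload)
    # envelope == {'oslo.version': '2.0', 'oslo.message': '{"method": "ping", ...}'}
    return deserialize_msg(envelope) == payload
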
@ -1,178 +0,0 @@
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
|
|
||||||
Code for rpc message dispatching.
|
|
||||||
|
|
||||||
Messages that come in have a version number associated with them. RPC API
|
|
||||||
version numbers are in the form:
|
|
||||||
|
|
||||||
Major.Minor
|
|
||||||
|
|
||||||
For a given message with version X.Y, the receiver must be marked as able to
|
|
||||||
handle messages of version A.B, where:
|
|
||||||
|
|
||||||
A = X
|
|
||||||
|
|
||||||
B >= Y
|
|
||||||
|
|
||||||
The Major version number would be incremented for an almost completely new API.
|
|
||||||
The Minor version number would be incremented for backwards compatible changes
|
|
||||||
to an existing API. A backwards compatible change could be something like
|
|
||||||
adding a new method, adding an argument to an existing method (but not
|
|
||||||
requiring it), or changing the type for an existing argument (but still
|
|
||||||
handling the old type as well).
|
|
||||||
|
|
||||||
The conversion over to a versioned API must be done on both the client side and
|
|
||||||
server side of the API at the same time. However, as the code stands today,
|
|
||||||
there can be both versioned and unversioned APIs implemented in the same code
|
|
||||||
base.
|
|
||||||
|
|
||||||
EXAMPLES
|
|
||||||
========
|
|
||||||
|
|
||||||
Nova was the first project to use versioned rpc APIs. Consider the compute rpc
|
|
||||||
API as an example. The client side is in nova/compute/rpcapi.py and the server
|
|
||||||
side is in nova/compute/manager.py.
|
|
||||||
|
|
||||||
|
|
||||||
Example 1) Adding a new method.
|
|
||||||
-------------------------------
|
|
||||||
|
|
||||||
Adding a new method is a backwards compatible change. It should be added to
|
|
||||||
nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to
|
|
||||||
X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should
|
|
||||||
have a specific version specified to indicate the minimum API version that must
|
|
||||||
be implemented for the method to be supported. For example::
|
|
||||||
|
|
||||||
def get_host_uptime(self, ctxt, host):
|
|
||||||
topic = _compute_topic(self.topic, ctxt, host, None)
|
|
||||||
return self.call(ctxt, self.make_msg('get_host_uptime'), topic,
|
|
||||||
version='1.1')
|
|
||||||
|
|
||||||
In this case, version '1.1' is the first version that supported the
|
|
||||||
get_host_uptime() method.
|
|
||||||
|
|
||||||
|
|
||||||
Example 2) Adding a new parameter.
|
|
||||||
----------------------------------
|
|
||||||
|
|
||||||
Adding a new parameter to an rpc method can be made backwards compatible. The
|
|
||||||
RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped.
|
|
||||||
The implementation of the method must not expect the parameter to be present.::
|
|
||||||
|
|
||||||
def some_remote_method(self, arg1, arg2, newarg=None):
|
|
||||||
# The code needs to deal with newarg=None for cases
|
|
||||||
# where an older client sends a message without it.
|
|
||||||
pass
|
|
||||||
|
|
||||||
On the client side, the same changes should be made as in example 1. The
|
|
||||||
minimum version that supports the new parameter should be specified.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from heat.openstack.common.rpc import common as rpc_common
|
|
||||||
from heat.openstack.common.rpc import serializer as rpc_serializer
|
|
||||||
|
|
||||||
|
|
||||||
class RpcDispatcher(object):
    """Dispatch rpc messages according to the requested API version.

    This class can be used as the top level 'manager' for a service. It
    contains a list of underlying managers that have an API_VERSION attribute.
    """

    def __init__(self, callbacks, serializer=None):
        """Initialize the rpc dispatcher.

        :param callbacks: List of proxy objects that are an instance
                          of a class with rpc methods exposed. Each proxy
                          object should have an RPC_API_VERSION attribute.
        :param serializer: The Serializer object that will be used to
                           deserialize arguments before the method call and
                           to serialize the result after it returns.
        """
        self.callbacks = callbacks
        if serializer is None:
            serializer = rpc_serializer.NoOpSerializer()
        self.serializer = serializer
        super(RpcDispatcher, self).__init__()

    def _deserialize_args(self, context, kwargs):
        """Helper method called to deserialize args before dispatch.

        This calls our serializer on each argument, returning a new set of
        args that have been deserialized.

        :param context: The request context
        :param kwargs: The arguments to be deserialized
        :returns: A new set of deserialized args
        """
        new_kwargs = dict()
        for argname, arg in six.iteritems(kwargs):
            new_kwargs[argname] = self.serializer.deserialize_entity(context,
                                                                     arg)
        return new_kwargs

    def dispatch(self, ctxt, version, method, namespace, **kwargs):
        """Dispatch a message based on a requested version.

        :param ctxt: The request context
        :param version: The requested API version from the incoming message
        :param method: The method requested to be called by the incoming
                       message.
        :param namespace: The namespace for the requested method. If None,
                          the dispatcher will look for a method on a callback
                          object with no namespace set.
        :param kwargs: A dict of keyword arguments to be passed to the method.

        :returns: Whatever is returned by the underlying method that gets
                  called.
        """
        if not version:
            version = '1.0'

        had_compatible = False
        for proxyobj in self.callbacks:
            # Check for namespace compatibility
            try:
                cb_namespace = proxyobj.RPC_API_NAMESPACE
            except AttributeError:
                cb_namespace = None

            if namespace != cb_namespace:
                continue

            # Check for version compatibility
            try:
                rpc_api_version = proxyobj.RPC_API_VERSION
            except AttributeError:
                rpc_api_version = '1.0'

            is_compatible = rpc_common.version_is_compatible(rpc_api_version,
                                                             version)
            had_compatible = had_compatible or is_compatible

            if not hasattr(proxyobj, method):
                continue
            if is_compatible:
                kwargs = self._deserialize_args(ctxt, kwargs)
                result = getattr(proxyobj, method)(ctxt, **kwargs)
                return self.serializer.serialize_entity(ctxt, result)

        if had_compatible:
            raise AttributeError("No such RPC function '%s'" % method)
        else:
            raise rpc_common.UnsupportedRpcVersion(version=version)
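

# NOTE(editor): illustrative sketch added for this write-up, not part of the
# original module. A service-side manager declares the highest API version it
# implements via RPC_API_VERSION; the dispatcher routes an incoming message to
# it as long as the requested version is compatible. The manager below is made
# up for the example.
class _ExampleManager(object):
    RPC_API_VERSION = '1.1'

    def get_host_uptime(self, ctxt, host=None):
        return 'host %s is up' % host


def _example_dispatch():
    dispatcher = RpcDispatcher([_ExampleManager()])
    # A client that requested version '1.0' or '1.1' would be served here;
    # '2.0' would raise UnsupportedRpcVersion.
    return dispatcher.dispatch(ctxt=None, version='1.1',
                               method='get_host_uptime', namespace=None,
                               host='compute-1')
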
@ -1,195 +0,0 @@
# Copyright 2011 OpenStack Foundation
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""Fake RPC implementation which calls proxy methods directly with no
|
|
||||||
queues. Casts will block, but this is very useful for tests.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import inspect
|
|
||||||
# NOTE(russellb): We specifically want to use json, not our own jsonutils.
|
|
||||||
# jsonutils has some extra logic to automatically convert objects to primitive
|
|
||||||
# types so that they can be serialized. We want to catch all cases where
|
|
||||||
# non-primitive types make it into this code and treat it as an error.
|
|
||||||
import json
|
|
||||||
import time
|
|
||||||
|
|
||||||
import eventlet
|
|
||||||
import six
|
|
||||||
|
|
||||||
from heat.openstack.common.rpc import common as rpc_common
|
|
||||||
|
|
||||||
CONSUMERS = {}
|
|
||||||
|
|
||||||
|
|
||||||
class RpcContext(rpc_common.CommonRpcContext):
|
|
||||||
def __init__(self, **kwargs):
|
|
||||||
super(RpcContext, self).__init__(**kwargs)
|
|
||||||
self._response = []
|
|
||||||
self._done = False
|
|
||||||
|
|
||||||
def deepcopy(self):
|
|
||||||
values = self.to_dict()
|
|
||||||
new_inst = self.__class__(**values)
|
|
||||||
new_inst._response = self._response
|
|
||||||
new_inst._done = self._done
|
|
||||||
return new_inst
|
|
||||||
|
|
||||||
def reply(self, reply=None, failure=None, ending=False):
|
|
||||||
if ending:
|
|
||||||
self._done = True
|
|
||||||
if not self._done:
|
|
||||||
self._response.append((reply, failure))
|
|
||||||
|
|
||||||
|
|
||||||
class Consumer(object):
|
|
||||||
def __init__(self, topic, proxy):
|
|
||||||
self.topic = topic
|
|
||||||
self.proxy = proxy
|
|
||||||
|
|
||||||
def call(self, context, version, method, namespace, args, timeout):
|
|
||||||
done = eventlet.event.Event()
|
|
||||||
|
|
||||||
def _inner():
|
|
||||||
ctxt = RpcContext.from_dict(context.to_dict())
|
|
||||||
try:
|
|
||||||
rval = self.proxy.dispatch(context, version, method,
|
|
||||||
namespace, **args)
|
|
||||||
res = []
|
|
||||||
# Caller might have called ctxt.reply() manually
|
|
||||||
for (reply, failure) in ctxt._response:
|
|
||||||
if failure:
|
|
||||||
six.reraise(failure[0], failure[1], failure[2])
|
|
||||||
res.append(reply)
|
|
||||||
# if ending not 'sent'...we might have more data to
|
|
||||||
# return from the function itself
|
|
||||||
if not ctxt._done:
|
|
||||||
if inspect.isgenerator(rval):
|
|
||||||
for val in rval:
|
|
||||||
res.append(val)
|
|
||||||
else:
|
|
||||||
res.append(rval)
|
|
||||||
done.send(res)
|
|
||||||
except rpc_common.ClientException as e:
|
|
||||||
done.send_exception(e._exc_info[1])
|
|
||||||
except Exception as e:
|
|
||||||
done.send_exception(e)
|
|
||||||
|
|
||||||
thread = eventlet.greenthread.spawn(_inner)
|
|
||||||
|
|
||||||
if timeout:
|
|
||||||
start_time = time.time()
|
|
||||||
while not done.ready():
|
|
||||||
eventlet.greenthread.sleep(1)
|
|
||||||
cur_time = time.time()
|
|
||||||
if (cur_time - start_time) > timeout:
|
|
||||||
thread.kill()
|
|
||||||
raise rpc_common.Timeout()
|
|
||||||
|
|
||||||
return done.wait()
|
|
||||||
|
|
||||||
|
|
||||||
class Connection(object):
|
|
||||||
"""Connection object."""
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self.consumers = []
|
|
||||||
|
|
||||||
def create_consumer(self, topic, proxy, fanout=False):
|
|
||||||
consumer = Consumer(topic, proxy)
|
|
||||||
self.consumers.append(consumer)
|
|
||||||
if topic not in CONSUMERS:
|
|
||||||
CONSUMERS[topic] = []
|
|
||||||
CONSUMERS[topic].append(consumer)
|
|
||||||
|
|
||||||
def close(self):
|
|
||||||
for consumer in self.consumers:
|
|
||||||
CONSUMERS[consumer.topic].remove(consumer)
|
|
||||||
self.consumers = []
|
|
||||||
|
|
||||||
def consume_in_thread(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def create_connection(conf, new=True):
|
|
||||||
"""Create a connection."""
|
|
||||||
return Connection()
|
|
||||||
|
|
||||||
|
|
||||||
def check_serialize(msg):
|
|
||||||
"""Make sure a message intended for rpc can be serialized."""
|
|
||||||
json.dumps(msg)
|
|
||||||
|
|
||||||
|
|
||||||
def multicall(conf, context, topic, msg, timeout=None):
|
|
||||||
"""Make a call that returns multiple times."""
|
|
||||||
|
|
||||||
check_serialize(msg)
|
|
||||||
|
|
||||||
method = msg.get('method')
|
|
||||||
if not method:
|
|
||||||
return
|
|
||||||
args = msg.get('args', {})
|
|
||||||
version = msg.get('version')
|
|
||||||
namespace = msg.get('namespace')
|
|
||||||
|
|
||||||
try:
|
|
||||||
consumer = CONSUMERS[topic][0]
|
|
||||||
except (KeyError, IndexError):
|
|
||||||
raise rpc_common.Timeout("No consumers available")
|
|
||||||
else:
|
|
||||||
return consumer.call(context, version, method, namespace, args,
|
|
||||||
timeout)
|
|
||||||
|
|
||||||
|
|
||||||
def call(conf, context, topic, msg, timeout=None):
|
|
||||||
"""Sends a message on a topic and wait for a response."""
|
|
||||||
rv = multicall(conf, context, topic, msg, timeout)
|
|
||||||
# NOTE(vish): return the last result from the multicall
|
|
||||||
rv = list(rv)
|
|
||||||
if not rv:
|
|
||||||
return
|
|
||||||
return rv[-1]
|
|
||||||
|
|
||||||
|
|
||||||
def cast(conf, context, topic, msg):
|
|
||||||
check_serialize(msg)
|
|
||||||
try:
|
|
||||||
call(conf, context, topic, msg)
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def notify(conf, context, topic, msg, envelope):
|
|
||||||
check_serialize(msg)
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup():
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def fanout_cast(conf, context, topic, msg):
|
|
||||||
"""Cast to all consumers of a topic."""
|
|
||||||
check_serialize(msg)
|
|
||||||
method = msg.get('method')
|
|
||||||
if not method:
|
|
||||||
return
|
|
||||||
args = msg.get('args', {})
|
|
||||||
version = msg.get('version')
|
|
||||||
namespace = msg.get('namespace')
|
|
||||||
|
|
||||||
for consumer in CONSUMERS.get(topic, []):
|
|
||||||
try:
|
|
||||||
consumer.call(context, version, method, namespace, args, None)
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
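

# NOTE(editor): illustrative end-to-end sketch added for this write-up, not part
# of the original module. The fake driver keeps consumers in the in-process
# CONSUMERS dict, so call() is dispatched synchronously (via an eventlet
# greenthread) to the registered proxy's dispatch() method. The proxy and topic
# below are made up for the example.
class _EchoProxy(object):
    def dispatch(self, context, version, method, namespace, **kwargs):
        return kwargs.get('text')


def _example_fake_call(conf=None):
    conn = create_connection(conf)
    conn.create_consumer('demo_topic', _EchoProxy())
    ctxt = rpc_common.CommonRpcContext(user='demo')
    result = call(conf, ctxt, 'demo_topic',
                  {'method': 'echo', 'args': {'text': 'hello'}})
    conn.close()
    return result  # 'hello'
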
@ -1,873 +0,0 @@
# Copyright 2011 OpenStack Foundation
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import functools
|
|
||||||
import itertools
|
|
||||||
import socket
|
|
||||||
import ssl
|
|
||||||
import time
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
import eventlet
|
|
||||||
import greenlet
|
|
||||||
import kombu
|
|
||||||
import kombu.connection
|
|
||||||
import kombu.entity
|
|
||||||
import kombu.messaging
|
|
||||||
from oslo.config import cfg
|
|
||||||
import six
|
|
||||||
|
|
||||||
from heat.openstack.common import excutils
|
|
||||||
from heat.openstack.common.gettextutils import _, _LE, _LI
|
|
||||||
from heat.openstack.common import network_utils
|
|
||||||
from heat.openstack.common.rpc import amqp as rpc_amqp
|
|
||||||
from heat.openstack.common.rpc import common as rpc_common
|
|
||||||
from heat.openstack.common import sslutils
|
|
||||||
|
|
||||||
kombu_opts = [
|
|
||||||
cfg.StrOpt('kombu_ssl_version',
|
|
||||||
default='',
|
|
||||||
help='If SSL is enabled, the SSL version to use. Valid '
|
|
||||||
'values are TLSv1, SSLv23 and SSLv3. SSLv2 might '
|
|
||||||
'be available on some distributions.'
|
|
||||||
),
|
|
||||||
cfg.StrOpt('kombu_ssl_keyfile',
|
|
||||||
default='',
|
|
||||||
help='SSL key file (valid only if SSL enabled)'),
|
|
||||||
cfg.StrOpt('kombu_ssl_certfile',
|
|
||||||
default='',
|
|
||||||
help='SSL cert file (valid only if SSL enabled)'),
|
|
||||||
cfg.StrOpt('kombu_ssl_ca_certs',
|
|
||||||
default='',
|
|
||||||
help='SSL certification authority file '
|
|
||||||
'(valid only if SSL enabled)'),
|
|
||||||
cfg.FloatOpt('kombu_reconnect_delay',
|
|
||||||
default=1.0,
|
|
||||||
help='How long to wait before reconnecting in response to an '
|
|
||||||
'AMQP consumer cancel notification.'),
|
|
||||||
cfg.StrOpt('rabbit_host',
|
|
||||||
default='localhost',
|
|
||||||
help='The RabbitMQ broker address where a single node is used'),
|
|
||||||
cfg.IntOpt('rabbit_port',
|
|
||||||
default=5672,
|
|
||||||
help='The RabbitMQ broker port where a single node is used'),
|
|
||||||
cfg.ListOpt('rabbit_hosts',
|
|
||||||
default=['$rabbit_host:$rabbit_port'],
|
|
||||||
help='RabbitMQ HA cluster host:port pairs'),
|
|
||||||
cfg.BoolOpt('rabbit_use_ssl',
|
|
||||||
default=False,
|
|
||||||
help='Connect over SSL for RabbitMQ'),
|
|
||||||
cfg.StrOpt('rabbit_userid',
|
|
||||||
default='guest',
|
|
||||||
help='The RabbitMQ userid'),
|
|
||||||
cfg.StrOpt('rabbit_password',
|
|
||||||
default='guest',
|
|
||||||
help='The RabbitMQ password',
|
|
||||||
secret=True),
|
|
||||||
cfg.StrOpt('rabbit_virtual_host',
|
|
||||||
default='/',
|
|
||||||
help='The RabbitMQ virtual host'),
|
|
||||||
cfg.IntOpt('rabbit_retry_interval',
|
|
||||||
default=1,
|
|
||||||
help='How frequently to retry connecting with RabbitMQ'),
|
|
||||||
cfg.IntOpt('rabbit_retry_backoff',
|
|
||||||
default=2,
|
|
||||||
help='How long to backoff for between retries when connecting '
|
|
||||||
'to RabbitMQ'),
|
|
||||||
cfg.IntOpt('rabbit_max_retries',
|
|
||||||
default=0,
|
|
||||||
help='Maximum number of RabbitMQ connection retries. '
|
|
||||||
'Default is 0 (infinite retry count)'),
|
|
||||||
cfg.BoolOpt('rabbit_ha_queues',
|
|
||||||
default=False,
|
|
||||||
help='Use HA queues in RabbitMQ (x-ha-policy: all). '
|
|
||||||
'If you change this option, you must wipe the '
|
|
||||||
'RabbitMQ database.'),
|
|
||||||
|
|
||||||
]
|
|
||||||
|
|
||||||
cfg.CONF.register_opts(kombu_opts)
|
|
||||||
|
|
||||||
LOG = rpc_common.LOG
|
|
||||||
|
|
||||||
|
|
||||||
def _get_queue_arguments(conf):
|
|
||||||
"""Construct the arguments for declaring a queue.
|
|
||||||
|
|
||||||
If the rabbit_ha_queues option is set, we declare a mirrored queue
|
|
||||||
as described here:
|
|
||||||
|
|
||||||
http://www.rabbitmq.com/ha.html
|
|
||||||
|
|
||||||
Setting x-ha-policy to all means that the queue will be mirrored
|
|
||||||
to all nodes in the cluster.
|
|
||||||
"""
|
|
||||||
return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
|
|
||||||
|
|
||||||
|
|
||||||
class ConsumerBase(object):
|
|
||||||
"""Consumer base class."""
|
|
||||||
|
|
||||||
def __init__(self, channel, callback, tag, **kwargs):
|
|
||||||
"""Declare a queue on an amqp channel.
|
|
||||||
|
|
||||||
'channel' is the amqp channel to use
|
|
||||||
'callback' is the callback to call when messages are received
|
|
||||||
'tag' is a unique ID for the consumer on the channel
|
|
||||||
|
|
||||||
queue name, exchange name, and other kombu options are
|
|
||||||
passed in here as a dictionary.
|
|
||||||
"""
|
|
||||||
self.callback = callback
|
|
||||||
self.tag = str(tag)
|
|
||||||
self.kwargs = kwargs
|
|
||||||
self.queue = None
|
|
||||||
self.ack_on_error = kwargs.get('ack_on_error', True)
|
|
||||||
self.reconnect(channel)
|
|
||||||
|
|
||||||
def reconnect(self, channel):
|
|
||||||
"""Re-declare the queue after a rabbit reconnect."""
|
|
||||||
self.channel = channel
|
|
||||||
self.kwargs['channel'] = channel
|
|
||||||
self.queue = kombu.entity.Queue(**self.kwargs)
|
|
||||||
self.queue.declare()
|
|
||||||
|
|
||||||
def _callback_handler(self, message, callback):
|
|
||||||
"""Call callback with deserialized message.
|
|
||||||
|
|
||||||
Messages that are processed without exception are ack'ed.
|
|
||||||
|
|
||||||
If the message processing generates an exception, it will be
|
|
||||||
ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
|
|
||||||
"""
|
|
||||||
|
|
||||||
try:
|
|
||||||
msg = rpc_common.deserialize_msg(message.payload)
|
|
||||||
callback(msg)
|
|
||||||
except Exception:
|
|
||||||
if self.ack_on_error:
|
|
||||||
LOG.exception(_LE("Failed to process message"
|
|
||||||
" ... skipping it."))
|
|
||||||
message.ack()
|
|
||||||
else:
|
|
||||||
LOG.exception(_LE("Failed to process message"
|
|
||||||
" ... will requeue."))
|
|
||||||
message.requeue()
|
|
||||||
else:
|
|
||||||
message.ack()
|
|
||||||
|
|
||||||
def consume(self, *args, **kwargs):
|
|
||||||
"""Actually declare the consumer on the amqp channel. This will
|
|
||||||
start the flow of messages from the queue. Using the
|
|
||||||
Connection.iterconsume() iterator will process the messages,
|
|
||||||
calling the appropriate callback.
|
|
||||||
|
|
||||||
If a callback is specified in kwargs, use that. Otherwise,
|
|
||||||
use the callback passed during __init__()
|
|
||||||
|
|
||||||
If kwargs['nowait'] is True, then this call will block until
|
|
||||||
a message is read.
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
options = {'consumer_tag': self.tag}
|
|
||||||
options['nowait'] = kwargs.get('nowait', False)
|
|
||||||
callback = kwargs.get('callback', self.callback)
|
|
||||||
if not callback:
|
|
||||||
raise ValueError("No callback defined")
|
|
||||||
|
|
||||||
def _callback(raw_message):
|
|
||||||
message = self.channel.message_to_python(raw_message)
|
|
||||||
self._callback_handler(message, callback)
|
|
||||||
|
|
||||||
self.queue.consume(*args, callback=_callback, **options)
|
|
||||||
|
|
||||||
def cancel(self):
|
|
||||||
"""Cancel the consuming from the queue, if it has started."""
|
|
||||||
try:
|
|
||||||
self.queue.cancel(self.tag)
|
|
||||||
except KeyError as e:
|
|
||||||
# NOTE(comstud): Kludge to get around a amqplib bug
|
|
||||||
if str(e) != "u'%s'" % self.tag:
|
|
||||||
raise
|
|
||||||
self.queue = None
|
|
||||||
|
|
||||||
|
|
||||||
class DirectConsumer(ConsumerBase):
|
|
||||||
"""Queue/consumer class for 'direct'."""
|
|
||||||
|
|
||||||
def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
|
|
||||||
"""Init a 'direct' queue.
|
|
||||||
|
|
||||||
'channel' is the amqp channel to use
|
|
||||||
'msg_id' is the msg_id to listen on
|
|
||||||
'callback' is the callback to call when messages are received
|
|
||||||
'tag' is a unique ID for the consumer on the channel
|
|
||||||
|
|
||||||
Other kombu options may be passed
|
|
||||||
"""
|
|
||||||
# Default options
|
|
||||||
options = {'durable': False,
|
|
||||||
'queue_arguments': _get_queue_arguments(conf),
|
|
||||||
'auto_delete': True,
|
|
||||||
'exclusive': False}
|
|
||||||
options.update(kwargs)
|
|
||||||
exchange = kombu.entity.Exchange(name=msg_id,
|
|
||||||
type='direct',
|
|
||||||
durable=options['durable'],
|
|
||||||
auto_delete=options['auto_delete'])
|
|
||||||
super(DirectConsumer, self).__init__(channel,
|
|
||||||
callback,
|
|
||||||
tag,
|
|
||||||
name=msg_id,
|
|
||||||
exchange=exchange,
|
|
||||||
routing_key=msg_id,
|
|
||||||
**options)
|
|
||||||
|
|
||||||
|
|
||||||
class TopicConsumer(ConsumerBase):
|
|
||||||
"""Consumer class for 'topic'."""
|
|
||||||
|
|
||||||
def __init__(self, conf, channel, topic, callback, tag, name=None,
|
|
||||||
exchange_name=None, **kwargs):
|
|
||||||
"""Init a 'topic' queue.
|
|
||||||
|
|
||||||
:param channel: the amqp channel to use
|
|
||||||
:param topic: the topic to listen on
|
|
||||||
:paramtype topic: str
|
|
||||||
:param callback: the callback to call when messages are received
|
|
||||||
:param tag: a unique ID for the consumer on the channel
|
|
||||||
:param name: optional queue name, defaults to topic
|
|
||||||
:paramtype name: str
|
|
||||||
|
|
||||||
Other kombu options may be passed as keyword arguments
|
|
||||||
"""
|
|
||||||
# Default options
|
|
||||||
options = {'durable': conf.amqp_durable_queues,
|
|
||||||
'queue_arguments': _get_queue_arguments(conf),
|
|
||||||
'auto_delete': conf.amqp_auto_delete,
|
|
||||||
'exclusive': False}
|
|
||||||
options.update(kwargs)
|
|
||||||
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
|
|
||||||
exchange = kombu.entity.Exchange(name=exchange_name,
|
|
||||||
type='topic',
|
|
||||||
durable=options['durable'],
|
|
||||||
auto_delete=options['auto_delete'])
|
|
||||||
super(TopicConsumer, self).__init__(channel,
|
|
||||||
callback,
|
|
||||||
tag,
|
|
||||||
name=name or topic,
|
|
||||||
exchange=exchange,
|
|
||||||
routing_key=topic,
|
|
||||||
**options)
|
|
||||||
|
|
||||||
|
|
||||||
class FanoutConsumer(ConsumerBase):
|
|
||||||
"""Consumer class for 'fanout'."""
|
|
||||||
|
|
||||||
def __init__(self, conf, channel, topic, callback, tag, **kwargs):
|
|
||||||
"""Init a 'fanout' queue.
|
|
||||||
|
|
||||||
'channel' is the amqp channel to use
|
|
||||||
'topic' is the topic to listen on
|
|
||||||
'callback' is the callback to call when messages are received
|
|
||||||
'tag' is a unique ID for the consumer on the channel
|
|
||||||
|
|
||||||
Other kombu options may be passed
|
|
||||||
"""
|
|
||||||
unique = uuid.uuid4().hex
|
|
||||||
exchange_name = '%s_fanout' % topic
|
|
||||||
queue_name = '%s_fanout_%s' % (topic, unique)
|
|
||||||
|
|
||||||
# Default options
|
|
||||||
options = {'durable': False,
|
|
||||||
'queue_arguments': _get_queue_arguments(conf),
|
|
||||||
'auto_delete': True,
|
|
||||||
'exclusive': False}
|
|
||||||
options.update(kwargs)
|
|
||||||
exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
|
|
||||||
durable=options['durable'],
|
|
||||||
auto_delete=options['auto_delete'])
|
|
||||||
super(FanoutConsumer, self).__init__(channel, callback, tag,
|
|
||||||
name=queue_name,
|
|
||||||
exchange=exchange,
|
|
||||||
routing_key=topic,
|
|
||||||
**options)
|
|
||||||
|
|
||||||
|
|
||||||
class Publisher(object):
|
|
||||||
"""Base Publisher class."""
|
|
||||||
|
|
||||||
def __init__(self, channel, exchange_name, routing_key, **kwargs):
|
|
||||||
"""Init the Publisher class with the exchange_name, routing_key,
|
|
||||||
and other options
|
|
||||||
"""
|
|
||||||
self.exchange_name = exchange_name
|
|
||||||
self.routing_key = routing_key
|
|
||||||
self.kwargs = kwargs
|
|
||||||
self.reconnect(channel)
|
|
||||||
|
|
||||||
def reconnect(self, channel):
|
|
||||||
"""Re-establish the Producer after a rabbit reconnection."""
|
|
||||||
self.exchange = kombu.entity.Exchange(name=self.exchange_name,
|
|
||||||
**self.kwargs)
|
|
||||||
self.producer = kombu.messaging.Producer(exchange=self.exchange,
|
|
||||||
channel=channel,
|
|
||||||
routing_key=self.routing_key)
|
|
||||||
|
|
||||||
def send(self, msg, timeout=None):
|
|
||||||
"""Send a message."""
|
|
||||||
if timeout:
|
|
||||||
#
|
|
||||||
# AMQP TTL is in milliseconds when set in the header.
|
|
||||||
#
|
|
||||||
self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
|
|
||||||
else:
|
|
||||||
self.producer.publish(msg)
|
|
||||||
|
|
||||||
|
|
||||||
class DirectPublisher(Publisher):
|
|
||||||
"""Publisher class for 'direct'."""
|
|
||||||
def __init__(self, conf, channel, msg_id, **kwargs):
|
|
||||||
"""init a 'direct' publisher.
|
|
||||||
|
|
||||||
Kombu options may be passed as keyword args to override defaults
|
|
||||||
"""
|
|
||||||
|
|
||||||
options = {'durable': False,
|
|
||||||
'auto_delete': True,
|
|
||||||
'exclusive': False}
|
|
||||||
options.update(kwargs)
|
|
||||||
super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
|
|
||||||
type='direct', **options)
|
|
||||||
|
|
||||||
|
|
||||||
class TopicPublisher(Publisher):
|
|
||||||
"""Publisher class for 'topic'."""
|
|
||||||
def __init__(self, conf, channel, topic, **kwargs):
|
|
||||||
"""init a 'topic' publisher.
|
|
||||||
|
|
||||||
Kombu options may be passed as keyword args to override defaults
|
|
||||||
"""
|
|
||||||
options = {'durable': conf.amqp_durable_queues,
|
|
||||||
'auto_delete': conf.amqp_auto_delete,
|
|
||||||
'exclusive': False}
|
|
||||||
options.update(kwargs)
|
|
||||||
exchange_name = rpc_amqp.get_control_exchange(conf)
|
|
||||||
super(TopicPublisher, self).__init__(channel,
|
|
||||||
exchange_name,
|
|
||||||
topic,
|
|
||||||
type='topic',
|
|
||||||
**options)
|
|
||||||
|
|
||||||
|
|
||||||
class FanoutPublisher(Publisher):
|
|
||||||
"""Publisher class for 'fanout'."""
|
|
||||||
def __init__(self, conf, channel, topic, **kwargs):
|
|
||||||
"""init a 'fanout' publisher.
|
|
||||||
|
|
||||||
Kombu options may be passed as keyword args to override defaults
|
|
||||||
"""
|
|
||||||
options = {'durable': False,
|
|
||||||
'auto_delete': True,
|
|
||||||
'exclusive': False}
|
|
||||||
options.update(kwargs)
|
|
||||||
super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
|
|
||||||
None, type='fanout', **options)
|
|
||||||
|
|
||||||
|
|
||||||
class NotifyPublisher(TopicPublisher):
|
|
||||||
"""Publisher class for 'notify'."""
|
|
||||||
|
|
||||||
def __init__(self, conf, channel, topic, **kwargs):
|
|
||||||
self.durable = kwargs.pop('durable', conf.amqp_durable_queues)
|
|
||||||
self.queue_arguments = _get_queue_arguments(conf)
|
|
||||||
super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
|
|
||||||
|
|
||||||
def reconnect(self, channel):
|
|
||||||
super(NotifyPublisher, self).reconnect(channel)
|
|
||||||
|
|
||||||
# NOTE(jerdfelt): Normally the consumer would create the queue, but
|
|
||||||
# we do this to ensure that messages don't get dropped if the
|
|
||||||
# consumer is started after we do
|
|
||||||
queue = kombu.entity.Queue(channel=channel,
|
|
||||||
exchange=self.exchange,
|
|
||||||
durable=self.durable,
|
|
||||||
name=self.routing_key,
|
|
||||||
routing_key=self.routing_key,
|
|
||||||
queue_arguments=self.queue_arguments)
|
|
||||||
queue.declare()
|
|
||||||
|
|
||||||
|
|
||||||
class Connection(object):
|
|
||||||
"""Connection object."""
|
|
||||||
|
|
||||||
pool = None
|
|
||||||
|
|
||||||
def __init__(self, conf, server_params=None):
|
|
||||||
self.consumers = []
|
|
||||||
self.consumer_thread = None
|
|
||||||
self.proxy_callbacks = []
|
|
||||||
self.conf = conf
|
|
||||||
self.max_retries = self.conf.rabbit_max_retries
|
|
||||||
# Try forever?
|
|
||||||
if self.max_retries <= 0:
|
|
||||||
self.max_retries = None
|
|
||||||
self.interval_start = self.conf.rabbit_retry_interval
|
|
||||||
self.interval_stepping = self.conf.rabbit_retry_backoff
|
|
||||||
# max retry-interval = 30 seconds
|
|
||||||
self.interval_max = 30
|
|
||||||
self.memory_transport = False
|
|
||||||
|
|
||||||
if server_params is None:
|
|
||||||
server_params = {}
|
|
||||||
# Keys to translate from server_params to kombu params
|
|
||||||
server_params_to_kombu_params = {'username': 'userid'}
|
|
||||||
|
|
||||||
ssl_params = self._fetch_ssl_params()
|
|
||||||
params_list = []
|
|
||||||
for adr in self.conf.rabbit_hosts:
|
|
||||||
hostname, port = network_utils.parse_host_port(
|
|
||||||
adr, default_port=self.conf.rabbit_port)
|
|
||||||
|
|
||||||
params = {
|
|
||||||
'hostname': hostname,
|
|
||||||
'port': port,
|
|
||||||
'userid': self.conf.rabbit_userid,
|
|
||||||
'password': self.conf.rabbit_password,
|
|
||||||
'virtual_host': self.conf.rabbit_virtual_host,
|
|
||||||
}
|
|
||||||
|
|
||||||
for sp_key, value in six.iteritems(server_params):
|
|
||||||
p_key = server_params_to_kombu_params.get(sp_key, sp_key)
|
|
||||||
params[p_key] = value
|
|
||||||
|
|
||||||
if self.conf.fake_rabbit:
|
|
||||||
params['transport'] = 'memory'
|
|
||||||
if self.conf.rabbit_use_ssl:
|
|
||||||
params['ssl'] = ssl_params
|
|
||||||
|
|
||||||
params_list.append(params)
|
|
||||||
|
|
||||||
self.params_list = params_list
|
|
||||||
|
|
||||||
brokers_count = len(self.params_list)
|
|
||||||
self.next_broker_indices = itertools.cycle(range(brokers_count))
|
|
||||||
|
|
||||||
self.memory_transport = self.conf.fake_rabbit
|
|
||||||
|
|
||||||
self.connection = None
|
|
||||||
self.reconnect()
|
|
||||||
|
|
||||||
def _fetch_ssl_params(self):
|
|
||||||
"""Handles fetching what ssl params should be used for the connection
|
|
||||||
(if any).
|
|
||||||
"""
|
|
||||||
ssl_params = dict()
|
|
||||||
|
|
||||||
# http://docs.python.org/library/ssl.html - ssl.wrap_socket
|
|
||||||
if self.conf.kombu_ssl_version:
|
|
||||||
ssl_params['ssl_version'] = sslutils.validate_ssl_version(
|
|
||||||
self.conf.kombu_ssl_version)
|
|
||||||
if self.conf.kombu_ssl_keyfile:
|
|
||||||
ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
|
|
||||||
if self.conf.kombu_ssl_certfile:
|
|
||||||
ssl_params['certfile'] = self.conf.kombu_ssl_certfile
|
|
||||||
if self.conf.kombu_ssl_ca_certs:
|
|
||||||
ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
|
|
||||||
# We might want to allow variations in the
|
|
||||||
# future with this?
|
|
||||||
ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
|
|
||||||
|
|
||||||
# Return the extended behavior or just have the default behavior
|
|
||||||
return ssl_params or True
|
|
||||||
|
|
||||||
def _connect(self, params):
|
|
||||||
"""Connect to rabbit. Re-establish any queues that may have
|
|
||||||
been declared before if we are reconnecting. Exceptions should
|
|
||||||
be handled by the caller.
|
|
||||||
"""
|
|
||||||
if self.connection:
|
|
||||||
LOG.info(_LI("Reconnecting to AMQP server on "
|
|
||||||
"%(hostname)s:%(port)d") % params)
|
|
||||||
try:
|
|
||||||
# XXX(nic): when reconnecting to a RabbitMQ cluster
|
|
||||||
# with mirrored queues in use, the attempt to release the
|
|
||||||
# connection can hang "indefinitely" somewhere deep down
|
|
||||||
# in Kombu. Blocking the thread for a bit prior to
|
|
||||||
# release seems to kludge around the problem where it is
|
|
||||||
# otherwise reproduceable.
|
|
||||||
if self.conf.kombu_reconnect_delay > 0:
|
|
||||||
LOG.info(_("Delaying reconnect for %1.1f seconds...") %
|
|
||||||
self.conf.kombu_reconnect_delay)
|
|
||||||
time.sleep(self.conf.kombu_reconnect_delay)
|
|
||||||
|
|
||||||
self.connection.release()
|
|
||||||
except self.connection_errors:
|
|
||||||
pass
|
|
||||||
# Setting this in case the next statement fails, though
|
|
||||||
# it shouldn't be doing any network operations, yet.
|
|
||||||
self.connection = None
|
|
||||||
self.connection = kombu.connection.BrokerConnection(**params)
|
|
||||||
self.connection_errors = self.connection.connection_errors
|
|
||||||
if self.memory_transport:
|
|
||||||
# Kludge to speed up tests.
|
|
||||||
self.connection.transport.polling_interval = 0.0
|
|
||||||
self.consumer_num = itertools.count(1)
|
|
||||||
self.connection.connect()
|
|
||||||
self.channel = self.connection.channel()
|
|
||||||
# work around 'memory' transport bug in 1.1.3
|
|
||||||
if self.memory_transport:
|
|
||||||
self.channel._new_queue('ae.undeliver')
|
|
||||||
for consumer in self.consumers:
|
|
||||||
consumer.reconnect(self.channel)
|
|
||||||
LOG.info(_LI('Connected to AMQP server on %(hostname)s:%(port)d') %
|
|
||||||
params)
|
|
||||||
|
|
||||||
def reconnect(self):
|
|
||||||
"""Handles reconnecting and re-establishing queues.
|
|
||||||
Will retry up to self.max_retries number of times.
|
|
||||||
self.max_retries = 0 means to retry forever.
|
|
||||||
Sleep between tries, starting at self.interval_start
|
|
||||||
seconds, backing off self.interval_stepping number of seconds
|
|
||||||
each attempt.
|
|
||||||
"""
|
|
||||||
|
|
||||||
attempt = 0
|
|
||||||
while True:
|
|
||||||
params = self.params_list[next(self.next_broker_indices)]
|
|
||||||
attempt += 1
|
|
||||||
try:
|
|
||||||
self._connect(params)
|
|
||||||
return
|
|
||||||
except (IOError, self.connection_errors) as e:
|
|
||||||
pass
|
|
||||||
except Exception as e:
|
|
||||||
# NOTE(comstud): Unfortunately it's possible for amqplib
|
|
||||||
# to return an error not covered by its transport
|
|
||||||
# connection_errors in the case of a timeout waiting for
|
|
||||||
# a protocol response. (See paste link in LP888621)
|
|
||||||
# So, we check all exceptions for 'timeout' in them
|
|
||||||
# and try to reconnect in this case.
|
|
||||||
if 'timeout' not in str(e):
|
|
||||||
raise
|
|
||||||
|
|
||||||
log_info = {}
|
|
||||||
log_info['err_str'] = e
|
|
||||||
log_info['max_retries'] = self.max_retries
|
|
||||||
log_info.update(params)
|
|
||||||
|
|
||||||
if self.max_retries and attempt == self.max_retries:
|
|
||||||
msg = _('Unable to connect to AMQP server on '
|
|
||||||
'%(hostname)s:%(port)d after %(max_retries)d '
|
|
||||||
'tries: %(err_str)s') % log_info
|
|
||||||
LOG.error(msg)
|
|
||||||
raise rpc_common.RPCException(msg)
|
|
||||||
|
|
||||||
if attempt == 1:
|
|
||||||
sleep_time = self.interval_start or 1
|
|
||||||
elif attempt > 1:
|
|
||||||
sleep_time += self.interval_stepping
|
|
||||||
if self.interval_max:
|
|
||||||
sleep_time = min(sleep_time, self.interval_max)
|
|
||||||
|
|
||||||
log_info['sleep_time'] = sleep_time
|
|
||||||
LOG.error(_LE('AMQP server on %(hostname)s:%(port)d is '
|
|
||||||
'unreachable: %(err_str)s. Trying again in '
|
|
||||||
'%(sleep_time)d seconds.') % log_info)
|
|
||||||
time.sleep(sleep_time)
|
|
||||||
|
|
||||||
def ensure(self, error_callback, method, *args, **kwargs):
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
return method(*args, **kwargs)
|
|
||||||
except (self.connection_errors, socket.timeout, IOError) as e:
|
|
||||||
if error_callback:
|
|
||||||
error_callback(e)
|
|
||||||
except Exception as e:
|
|
||||||
# NOTE(comstud): Unfortunately it's possible for amqplib
|
|
||||||
# to return an error not covered by its transport
|
|
||||||
# connection_errors in the case of a timeout waiting for
|
|
||||||
# a protocol response. (See paste link in LP888621)
|
|
||||||
# So, we check all exceptions for 'timeout' in them
|
|
||||||
# and try to reconnect in this case.
|
|
||||||
if 'timeout' not in str(e):
|
|
||||||
raise
|
|
||||||
if error_callback:
|
|
||||||
error_callback(e)
|
|
||||||
self.reconnect()
|
|
||||||
|
|
||||||
def get_channel(self):
|
|
||||||
"""Convenience call for bin/clear_rabbit_queues."""
|
|
||||||
return self.channel
|
|
||||||
|
|
||||||
def close(self):
|
|
||||||
"""Close/release this connection."""
|
|
||||||
self.cancel_consumer_thread()
|
|
||||||
self.wait_on_proxy_callbacks()
|
|
||||||
self.connection.release()
|
|
||||||
self.connection = None
|
|
||||||
|
|
||||||
def reset(self):
|
|
||||||
"""Reset a connection so it can be used again."""
|
|
||||||
self.cancel_consumer_thread()
|
|
||||||
self.wait_on_proxy_callbacks()
|
|
||||||
self.channel.close()
|
|
||||||
self.channel = self.connection.channel()
|
|
||||||
# work around 'memory' transport bug in 1.1.3
|
|
||||||
if self.memory_transport:
|
|
||||||
self.channel._new_queue('ae.undeliver')
|
|
||||||
self.consumers = []
|
|
||||||
|
|
||||||
def declare_consumer(self, consumer_cls, topic, callback):
|
|
||||||
"""Create a Consumer using the class that was passed in and
|
|
||||||
add it to our list of consumers
|
|
||||||
"""
|
|
||||||
|
|
||||||
def _connect_error(exc):
|
|
||||||
log_info = {'topic': topic, 'err_str': exc}
|
|
||||||
LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
|
|
||||||
"%(err_str)s") % log_info)
|
|
||||||
|
|
||||||
def _declare_consumer():
|
|
||||||
consumer = consumer_cls(self.conf, self.channel, topic, callback,
|
|
||||||
six.next(self.consumer_num))
|
|
||||||
self.consumers.append(consumer)
|
|
||||||
return consumer
|
|
||||||
|
|
||||||
return self.ensure(_connect_error, _declare_consumer)
|
|
||||||
|
|
||||||
def iterconsume(self, limit=None, timeout=None):
|
|
||||||
"""Return an iterator that will consume from all queues/consumers."""
|
|
||||||
|
|
||||||
info = {'do_consume': True}
|
|
||||||
|
|
||||||
def _error_callback(exc):
|
|
||||||
if isinstance(exc, socket.timeout):
|
|
||||||
LOG.debug('Timed out waiting for RPC response: %s' %
|
|
||||||
exc)
|
|
||||||
raise rpc_common.Timeout()
|
|
||||||
else:
|
|
||||||
LOG.exception(_LE('Failed to consume message from queue: %s') %
|
|
||||||
exc)
|
|
||||||
info['do_consume'] = True
|
|
||||||
|
|
||||||
def _consume():
|
|
||||||
if info['do_consume']:
|
|
||||||
queues_head = self.consumers[:-1] # not fanout.
|
|
||||||
queues_tail = self.consumers[-1] # fanout
|
|
||||||
for queue in queues_head:
|
|
||||||
queue.consume(nowait=True)
|
|
||||||
queues_tail.consume(nowait=False)
|
|
||||||
info['do_consume'] = False
|
|
||||||
return self.connection.drain_events(timeout=timeout)
|
|
||||||
|
|
||||||
for iteration in itertools.count(0):
|
|
||||||
if limit and iteration >= limit:
|
|
||||||
raise StopIteration
|
|
||||||
yield self.ensure(_error_callback, _consume)
|
|
||||||
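# NOTE(editor): the consume loop above boils down to "drain one AMQP event per
# iteration, stop once 'limit' iterations have run".  Equivalent standalone
# sketch (drain_one stands in for connection.drain_events):
import itertools

def iterate_with_limit(drain_one, limit=None):
    for iteration in itertools.count(0):
        if limit and iteration >= limit:
            return
        yield drain_one()

# sum(1 for _ in iterate_with_limit(lambda: object(), limit=3)) == 3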
|
|
||||||
def cancel_consumer_thread(self):
|
|
||||||
"""Cancel a consumer thread."""
|
|
||||||
if self.consumer_thread is not None:
|
|
||||||
self.consumer_thread.kill()
|
|
||||||
try:
|
|
||||||
self.consumer_thread.wait()
|
|
||||||
except greenlet.GreenletExit:
|
|
||||||
pass
|
|
||||||
self.consumer_thread = None
|
|
||||||
|
|
||||||
def wait_on_proxy_callbacks(self):
|
|
||||||
"""Wait for all proxy callback threads to exit."""
|
|
||||||
for proxy_cb in self.proxy_callbacks:
|
|
||||||
proxy_cb.wait()
|
|
||||||
|
|
||||||
def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
|
|
||||||
"""Send to a publisher based on the publisher class."""
|
|
||||||
|
|
||||||
def _error_callback(exc):
|
|
||||||
log_info = {'topic': topic, 'err_str': exc}
|
|
||||||
LOG.exception(_LE("Failed to publish message to topic "
|
|
||||||
"'%(topic)s': %(err_str)s") % log_info)
|
|
||||||
|
|
||||||
def _publish():
|
|
||||||
publisher = cls(self.conf, self.channel, topic, **kwargs)
|
|
||||||
publisher.send(msg, timeout)
|
|
||||||
|
|
||||||
self.ensure(_error_callback, _publish)
|
|
||||||
|
|
||||||
def declare_direct_consumer(self, topic, callback):
|
|
||||||
"""Create a 'direct' queue.
|
|
||||||
In nova's use, this is generally a msg_id queue used for
|
|
||||||
responses for call/multicall
|
|
||||||
"""
|
|
||||||
self.declare_consumer(DirectConsumer, topic, callback)
|
|
||||||
|
|
||||||
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
|
|
||||||
exchange_name=None, ack_on_error=True):
|
|
||||||
"""Create a 'topic' consumer."""
|
|
||||||
self.declare_consumer(functools.partial(TopicConsumer,
|
|
||||||
name=queue_name,
|
|
||||||
exchange_name=exchange_name,
|
|
||||||
ack_on_error=ack_on_error,
|
|
||||||
),
|
|
||||||
topic, callback)
|
|
||||||
|
|
||||||
def declare_fanout_consumer(self, topic, callback):
|
|
||||||
"""Create a 'fanout' consumer."""
|
|
||||||
self.declare_consumer(FanoutConsumer, topic, callback)
|
|
||||||
|
|
||||||
def direct_send(self, msg_id, msg):
|
|
||||||
"""Send a 'direct' message."""
|
|
||||||
self.publisher_send(DirectPublisher, msg_id, msg)
|
|
||||||
|
|
||||||
def topic_send(self, topic, msg, timeout=None):
|
|
||||||
"""Send a 'topic' message."""
|
|
||||||
self.publisher_send(TopicPublisher, topic, msg, timeout)
|
|
||||||
|
|
||||||
def fanout_send(self, topic, msg):
|
|
||||||
"""Send a 'fanout' message."""
|
|
||||||
self.publisher_send(FanoutPublisher, topic, msg)
|
|
||||||
|
|
||||||
def notify_send(self, topic, msg, **kwargs):
|
|
||||||
"""Send a notify message on a topic."""
|
|
||||||
self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)
|
|
||||||
|
|
||||||
def consume(self, limit=None):
|
|
||||||
"""Consume from all queues/consumers."""
|
|
||||||
it = self.iterconsume(limit=limit)
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
six.next(it)
|
|
||||||
except StopIteration:
|
|
||||||
return
|
|
||||||
|
|
||||||
def consume_in_thread(self):
|
|
||||||
"""Consumer from all queues/consumers in a greenthread."""
|
|
||||||
@excutils.forever_retry_uncaught_exceptions
|
|
||||||
def _consumer_thread():
|
|
||||||
try:
|
|
||||||
self.consume()
|
|
||||||
except greenlet.GreenletExit:
|
|
||||||
return
|
|
||||||
if self.consumer_thread is None:
|
|
||||||
self.consumer_thread = eventlet.spawn(_consumer_thread)
|
|
||||||
return self.consumer_thread
|
|
||||||
|
|
||||||
def create_consumer(self, topic, proxy, fanout=False):
|
|
||||||
"""Create a consumer that calls a method in a proxy object."""
|
|
||||||
proxy_cb = rpc_amqp.ProxyCallback(
|
|
||||||
self.conf, proxy,
|
|
||||||
rpc_amqp.get_connection_pool(self.conf, Connection))
|
|
||||||
self.proxy_callbacks.append(proxy_cb)
|
|
||||||
|
|
||||||
if fanout:
|
|
||||||
self.declare_fanout_consumer(topic, proxy_cb)
|
|
||||||
else:
|
|
||||||
self.declare_topic_consumer(topic, proxy_cb)
|
|
||||||
|
|
||||||
def create_worker(self, topic, proxy, pool_name):
|
|
||||||
"""Create a worker that calls a method in a proxy object."""
|
|
||||||
proxy_cb = rpc_amqp.ProxyCallback(
|
|
||||||
self.conf, proxy,
|
|
||||||
rpc_amqp.get_connection_pool(self.conf, Connection))
|
|
||||||
self.proxy_callbacks.append(proxy_cb)
|
|
||||||
self.declare_topic_consumer(topic, proxy_cb, pool_name)
|
|
||||||
|
|
||||||
def join_consumer_pool(self, callback, pool_name, topic,
|
|
||||||
exchange_name=None, ack_on_error=True):
|
|
||||||
"""Register as a member of a group of consumers for a given topic from
|
|
||||||
the specified exchange.
|
|
||||||
|
|
||||||
Exactly one member of a given pool will receive each message.
|
|
||||||
|
|
||||||
A message will be delivered to multiple pools, if more than
|
|
||||||
one is created.
|
|
||||||
"""
|
|
||||||
callback_wrapper = rpc_amqp.CallbackWrapper(
|
|
||||||
conf=self.conf,
|
|
||||||
callback=callback,
|
|
||||||
connection_pool=rpc_amqp.get_connection_pool(self.conf,
|
|
||||||
Connection),
|
|
||||||
wait_for_consumers=not ack_on_error
|
|
||||||
)
|
|
||||||
self.proxy_callbacks.append(callback_wrapper)
|
|
||||||
self.declare_topic_consumer(
|
|
||||||
queue_name=pool_name,
|
|
||||||
topic=topic,
|
|
||||||
exchange_name=exchange_name,
|
|
||||||
callback=callback_wrapper,
|
|
||||||
ack_on_error=ack_on_error,
|
|
||||||
)
|
|
||||||
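# NOTE(editor): pool semantics in short: consumers that join the same
# (pool_name, topic) share one queue, so each message reaches exactly one
# member of that pool, while every distinct pool receives its own copy.
# Hypothetical usage sketch (callbacks and names are illustrative):
#
#     conn.join_consumer_pool(callback=audit_cb, pool_name='auditors',
#                             topic='notifications.info')
#     conn.join_consumer_pool(callback=index_cb, pool_name='indexers',
#                             topic='notifications.info')
#     # each notification is handled by one auditor and one indexer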
|
|
||||||
|
|
||||||
def create_connection(conf, new=True):
|
|
||||||
"""Create a connection."""
|
|
||||||
return rpc_amqp.create_connection(
|
|
||||||
conf, new,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection))
|
|
||||||
|
|
||||||
|
|
||||||
def multicall(conf, context, topic, msg, timeout=None):
|
|
||||||
"""Make a call that returns multiple times."""
|
|
||||||
return rpc_amqp.multicall(
|
|
||||||
conf, context, topic, msg, timeout,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection))
|
|
||||||
|
|
||||||
|
|
||||||
def call(conf, context, topic, msg, timeout=None):
|
|
||||||
"""Sends a message on a topic and wait for a response."""
|
|
||||||
return rpc_amqp.call(
|
|
||||||
conf, context, topic, msg, timeout,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection))
|
|
||||||
|
|
||||||
|
|
||||||
def cast(conf, context, topic, msg):
|
|
||||||
"""Sends a message on a topic without waiting for a response."""
|
|
||||||
return rpc_amqp.cast(
|
|
||||||
conf, context, topic, msg,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection))
|
|
||||||
|
|
||||||
|
|
||||||
def fanout_cast(conf, context, topic, msg):
|
|
||||||
"""Sends a message on a fanout exchange without waiting for a response."""
|
|
||||||
return rpc_amqp.fanout_cast(
|
|
||||||
conf, context, topic, msg,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection))
|
|
||||||
|
|
||||||
|
|
||||||
def cast_to_server(conf, context, server_params, topic, msg):
|
|
||||||
"""Sends a message on a topic to a specific server."""
|
|
||||||
return rpc_amqp.cast_to_server(
|
|
||||||
conf, context, server_params, topic, msg,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection))
|
|
||||||
|
|
||||||
|
|
||||||
def fanout_cast_to_server(conf, context, server_params, topic, msg):
|
|
||||||
"""Sends a message on a fanout exchange to a specific server."""
|
|
||||||
return rpc_amqp.fanout_cast_to_server(
|
|
||||||
conf, context, server_params, topic, msg,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection))
|
|
||||||
|
|
||||||
|
|
||||||
def notify(conf, context, topic, msg, envelope):
|
|
||||||
"""Sends a notification event on a topic."""
|
|
||||||
return rpc_amqp.notify(
|
|
||||||
conf, context, topic, msg,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection),
|
|
||||||
envelope)
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup():
|
|
||||||
return rpc_amqp.cleanup(Connection.pool)
|
|
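# NOTE(editor): with the port to oslo.messaging, callers stop using these
# module-level helpers and go through an RPCClient on a shared transport.  A
# minimal sketch, assuming an 'engine' topic (names are illustrative):
from oslo.config import cfg
from oslo import messaging

def make_engine_client(topic='engine'):
    transport = messaging.get_transport(cfg.CONF)
    target = messaging.Target(topic=topic, version='1.0')
    return messaging.RPCClient(transport, target)

# client = make_engine_client()
# client.call({}, 'list_stacks')            # roughly equivalent to rpc.call()
# client.cast({}, 'stack_cancel_update')    # roughly equivalent to rpc.cast()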
@@ -1,823 +0,0 @@
# Copyright 2011 OpenStack Foundation
|
|
||||||
# Copyright 2011 - 2012, Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import functools
|
|
||||||
import itertools
|
|
||||||
import time
|
|
||||||
|
|
||||||
import eventlet
|
|
||||||
import greenlet
|
|
||||||
from oslo.config import cfg
|
|
||||||
import six
|
|
||||||
|
|
||||||
from heat.openstack.common import excutils
|
|
||||||
from heat.openstack.common.gettextutils import _, _LE, _LI
|
|
||||||
from heat.openstack.common import importutils
|
|
||||||
from heat.openstack.common import jsonutils
|
|
||||||
from heat.openstack.common import log as logging
|
|
||||||
from heat.openstack.common.rpc import amqp as rpc_amqp
|
|
||||||
from heat.openstack.common.rpc import common as rpc_common
|
|
||||||
|
|
||||||
qpid_codec = importutils.try_import("qpid.codec010")
|
|
||||||
qpid_messaging = importutils.try_import("qpid.messaging")
|
|
||||||
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
qpid_opts = [
|
|
||||||
cfg.StrOpt('qpid_hostname',
|
|
||||||
default='localhost',
|
|
||||||
help='Qpid broker hostname'),
|
|
||||||
cfg.IntOpt('qpid_port',
|
|
||||||
default=5672,
|
|
||||||
help='Qpid broker port'),
|
|
||||||
cfg.ListOpt('qpid_hosts',
|
|
||||||
default=['$qpid_hostname:$qpid_port'],
|
|
||||||
help='Qpid HA cluster host:port pairs'),
|
|
||||||
cfg.StrOpt('qpid_username',
|
|
||||||
default='',
|
|
||||||
help='Username for qpid connection'),
|
|
||||||
cfg.StrOpt('qpid_password',
|
|
||||||
default='',
|
|
||||||
help='Password for qpid connection',
|
|
||||||
secret=True),
|
|
||||||
cfg.StrOpt('qpid_sasl_mechanisms',
|
|
||||||
default='',
|
|
||||||
help='Space separated list of SASL mechanisms to use for auth'),
|
|
||||||
cfg.IntOpt('qpid_heartbeat',
|
|
||||||
default=60,
|
|
||||||
help='Seconds between connection keepalive heartbeats'),
|
|
||||||
cfg.StrOpt('qpid_protocol',
|
|
||||||
default='tcp',
|
|
||||||
help="Transport to use, either 'tcp' or 'ssl'"),
|
|
||||||
cfg.BoolOpt('qpid_tcp_nodelay',
|
|
||||||
default=True,
|
|
||||||
help='Disable Nagle algorithm'),
|
|
||||||
# NOTE(russellb) If any additional versions are added (beyond 1 and 2),
|
|
||||||
# this file could probably use some additional refactoring so that the
|
|
||||||
# differences between each version are split into different classes.
|
|
||||||
cfg.IntOpt('qpid_topology_version',
|
|
||||||
default=1,
|
|
||||||
help="The qpid topology version to use. Version 1 is what "
|
|
||||||
"was originally used by impl_qpid. Version 2 includes "
|
|
||||||
"some backwards-incompatible changes that allow broker "
|
|
||||||
"federation to work. Users should update to version 2 "
|
|
||||||
"when they are able to take everything down, as it "
|
|
||||||
"requires a clean break."),
|
|
||||||
]
|
|
||||||
|
|
||||||
cfg.CONF.register_opts(qpid_opts)
|
|
||||||
|
|
||||||
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
|
|
||||||
|
|
||||||
|
|
||||||
def raise_invalid_topology_version(conf):
|
|
||||||
msg = (_("Invalid value for qpid_topology_version: %d") %
|
|
||||||
conf.qpid_topology_version)
|
|
||||||
LOG.error(msg)
|
|
||||||
raise Exception(msg)
|
|
||||||
|
|
||||||
|
|
||||||
class ConsumerBase(object):
|
|
||||||
"""Consumer base class."""
|
|
||||||
|
|
||||||
def __init__(self, conf, session, callback, node_name, node_opts,
|
|
||||||
link_name, link_opts):
|
|
||||||
"""Declare a queue on an amqp session.
|
|
||||||
|
|
||||||
'session' is the amqp session to use
|
|
||||||
'callback' is the callback to call when messages are received
|
|
||||||
'node_name' is the first part of the Qpid address string, before ';'
|
|
||||||
'node_opts' will be applied to the "x-declare" section of "node"
|
|
||||||
in the address string.
|
|
||||||
'link_name' goes into the "name" field of the "link" in the address
|
|
||||||
string
|
|
||||||
'link_opts' will be applied to the "x-declare" section of "link"
|
|
||||||
in the address string.
|
|
||||||
"""
|
|
||||||
self.callback = callback
|
|
||||||
self.receiver = None
|
|
||||||
self.session = None
|
|
||||||
|
|
||||||
if conf.qpid_topology_version == 1:
|
|
||||||
addr_opts = {
|
|
||||||
"create": "always",
|
|
||||||
"node": {
|
|
||||||
"type": "topic",
|
|
||||||
"x-declare": {
|
|
||||||
"durable": True,
|
|
||||||
"auto-delete": True,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"link": {
|
|
||||||
"durable": True,
|
|
||||||
"x-declare": {
|
|
||||||
"durable": False,
|
|
||||||
"auto-delete": True,
|
|
||||||
"exclusive": False,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
addr_opts["node"]["x-declare"].update(node_opts)
|
|
||||||
elif conf.qpid_topology_version == 2:
|
|
||||||
addr_opts = {
|
|
||||||
"link": {
|
|
||||||
"x-declare": {
|
|
||||||
"auto-delete": True,
|
|
||||||
"exclusive": False,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
else:
|
|
||||||
raise_invalid_topology_version()
|
|
||||||
|
|
||||||
addr_opts["link"]["x-declare"].update(link_opts)
|
|
||||||
if link_name:
|
|
||||||
addr_opts["link"]["name"] = link_name
|
|
||||||
|
|
||||||
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
|
|
||||||
|
|
||||||
self.connect(session)
|
|
||||||
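# NOTE(editor): the address string assembled above is what qpid.messaging
# parses when creating a receiver.  A standalone sketch of its shape for
# topology version 1 (node and link names are illustrative):
import json

def example_qpid_address(node_name='heat/engine', link_name='engine'):
    addr_opts = {
        "create": "always",
        "node": {"type": "topic",
                 "x-declare": {"durable": True, "auto-delete": True}},
        "link": {"durable": True, "name": link_name,
                 "x-declare": {"durable": False, "auto-delete": True,
                               "exclusive": False}},
    }
    return "%s ; %s" % (node_name, json.dumps(addr_opts))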
|
|
||||||
def connect(self, session):
|
|
||||||
"""Declare the receiver on connect."""
|
|
||||||
self._declare_receiver(session)
|
|
||||||
|
|
||||||
def reconnect(self, session):
|
|
||||||
"""Re-declare the receiver after a qpid reconnect."""
|
|
||||||
self._declare_receiver(session)
|
|
||||||
|
|
||||||
def _declare_receiver(self, session):
|
|
||||||
self.session = session
|
|
||||||
self.receiver = session.receiver(self.address)
|
|
||||||
self.receiver.capacity = 1
|
|
||||||
|
|
||||||
def _unpack_json_msg(self, msg):
|
|
||||||
"""Load the JSON data in msg if msg.content_type indicates that it
|
|
||||||
is necessary. Put the loaded data back into msg.content and
|
|
||||||
update msg.content_type appropriately.
|
|
||||||
|
|
||||||
A Qpid Message containing a dict will have a content_type of
|
|
||||||
'amqp/map', whereas one containing a string that needs to be converted
|
|
||||||
back from JSON will have a content_type of JSON_CONTENT_TYPE.
|
|
||||||
|
|
||||||
:param msg: a Qpid Message object
|
|
||||||
:returns: None
|
|
||||||
"""
|
|
||||||
if msg.content_type == JSON_CONTENT_TYPE:
|
|
||||||
msg.content = jsonutils.loads(msg.content)
|
|
||||||
msg.content_type = 'amqp/map'
|
|
||||||
|
|
||||||
def consume(self):
|
|
||||||
"""Fetch the message and pass it to the callback object."""
|
|
||||||
message = self.receiver.fetch()
|
|
||||||
try:
|
|
||||||
self._unpack_json_msg(message)
|
|
||||||
msg = rpc_common.deserialize_msg(message.content)
|
|
||||||
self.callback(msg)
|
|
||||||
except Exception:
|
|
||||||
LOG.exception(_LE("Failed to process message... skipping it."))
|
|
||||||
finally:
|
|
||||||
# TODO(sandy): Need support for optional ack_on_error.
|
|
||||||
self.session.acknowledge(message)
|
|
||||||
|
|
||||||
def get_receiver(self):
|
|
||||||
return self.receiver
|
|
||||||
|
|
||||||
def get_node_name(self):
|
|
||||||
return self.address.split(';')[0]
|
|
||||||
|
|
||||||
|
|
||||||
class DirectConsumer(ConsumerBase):
|
|
||||||
"""Queue/consumer class for 'direct'."""
|
|
||||||
|
|
||||||
def __init__(self, conf, session, msg_id, callback):
|
|
||||||
"""Init a 'direct' queue.
|
|
||||||
|
|
||||||
'session' is the amqp session to use
|
|
||||||
'msg_id' is the msg_id to listen on
|
|
||||||
'callback' is the callback to call when messages are received
|
|
||||||
"""
|
|
||||||
|
|
||||||
link_opts = {
|
|
||||||
"auto-delete": conf.amqp_auto_delete,
|
|
||||||
"exclusive": True,
|
|
||||||
"durable": conf.amqp_durable_queues,
|
|
||||||
}
|
|
||||||
|
|
||||||
if conf.qpid_topology_version == 1:
|
|
||||||
node_name = "%s/%s" % (msg_id, msg_id)
|
|
||||||
node_opts = {"type": "direct"}
|
|
||||||
link_name = msg_id
|
|
||||||
elif conf.qpid_topology_version == 2:
|
|
||||||
node_name = "amq.direct/%s" % msg_id
|
|
||||||
node_opts = {}
|
|
||||||
link_name = msg_id
|
|
||||||
else:
|
|
||||||
raise_invalid_topology_version()
|
|
||||||
|
|
||||||
super(DirectConsumer, self).__init__(conf, session, callback,
|
|
||||||
node_name, node_opts, link_name,
|
|
||||||
link_opts)
|
|
||||||
|
|
||||||
|
|
||||||
class TopicConsumer(ConsumerBase):
|
|
||||||
"""Consumer class for 'topic'."""
|
|
||||||
|
|
||||||
def __init__(self, conf, session, topic, callback, name=None,
|
|
||||||
exchange_name=None):
|
|
||||||
"""Init a 'topic' queue.
|
|
||||||
|
|
||||||
:param session: the amqp session to use
|
|
||||||
:param topic: is the topic to listen on
|
|
||||||
:paramtype topic: str
|
|
||||||
:param callback: the callback to call when messages are received
|
|
||||||
:param name: optional queue name, defaults to topic
|
|
||||||
"""
|
|
||||||
|
|
||||||
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
|
|
||||||
link_opts = {
|
|
||||||
"auto-delete": conf.amqp_auto_delete,
|
|
||||||
"durable": conf.amqp_durable_queues,
|
|
||||||
}
|
|
||||||
|
|
||||||
if conf.qpid_topology_version == 1:
|
|
||||||
node_name = "%s/%s" % (exchange_name, topic)
|
|
||||||
elif conf.qpid_topology_version == 2:
|
|
||||||
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
|
|
||||||
else:
|
|
||||||
raise_invalid_topology_version()
|
|
||||||
|
|
||||||
super(TopicConsumer, self).__init__(conf, session, callback, node_name,
|
|
||||||
{}, name or topic, link_opts)
|
|
||||||
|
|
||||||
|
|
||||||
class FanoutConsumer(ConsumerBase):
|
|
||||||
"""Consumer class for 'fanout'."""
|
|
||||||
|
|
||||||
def __init__(self, conf, session, topic, callback):
|
|
||||||
"""Init a 'fanout' queue.
|
|
||||||
|
|
||||||
'session' is the amqp session to use
|
|
||||||
'topic' is the topic to listen on
|
|
||||||
'callback' is the callback to call when messages are received
|
|
||||||
"""
|
|
||||||
self.conf = conf
|
|
||||||
|
|
||||||
link_opts = {"exclusive": True}
|
|
||||||
|
|
||||||
if conf.qpid_topology_version == 1:
|
|
||||||
node_name = "%s_fanout" % topic
|
|
||||||
node_opts = {"durable": False, "type": "fanout"}
|
|
||||||
elif conf.qpid_topology_version == 2:
|
|
||||||
node_name = "amq.topic/fanout/%s" % topic
|
|
||||||
node_opts = {}
|
|
||||||
else:
|
|
||||||
raise_invalid_topology_version()
|
|
||||||
|
|
||||||
super(FanoutConsumer, self).__init__(conf, session, callback,
|
|
||||||
node_name, node_opts, None,
|
|
||||||
link_opts)
|
|
||||||
|
|
||||||
|
|
||||||
class Publisher(object):
|
|
||||||
"""Base Publisher class."""
|
|
||||||
|
|
||||||
def __init__(self, conf, session, node_name, node_opts=None):
|
|
||||||
"""Init the Publisher class with the exchange_name, routing_key,
|
|
||||||
and other options
|
|
||||||
"""
|
|
||||||
self.sender = None
|
|
||||||
self.session = session
|
|
||||||
|
|
||||||
if conf.qpid_topology_version == 1:
|
|
||||||
addr_opts = {
|
|
||||||
"create": "always",
|
|
||||||
"node": {
|
|
||||||
"type": "topic",
|
|
||||||
"x-declare": {
|
|
||||||
"durable": False,
|
|
||||||
# auto-delete isn't implemented for exchanges in qpid,
|
|
||||||
# but put in here anyway
|
|
||||||
"auto-delete": True,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
if node_opts:
|
|
||||||
addr_opts["node"]["x-declare"].update(node_opts)
|
|
||||||
|
|
||||||
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
|
|
||||||
elif conf.qpid_topology_version == 2:
|
|
||||||
self.address = node_name
|
|
||||||
else:
|
|
||||||
raise_invalid_topology_version()
|
|
||||||
|
|
||||||
self.reconnect(session)
|
|
||||||
|
|
||||||
def reconnect(self, session):
|
|
||||||
"""Re-establish the Sender after a reconnection."""
|
|
||||||
self.sender = session.sender(self.address)
|
|
||||||
|
|
||||||
def _pack_json_msg(self, msg):
|
|
||||||
"""Qpid cannot serialize dicts containing strings longer than 65535
|
|
||||||
characters. This function dumps the message content to a JSON
|
|
||||||
string, which Qpid is able to handle.
|
|
||||||
|
|
||||||
:param msg: May be either a Qpid Message object or a bare dict.
|
|
||||||
:returns: A Qpid Message with its content field JSON encoded.
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
msg.content = jsonutils.dumps(msg.content)
|
|
||||||
except AttributeError:
|
|
||||||
# Need to have a Qpid message so we can set the content_type.
|
|
||||||
msg = qpid_messaging.Message(jsonutils.dumps(msg))
|
|
||||||
msg.content_type = JSON_CONTENT_TYPE
|
|
||||||
return msg
|
|
||||||
|
|
||||||
def send(self, msg):
|
|
||||||
"""Send a message."""
|
|
||||||
try:
|
|
||||||
# Check if Qpid can encode the message
|
|
||||||
check_msg = msg
|
|
||||||
if not hasattr(check_msg, 'content_type'):
|
|
||||||
check_msg = qpid_messaging.Message(msg)
|
|
||||||
content_type = check_msg.content_type
|
|
||||||
enc, dec = qpid_messaging.message.get_codec(content_type)
|
|
||||||
enc(check_msg.content)
|
|
||||||
except qpid_codec.CodecException:
|
|
||||||
# This means the message couldn't be serialized as a dict.
|
|
||||||
msg = self._pack_json_msg(msg)
|
|
||||||
self.sender.send(msg)
|
|
||||||
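# NOTE(editor): the codec check in send() exists because qpid map messages
# cannot carry strings longer than 65535 characters; anything the codec
# rejects is shipped as a JSON string with an explicit content type instead.
# A simplified, standalone sketch of that decision (not the actual codec):
import json

def pack_for_qpid(content, max_str=65535):
    """Return (body, content_type), falling back to JSON for long strings."""
    values = content.values() if isinstance(content, dict) else []
    if any(isinstance(v, str) and len(v) > max_str for v in values):
        return json.dumps(content), 'application/json; charset=utf8'
    return content, 'amqp/map'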
|
|
||||||
|
|
||||||
class DirectPublisher(Publisher):
|
|
||||||
"""Publisher class for 'direct'."""
|
|
||||||
def __init__(self, conf, session, msg_id):
|
|
||||||
"""Init a 'direct' publisher."""
|
|
||||||
|
|
||||||
if conf.qpid_topology_version == 1:
|
|
||||||
node_name = "%s/%s" % (msg_id, msg_id)
|
|
||||||
node_opts = {"type": "direct"}
|
|
||||||
elif conf.qpid_topology_version == 2:
|
|
||||||
node_name = "amq.direct/%s" % msg_id
|
|
||||||
node_opts = {}
|
|
||||||
else:
|
|
||||||
raise_invalid_topology_version()
|
|
||||||
|
|
||||||
super(DirectPublisher, self).__init__(conf, session, node_name,
|
|
||||||
node_opts)
|
|
||||||
|
|
||||||
|
|
||||||
class TopicPublisher(Publisher):
|
|
||||||
"""Publisher class for 'topic'."""
|
|
||||||
def __init__(self, conf, session, topic):
|
|
||||||
"""Init a 'topic' publisher.
|
|
||||||
"""
|
|
||||||
exchange_name = rpc_amqp.get_control_exchange(conf)
|
|
||||||
|
|
||||||
if conf.qpid_topology_version == 1:
|
|
||||||
node_name = "%s/%s" % (exchange_name, topic)
|
|
||||||
elif conf.qpid_topology_version == 2:
|
|
||||||
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
|
|
||||||
else:
|
|
||||||
raise_invalid_topology_version()
|
|
||||||
|
|
||||||
super(TopicPublisher, self).__init__(conf, session, node_name)
|
|
||||||
|
|
||||||
|
|
||||||
class FanoutPublisher(Publisher):
|
|
||||||
"""Publisher class for 'fanout'."""
|
|
||||||
def __init__(self, conf, session, topic):
|
|
||||||
"""Init a 'fanout' publisher.
|
|
||||||
"""
|
|
||||||
|
|
||||||
if conf.qpid_topology_version == 1:
|
|
||||||
node_name = "%s_fanout" % topic
|
|
||||||
node_opts = {"type": "fanout"}
|
|
||||||
elif conf.qpid_topology_version == 2:
|
|
||||||
node_name = "amq.topic/fanout/%s" % topic
|
|
||||||
node_opts = {}
|
|
||||||
else:
|
|
||||||
raise_invalid_topology_version()
|
|
||||||
|
|
||||||
super(FanoutPublisher, self).__init__(conf, session, node_name,
|
|
||||||
node_opts)
|
|
||||||
|
|
||||||
|
|
||||||
class NotifyPublisher(Publisher):
|
|
||||||
"""Publisher class for notifications."""
|
|
||||||
def __init__(self, conf, session, topic):
|
|
||||||
"""Init a 'topic' publisher.
|
|
||||||
"""
|
|
||||||
exchange_name = rpc_amqp.get_control_exchange(conf)
|
|
||||||
node_opts = {"durable": True}
|
|
||||||
|
|
||||||
if conf.qpid_topology_version == 1:
|
|
||||||
node_name = "%s/%s" % (exchange_name, topic)
|
|
||||||
elif conf.qpid_topology_version == 2:
|
|
||||||
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
|
|
||||||
else:
|
|
||||||
raise_invalid_topology_version()
|
|
||||||
|
|
||||||
super(NotifyPublisher, self).__init__(conf, session, node_name,
|
|
||||||
node_opts)
|
|
||||||
|
|
||||||
|
|
||||||
class Connection(object):
|
|
||||||
"""Connection object."""
|
|
||||||
|
|
||||||
pool = None
|
|
||||||
|
|
||||||
def __init__(self, conf, server_params=None):
|
|
||||||
if not qpid_messaging:
|
|
||||||
raise ImportError("Failed to import qpid.messaging")
|
|
||||||
|
|
||||||
self.connection = None
|
|
||||||
self.session = None
|
|
||||||
self.consumers = {}
|
|
||||||
self.consumer_thread = None
|
|
||||||
self.proxy_callbacks = []
|
|
||||||
self.conf = conf
|
|
||||||
|
|
||||||
if server_params and 'hostname' in server_params:
|
|
||||||
# NOTE(russellb) This enables support for cast_to_server.
|
|
||||||
server_params['qpid_hosts'] = [
|
|
||||||
'%s:%d' % (server_params['hostname'],
|
|
||||||
server_params.get('port', 5672))
|
|
||||||
]
|
|
||||||
|
|
||||||
params = {
|
|
||||||
'qpid_hosts': self.conf.qpid_hosts,
|
|
||||||
'username': self.conf.qpid_username,
|
|
||||||
'password': self.conf.qpid_password,
|
|
||||||
}
|
|
||||||
params.update(server_params or {})
|
|
||||||
|
|
||||||
self.brokers = params['qpid_hosts']
|
|
||||||
self.username = params['username']
|
|
||||||
self.password = params['password']
|
|
||||||
|
|
||||||
brokers_count = len(self.brokers)
|
|
||||||
self.next_broker_indices = itertools.cycle(range(brokers_count))
|
|
||||||
|
|
||||||
self.reconnect()
|
|
||||||
|
|
||||||
def connection_create(self, broker):
|
|
||||||
# Create the connection - this does not open the connection
|
|
||||||
self.connection = qpid_messaging.Connection(broker)
|
|
||||||
|
|
||||||
# Check if flags are set and if so set them for the connection
|
|
||||||
# before we call open
|
|
||||||
self.connection.username = self.username
|
|
||||||
self.connection.password = self.password
|
|
||||||
|
|
||||||
self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
|
|
||||||
# Reconnection is done by self.reconnect()
|
|
||||||
self.connection.reconnect = False
|
|
||||||
self.connection.heartbeat = self.conf.qpid_heartbeat
|
|
||||||
self.connection.transport = self.conf.qpid_protocol
|
|
||||||
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
|
|
||||||
|
|
||||||
def _register_consumer(self, consumer):
|
|
||||||
self.consumers[str(consumer.get_receiver())] = consumer
|
|
||||||
|
|
||||||
def _lookup_consumer(self, receiver):
|
|
||||||
return self.consumers[str(receiver)]
|
|
||||||
|
|
||||||
    def reconnect(self):
        """Handles reconnecting and re-establishing sessions and queues."""
        delay = 1
        while True:
            # Close the session if necessary
            if self.connection is not None and self.connection.opened():
                try:
                    self.connection.close()
                except qpid_exceptions.MessagingError:
                    pass

            broker = self.brokers[next(self.next_broker_indices)]

            try:
                self.connection_create(broker)
                self.connection.open()
            except qpid_exceptions.MessagingError as e:
                msg_dict = dict(e=e, delay=delay)
                msg = _LE("Unable to connect to AMQP server: %(e)s. "
                          "Sleeping %(delay)s seconds") % msg_dict
                LOG.error(msg)
                time.sleep(delay)
                delay = min(delay + 1, 5)
            else:
                LOG.info(_LI('Connected to AMQP server on %s'), broker)
                break

        self.session = self.connection.session()

        if self.consumers:
            consumers = self.consumers
            self.consumers = {}

            for consumer in six.itervalues(consumers):
                consumer.reconnect(self.session)
                self._register_consumer(consumer)

            LOG.debug("Re-established AMQP queues")

def ensure(self, error_callback, method, *args, **kwargs):
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
return method(*args, **kwargs)
|
|
||||||
except (qpid_exceptions.Empty,
|
|
||||||
qpid_exceptions.MessagingError) as e:
|
|
||||||
if error_callback:
|
|
||||||
error_callback(e)
|
|
||||||
self.reconnect()
|
|
||||||
|
|
||||||
def close(self):
|
|
||||||
"""Close/release this connection."""
|
|
||||||
self.cancel_consumer_thread()
|
|
||||||
self.wait_on_proxy_callbacks()
|
|
||||||
try:
|
|
||||||
self.connection.close()
|
|
||||||
except Exception:
|
|
||||||
# NOTE(dripton) Logging exceptions that happen during cleanup just
|
|
||||||
# causes confusion; there's really nothing useful we can do with
|
|
||||||
# them.
|
|
||||||
pass
|
|
||||||
self.connection = None
|
|
||||||
|
|
||||||
def reset(self):
|
|
||||||
"""Reset a connection so it can be used again."""
|
|
||||||
self.cancel_consumer_thread()
|
|
||||||
self.wait_on_proxy_callbacks()
|
|
||||||
self.session.close()
|
|
||||||
self.session = self.connection.session()
|
|
||||||
self.consumers = {}
|
|
||||||
|
|
||||||
def declare_consumer(self, consumer_cls, topic, callback):
|
|
||||||
"""Create a Consumer using the class that was passed in and
|
|
||||||
add it to our list of consumers
|
|
||||||
"""
|
|
||||||
def _connect_error(exc):
|
|
||||||
log_info = {'topic': topic, 'err_str': exc}
|
|
||||||
LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
|
|
||||||
"%(err_str)s") % log_info)
|
|
||||||
|
|
||||||
def _declare_consumer():
|
|
||||||
consumer = consumer_cls(self.conf, self.session, topic, callback)
|
|
||||||
self._register_consumer(consumer)
|
|
||||||
return consumer
|
|
||||||
|
|
||||||
return self.ensure(_connect_error, _declare_consumer)
|
|
||||||
|
|
||||||
def iterconsume(self, limit=None, timeout=None):
|
|
||||||
"""Return an iterator that will consume from all queues/consumers."""
|
|
||||||
|
|
||||||
def _error_callback(exc):
|
|
||||||
if isinstance(exc, qpid_exceptions.Empty):
|
|
||||||
LOG.debug('Timed out waiting for RPC response: %s' %
|
|
||||||
exc)
|
|
||||||
raise rpc_common.Timeout()
|
|
||||||
else:
|
|
||||||
LOG.exception(_LE('Failed to consume message from queue: %s') %
|
|
||||||
exc)
|
|
||||||
|
|
||||||
def _consume():
|
|
||||||
nxt_receiver = self.session.next_receiver(timeout=timeout)
|
|
||||||
try:
|
|
||||||
self._lookup_consumer(nxt_receiver).consume()
|
|
||||||
except Exception:
|
|
||||||
LOG.exception(_LE("Error processing message. Skipping it."))
|
|
||||||
|
|
||||||
for iteration in itertools.count(0):
|
|
||||||
if limit and iteration >= limit:
|
|
||||||
raise StopIteration
|
|
||||||
yield self.ensure(_error_callback, _consume)
|
|
||||||
|
|
||||||
def cancel_consumer_thread(self):
|
|
||||||
"""Cancel a consumer thread."""
|
|
||||||
if self.consumer_thread is not None:
|
|
||||||
self.consumer_thread.kill()
|
|
||||||
try:
|
|
||||||
self.consumer_thread.wait()
|
|
||||||
except greenlet.GreenletExit:
|
|
||||||
pass
|
|
||||||
self.consumer_thread = None
|
|
||||||
|
|
||||||
def wait_on_proxy_callbacks(self):
|
|
||||||
"""Wait for all proxy callback threads to exit."""
|
|
||||||
for proxy_cb in self.proxy_callbacks:
|
|
||||||
proxy_cb.wait()
|
|
||||||
|
|
||||||
def publisher_send(self, cls, topic, msg):
|
|
||||||
"""Send to a publisher based on the publisher class."""
|
|
||||||
|
|
||||||
def _connect_error(exc):
|
|
||||||
log_info = {'topic': topic, 'err_str': exc}
|
|
||||||
LOG.exception(_LE("Failed to publish message to topic "
|
|
||||||
"'%(topic)s': %(err_str)s") % log_info)
|
|
||||||
|
|
||||||
def _publisher_send():
|
|
||||||
publisher = cls(self.conf, self.session, topic)
|
|
||||||
publisher.send(msg)
|
|
||||||
|
|
||||||
return self.ensure(_connect_error, _publisher_send)
|
|
||||||
|
|
||||||
def declare_direct_consumer(self, topic, callback):
|
|
||||||
"""Create a 'direct' queue.
|
|
||||||
In nova's use, this is generally a msg_id queue used for
|
|
||||||
responses for call/multicall
|
|
||||||
"""
|
|
||||||
self.declare_consumer(DirectConsumer, topic, callback)
|
|
||||||
|
|
||||||
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
|
|
||||||
exchange_name=None):
|
|
||||||
"""Create a 'topic' consumer."""
|
|
||||||
self.declare_consumer(functools.partial(TopicConsumer,
|
|
||||||
name=queue_name,
|
|
||||||
exchange_name=exchange_name,
|
|
||||||
),
|
|
||||||
topic, callback)
|
|
||||||
|
|
||||||
def declare_fanout_consumer(self, topic, callback):
|
|
||||||
"""Create a 'fanout' consumer."""
|
|
||||||
self.declare_consumer(FanoutConsumer, topic, callback)
|
|
||||||
|
|
||||||
def direct_send(self, msg_id, msg):
|
|
||||||
"""Send a 'direct' message."""
|
|
||||||
self.publisher_send(DirectPublisher, msg_id, msg)
|
|
||||||
|
|
||||||
    def topic_send(self, topic, msg, timeout=None):
        """Send a 'topic' message."""
        #
        # We want to create a message with attributes, e.g. a TTL. We
        # don't really need to keep 'msg' in its JSON format any longer
        # so let's create an actual qpid message here and get some
        # value-add on the go.
        #
        # WARNING: Request timeout happens to be in the same units as
        # qpid's TTL (seconds). If this changes in the future, then this
        # will need to be altered accordingly.
        #
        qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
        self.publisher_send(TopicPublisher, topic, qpid_message)
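# NOTE(editor): the only subtlety above is that the RPC timeout (in seconds)
# is reused directly as the qpid message TTL, which also happens to be in
# seconds.  Hypothetical usage:
#
#     connection.topic_send('engine', {'method': 'ping'}, timeout=60)
#     # -> qpid_messaging.Message(content={'method': 'ping'}, ttl=60)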
|
|
||||||
def fanout_send(self, topic, msg):
|
|
||||||
"""Send a 'fanout' message."""
|
|
||||||
self.publisher_send(FanoutPublisher, topic, msg)
|
|
||||||
|
|
||||||
def notify_send(self, topic, msg, **kwargs):
|
|
||||||
"""Send a notify message on a topic."""
|
|
||||||
self.publisher_send(NotifyPublisher, topic, msg)
|
|
||||||
|
|
||||||
def consume(self, limit=None):
|
|
||||||
"""Consume from all queues/consumers."""
|
|
||||||
it = self.iterconsume(limit=limit)
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
six.next(it)
|
|
||||||
except StopIteration:
|
|
||||||
return
|
|
||||||
|
|
||||||
def consume_in_thread(self):
|
|
||||||
"""Consumer from all queues/consumers in a greenthread."""
|
|
||||||
@excutils.forever_retry_uncaught_exceptions
|
|
||||||
def _consumer_thread():
|
|
||||||
try:
|
|
||||||
self.consume()
|
|
||||||
except greenlet.GreenletExit:
|
|
||||||
return
|
|
||||||
if self.consumer_thread is None:
|
|
||||||
self.consumer_thread = eventlet.spawn(_consumer_thread)
|
|
||||||
return self.consumer_thread
|
|
||||||
|
|
||||||
def create_consumer(self, topic, proxy, fanout=False):
|
|
||||||
"""Create a consumer that calls a method in a proxy object."""
|
|
||||||
proxy_cb = rpc_amqp.ProxyCallback(
|
|
||||||
self.conf, proxy,
|
|
||||||
rpc_amqp.get_connection_pool(self.conf, Connection))
|
|
||||||
self.proxy_callbacks.append(proxy_cb)
|
|
||||||
|
|
||||||
if fanout:
|
|
||||||
consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
|
|
||||||
else:
|
|
||||||
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)
|
|
||||||
|
|
||||||
self._register_consumer(consumer)
|
|
||||||
|
|
||||||
return consumer
|
|
||||||
|
|
||||||
def create_worker(self, topic, proxy, pool_name):
|
|
||||||
"""Create a worker that calls a method in a proxy object."""
|
|
||||||
proxy_cb = rpc_amqp.ProxyCallback(
|
|
||||||
self.conf, proxy,
|
|
||||||
rpc_amqp.get_connection_pool(self.conf, Connection))
|
|
||||||
self.proxy_callbacks.append(proxy_cb)
|
|
||||||
|
|
||||||
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
|
|
||||||
name=pool_name)
|
|
||||||
|
|
||||||
self._register_consumer(consumer)
|
|
||||||
|
|
||||||
return consumer
|
|
||||||
|
|
||||||
def join_consumer_pool(self, callback, pool_name, topic,
|
|
||||||
exchange_name=None, ack_on_error=True):
|
|
||||||
"""Register as a member of a group of consumers for a given topic from
|
|
||||||
the specified exchange.
|
|
||||||
|
|
||||||
Exactly one member of a given pool will receive each message.
|
|
||||||
|
|
||||||
A message will be delivered to multiple pools, if more than
|
|
||||||
one is created.
|
|
||||||
"""
|
|
||||||
callback_wrapper = rpc_amqp.CallbackWrapper(
|
|
||||||
conf=self.conf,
|
|
||||||
callback=callback,
|
|
||||||
connection_pool=rpc_amqp.get_connection_pool(self.conf,
|
|
||||||
Connection),
|
|
||||||
wait_for_consumers=not ack_on_error
|
|
||||||
)
|
|
||||||
self.proxy_callbacks.append(callback_wrapper)
|
|
||||||
|
|
||||||
consumer = TopicConsumer(conf=self.conf,
|
|
||||||
session=self.session,
|
|
||||||
topic=topic,
|
|
||||||
callback=callback_wrapper,
|
|
||||||
name=pool_name,
|
|
||||||
exchange_name=exchange_name)
|
|
||||||
|
|
||||||
self._register_consumer(consumer)
|
|
||||||
return consumer
|
|
||||||
|
|
||||||
|
|
||||||
def create_connection(conf, new=True):
|
|
||||||
"""Create a connection."""
|
|
||||||
return rpc_amqp.create_connection(
|
|
||||||
conf, new,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection))
|
|
||||||
|
|
||||||
|
|
||||||
def multicall(conf, context, topic, msg, timeout=None):
|
|
||||||
"""Make a call that returns multiple times."""
|
|
||||||
return rpc_amqp.multicall(
|
|
||||||
conf, context, topic, msg, timeout,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection))
|
|
||||||
|
|
||||||
|
|
||||||
def call(conf, context, topic, msg, timeout=None):
|
|
||||||
"""Sends a message on a topic and wait for a response."""
|
|
||||||
return rpc_amqp.call(
|
|
||||||
conf, context, topic, msg, timeout,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection))
|
|
||||||
|
|
||||||
|
|
||||||
def cast(conf, context, topic, msg):
|
|
||||||
"""Sends a message on a topic without waiting for a response."""
|
|
||||||
return rpc_amqp.cast(
|
|
||||||
conf, context, topic, msg,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection))
|
|
||||||
|
|
||||||
|
|
||||||
def fanout_cast(conf, context, topic, msg):
|
|
||||||
"""Sends a message on a fanout exchange without waiting for a response."""
|
|
||||||
return rpc_amqp.fanout_cast(
|
|
||||||
conf, context, topic, msg,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection))
|
|
||||||
|
|
||||||
|
|
||||||
def cast_to_server(conf, context, server_params, topic, msg):
|
|
||||||
"""Sends a message on a topic to a specific server."""
|
|
||||||
return rpc_amqp.cast_to_server(
|
|
||||||
conf, context, server_params, topic, msg,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection))
|
|
||||||
|
|
||||||
|
|
||||||
def fanout_cast_to_server(conf, context, server_params, topic, msg):
|
|
||||||
"""Sends a message on a fanout exchange to a specific server."""
|
|
||||||
return rpc_amqp.fanout_cast_to_server(
|
|
||||||
conf, context, server_params, topic, msg,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection))
|
|
||||||
|
|
||||||
|
|
||||||
def notify(conf, context, topic, msg, envelope):
|
|
||||||
"""Sends a notification event on a topic."""
|
|
||||||
return rpc_amqp.notify(conf, context, topic, msg,
|
|
||||||
rpc_amqp.get_connection_pool(conf, Connection),
|
|
||||||
envelope)
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup():
|
|
||||||
return rpc_amqp.cleanup(Connection.pool)
|
|
@@ -1,818 +0,0 @@
# Copyright 2011 Cloudscaling Group, Inc
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import pprint
|
|
||||||
import re
|
|
||||||
import socket
|
|
||||||
import sys
|
|
||||||
import types
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
import eventlet
|
|
||||||
import greenlet
|
|
||||||
from oslo.config import cfg
|
|
||||||
import six
|
|
||||||
from six import moves
|
|
||||||
|
|
||||||
from heat.openstack.common import excutils
|
|
||||||
from heat.openstack.common.gettextutils import _, _LE, _LI
|
|
||||||
from heat.openstack.common import importutils
|
|
||||||
from heat.openstack.common import jsonutils
|
|
||||||
from heat.openstack.common.rpc import common as rpc_common
|
|
||||||
|
|
||||||
zmq = importutils.try_import('eventlet.green.zmq')
|
|
||||||
|
|
||||||
# for convenience, are not modified.
|
|
||||||
pformat = pprint.pformat
|
|
||||||
Timeout = eventlet.timeout.Timeout
|
|
||||||
LOG = rpc_common.LOG
|
|
||||||
RemoteError = rpc_common.RemoteError
|
|
||||||
RPCException = rpc_common.RPCException
|
|
||||||
|
|
||||||
zmq_opts = [
|
|
||||||
cfg.StrOpt('rpc_zmq_bind_address', default='*',
|
|
||||||
help='ZeroMQ bind address. Should be a wildcard (*), '
|
|
||||||
'an ethernet interface, or IP. '
|
|
||||||
'The "host" option should point or resolve to this '
|
|
||||||
'address.'),
|
|
||||||
|
|
||||||
# The module.Class to use for matchmaking.
|
|
||||||
cfg.StrOpt(
|
|
||||||
'rpc_zmq_matchmaker',
|
|
||||||
default=('heat.openstack.common.rpc.'
|
|
||||||
'matchmaker.MatchMakerLocalhost'),
|
|
||||||
help='MatchMaker driver',
|
|
||||||
),
|
|
||||||
|
|
||||||
# The following port is unassigned by IANA as of 2012-05-21
|
|
||||||
cfg.IntOpt('rpc_zmq_port', default=9501,
|
|
||||||
help='ZeroMQ receiver listening port'),
|
|
||||||
|
|
||||||
cfg.IntOpt('rpc_zmq_contexts', default=1,
|
|
||||||
help='Number of ZeroMQ contexts, defaults to 1'),
|
|
||||||
|
|
||||||
cfg.IntOpt('rpc_zmq_topic_backlog',
|
|
||||||
help='Maximum number of ingress messages to locally buffer '
|
|
||||||
'per topic. Default is unlimited.'),
|
|
||||||
|
|
||||||
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
|
|
||||||
help='Directory for holding IPC sockets'),
|
|
||||||
|
|
||||||
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
|
|
||||||
help='Name of this node. Must be a valid hostname, FQDN, or '
|
|
||||||
'IP address. Must match "host" option, if running Nova.')
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
CONF.register_opts(zmq_opts)
|
|
||||||
|
|
||||||
ZMQ_CTX = None # ZeroMQ Context, must be global.
|
|
||||||
matchmaker = None # memorized matchmaker object
|
|
||||||
|
|
||||||
|
|
||||||
def _serialize(data):
|
|
||||||
"""Serialization wrapper.
|
|
||||||
|
|
||||||
We prefer using JSON, but it cannot encode all types.
|
|
||||||
Error if a developer passes us bad data.
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
return jsonutils.dumps(data, ensure_ascii=True)
|
|
||||||
except TypeError:
|
|
||||||
with excutils.save_and_reraise_exception():
|
|
||||||
LOG.error(_LE("JSON serialization failed."))
|
|
||||||
|
|
||||||
|
|
||||||
def _deserialize(data):
|
|
||||||
"""Deserialization wrapper."""
|
|
||||||
LOG.debug("Deserializing: %s", data)
|
|
||||||
return jsonutils.loads(data)
|
|
||||||
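# NOTE(editor): both wrappers above are thin shims over jsonutils; a
# self-contained round trip with the standard library shows the intent:
import json

def zmq_roundtrip(payload):
    """Serialize then deserialize, as done on either end of the zmq socket."""
    return json.loads(json.dumps(payload, ensure_ascii=True))

# zmq_roundtrip({'method': 'echo', 'args': {'value': 42}}) == \
#     {'method': 'echo', 'args': {'value': 42}}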
|
|
||||||
|
|
||||||
class ZmqSocket(object):
|
|
||||||
"""A tiny wrapper around ZeroMQ.
|
|
||||||
|
|
||||||
Simplifies the send/recv protocol and connection management.
|
|
||||||
Can be used as a Context (supports the 'with' statement).
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
|
|
||||||
self.sock = _get_ctxt().socket(zmq_type)
|
|
||||||
self.addr = addr
|
|
||||||
self.type = zmq_type
|
|
||||||
self.subscriptions = []
|
|
||||||
|
|
||||||
# Support failures on sending/receiving on wrong socket type.
|
|
||||||
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
|
|
||||||
self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
|
|
||||||
self.can_sub = zmq_type in (zmq.SUB, )
|
|
||||||
|
|
||||||
# Support list, str, & None for subscribe arg (cast to list)
|
|
||||||
do_sub = {
|
|
||||||
list: subscribe,
|
|
||||||
str: [subscribe],
|
|
||||||
type(None): []
|
|
||||||
}[type(subscribe)]
|
|
||||||
|
|
||||||
for f in do_sub:
|
|
||||||
self.subscribe(f)
|
|
||||||
|
|
||||||
str_data = {'addr': addr, 'type': self.socket_s(),
|
|
||||||
'subscribe': subscribe, 'bind': bind}
|
|
||||||
|
|
||||||
LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
|
|
||||||
LOG.debug("-> Subscribed to %(subscribe)s", str_data)
|
|
||||||
LOG.debug("-> bind: %(bind)s", str_data)
|
|
||||||
|
|
||||||
try:
|
|
||||||
if bind:
|
|
||||||
self.sock.bind(addr)
|
|
||||||
else:
|
|
||||||
self.sock.connect(addr)
|
|
||||||
except Exception:
|
|
||||||
raise RPCException(_("Could not open socket."))
|
|
||||||
|
|
||||||
def socket_s(self):
|
|
||||||
"""Get socket type as string."""
|
|
||||||
t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
|
|
||||||
'DEALER')
|
|
||||||
return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
|
|
||||||
|
|
||||||
def subscribe(self, msg_filter):
|
|
||||||
"""Subscribe."""
|
|
||||||
if not self.can_sub:
|
|
||||||
raise RPCException("Cannot subscribe on this socket.")
|
|
||||||
LOG.debug("Subscribing to %s", msg_filter)
|
|
||||||
|
|
||||||
try:
|
|
||||||
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
|
|
||||||
except Exception:
|
|
||||||
return
|
|
||||||
|
|
||||||
self.subscriptions.append(msg_filter)
|
|
||||||
|
|
||||||
def unsubscribe(self, msg_filter):
|
|
||||||
"""Unsubscribe."""
|
|
||||||
if msg_filter not in self.subscriptions:
|
|
||||||
return
|
|
||||||
self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
|
|
||||||
self.subscriptions.remove(msg_filter)
|
|
||||||
|
|
||||||
def close(self):
|
|
||||||
if self.sock is None or self.sock.closed:
|
|
||||||
return
|
|
||||||
|
|
||||||
# We must unsubscribe, or we'll leak descriptors.
|
|
||||||
if self.subscriptions:
|
|
||||||
for f in self.subscriptions:
|
|
||||||
try:
|
|
||||||
self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
self.subscriptions = []
|
|
||||||
|
|
||||||
try:
|
|
||||||
# Default is to linger
|
|
||||||
self.sock.close()
|
|
||||||
except Exception:
|
|
||||||
# While this is a bad thing to happen,
|
|
||||||
# it would be much worse if some of the code calling this
|
|
||||||
# were to fail. For now, lets log, and later evaluate
|
|
||||||
# if we can safely raise here.
|
|
||||||
LOG.error(_LE("ZeroMQ socket could not be closed."))
|
|
||||||
self.sock = None
|
|
||||||
|
|
||||||
def recv(self, **kwargs):
|
|
||||||
if not self.can_recv:
|
|
||||||
raise RPCException(_("You cannot recv on this socket."))
|
|
||||||
return self.sock.recv_multipart(**kwargs)
|
|
||||||
|
|
||||||
def send(self, data, **kwargs):
|
|
||||||
if not self.can_send:
|
|
||||||
raise RPCException(_("You cannot send on this socket."))
|
|
||||||
self.sock.send_multipart(data, **kwargs)
|
|
||||||
|
|
||||||
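# NOTE(editor): typical pairings for this wrapper, as used elsewhere in this
# driver (hypothetical values; PUSH/PULL for casts, PUB/SUB for fanout):
#
#     receiver = ZmqSocket('ipc:///var/run/openstack/zmq_topic_engine',
#                          zmq.PULL, bind=True)                  # can_recv
#     sender = ZmqSocket('tcp://127.0.0.1:9501', zmq.PUSH, bind=False)
#     sender.send([b'msg-id', b'engine', b'cast', b'{}'])        # can_send
#     parts = receiver.recv()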
|
|
||||||
class ZmqClient(object):
|
|
||||||
"""Client for ZMQ sockets."""
|
|
||||||
|
|
||||||
def __init__(self, addr):
|
|
||||||
self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)
|
|
||||||
|
|
||||||
def cast(self, msg_id, topic, data, envelope):
|
|
||||||
msg_id = msg_id or 0
|
|
||||||
|
|
||||||
if not envelope:
|
|
||||||
self.outq.send(map(bytes,
|
|
||||||
(msg_id, topic, 'cast', _serialize(data))))
|
|
||||||
return
|
|
||||||
|
|
||||||
rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
|
|
||||||
zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items())
|
|
||||||
self.outq.send(map(bytes,
|
|
||||||
(msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
|
|
||||||
|
|
||||||
def close(self):
|
|
||||||
self.outq.close()
|
|
||||||
|
|
||||||
|
|
||||||
class RpcContext(rpc_common.CommonRpcContext):
|
|
||||||
"""Context that supports replying to a rpc.call."""
|
|
||||||
def __init__(self, **kwargs):
|
|
||||||
self.replies = []
|
|
||||||
super(RpcContext, self).__init__(**kwargs)
|
|
||||||
|
|
||||||
def deepcopy(self):
|
|
||||||
values = self.to_dict()
|
|
||||||
values['replies'] = self.replies
|
|
||||||
return self.__class__(**values)
|
|
||||||
|
|
||||||
def reply(self, reply=None, failure=None, ending=False):
|
|
||||||
if ending:
|
|
||||||
return
|
|
||||||
self.replies.append(reply)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def marshal(self, ctx):
|
|
||||||
ctx_data = ctx.to_dict()
|
|
||||||
return _serialize(ctx_data)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def unmarshal(self, data):
|
|
||||||
return RpcContext.from_dict(_deserialize(data))
|
|
||||||
|
|
||||||
|
|
||||||
class InternalContext(object):
|
|
||||||
"""Used by ConsumerBase as a private context for - methods."""
|
|
||||||
|
|
||||||
def __init__(self, proxy):
|
|
||||||
self.proxy = proxy
|
|
||||||
self.msg_waiter = None
|
|
||||||
|
|
||||||
def _get_response(self, ctx, proxy, topic, data):
|
|
||||||
"""Process a curried message and cast the result to topic."""
|
|
||||||
LOG.debug("Running func with context: %s", ctx.to_dict())
|
|
||||||
data.setdefault('version', None)
|
|
||||||
data.setdefault('args', {})
|
|
||||||
|
|
||||||
try:
|
|
||||||
result = proxy.dispatch(
|
|
||||||
ctx, data['version'], data['method'],
|
|
||||||
data.get('namespace'), **data['args'])
|
|
||||||
return ConsumerBase.normalize_reply(result, ctx.replies)
|
|
||||||
except greenlet.GreenletExit:
|
|
||||||
# ignore these since they are just from shutdowns
|
|
||||||
pass
|
|
||||||
except rpc_common.ClientException as e:
|
|
||||||
LOG.debug("Expected exception during message handling (%s)" %
|
|
||||||
e._exc_info[1])
|
|
||||||
return {'exc':
|
|
||||||
rpc_common.serialize_remote_exception(e._exc_info,
|
|
||||||
log_failure=False)}
|
|
||||||
except Exception:
|
|
||||||
LOG.error(_LE("Exception during message handling"))
|
|
||||||
return {'exc':
|
|
||||||
rpc_common.serialize_remote_exception(sys.exc_info())}
|
|
||||||
|
|
||||||
def reply(self, ctx, proxy,
|
|
||||||
msg_id=None, context=None, topic=None, msg=None):
|
|
||||||
"""Reply to a casted call."""
|
|
||||||
# NOTE(ewindisch): context kwarg exists for Grizzly compat.
|
|
||||||
# this may be able to be removed earlier than
|
|
||||||
# 'I' if ConsumerBase.process were refactored.
|
|
||||||
if type(msg) is list:
|
|
||||||
payload = msg[-1]
|
|
||||||
else:
|
|
||||||
payload = msg
|
|
||||||
|
|
||||||
response = ConsumerBase.normalize_reply(
|
|
||||||
self._get_response(ctx, proxy, topic, payload),
|
|
||||||
ctx.replies)
|
|
||||||
|
|
||||||
LOG.debug("Sending reply")
|
|
||||||
_multi_send(_cast, ctx, topic, {
|
|
||||||
'method': '-process_reply',
|
|
||||||
'args': {
|
|
||||||
'msg_id': msg_id, # Include for Folsom compat.
|
|
||||||
'response': response
|
|
||||||
}
|
|
||||||
}, _msg_id=msg_id)
|
|
||||||
|
|
||||||
|
|
||||||
class ConsumerBase(object):
|
|
||||||
"""Base Consumer."""
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self.private_ctx = InternalContext(None)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def normalize_reply(self, result, replies):
|
|
||||||
# TODO(ewindisch): re-evaluate and document this method.
|
|
||||||
if isinstance(result, types.GeneratorType):
|
|
||||||
return list(result)
|
|
||||||
elif replies:
|
|
||||||
return replies
|
|
||||||
        else:
            return [result]

    def process(self, proxy, ctx, data):
        data.setdefault('version', None)
        data.setdefault('args', {})

        # Methods starting with '-' are
        # processed internally. (not a valid method name)
        method = data.get('method')
        if not method:
            LOG.error(_LE("RPC message did not include method."))
            return

        # Internal method
        # uses internal context for safety.
        if method == '-reply':
            self.private_ctx.reply(ctx, proxy, **data['args'])
            return

        proxy.dispatch(ctx, data['version'],
                       data['method'], data.get('namespace'), **data['args'])


class ZmqBaseReactor(ConsumerBase):
    """A consumer class implementing a centralized casting broker (PULL-PUSH).

    Used for RoundRobin requests.
    """

    def __init__(self, conf):
        super(ZmqBaseReactor, self).__init__()

        self.proxies = {}
        self.threads = []
        self.sockets = []
        self.subscribe = {}

        self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)

    def register(self, proxy, in_addr, zmq_type_in,
                 in_bind=True, subscribe=None):

        LOG.info(_LI("Registering reactor"))

        if zmq_type_in not in (zmq.PULL, zmq.SUB):
            raise RPCException("Bad input socktype")

        # Items push in.
        inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                        subscribe=subscribe)

        self.proxies[inq] = proxy
        self.sockets.append(inq)

        LOG.info(_LI("In reactor registered"))

    def consume_in_thread(self):
        @excutils.forever_retry_uncaught_exceptions
        def _consume(sock):
            LOG.info(_LI("Consuming socket"))
            while True:
                self.consume(sock)

        for k in self.proxies.keys():
            self.threads.append(
                self.pool.spawn(_consume, k)
            )

    def wait(self):
        for t in self.threads:
            t.wait()

    def close(self):
        for s in self.sockets:
            s.close()

        for t in self.threads:
            t.kill()


class ZmqProxy(ZmqBaseReactor):
    """A consumer class implementing a topic-based proxy.

    Forwards to IPC sockets.
    """

    def __init__(self, conf):
        super(ZmqProxy, self).__init__(conf)
        pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
        self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))

        self.topic_proxy = {}

    def consume(self, sock):
        ipc_dir = CONF.rpc_zmq_ipc_dir

        data = sock.recv(copy=False)
        topic = data[1].bytes

        if topic.startswith('fanout~'):
            sock_type = zmq.PUB
            topic = topic.split('.', 1)[0]
        elif topic.startswith('zmq_replies'):
            sock_type = zmq.PUB
        else:
            sock_type = zmq.PUSH

        if topic not in self.topic_proxy:
            def publisher(waiter):
                LOG.info(_LI("Creating proxy for topic: %s"), topic)

                try:
                    # The topic is received over the network,
                    # don't trust this input.
                    if self.badchars.search(topic) is not None:
                        emsg = _("Topic contained dangerous characters.")
                        LOG.warn(emsg)
                        raise RPCException(emsg)

                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type, bind=True)
                except RPCException:
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open,
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                waiter.send(True)

                while(True):
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data, copy=False)

            wait_sock_creation = eventlet.event.Event()
            eventlet.spawn(publisher, wait_sock_creation)

            try:
                wait_sock_creation.wait()
            except RPCException:
                LOG.error(_LE("Topic socket file creation failed."))
                return

        try:
            self.topic_proxy[topic].put_nowait(data)
        except eventlet.queue.Full:
            LOG.error(_LE("Local per-topic backlog buffer full for topic "
                          "%(topic)s. Dropping message.") % {'topic': topic})

    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Required IPC directory does not exist at"
                                  " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Permission denied to IPC directory at"
                                  " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Could not create ZeroMQ receiver daemon. "
                              "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()


def unflatten_envelope(packenv):
    """Unflattens the RPC envelope.

    Takes a list and returns a dictionary.
    i.e. [1,2,3,4] => {1: 2, 3: 4}
    """
    i = iter(packenv)
    h = {}
    try:
        while True:
            k = six.next(i)
            h[k] = six.next(i)
    except StopIteration:
        return h


class ZmqReactor(ZmqBaseReactor):
    """A consumer class implementing a consumer for messages.

    Can also be used as a 1:1 proxy
    """

    def __init__(self, conf):
        super(ZmqReactor, self).__init__(conf)

    def consume(self, sock):
        # TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug("CONSUMER RECEIVED DATA: %s", data)

        proxy = self.proxies[sock]

        if data[2] == 'cast':  # Legacy protocol
            packenv = data[3]

            ctx, msg = _deserialize(packenv)
            request = rpc_common.deserialize_msg(msg)
            ctx = RpcContext.unmarshal(ctx)
        elif data[2] == 'impl_zmq_v2':
            packenv = data[4:]

            msg = unflatten_envelope(packenv)
            request = rpc_common.deserialize_msg(msg)

            # Unmarshal only after verifying the message.
            ctx = RpcContext.unmarshal(data[3])
        else:
            LOG.error(_LE("ZMQ Envelope version unsupported or unknown."))
            return

        self.pool.spawn_n(self.process, proxy, ctx, request)


class Connection(rpc_common.Connection):
    """Manages connections and threads."""

    def __init__(self, conf):
        self.topics = []
        self.reactor = ZmqReactor(conf)

    def create_consumer(self, topic, proxy, fanout=False):
        # Register with matchmaker.
        _get_matchmaker().register(topic, CONF.rpc_zmq_host)

        # Subscription scenarios
        if fanout:
            sock_type = zmq.SUB
            subscribe = ('', fanout)[type(fanout) == str]
            topic = 'fanout~' + topic.split('.', 1)[0]
        else:
            sock_type = zmq.PULL
            subscribe = None
            topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))

        if topic in self.topics:
            LOG.info(_LI("Skipping topic registration. Already registered."))
            return

        # Receive messages from (local) proxy
        inaddr = "ipc://%s/zmq_topic_%s" % \
            (CONF.rpc_zmq_ipc_dir, topic)

        LOG.debug("Consumer is a zmq.%s",
                  ['PULL', 'SUB'][sock_type == zmq.SUB])

        self.reactor.register(proxy, inaddr, sock_type,
                              subscribe=subscribe, in_bind=False)
        self.topics.append(topic)

    def close(self):
        _get_matchmaker().stop_heartbeat()
        for topic in self.topics:
            _get_matchmaker().unregister(topic, CONF.rpc_zmq_host)

        self.reactor.close()
        self.topics = []

    def wait(self):
        self.reactor.wait()

    def consume_in_thread(self):
        _get_matchmaker().start_heartbeat()
        self.reactor.consume_in_thread()


def _cast(addr, context, topic, msg, timeout=None, envelope=False,
          _msg_id=None):
    timeout_cast = timeout or CONF.rpc_cast_timeout
    payload = [RpcContext.marshal(context), msg]

    with Timeout(timeout_cast, exception=rpc_common.Timeout):
        try:
            conn = ZmqClient(addr)

            # assumes cast can't return an exception
            conn.cast(_msg_id, topic, payload, envelope)
        except zmq.ZMQError:
            raise RPCException("Cast failed. ZMQ Socket Exception")
        finally:
            if 'conn' in vars():
                conn.close()


def _call(addr, context, topic, msg, timeout=None,
          envelope=False):
    # timeout_response is how long we wait for a response
    timeout = timeout or CONF.rpc_response_timeout

    # The msg_id is used to track replies.
    msg_id = uuid.uuid4().hex

    # Replies always come into the reply service.
    reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host

    LOG.debug("Creating payload")
    # Curry the original request into a reply method.
    mcontext = RpcContext.marshal(context)
    payload = {
        'method': '-reply',
        'args': {
            'msg_id': msg_id,
            'topic': reply_topic,
            # TODO(ewindisch): safe to remove mcontext in I.
            'msg': [mcontext, msg]
        }
    }

    LOG.debug("Creating queue socket for reply waiter")

    # Messages arriving async.
    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
    with Timeout(timeout, exception=rpc_common.Timeout):
        try:
            msg_waiter = ZmqSocket(
                "ipc://%s/zmq_topic_zmq_replies.%s" %
                (CONF.rpc_zmq_ipc_dir,
                 CONF.rpc_zmq_host),
                zmq.SUB, subscribe=msg_id, bind=False
            )

            LOG.debug("Sending cast")
            _cast(addr, context, topic, payload, envelope)

            LOG.debug("Cast sent; Waiting reply")
            # Blocks until receives reply
            msg = msg_waiter.recv()
            LOG.debug("Received message: %s", msg)
            LOG.debug("Unpacking response")

            if msg[2] == 'cast':  # Legacy version
                raw_msg = _deserialize(msg[-1])[-1]
            elif msg[2] == 'impl_zmq_v2':
                rpc_envelope = unflatten_envelope(msg[4:])
                raw_msg = rpc_common.deserialize_msg(rpc_envelope)
            else:
                raise rpc_common.UnsupportedRpcEnvelopeVersion(
                    _("Unsupported or unknown ZMQ envelope returned."))

            responses = raw_msg['args']['response']
        # ZMQError trumps the Timeout error.
        except zmq.ZMQError:
            raise RPCException("ZMQ Socket Error")
        except (IndexError, KeyError):
            raise RPCException(_("RPC Message Invalid."))
        finally:
            if 'msg_waiter' in vars():
                msg_waiter.close()

    # It seems we don't need to do all of the following,
    # but perhaps it would be useful for multicall?
    # One effect of this is that we're checking all
    # responses for Exceptions.
    for resp in responses:
        if isinstance(resp, types.DictType) and 'exc' in resp:
            raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])

    return responses[-1]


def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """Wraps the sending of messages.

    Dispatches to the matchmaker and sends message to all relevant hosts.
    """
    conf = CONF
    LOG.debug("%(msg)s" % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = _get_matchmaker().queues(topic)
    LOG.debug("Sending message(s) to: %s", queues)

    # Don't stack if we have no matchmaker results
    if not queues:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope,
                             _msg_id)
            return
        return method(_addr, context, _topic, msg, timeout,
                      envelope)


def create_connection(conf, new=True):
    return Connection(conf)


def multicall(conf, *args, **kwargs):
    """Multiple calls."""
    return _multi_send(_call, *args, **kwargs)


def call(conf, *args, **kwargs):
    """Send a message, expect a response."""
    data = _multi_send(_call, *args, **kwargs)
    return data[-1]


def cast(conf, *args, **kwargs):
    """Send a message expecting no reply."""
    _multi_send(_cast, *args, **kwargs)


def fanout_cast(conf, context, topic, msg, **kwargs):
    """Send a message to all listening and expect no reply."""
    # NOTE(ewindisch): fanout~ is used because it avoids splitting on '.'
    # and acts as a non-subtle hint to the matchmaker and ZmqProxy.
    _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)


def notify(conf, context, topic, msg, envelope):
    """Send notification event.

    Notifications are sent to topic-priority.
    This differs from the AMQP drivers which send to topic.priority.
    """
    # NOTE(ewindisch): dot-priority in rpc notifier does not
    # work with our assumptions.
    topic = topic.replace('.', '-')
    cast(conf, context, topic, msg, envelope=envelope)


def cleanup():
    """Clean up resources in use by implementation."""
    global ZMQ_CTX
    if ZMQ_CTX:
        ZMQ_CTX.term()
        ZMQ_CTX = None

    global matchmaker
    matchmaker = None


def _get_ctxt():
    if not zmq:
        raise ImportError("Failed to import eventlet.green.zmq")

    global ZMQ_CTX
    if not ZMQ_CTX:
        ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
    return ZMQ_CTX


def _get_matchmaker(*args, **kwargs):
    global matchmaker
    if not matchmaker:
        mm = CONF.rpc_zmq_matchmaker
        if mm.endswith('matchmaker.MatchMakerRing'):
            mm.replace('matchmaker', 'matchmaker_ring')
            LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
                       ' %(new)s instead') % dict(
                     orig=CONF.rpc_zmq_matchmaker, new=mm))
        matchmaker = importutils.import_object(mm, *args, **kwargs)
    return matchmaker
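The v2 envelope handled above is just a flat list of alternating keys and values. A minimal sketch of the round trip, assuming nothing beyond the standard library; flatten_envelope here is a hypothetical counterpart for illustration, the real packing lives in rpc_common.serialize_msg:

    import itertools


    def flatten_envelope(envelope):
        # {'oslo.version': '2.0', ...} -> ['oslo.version', '2.0', ...]
        return list(itertools.chain.from_iterable(envelope.items()))


    def unflatten(packenv):
        # Pair consecutive items back up, mirroring unflatten_envelope() above.
        i = iter(packenv)
        return dict(zip(i, i))


    packed = flatten_envelope({'oslo.version': '2.0', 'oslo.message': '{}'})
    assert unflatten(packed) == {'oslo.version': '2.0', 'oslo.message': '{}'}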
@@ -1,323 +0,0 @@
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""

import contextlib

import eventlet
from oslo.config import cfg

from heat.openstack.common.gettextutils import _, _LI
from heat.openstack.common import log as logging


matchmaker_opts = [
    cfg.IntOpt('matchmaker_heartbeat_freq',
               default=300,
               help='Heartbeat frequency'),
    cfg.IntOpt('matchmaker_heartbeat_ttl',
               default=600,
               help='Heartbeat time-to-live.'),
]

CONF = cfg.CONF
CONF.register_opts(matchmaker_opts)
LOG = logging.getLogger(__name__)
contextmanager = contextlib.contextmanager


class MatchMakerException(Exception):
    """Signifies that a match could not be found."""
    message = _("Match not found by MatchMaker.")


class Exchange(object):
    """Implements lookups.

    Subclass this to support hashtables, dns, etc.
    """
    def __init__(self):
        pass

    def run(self, key):
        raise NotImplementedError()


class Binding(object):
    """A binding on which to perform a lookup."""
    def __init__(self):
        pass

    def test(self, key):
        raise NotImplementedError()


class MatchMakerBase(object):
    """Match Maker Base Class.

    Build off HeartbeatMatchMakerBase if building a heartbeat-capable
    MatchMaker.
    """
    def __init__(self):
        # Array of tuples. Index [2] toggles negation, [3] is last-if-true
        self.bindings = []

        self.no_heartbeat_msg = _('Matchmaker does not implement '
                                  'registration or heartbeat.')

    def register(self, key, host):
        """Register a host on a backend.

        Heartbeats, if applicable, may keepalive registration.
        """
        pass

    def ack_alive(self, key, host):
        """Acknowledge that a key.host is alive.

        Used internally for updating heartbeats, but may also be used
        publicly to acknowledge a system is alive (i.e. rpc message
        successfully sent to host)
        """
        pass

    def is_alive(self, topic, host):
        """Checks if a host is alive."""
        pass

    def expire(self, topic, host):
        """Explicitly expire a host's registration."""
        pass

    def send_heartbeats(self):
        """Send all heartbeats.

        Use start_heartbeat to spawn a heartbeat greenthread,
        which loops this method.
        """
        pass

    def unregister(self, key, host):
        """Unregister a topic."""
        pass

    def start_heartbeat(self):
        """Spawn heartbeat greenthread."""
        pass

    def stop_heartbeat(self):
        """Destroys the heartbeat greenthread."""
        pass

    def add_binding(self, binding, rule, last=True):
        self.bindings.append((binding, rule, False, last))

    # NOTE(ewindisch): kept the following method in case we implement the
    #                  underlying support.
    # def add_negate_binding(self, binding, rule, last=True):
    #     self.bindings.append((binding, rule, True, last))

    def queues(self, key):
        workers = []

        # bit is for negate bindings - if we choose to implement it.
        # last stops processing rules if this matches.
        for (binding, exchange, bit, last) in self.bindings:
            if binding.test(key):
                workers.extend(exchange.run(key))

                # Support last.
                if last:
                    return workers
        return workers


class HeartbeatMatchMakerBase(MatchMakerBase):
    """Base for a heart-beat capable MatchMaker.

    Provides common methods for registering, unregistering, and maintaining
    heartbeats.
    """
    def __init__(self):
        self.hosts = set()
        self._heart = None
        self.host_topic = {}

        super(HeartbeatMatchMakerBase, self).__init__()

    def send_heartbeats(self):
        """Send all heartbeats.

        Use start_heartbeat to spawn a heartbeat greenthread,
        which loops this method.
        """
        for key, host in self.host_topic:
            self.ack_alive(key, host)

    def ack_alive(self, key, host):
        """Acknowledge that a host.topic is alive.

        Used internally for updating heartbeats, but may also be used
        publicly to acknowledge a system is alive (i.e. rpc message
        successfully sent to host)
        """
        raise NotImplementedError("Must implement ack_alive")

    def backend_register(self, key, host):
        """Implements registration logic.

        Called by register(self,key,host)
        """
        raise NotImplementedError("Must implement backend_register")

    def backend_unregister(self, key, key_host):
        """Implements de-registration logic.

        Called by unregister(self,key,host)
        """
        raise NotImplementedError("Must implement backend_unregister")

    def register(self, key, host):
        """Register a host on a backend.

        Heartbeats, if applicable, may keepalive registration.
        """
        self.hosts.add(host)
        self.host_topic[(key, host)] = host
        key_host = '.'.join((key, host))

        self.backend_register(key, key_host)

        self.ack_alive(key, host)

    def unregister(self, key, host):
        """Unregister a topic."""
        if (key, host) in self.host_topic:
            del self.host_topic[(key, host)]

        self.hosts.discard(host)
        self.backend_unregister(key, '.'.join((key, host)))

        LOG.info(_LI("Matchmaker unregistered: %(key)s, %(host)s"),
                 {'key': key, 'host': host})

    def start_heartbeat(self):
        """Implementation of MatchMakerBase.start_heartbeat.

        Launches greenthread looping send_heartbeats(),
        yielding for CONF.matchmaker_heartbeat_freq seconds
        between iterations.
        """
        if not self.hosts:
            raise MatchMakerException(
                _("Register before starting heartbeat."))

        def do_heartbeat():
            while True:
                self.send_heartbeats()
                eventlet.sleep(CONF.matchmaker_heartbeat_freq)

        self._heart = eventlet.spawn(do_heartbeat)

    def stop_heartbeat(self):
        """Destroys the heartbeat greenthread."""
        if self._heart:
            self._heart.kill()


class DirectBinding(Binding):
    """Specifies a host in the key via a '.' character.

    Although dots are used in the key, the behavior here is
    that it maps directly to a host, thus direct.
    """
    def test(self, key):
        return '.' in key


class TopicBinding(Binding):
    """Matches a 'bare' key without dots.

    AMQP generally considers topic exchanges to be those *with* dots,
    but we deviate here in terminology as the behavior here matches
    that of a topic exchange (whereas where there are dots, behavior
    matches that of a direct exchange).
    """
    def test(self, key):
        return '.' not in key


class FanoutBinding(Binding):
    """Match on fanout keys, where key starts with 'fanout.' string."""
    def test(self, key):
        return key.startswith('fanout~')


class StubExchange(Exchange):
    """Exchange that does nothing."""
    def run(self, key):
        return [(key, None)]


class LocalhostExchange(Exchange):
    """Exchange where all direct topics are local."""
    def __init__(self, host='localhost'):
        self.host = host
        super(Exchange, self).__init__()

    def run(self, key):
        return [('.'.join((key.split('.')[0], self.host)), self.host)]


class DirectExchange(Exchange):
    """Exchange where all topic keys are split, sending to second half.

    i.e. "compute.host" sends a message to "compute.host" running on "host"
    """
    def __init__(self):
        super(Exchange, self).__init__()

    def run(self, key):
        e = key.split('.', 1)[1]
        return [(key, e)]


class MatchMakerLocalhost(MatchMakerBase):
    """Match Maker where all bare topics resolve to localhost.

    Useful for testing.
    """
    def __init__(self, host='localhost'):
        super(MatchMakerLocalhost, self).__init__()
        self.add_binding(FanoutBinding(), LocalhostExchange(host))
        self.add_binding(DirectBinding(), DirectExchange())
        self.add_binding(TopicBinding(), LocalhostExchange(host))


class MatchMakerStub(MatchMakerBase):
    """Match Maker where topics are untouched.

    Useful for testing, or for AMQP/brokered queues.
    Will not work where knowledge of hosts is known (i.e. zeromq)
    """
    def __init__(self):
        super(MatchMakerStub, self).__init__()

        self.add_binding(FanoutBinding(), StubExchange())
        self.add_binding(DirectBinding(), StubExchange())
        self.add_binding(TopicBinding(), StubExchange())
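MatchMakerBase.queues() walks the (binding, exchange) pairs in registration order and stops at the first binding whose test() matches. A small illustration using the localhost matchmaker defined above (results assume host='node1'; illustrative only):

    from heat.openstack.common.rpc import matchmaker

    mm = matchmaker.MatchMakerLocalhost(host='node1')

    # Bare topic -> TopicBinding -> LocalhostExchange
    print(mm.queues('engine'))         # [('engine.node1', 'node1')]

    # Dotted topic -> DirectBinding -> DirectExchange (host taken from the key)
    print(mm.queues('engine.node2'))   # [('engine.node2', 'node2')]

    # Fanout topic -> FanoutBinding -> LocalhostExchange
    print(mm.queues('fanout~engine'))  # [('fanout~engine.node1', 'node1')]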
@@ -1,143 +0,0 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""

from oslo.config import cfg

from heat.openstack.common import importutils
from heat.openstack.common import log as logging
from heat.openstack.common.rpc import matchmaker as mm_common

redis = importutils.try_import('redis')


matchmaker_redis_opts = [
    cfg.StrOpt('host',
               default='127.0.0.1',
               help='Host to locate redis'),
    cfg.IntOpt('port',
               default=6379,
               help='Use this port to connect to redis host.'),
    cfg.StrOpt('password',
               help='Password for Redis server. (optional)'),
]

CONF = cfg.CONF
opt_group = cfg.OptGroup(name='matchmaker_redis',
                         title='Options for Redis-based MatchMaker')
CONF.register_group(opt_group)
CONF.register_opts(matchmaker_redis_opts, opt_group)
LOG = logging.getLogger(__name__)


class RedisExchange(mm_common.Exchange):
    def __init__(self, matchmaker):
        self.matchmaker = matchmaker
        self.redis = matchmaker.redis
        super(RedisExchange, self).__init__()


class RedisTopicExchange(RedisExchange):
    """Exchange where all topic keys are split, sending to second half.

    i.e. "compute.host" sends a message to "compute" running on "host"
    """
    def run(self, topic):
        while True:
            member_name = self.redis.srandmember(topic)

            if not member_name:
                # If this happens, there are no
                # longer any members.
                break

            if not self.matchmaker.is_alive(topic, member_name):
                continue

            host = member_name.split('.', 1)[1]
            return [(member_name, host)]
        return []


class RedisFanoutExchange(RedisExchange):
    """Return a list of all hosts."""
    def run(self, topic):
        topic = topic.split('~', 1)[1]
        hosts = self.redis.smembers(topic)
        good_hosts = filter(
            lambda host: self.matchmaker.is_alive(topic, host), hosts)

        return [(x, x.split('.', 1)[1]) for x in good_hosts]


class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
    """MatchMaker registering and looking-up hosts with a Redis server."""
    def __init__(self):
        super(MatchMakerRedis, self).__init__()

        if not redis:
            raise ImportError("Failed to import module redis.")

        self.redis = redis.Redis(
            host=CONF.matchmaker_redis.host,
            port=CONF.matchmaker_redis.port,
            password=CONF.matchmaker_redis.password)

        self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self))
        self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange())
        self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self))

    def ack_alive(self, key, host):
        topic = "%s.%s" % (key, host)
        if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl):
            # If we could not update the expiration, the key
            # might have been pruned. Re-register, creating a new
            # key in Redis.
            self.register(self.topic_host[host], host)

    def is_alive(self, topic, host):
        if self.redis.ttl(host) == -1:
            self.expire(topic, host)
            return False
        return True

    def expire(self, topic, host):
        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.delete(host)
            pipe.srem(topic, host)
            pipe.execute()

    def backend_register(self, key, key_host):
        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.sadd(key, key_host)

            # No value is needed, we just
            # care if it exists. Sets aren't viable
            # because only keys can expire.
            pipe.set(key_host, '')

            pipe.execute()

    def backend_unregister(self, key, key_host):
        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.srem(key, key_host)
            pipe.delete(key_host)
            pipe.execute()
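A rough sketch of the Redis key layout the registration code above produces; this assumes a local Redis reachable with the redis-py client and uses illustrative topic/host names:

    import redis

    r = redis.Redis(host='127.0.0.1', port=6379)

    # register('engine', 'node1') ends up as:
    r.sadd('engine', 'engine.node1')   # topic set holds "<topic>.<host>" members
    r.set('engine.node1', '')          # per-host key, used only for its TTL
    r.expire('engine.node1', 600)      # ack_alive() keeps refreshing this TTL

    # queues('engine') then picks a live member at random:
    member = r.srandmember('engine')   # e.g. b'engine.node1'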
@@ -1,106 +0,0 @@
# Copyright 2011-2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""

import itertools
import json

from oslo.config import cfg

from heat.openstack.common.gettextutils import _LW
from heat.openstack.common import log as logging
from heat.openstack.common.rpc import matchmaker as mm


matchmaker_opts = [
    # Matchmaker ring file
    cfg.StrOpt('ringfile',
               deprecated_name='matchmaker_ringfile',
               deprecated_group='DEFAULT',
               default='/etc/oslo/matchmaker_ring.json',
               help='Matchmaker ring file (JSON)'),
]

CONF = cfg.CONF
CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
LOG = logging.getLogger(__name__)


class RingExchange(mm.Exchange):
    """Match Maker where hosts are loaded from a static JSON formatted file.

    __init__ takes optional ring dictionary argument, otherwise
    loads the ringfile from CONF.matchmaker_ringfile.
    """
    def __init__(self, ring=None):
        super(RingExchange, self).__init__()

        if ring:
            self.ring = ring
        else:
            with open(CONF.matchmaker_ring.ringfile, 'r') as fh:
                self.ring = json.load(fh)

        self.ring0 = {}
        for k in self.ring.keys():
            self.ring0[k] = itertools.cycle(self.ring[k])

    def _ring_has(self, key):
        return key in self.ring0


class RoundRobinRingExchange(RingExchange):
    """A Topic Exchange based on a hashmap."""
    def __init__(self, ring=None):
        super(RoundRobinRingExchange, self).__init__(ring)

    def run(self, key):
        if not self._ring_has(key):
            LOG.warn(
                _LW("No key defining hosts for topic '%s', "
                    "see ringfile") % (key, )
            )
            return []
        host = next(self.ring0[key])
        return [(key + '.' + host, host)]


class FanoutRingExchange(RingExchange):
    """Fanout Exchange based on a hashmap."""
    def __init__(self, ring=None):
        super(FanoutRingExchange, self).__init__(ring)

    def run(self, key):
        # Assume starts with "fanout~", strip it for lookup.
        nkey = key.split('fanout~')[1:][0]
        if not self._ring_has(nkey):
            LOG.warn(
                _LW("No key defining hosts for topic '%s', "
                    "see ringfile") % (nkey, )
            )
            return []
        return map(lambda x: (key + '.' + x, x), self.ring[nkey])


class MatchMakerRing(mm.MatchMakerBase):
    """Match Maker where hosts are loaded from a static hashmap."""
    def __init__(self, ring=None):
        super(MatchMakerRing, self).__init__()
        self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring))
        self.add_binding(mm.DirectBinding(), mm.DirectExchange())
        self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring))
@@ -1,225 +0,0 @@
# Copyright 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A helper class for proxy objects to remote APIs.

For more information about rpc API version numbers, see:
    rpc/dispatcher.py
"""

import six

from heat.openstack.common import rpc
from heat.openstack.common.rpc import common as rpc_common
from heat.openstack.common.rpc import serializer as rpc_serializer


class RpcProxy(object):
    """A helper class for rpc clients.

    This class is a wrapper around the RPC client API.  It allows you to
    specify the topic and API version in a single place.  This is intended to
    be used as a base class for a class that implements the client side of an
    rpc API.
    """

    # The default namespace, which can be overridden in a subclass.
    RPC_API_NAMESPACE = None

    def __init__(self, topic, default_version, version_cap=None,
                 serializer=None):
        """Initialize an RpcProxy.

        :param topic: The topic to use for all messages.
        :param default_version: The default API version to request in all
               outgoing messages.  This can be overridden on a per-message
               basis.
        :param version_cap: Optionally cap the maximum version used for sent
               messages.
        :param serializer: Optionally (de-)serialize entities with a
               provided helper.
        """
        self.topic = topic
        self.default_version = default_version
        self.version_cap = version_cap
        if serializer is None:
            serializer = rpc_serializer.NoOpSerializer()
        self.serializer = serializer
        super(RpcProxy, self).__init__()

    def _set_version(self, msg, vers):
        """Helper method to set the version in a message.

        :param msg: The message having a version added to it.
        :param vers: The version number to add to the message.
        """
        v = vers if vers else self.default_version
        if (self.version_cap and not
                rpc_common.version_is_compatible(self.version_cap, v)):
            raise rpc_common.RpcVersionCapError(version_cap=self.version_cap)
        msg['version'] = v

    def _get_topic(self, topic):
        """Return the topic to use for a message."""
        return topic if topic else self.topic

    def can_send_version(self, version):
        """Check to see if a version is compatible with the version cap."""
        return (not self.version_cap or
                rpc_common.version_is_compatible(self.version_cap, version))

    @staticmethod
    def make_namespaced_msg(method, namespace, **kwargs):
        return {'method': method, 'namespace': namespace, 'args': kwargs}

    def make_msg(self, method, **kwargs):
        return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
                                        **kwargs)

    def _serialize_msg_args(self, context, kwargs):
        """Helper method called to serialize message arguments.

        This calls our serializer on each argument, returning a new
        set of args that have been serialized.

        :param context: The request context
        :param kwargs: The arguments to serialize
        :returns: A new set of serialized arguments
        """
        new_kwargs = dict()
        for argname, arg in six.iteritems(kwargs):
            new_kwargs[argname] = self.serializer.serialize_entity(context,
                                                                   arg)
        return new_kwargs

    def call(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.call() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response.  If no timeout is specified, a default timeout will be
               used that is usually sufficient.

        :returns: The return value from the remote method.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        real_topic = self._get_topic(topic)
        try:
            result = rpc.call(context, real_topic, msg, timeout)
            return self.serializer.deserialize_entity(context, result)
        except rpc.common.Timeout as exc:
            raise rpc.common.Timeout(
                exc.info, real_topic, msg.get('method'))

    def multicall(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.multicall() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response.  If no timeout is specified, a default timeout will be
               used that is usually sufficient.

        :returns: An iterator that lets you process each of the returned values
               from the remote method as they arrive.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        real_topic = self._get_topic(topic)
        try:
            result = rpc.multicall(context, real_topic, msg, timeout)
            return self.serializer.deserialize_entity(context, result)
        except rpc.common.Timeout as exc:
            raise rpc.common.Timeout(
                exc.info, real_topic, msg.get('method'))

    def cast(self, context, msg, topic=None, version=None):
        """rpc.cast() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None.  rpc.cast() does not wait on any return value from the
               remote method.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.cast(context, self._get_topic(topic), msg)

    def fanout_cast(self, context, msg, topic=None, version=None):
        """rpc.fanout_cast() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None.  rpc.fanout_cast() does not wait on any return value
               from the remote method.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.fanout_cast(context, self._get_topic(topic), msg)

    def cast_to_server(self, context, server_params, msg, topic=None,
                       version=None):
        """rpc.cast_to_server() a remote method.

        :param context: The request context
        :param server_params: Server parameters.  See rpc.cast_to_server() for
               details.
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None.  rpc.cast_to_server() does not wait on any
               return values.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)

    def fanout_cast_to_server(self, context, server_params, msg, topic=None,
                              version=None):
        """rpc.fanout_cast_to_server() a remote method.

        :param context: The request context
        :param server_params: Server parameters.  See rpc.cast_to_server() for
               details.
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.

        :returns: None.  rpc.fanout_cast_to_server() does not wait on any
               return values.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.fanout_cast_to_server(context, server_params,
                                  self._get_topic(topic), msg)
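For reference, this is the dict envelope RpcProxy builds before handing it to rpc.call()/rpc.cast(). A short illustrative sketch; the EngineAPI subclass here is hypothetical, not part of the tree:

    from heat.openstack.common.rpc import proxy


    class EngineAPI(proxy.RpcProxy):
        RPC_API_NAMESPACE = None


    api = EngineAPI(topic='engine', default_version='1.0')
    msg = api.make_msg('identify_stack', stack_name='mystack')
    # {'method': 'identify_stack', 'namespace': None,
    #  'args': {'stack_name': 'mystack'}}

    api._set_version(msg, None)  # stamps the default version
    # msg['version'] == '1.0'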
@@ -1,54 +0,0 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Provides the definition of an RPC serialization handler"""

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class Serializer(object):
    """Generic (de-)serialization definition base class."""

    @abc.abstractmethod
    def serialize_entity(self, context, entity):
        """Serialize something to primitive form.

        :param context: Security context
        :param entity: Entity to be serialized
        :returns: Serialized form of entity
        """
        pass

    @abc.abstractmethod
    def deserialize_entity(self, context, entity):
        """Deserialize something from primitive form.

        :param context: Security context
        :param entity: Primitive to be deserialized
        :returns: Deserialized form of entity
        """
        pass


class NoOpSerializer(Serializer):
    """A serializer that does nothing."""

    def serialize_entity(self, context, entity):
        return entity

    def deserialize_entity(self, context, entity):
        return entity
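A minimal sketch of a concrete serializer built on the base class above; JsonSerializer is illustrative only and not part of the module:

    import json

    from heat.openstack.common.rpc import serializer


    class JsonSerializer(serializer.Serializer):
        """Round-trips entities through JSON primitives."""

        def serialize_entity(self, context, entity):
            return json.dumps(entity)

        def deserialize_entity(self, context, entity):
            return json.loads(entity)


    s = JsonSerializer()
    assert s.deserialize_entity(None, s.serialize_entity(None, {'a': 1})) == {'a': 1}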
@@ -1,75 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from heat.openstack.common import log as logging
from heat.openstack.common import rpc
from heat.openstack.common.rpc import dispatcher as rpc_dispatcher
from heat.openstack.common import service


LOG = logging.getLogger(__name__)


class Service(service.Service):
    """Service object for binaries running on hosts.

    A service enables rpc by listening to queues based on topic and host.
    """
    def __init__(self, host, topic, manager=None, serializer=None):
        super(Service, self).__init__()
        self.host = host
        self.topic = topic
        self.serializer = serializer
        if manager is None:
            self.manager = self
        else:
            self.manager = manager

    def start(self):
        super(Service, self).start()

        self.conn = rpc.create_connection(new=True)
        LOG.debug("Creating Consumer connection for Service %s" %
                  self.topic)

        dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
                                                  self.serializer)

        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, dispatcher, fanout=False)

        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, dispatcher, fanout=False)

        self.conn.create_consumer(self.topic, dispatcher, fanout=True)

        # Hook to allow the manager to do other initializations after
        # the rpc connection is created.
        if callable(getattr(self.manager, 'initialize_service_hook', None)):
            self.manager.initialize_service_hook(self)

        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

    def stop(self):
        # Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them, as we're shutting down anyway.
        try:
            self.conn.close()
        except Exception:
            pass
        super(Service, self).stop()
@@ -1,38 +0,0 @@
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import eventlet
eventlet.monkey_patch()

import contextlib
import sys

from oslo.config import cfg

from heat.openstack.common import log as logging
from heat.openstack.common import rpc
from heat.openstack.common.rpc import impl_zmq

CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)


def main():
    CONF(sys.argv[1:], project='oslo')
    logging.setup("oslo")

    with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
        reactor.consume_in_thread()
        reactor.wait()
@@ -17,11 +17,11 @@
 Client side of the heat engine RPC API.
 """

-import heat.openstack.common.rpc.proxy
+from heat.common import messaging
 from heat.rpc import api


-class EngineClient(heat.openstack.common.rpc.proxy.RpcProxy):
+class EngineClient(object):
     '''Client side of the heat engine rpc API.

     API version history::
@@ -33,9 +33,29 @@ class EngineClient(heat.openstack.common.rpc.proxy.RpcProxy):
     BASE_RPC_API_VERSION = '1.0'

     def __init__(self):
-        super(EngineClient, self).__init__(
+        self._client = messaging.get_rpc_client(
             topic=api.ENGINE_TOPIC,
-            default_version=self.BASE_RPC_API_VERSION)
+            version=self.BASE_RPC_API_VERSION)
+
+    @staticmethod
+    def make_msg(method, **kwargs):
+        return method, kwargs
+
+    def call(self, ctxt, msg, version=None):
+        method, kwargs = msg
+        if version is not None:
+            client = self._client.prepare(version=version)
+        else:
+            client = self._client
+        return client.call(ctxt, method, **kwargs)
+
+    def cast(self, ctxt, msg, version=None):
+        method, kwargs = msg
+        if version is not None:
+            client = self._client.prepare(version=version)
+        else:
+            client = self._client
+        return client.cast(ctxt, method, **kwargs)
+
     def identify_stack(self, ctxt, stack_name):
         """
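Usage stays close to the old RpcProxy style: make_msg() now just returns a (method, kwargs) tuple, and call()/cast() forward it through the oslo.messaging RPCClient built by messaging.get_rpc_client(). A hedged usage sketch (ctxt is assumed to be an existing heat request context, and a running engine is required for the call to complete):

    from heat.rpc import client as rpc_client

    # make_msg() alone is side-effect free:
    rpc_client.EngineClient.make_msg('identify_stack', stack_name='mystack')
    # -> ('identify_stack', {'stack_name': 'mystack'})

    engine = rpc_client.EngineClient()
    identity = engine.call(ctxt,
                           engine.make_msg('identify_stack',
                                           stack_name='mystack'))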
@ -24,6 +24,7 @@ from oslotest import mockpatch
|
|||||||
import testscenarios
|
import testscenarios
|
||||||
import testtools
|
import testtools
|
||||||
|
|
||||||
|
from heat.common import messaging
|
||||||
from heat.engine import clients
|
from heat.engine import clients
|
||||||
from heat.engine import environment
|
from heat.engine import environment
|
||||||
from heat.engine import resources
|
from heat.engine import resources
|
||||||
@ -79,10 +80,11 @@ class HeatTestCase(testscenarios.WithScenarios,
|
|||||||
'environment.d')
|
'environment.d')
|
||||||
|
|
||||||
cfg.CONF.set_default('environment_dir', env_dir)
|
cfg.CONF.set_default('environment_dir', env_dir)
|
||||||
cfg.CONF.set_override('allowed_rpc_exception_modules',
|
|
||||||
['heat.common.exception', 'exceptions'])
|
|
||||||
self.addCleanup(cfg.CONF.reset)
|
self.addCleanup(cfg.CONF.reset)
|
||||||
|
|
||||||
|
messaging.setup("fake://", optional=True)
|
||||||
|
self.addCleanup(messaging.cleanup)
|
||||||
|
|
||||||
tri = resources.global_env().get_resource_info(
|
tri = resources.global_env().get_resource_info(
|
||||||
'AWS::RDS::DBInstance',
|
'AWS::RDS::DBInstance',
|
||||||
registry_type=environment.TemplateResourceInfo)
|
registry_type=environment.TemplateResourceInfo)
|
||||||
|
@ -23,8 +23,8 @@ from heat.common import exception as heat_exception
from heat.common import identifier
from heat.common import policy
from heat.common.wsgi import Request
-from heat.openstack.common import rpc
from heat.rpc import api as rpc_api
+from heat.rpc import client as rpc_client
from heat.tests.common import HeatTestCase
from heat.tests import utils

@ -124,7 +124,7 @@ class CfnStackControllerTest(HeatTestCase):
        self.assertRaises(exception.HeatInternalFailureError,
                          self.controller._enforce, dummy_req, 'ListStacks')

-    @mock.patch.object(rpc, 'call')
+    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_list(self, mock_call):
        # Format a dummy GET request to pass into the WSGI handler
        params = {'Action': 'ListStacks'}

@ -160,14 +160,10 @@ class CfnStackControllerTest(HeatTestCase):
        default_args = {'limit': None, 'sort_keys': None, 'marker': None,
                        'sort_dir': None, 'filters': None, 'tenant_safe': True,
                        'show_deleted': False}
-        mock_call.assert_called_once_with(dummy_req.context, self.topic,
-                                          {'namespace': None,
-                                           'method': 'list_stacks',
-                                           'args': default_args,
-                                           'version': self.api_version},
-                                          None)
+        mock_call.assert_called_once_with(
+            dummy_req.context, ('list_stacks', default_args))

-    @mock.patch.object(rpc, 'call')
+    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_list_rmt_aterr(self, mock_call):
        params = {'Action': 'ListStacks'}
        dummy_req = self._dummy_GET_request(params)
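The same assertion shape repeats throughout the rewritten tests below: the message handed to EngineClient.call is now a (method, kwargs) tuple rather than the old namespace/method/args/version envelope. A small self-contained illustration follows; FakeEngineClient and list_stacks are invented names for the example, not heat code.

```python
import mock


class FakeEngineClient(object):
    """Stand-in for heat.rpc.client.EngineClient in this illustration."""
    def call(self, ctxt, msg):
        method, kwargs = msg
        return method, kwargs


def list_stacks(client, ctxt):
    # API-side code now builds a (method, kwargs) tuple...
    return client.call(ctxt, ('list_stacks', {'tenant_safe': True}))


# ...so a test asserts on the tuple in a single call, with no envelope dict,
# topic, or api_version to spell out.
with mock.patch.object(FakeEngineClient, 'call') as mock_call:
    list_stacks(FakeEngineClient(), ctxt={'user': 'demo'})
    mock_call.assert_called_once_with(
        {'user': 'demo'}, ('list_stacks', {'tenant_safe': True}))
```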
@ -180,14 +176,10 @@ class CfnStackControllerTest(HeatTestCase):
        # Call the list controller function and compare the response
        result = self.controller.list(dummy_req)
        self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
-        mock_call.assert_called_once_with(dummy_req.context, self.topic,
-                                          {'namespace': None,
-                                           'method': 'list_stacks',
-                                           'args': mock.ANY,
-                                           'version': self.api_version},
-                                          None)
+        mock_call.assert_called_once_with(
+            dummy_req.context, ('list_stacks', mock.ANY))

-    @mock.patch.object(rpc, 'call')
+    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_list_rmt_interr(self, mock_call):
        params = {'Action': 'ListStacks'}
        dummy_req = self._dummy_GET_request(params)

@ -200,12 +192,8 @@ class CfnStackControllerTest(HeatTestCase):
        # Call the list controller function and compare the response
        result = self.controller.list(dummy_req)
        self.assertIsInstance(result, exception.HeatInternalFailureError)
-        mock_call.assert_called_once_with(dummy_req.context, self.topic,
-                                          {'namespace': None,
-                                           'method': 'list_stacks',
-                                           'args': mock.ANY,
-                                           'version': self.api_version},
-                                          None)
+        mock_call.assert_called_once_with(
+            dummy_req.context, ('list_stacks', mock.ANY))

    def test_describe_last_updated_time(self):
        params = {'Action': 'DescribeStacks'}

@ -217,12 +205,10 @@ class CfnStackControllerTest(HeatTestCase):
                        u'stack_action': u'CREATE',
                        u'stack_status': u'COMPLETE'}]

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'show_stack',
-                  'args': {'stack_identity': None},
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('show_stack', {'stack_identity': None})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -241,12 +227,10 @@ class CfnStackControllerTest(HeatTestCase):
                        u'stack_action': u'CREATE',
                        u'stack_status': u'COMPLETE'}]

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'show_stack',
-                  'args': {'stack_identity': None},
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('show_stack', {'stack_identity': None})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -293,17 +277,15 @@ class CfnStackControllerTest(HeatTestCase):
                        u'timeout_mins':60,
                        u'capabilities':[]}]

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None).AndReturn(identity)
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'show_stack',
-                  'args': {'stack_identity': identity},
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('identify_stack', {'stack_name': stack_name})
+        ).AndReturn(identity)
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('show_stack', {'stack_identity': identity})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -383,12 +365,11 @@ class CfnStackControllerTest(HeatTestCase):
                        u'timeout_mins':60,
                        u'capabilities':[]}]

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'show_stack',
-                  'args': {'stack_identity': identity},
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('show_stack', {'stack_identity': identity})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -439,14 +420,11 @@ class CfnStackControllerTest(HeatTestCase):
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'DescribeStacks')

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'show_stack',
-                  'args': {'stack_identity': identity},
-                  'version': self.api_version},
-                 None).AndRaise(heat_exception.InvalidTenant(target='test',
-                                                             actual='test'))
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('show_stack', {'stack_identity': identity})
+        ).AndRaise(heat_exception.InvalidTenant(target='test',
+                                                actual='test'))

        self.m.ReplayAll()

@ -462,18 +440,13 @@ class CfnStackControllerTest(HeatTestCase):

        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None).AndReturn(identity)
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'show_stack',
-                  'args': {'stack_identity': identity},
-                  'version': self.api_version}, None
-                 ).AndRaise(AttributeError())
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndReturn(identity)
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('show_stack', {'stack_identity': identity})
+        ).AndRaise(AttributeError())

        self.m.ReplayAll()

@ -488,13 +461,10 @@ class CfnStackControllerTest(HeatTestCase):

        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None
-                 ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndRaise(heat_exception.StackNotFound(stack_name='test'))

        self.m.ReplayAll()

@ -532,16 +502,16 @@ class CfnStackControllerTest(HeatTestCase):
                    u'stack_id': u'1',
                    u'path': u''}

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'create_stack',
-                  'args': {'stack_name': stack_name,
-                           'template': self.template,
-                           'params': engine_parms,
-                           'files': {},
-                           'args': engine_args},
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('create_stack',
+             {'stack_name': stack_name,
+              'template': self.template,
+              'params': engine_parms,
+              'files': {},
+              'args': engine_args})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -578,16 +548,16 @@ class CfnStackControllerTest(HeatTestCase):
                    u'stack_id': u'1',
                    u'path': u''}

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'create_stack',
-                  'args': {'stack_name': stack_name,
-                           'template': self.template,
-                           'params': engine_parms,
-                           'files': {},
-                           'args': engine_args},
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('create_stack',
+             {'stack_name': stack_name,
+              'template': self.template,
+              'params': engine_parms,
+              'files': {},
+              'args': engine_args})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -624,16 +594,16 @@ class CfnStackControllerTest(HeatTestCase):
                    u'stack_id': u'1',
                    u'path': u''}

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'create_stack',
-                  'args': {'stack_name': stack_name,
-                           'template': self.template,
-                           'params': engine_parms,
-                           'files': {},
-                           'args': engine_args},
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('create_stack',
+             {'stack_name': stack_name,
+              'template': self.template,
+              'params': engine_parms,
+              'files': {},
+              'args': engine_args})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -670,16 +640,16 @@ class CfnStackControllerTest(HeatTestCase):
                    u'stack_id': u'1',
                    u'path': u''}

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'create_stack',
-                  'args': {'stack_name': stack_name,
-                           'template': self.template,
-                           'params': engine_parms,
-                           'files': {},
-                           'args': engine_args},
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('create_stack',
+             {'stack_name': stack_name,
+              'template': self.template,
+              'params': engine_parms,
+              'files': {},
+              'args': engine_args})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -716,16 +686,16 @@ class CfnStackControllerTest(HeatTestCase):
                    u'stack_id': u'1',
                    u'path': u''}

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'create_stack',
-                  'args': {'stack_name': stack_name,
-                           'template': self.template,
-                           'params': engine_parms,
-                           'files': {},
-                           'args': engine_args},
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('create_stack',
+             {'stack_name': stack_name,
+              'template': self.template,
+              'params': engine_parms,
+              'files': {},
+              'args': engine_args})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -797,46 +767,43 @@ class CfnStackControllerTest(HeatTestCase):

        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
-        self.m.StubOutWithMock(rpc, 'call')
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')

        policy.Enforcer.enforce(dummy_req.context, 'CreateStack'
                                ).AndReturn(True)
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'create_stack',
-                  'args': {'stack_name': stack_name,
-                           'template': self.template,
-                           'params': engine_parms,
-                           'files': {},
-                           'args': engine_args},
-                  'version': self.api_version}, None
-                 ).AndRaise(AttributeError())
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('create_stack',
+             {'stack_name': stack_name,
+              'template': self.template,
+              'params': engine_parms,
+              'files': {},
+              'args': engine_args})
+        ).AndRaise(AttributeError())

        policy.Enforcer.enforce(dummy_req.context, 'CreateStack'
                                ).AndReturn(True)
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'create_stack',
-                  'args': {'stack_name': stack_name,
-                           'template': self.template,
-                           'params': engine_parms,
-                           'files': {},
-                           'args': engine_args},
-                  'version': self.api_version}, None
-                 ).AndRaise(heat_exception.UnknownUserParameter(key='test'))
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('create_stack',
+             {'stack_name': stack_name,
+              'template': self.template,
+              'params': engine_parms,
+              'files': {},
+              'args': engine_args})
+        ).AndRaise(heat_exception.UnknownUserParameter(key='test'))

        policy.Enforcer.enforce(dummy_req.context, 'CreateStack'
                                ).AndReturn(True)
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'create_stack',
-                  'args': {'stack_name': stack_name,
-                           'template': self.template,
-                           'params': engine_parms,
-                           'files': {},
-                           'args': engine_args},
-                  'version': self.api_version}, None
-                 ).AndRaise(heat_exception.UserParameterMissing(key='test'))
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('create_stack',
+             {'stack_name': stack_name,
+              'template': self.template,
+              'params': engine_parms,
+              'files': {},
+              'args': engine_args})
+        ).AndRaise(heat_exception.UserParameterMissing(key='test'))

        self.m.ReplayAll()

@ -865,18 +832,17 @@ class CfnStackControllerTest(HeatTestCase):

        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
-        self.m.StubOutWithMock(rpc, 'call')
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')

-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'create_stack',
-                  'args': {'stack_name': stack_name,
-                           'template': self.template,
-                           'params': engine_parms,
-                           'files': {},
-                           'args': engine_args},
-                  'version': self.api_version}, None
-                 ).AndRaise(heat_exception.StackExists(stack_name='test'))
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('create_stack',
+             {'stack_name': stack_name,
+              'template': self.template,
+              'params': engine_parms,
+              'files': {},
+              'args': engine_args})
+        ).AndRaise(heat_exception.StackExists(stack_name='test'))

        self.m.ReplayAll()

@ -899,19 +865,18 @@ class CfnStackControllerTest(HeatTestCase):
        self._stub_enforce(dummy_req, 'CreateStack')

        # Stub out the RPC call to the engine with a pre-canned response
-        self.m.StubOutWithMock(rpc, 'call')
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')

-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'create_stack',
-                  'args': {'stack_name': stack_name,
-                           'template': self.template,
-                           'params': engine_parms,
-                           'files': {},
-                           'args': engine_args},
-                  'version': self.api_version}, None).AndRaise(
-                      heat_exception.StackValidationFailed(
-                          message='Something went wrong'))
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('create_stack',
+             {'stack_name': stack_name,
+              'template': self.template,
+              'params': engine_parms,
+              'files': {},
+              'args': engine_args})
+        ).AndRaise(heat_exception.StackValidationFailed(
+            message='Something went wrong'))

        self.m.ReplayAll()

@ -935,23 +900,21 @@ class CfnStackControllerTest(HeatTestCase):
        # Stub out the RPC call to the engine with a pre-canned response
        identity = dict(identifier.HeatIdentifier('t', stack_name, '1'))

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None).AndReturn(identity)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('identify_stack', {'stack_name': stack_name})
+        ).AndReturn(identity)

-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'update_stack',
-                  'args': {'stack_identity': identity,
-                           'template': self.template,
-                           'params': engine_parms,
-                           'files': {},
-                           'args': engine_args},
-                  'version': self.api_version},
-                 None).AndReturn(identity)
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('update_stack',
+             {'stack_identity': identity,
+              'template': self.template,
+              'params': engine_parms,
+              'files': {},
+              'args': engine_args})
+        ).AndReturn(identity)

        self.m.ReplayAll()

@ -979,13 +942,11 @@ class CfnStackControllerTest(HeatTestCase):

        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None
-                 ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('identify_stack', {'stack_name': stack_name})
+        ).AndRaise(heat_exception.StackNotFound(stack_name='test'))

        self.m.ReplayAll()

@ -1007,17 +968,15 @@ class CfnStackControllerTest(HeatTestCase):
        # Stub out the RPC call to the engine with a pre-canned response
        engine_resp = self.template

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None).AndReturn(identity)
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'get_template',
-                  'args': {'stack_identity': identity},
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('identify_stack', {'stack_name': stack_name})
+        ).AndReturn(identity)
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('get_template', {'stack_identity': identity})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -1038,18 +997,13 @@ class CfnStackControllerTest(HeatTestCase):

        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None).AndReturn(identity)
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'get_template',
-                  'args': {'stack_identity': identity},
-                  'version': self.api_version}, None
-                 ).AndRaise(AttributeError())
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndReturn(identity)
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('get_template', {'stack_identity': identity})
+        ).AndRaise(AttributeError())

        self.m.ReplayAll()

@ -1065,13 +1019,11 @@ class CfnStackControllerTest(HeatTestCase):

        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None
-                 ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('identify_stack', {'stack_name': stack_name})
+        ).AndRaise(heat_exception.StackNotFound(stack_name='test'))

        self.m.ReplayAll()

@ -1089,17 +1041,13 @@ class CfnStackControllerTest(HeatTestCase):
        # this test the "no such stack" error path
        engine_resp = None

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None).AndReturn(identity)
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'get_template',
-                  'args': {'stack_identity': identity},
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndReturn(identity)
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('get_template', {'stack_identity': identity})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -1143,12 +1091,11 @@ class CfnStackControllerTest(HeatTestCase):
        self._stub_enforce(dummy_req, 'ValidateTemplate')

        # Stub out the RPC call to the engine with a pre-canned response
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'validate_template',
-                  'args': {'template': json_template, 'params': None},
-                  'version': self.api_version}, None).AndReturn(response)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('validate_template', {'template': json_template, 'params': None})
+        ).AndReturn(response)
        self.m.ReplayAll()

        response = self.controller.validate_template(dummy_req)
@ -1168,18 +1115,15 @@ class CfnStackControllerTest(HeatTestCase):
        self._stub_enforce(dummy_req, 'DeleteStack')

        # Stub out the RPC call to the engine with a pre-canned response
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None).AndReturn(identity)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndReturn(identity)

        # Engine returns None when delete successful
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'delete_stack',
-                  'args': {'stack_identity': identity},
-                  'version': self.api_version}, None).AndReturn(None)
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('delete_stack', {'stack_identity': identity})
+        ).AndReturn(None)

        self.m.ReplayAll()

@ -1197,21 +1141,16 @@ class CfnStackControllerTest(HeatTestCase):
        self._stub_enforce(dummy_req, 'DeleteStack')

        # Stub out the RPC call to the engine with a pre-canned response
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None).AndReturn(identity)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndReturn(identity)

        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'delete_stack',
-                  'args': {'stack_identity': identity},
-                  'version': self.api_version}, None
-                 ).AndRaise(AttributeError())
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('delete_stack', {'stack_identity': identity})
+        ).AndRaise(AttributeError())

        self.m.ReplayAll()

@ -1227,13 +1166,10 @@ class CfnStackControllerTest(HeatTestCase):

        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None
-                 ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndRaise(heat_exception.StackNotFound(stack_name='test'))

        self.m.ReplayAll()

@ -1275,17 +1211,13 @@ class CfnStackControllerTest(HeatTestCase):
                        u'resource_properties': {u'UserData': u'blah'},
                        u'resource_type': u'AWS::EC2::Instance'}]

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None).AndReturn(identity)
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'list_events',
-                  'args': {'stack_identity': identity},
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndReturn(identity)
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('list_events', {'stack_identity': identity})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -1317,18 +1249,13 @@ class CfnStackControllerTest(HeatTestCase):

        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None).AndReturn(identity)
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'list_events',
-                  'args': {'stack_identity': identity},
-                  'version': self.api_version}, None
-                 ).AndRaise(Exception())
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndReturn(identity)
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('list_events', {'stack_identity': identity})
+        ).AndRaise(Exception())

        self.m.ReplayAll()

@ -1344,13 +1271,10 @@ class CfnStackControllerTest(HeatTestCase):

        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None
-                 ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndRaise(heat_exception.StackNotFound(stack_name='test'))

        self.m.ReplayAll()

@ -1390,21 +1314,17 @@ class CfnStackControllerTest(HeatTestCase):
                       u'resource_type': u'AWS::EC2::Instance',
                       u'metadata': {u'wordpress': []}}

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None).AndReturn(identity)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndReturn(identity)

        args = {
            'stack_identity': identity,
            'resource_name': dummy_req.params.get('LogicalResourceId'),
        }
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'describe_stack_resource',
-                  'args': args,
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('describe_stack_resource', args)
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -1437,13 +1357,10 @@ class CfnStackControllerTest(HeatTestCase):
        self._stub_enforce(dummy_req, 'DescribeStackResource')

        # Stub out the RPC call to the engine with a pre-canned response
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None
-                 ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndRaise(heat_exception.StackNotFound(stack_name='test'))

        self.m.ReplayAll()

@ -1461,23 +1378,18 @@ class CfnStackControllerTest(HeatTestCase):
        self._stub_enforce(dummy_req, 'DescribeStackResource')

        # Stub out the RPC call to the engine with a pre-canned response
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None).AndReturn(identity)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndReturn(identity)

        args = {
            'stack_identity': identity,
            'resource_name': dummy_req.params.get('LogicalResourceId'),
        }
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'describe_stack_resource',
-                  'args': args,
-                  'version': self.api_version},
-                 None).AndRaise(heat_exception.ResourceNotFound(
-                     resource_name='test', stack_name='test'))
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('describe_stack_resource', args)
+        ).AndRaise(heat_exception.ResourceNotFound(
+            resource_name='test', stack_name='test'))

        self.m.ReplayAll()

@ -1517,21 +1429,17 @@ class CfnStackControllerTest(HeatTestCase):
                        u'resource_type': u'AWS::EC2::Instance',
                        u'metadata': {u'ensureRunning': u'true''true'}}]

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None).AndReturn(identity)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndReturn(identity)

        args = {
            'stack_identity': identity,
            'resource_name': dummy_req.params.get('LogicalResourceId'),
        }
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'describe_stack_resources',
-                  'args': args,
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('describe_stack_resources', args)
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -1563,13 +1471,10 @@ class CfnStackControllerTest(HeatTestCase):

        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None
-                 ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndRaise(heat_exception.StackNotFound(stack_name='test'))

        self.m.ReplayAll()

@ -1609,22 +1514,19 @@ class CfnStackControllerTest(HeatTestCase):
                        u'resource_type': u'AWS::EC2::Instance',
                        u'metadata': {u'ensureRunning': u'true''true'}}]

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'find_physical_resource',
-                  'args': {'physical_resource_id':
-                           'a3455d8c-9f88-404d-a85b-5315293e67de'},
-                  'version': self.api_version}, None).AndReturn(identity)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('find_physical_resource',
+             {'physical_resource_id': 'a3455d8c-9f88-404d-a85b-5315293e67de'})
+        ).AndReturn(identity)

        args = {
            'stack_identity': identity,
            'resource_name': dummy_req.params.get('LogicalResourceId'),
        }
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'describe_stack_resources',
-                  'args': args,
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('describe_stack_resources', args)
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -1655,15 +1557,13 @@ class CfnStackControllerTest(HeatTestCase):
        self._stub_enforce(dummy_req, 'DescribeStackResources')

        # Stub out the RPC call to the engine with a pre-canned response
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'find_physical_resource',
-                  'args': {'physical_resource_id':
-                           'aaaaaaaa-9f88-404d-cccc-ffffffffffff'},
-                  'version': self.api_version},
-                 None).AndRaise(
-                     heat_exception.PhysicalResourceNotFound(resource_id='1'))
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('find_physical_resource',
+             {'physical_resource_id': 'aaaaaaaa-9f88-404d-cccc-ffffffffffff'})
+        ).AndRaise(heat_exception.PhysicalResourceNotFound(
+            resource_id='1'))

        self.m.ReplayAll()

@ -1715,17 +1615,14 @@ class CfnStackControllerTest(HeatTestCase):
                        u'a3455d8c-9f88-404d-a85b-5315293e67de',
                        u'resource_type': u'AWS::EC2::Instance'}]

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None).AndReturn(identity)
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'list_stack_resources',
-                  'args': {'stack_identity': identity},
-                  'version': self.api_version}, None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndReturn(identity)
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('list_stack_resources', {'stack_identity': identity})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -1752,13 +1649,10 @@ class CfnStackControllerTest(HeatTestCase):

        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'method': 'identify_stack',
-                  'args': {'stack_name': stack_name},
-                  'version': self.api_version}, None
-                 ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context, ('identify_stack', {'stack_name': stack_name})
+        ).AndRaise(heat_exception.StackNotFound(stack_name='test'))

        self.m.ReplayAll()
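The mox-based tests above all follow the same record/replay shape against the new EngineClient.call. A tiny self-contained illustration of that shape, where FakeEngineClient is an invented stand-in rather than heat code:

```python
import mox


class FakeEngineClient(object):
    """Stand-in for heat.rpc.client.EngineClient in this illustration."""
    def call(self, ctxt, msg):
        raise NotImplementedError()


m = mox.Mox()
# Record: stub the class method and declare the expected (method, kwargs) tuple.
m.StubOutWithMock(FakeEngineClient, 'call')
FakeEngineClient.call(
    'ctx', ('show_stack', {'stack_identity': None})
).AndReturn(['a-stack'])
m.ReplayAll()

# Replay: the matching call returns the canned response.
assert FakeEngineClient.call(
    'ctx', ('show_stack', {'stack_identity': None})) == ['a-stack']

m.VerifyAll()
m.UnsetStubs()
```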
@ -19,8 +19,8 @@ from heat.api.aws import exception
import heat.api.cloudwatch.watch as watches
from heat.common import policy
from heat.common.wsgi import Request
-from heat.openstack.common import rpc
from heat.rpc import api as engine_api
+from heat.rpc import client as rpc_client
from heat.tests.common import HeatTestCase
from heat.tests import utils

@ -131,13 +131,11 @@ class WatchControllerTest(HeatTestCase):
                        u'name': u'HttpFailureAlarm',
                        u'updated_time': u'2012-08-30T14:10:46Z'}]

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'args': {'watch_name': watch_name},
-                  'method': 'show_watch',
-                  'version': self.api_version},
-                 None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('show_watch', {'watch_name': watch_name})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -225,16 +223,15 @@ class WatchControllerTest(HeatTestCase):
                        u'metric_name': u'ServiceFailure3',
                        u'data': {u'Units': u'Counter', u'Value': 1}}]

-        self.m.StubOutWithMock(rpc, 'call')
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        # Current engine implementation means we filter in the API
        # and pass None/None for namespace/watch_name which returns
        # all metric data which we post-process in the API
-        rpc.call(dummy_req.context, self.topic,
-                 {'namespace': None,
-                  'args': {'metric_namespace': None, 'metric_name': None},
-                  'method': 'show_watch_metric',
-                  'version': self.api_version},
-                 None).AndReturn(engine_resp)
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('show_watch_metric',
+             {'metric_namespace': None, 'metric_name': None})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -305,17 +302,15 @@ class WatchControllerTest(HeatTestCase):
                        u'metric_name': u'ServiceFailure3',
                        u'data': {u'Units': u'Counter', u'Value': 1}}]

-        self.m.StubOutWithMock(rpc, 'call')
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        # Current engine implementation means we filter in the API
        # and pass None/None for namespace/watch_name which returns
        # all metric data which we post-process in the API
-        rpc.call(dummy_req.context, self.topic, {'args':
-                 {'metric_namespace': None,
-                  'metric_name': None},
-                 'namespace': None,
-                 'method': 'show_watch_metric',
-                 'version': self.api_version},
-                 None).AndReturn(engine_resp)
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('show_watch_metric',
+             {'metric_namespace': None, 'metric_name': None})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -365,16 +360,15 @@ class WatchControllerTest(HeatTestCase):
                        u'metric_name': u'ServiceFailure3',
                        u'data': {u'Units': u'Counter', u'Value': 1}}]

-        self.m.StubOutWithMock(rpc, 'call')
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        # Current engine implementation means we filter in the API
        # and pass None/None for namespace/watch_name which returns
        # all metric data which we post-process in the API
-        rpc.call(dummy_req.context, self.topic,
-                 {'args': {'metric_namespace': None, 'metric_name': None},
-                  'namespace': None,
-                  'method': 'show_watch_metric',
-                  'version': self.api_version},
-                 None).AndReturn(engine_resp)
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('show_watch_metric',
+             {'metric_namespace': None, 'metric_name': None})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -429,20 +423,16 @@ class WatchControllerTest(HeatTestCase):
        # Stub out the RPC call to verify the engine call parameters
        engine_resp = {}

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'args':
-                  {'stats_data':
-                   {'Namespace': u'system/linux',
-                    u'ServiceFailure':
-                    {'Value': u'1',
-                     'Unit': u'Count',
-                     'Dimensions': []}},
-                   'watch_name': u'HttpFailureAlarm'},
-                  'namespace': None,
-                  'method': 'create_watch_data',
-                  'version': self.api_version},
-                 None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('create_watch_data',
+             {'watch_name': u'HttpFailureAlarm',
+              'stats_data': {
                  'Namespace': u'system/linux',
                  'ServiceFailure': {
                      'Value': u'1', 'Unit': u'Count', 'Dimensions': []}}})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()

@ -469,15 +459,13 @@ class WatchControllerTest(HeatTestCase):
        # of the response at present we pass nothing back from the stub
        engine_resp = {}

-        self.m.StubOutWithMock(rpc, 'call')
-        rpc.call(dummy_req.context, self.topic,
-                 {'args':
-                  {'state': state_map[state],
-                   'watch_name': u'HttpFailureAlarm'},
-                  'namespace': None,
-                  'method': 'set_watch_state',
-                  'version': self.api_version},
-                 None).AndReturn(engine_resp)
+        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+        rpc_client.EngineClient.call(
+            dummy_req.context,
+            ('set_watch_state',
+             {'state': state_map[state],
+              'watch_name': u'HttpFailureAlarm'})
+        ).AndReturn(engine_resp)

        self.m.ReplayAll()
(File diff suppressed because it is too large.)
@ -21,6 +21,9 @@ import uuid
import mock
import mox
from oslo.config import cfg
+from oslo import messaging
+from oslo.messaging.rpc import client as rpc_client
+from oslo.messaging.rpc import dispatcher
from oslotest import mockpatch

from heat.common import exception

@ -40,8 +43,6 @@ from heat.engine.resources import nova_utils
from heat.engine import service
from heat.engine import stack_lock
from heat.engine import watchrule
-from heat.openstack.common.rpc import common as rpc_common
-from heat.openstack.common.rpc import proxy
from heat.openstack.common import threadgroup
from heat.rpc import api as engine_api
from heat.tests.common import HeatTestCase

@ -479,11 +480,11 @@ class StackServiceCreateUpdateDeleteTest(HeatTestCase):
    def test_stack_create_exceeds_max_per_tenant(self):
        cfg.CONF.set_override('max_stacks_per_tenant', 0)
        stack_name = 'service_create_test_stack_exceeds_max'
-        ex = self.assertRaises(rpc_common.ClientException,
+        ex = self.assertRaises(dispatcher.ExpectedException,
                               self._test_stack_create, stack_name)
-        self.assertEqual(ex._exc_info[0], exception.RequestLimitExceeded)
+        self.assertEqual(ex.exc_info[0], exception.RequestLimitExceeded)
        self.assertIn("You have reached the maximum stacks per tenant",
-                      str(ex._exc_info[1]))
+                      str(ex.exc_info[1]))

    def test_stack_create_verify_err(self):
        stack_name = 'service_create_verify_err_test_stack'

@ -509,11 +510,11 @@ class StackServiceCreateUpdateDeleteTest(HeatTestCase):
        self.m.ReplayAll()

        ex = self.assertRaises(
-            rpc_common.ClientException,
+            dispatcher.ExpectedException,
            self.man.create_stack,
            self.ctx, stack_name,
            template, params, None, {})
-        self.assertEqual(ex._exc_info[0], exception.StackValidationFailed)
+        self.assertEqual(ex.exc_info[0], exception.StackValidationFailed)
        self.m.VerifyAll()

    def test_stack_create_invalid_stack_name(self):

@ -564,21 +565,21 @@ class StackServiceCreateUpdateDeleteTest(HeatTestCase):

        self.m.ReplayAll()

-        ex = self.assertRaises(rpc_common.ClientException,
+        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.man.create_stack,
                               ctx_no_pwd, stack_name,
                               template, params, None, {})
-        self.assertEqual(ex._exc_info[0], exception.MissingCredentialError)
+        self.assertEqual(ex.exc_info[0], exception.MissingCredentialError)
        self.assertEqual(
-            'Missing required credential: X-Auth-Key', str(ex._exc_info[1]))
+            'Missing required credential: X-Auth-Key', str(ex.exc_info[1]))

-        ex = self.assertRaises(rpc_common.ClientException,
+        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.man.create_stack,
                               ctx_no_user, stack_name,
                               template, params, None, {})
-        self.assertEqual(ex._exc_info[0], exception.MissingCredentialError)
+        self.assertEqual(ex.exc_info[0], exception.MissingCredentialError)
        self.assertEqual(
-            'Missing required credential: X-Auth-User', str(ex._exc_info[1]))
+            'Missing required credential: X-Auth-User', str(ex.exc_info[1]))

    def test_stack_create_total_resources_equals_max(self):
        stack_name = 'service_create_stack_total_resources_equals_max'
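On the engine side, oslo.messaging signals anticipated service errors to callers through ExpectedException, which is why the assertions above switch from the old ClientException/_exc_info pair to ExpectedException/exc_info. A minimal sketch of that shape follows; StackNotFound here is a plain stand-in exception for the example, not heat's.

```python
from oslo.messaging.rpc import dispatcher


class StackNotFound(Exception):
    """Stand-in for an 'expected' application error."""


def delete_stack(ctx, stack_identity):
    # An RPC endpoint converts anticipated failures into ExpectedException.
    # It must be raised from inside the except block so sys.exc_info() is
    # captured; the dispatcher then returns it without logging a traceback.
    try:
        raise StackNotFound(stack_identity)
    except StackNotFound:
        raise dispatcher.ExpectedException()


try:
    delete_stack({}, 'missing')
except dispatcher.ExpectedException as ex:
    # Tests unpack the original exception type and value from exc_info.
    assert ex.exc_info[0] is StackNotFound
```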
@ -628,12 +629,12 @@ class StackServiceCreateUpdateDeleteTest(HeatTestCase):
                       'B': {'Type': 'GenericResourceType'},
                       'C': {'Type': 'GenericResourceType'}}}
        cfg.CONF.set_override('max_resources_per_stack', 2)
-        ex = self.assertRaises(rpc_common.ClientException,
+        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.man.create_stack, self.ctx, stack_name,
                               tpl, params, None, {})
-        self.assertEqual(ex._exc_info[0], exception.RequestLimitExceeded)
+        self.assertEqual(ex.exc_info[0], exception.RequestLimitExceeded)
        self.assertIn(exception.StackResourceLimitExceeded.msg_fmt,
-                      str(ex._exc_info[1]))
+                      str(ex.exc_info[1]))

    def test_stack_validate(self):
        stack_name = 'service_create_test_validate'

@ -682,10 +683,10 @@ class StackServiceCreateUpdateDeleteTest(HeatTestCase):

        self.m.ReplayAll()

-        ex = self.assertRaises(rpc_common.ClientException,
+        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.man.delete_stack,
                               self.ctx, stack.identifier())
-        self.assertEqual(ex._exc_info[0], exception.StackNotFound)
+        self.assertEqual(ex.exc_info[0], exception.StackNotFound)
        self.m.VerifyAll()

    def test_stack_delete_acquired_lock(self):

@ -748,6 +749,7 @@ class StackServiceCreateUpdateDeleteTest(HeatTestCase):
        self.m.VerifyAll()

    def test_stack_delete_other_engine_active_lock_failed(self):
+        self.man.start()
        stack_name = 'service_delete_test_stack'
        stack = get_wordpress_stack(stack_name, self.ctx)
        sid = stack.store()

@ -766,21 +768,21 @@ class StackServiceCreateUpdateDeleteTest(HeatTestCase):
        stack_lock.StackLock.engine_alive(self.ctx, "other-engine-fake-uuid")\
            .AndReturn(True)

-        rpc = proxy.RpcProxy("other-engine-fake-uuid", "1.0")
-        msg = rpc.make_msg("stop_stack", stack_identity=mox.IgnoreArg())
-        self.m.StubOutWithMock(proxy.RpcProxy, 'call')
-        proxy.RpcProxy.call(self.ctx, msg, topic='other-engine-fake-uuid',
-                            timeout=cfg.CONF.engine_life_check_timeout)\
-            .AndRaise(rpc_common.Timeout)
+        self.m.StubOutWithMock(rpc_client._CallContext, 'call')
+        rpc_client._CallContext.call(
+            self.ctx, 'stop_stack',
+            stack_identity=mox.IgnoreArg()
+        ).AndRaise(messaging.MessagingTimeout)
        self.m.ReplayAll()

-        ex = self.assertRaises(rpc_common.ClientException,
+        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.man.delete_stack,
                               self.ctx, stack.identifier())
-        self.assertEqual(ex._exc_info[0], exception.StopActionFailed)
+        self.assertEqual(ex.exc_info[0], exception.StopActionFailed)
        self.m.VerifyAll()

    def test_stack_delete_other_engine_active_lock_succeeded(self):
+        self.man.start()
        stack_name = 'service_delete_test_stack'
|
stack_name = 'service_delete_test_stack'
|
||||||
stack = get_wordpress_stack(stack_name, self.ctx)
|
stack = get_wordpress_stack(stack_name, self.ctx)
|
||||||
sid = stack.store()
|
sid = stack.store()
|
||||||
@ -799,12 +801,10 @@ class StackServiceCreateUpdateDeleteTest(HeatTestCase):
|
|||||||
stack_lock.StackLock.engine_alive(self.ctx, "other-engine-fake-uuid")\
|
stack_lock.StackLock.engine_alive(self.ctx, "other-engine-fake-uuid")\
|
||||||
.AndReturn(True)
|
.AndReturn(True)
|
||||||
|
|
||||||
rpc = proxy.RpcProxy("other-engine-fake-uuid", "1.0")
|
self.m.StubOutWithMock(rpc_client._CallContext, 'call')
|
||||||
msg = rpc.make_msg("stop_stack", stack_identity=mox.IgnoreArg())
|
rpc_client._CallContext.call(
|
||||||
self.m.StubOutWithMock(proxy.RpcProxy, 'call')
|
self.ctx, 'stop_stack',
|
||||||
proxy.RpcProxy.call(self.ctx, msg, topic='other-engine-fake-uuid',
|
stack_identity=mox.IgnoreArg()).AndReturn(None)
|
||||||
timeout=cfg.CONF.engine_life_check_timeout)\
|
|
||||||
.AndReturn(None)
|
|
||||||
|
|
||||||
self.m.StubOutWithMock(stack_lock.StackLock, 'acquire')
|
self.m.StubOutWithMock(stack_lock.StackLock, 'acquire')
|
||||||
stack_lock.StackLock.acquire().AndReturn(None)
|
stack_lock.StackLock.acquire().AndReturn(None)
|
||||||
@ -1083,13 +1083,13 @@ class StackServiceCreateUpdateDeleteTest(HeatTestCase):
|
|||||||
|
|
||||||
cfg.CONF.set_override('max_resources_per_stack', 2)
|
cfg.CONF.set_override('max_resources_per_stack', 2)
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.man.update_stack, self.ctx,
|
self.man.update_stack, self.ctx,
|
||||||
old_stack.identifier(), tpl, params,
|
old_stack.identifier(), tpl, params,
|
||||||
None, {})
|
None, {})
|
||||||
self.assertEqual(ex._exc_info[0], exception.RequestLimitExceeded)
|
self.assertEqual(ex.exc_info[0], exception.RequestLimitExceeded)
|
||||||
self.assertIn(exception.StackResourceLimitExceeded.msg_fmt,
|
self.assertIn(exception.StackResourceLimitExceeded.msg_fmt,
|
||||||
str(ex._exc_info[1]))
|
str(ex.exc_info[1]))
|
||||||
|
|
||||||
def test_stack_update_verify_err(self):
|
def test_stack_update_verify_err(self):
|
||||||
stack_name = 'service_update_verify_err_test_stack'
|
stack_name = 'service_update_verify_err_test_stack'
|
||||||
@ -1123,11 +1123,11 @@ class StackServiceCreateUpdateDeleteTest(HeatTestCase):
|
|||||||
|
|
||||||
api_args = {'timeout_mins': 60}
|
api_args = {'timeout_mins': 60}
|
||||||
ex = self.assertRaises(
|
ex = self.assertRaises(
|
||||||
rpc_common.ClientException,
|
dispatcher.ExpectedException,
|
||||||
self.man.update_stack,
|
self.man.update_stack,
|
||||||
self.ctx, old_stack.identifier(),
|
self.ctx, old_stack.identifier(),
|
||||||
template, params, None, api_args)
|
template, params, None, api_args)
|
||||||
self.assertEqual(ex._exc_info[0], exception.StackValidationFailed)
|
self.assertEqual(ex.exc_info[0], exception.StackValidationFailed)
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
def test_stack_update_nonexist(self):
|
def test_stack_update_nonexist(self):
|
||||||
@ -1138,11 +1138,11 @@ class StackServiceCreateUpdateDeleteTest(HeatTestCase):
|
|||||||
|
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.man.update_stack,
|
self.man.update_stack,
|
||||||
self.ctx, stack.identifier(), template,
|
self.ctx, stack.identifier(), template,
|
||||||
params, None, {})
|
params, None, {})
|
||||||
self.assertEqual(ex._exc_info[0], exception.StackNotFound)
|
self.assertEqual(ex.exc_info[0], exception.StackNotFound)
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
def test_stack_update_no_credentials(self):
|
def test_stack_update_no_credentials(self):
|
||||||
@ -1178,13 +1178,13 @@ class StackServiceCreateUpdateDeleteTest(HeatTestCase):
|
|||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
api_args = {'timeout_mins': 60}
|
api_args = {'timeout_mins': 60}
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.man.update_stack, self.ctx,
|
self.man.update_stack, self.ctx,
|
||||||
old_stack.identifier(),
|
old_stack.identifier(),
|
||||||
template, params, None, api_args)
|
template, params, None, api_args)
|
||||||
self.assertEqual(ex._exc_info[0], exception.MissingCredentialError)
|
self.assertEqual(ex.exc_info[0], exception.MissingCredentialError)
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
'Missing required credential: X-Auth-Key', str(ex._exc_info[1]))
|
'Missing required credential: X-Auth-Key', str(ex.exc_info[1]))
|
||||||
|
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
@ -1259,11 +1259,11 @@ class StackServiceUpdateSuspendedNotSupportedTest(HeatTestCase):
|
|||||||
|
|
||||||
params = {'foo': 'bar'}
|
params = {'foo': 'bar'}
|
||||||
template = '{ "Resources": {} }'
|
template = '{ "Resources": {} }'
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.man.update_stack,
|
self.man.update_stack,
|
||||||
self.ctx, old_stack.identifier(), template,
|
self.ctx, old_stack.identifier(), template,
|
||||||
params, None, {})
|
params, None, {})
|
||||||
self.assertEqual(ex._exc_info[0], exception.NotSupported)
|
self.assertEqual(ex.exc_info[0], exception.NotSupported)
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
|
|
||||||
@ -1321,10 +1321,10 @@ class StackServiceSuspendResumeTest(HeatTestCase):
|
|||||||
|
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.man.stack_suspend, self.ctx,
|
self.man.stack_suspend, self.ctx,
|
||||||
stack.identifier())
|
stack.identifier())
|
||||||
self.assertEqual(ex._exc_info[0], exception.StackNotFound)
|
self.assertEqual(ex.exc_info[0], exception.StackNotFound)
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
def test_stack_resume_nonexist(self):
|
def test_stack_resume_nonexist(self):
|
||||||
@ -1333,10 +1333,10 @@ class StackServiceSuspendResumeTest(HeatTestCase):
|
|||||||
|
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.man.stack_resume, self.ctx,
|
self.man.stack_resume, self.ctx,
|
||||||
stack.identifier())
|
stack.identifier())
|
||||||
self.assertEqual(ex._exc_info[0], exception.StackNotFound)
|
self.assertEqual(ex.exc_info[0], exception.StackNotFound)
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
|
|
||||||
@ -1505,16 +1505,16 @@ class StackServiceTest(HeatTestCase):
|
|||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
def test_stack_identify_nonexist(self):
|
def test_stack_identify_nonexist(self):
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.identify_stack, self.ctx, 'wibble')
|
self.eng.identify_stack, self.ctx, 'wibble')
|
||||||
self.assertEqual(ex._exc_info[0], exception.StackNotFound)
|
self.assertEqual(ex.exc_info[0], exception.StackNotFound)
|
||||||
|
|
||||||
@stack_context('service_create_existing_test_stack', False)
|
@stack_context('service_create_existing_test_stack', False)
|
||||||
def test_stack_create_existing(self):
|
def test_stack_create_existing(self):
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.create_stack, self.ctx,
|
self.eng.create_stack, self.ctx,
|
||||||
self.stack.name, self.stack.t.t, {}, None, {})
|
self.stack.name, self.stack.t.t, {}, None, {})
|
||||||
self.assertEqual(ex._exc_info[0], exception.StackExists)
|
self.assertEqual(ex.exc_info[0], exception.StackExists)
|
||||||
|
|
||||||
@stack_context('service_name_tenants_test_stack', False)
|
@stack_context('service_name_tenants_test_stack', False)
|
||||||
def test_stack_by_name_tenants(self):
|
def test_stack_by_name_tenants(self):
|
||||||
@ -1789,6 +1789,7 @@ class StackServiceTest(HeatTestCase):
|
|||||||
self.assertEqual(expected_res, ret['resources'])
|
self.assertEqual(expected_res, ret['resources'])
|
||||||
self.assertEqual(self.stack.t.t, ret['template'])
|
self.assertEqual(self.stack.t.t, ret['template'])
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
self.eng.thread_group_mgr.groups[self.stack.id].wait()
|
||||||
|
|
||||||
def test_stack_describe_nonexistent(self):
|
def test_stack_describe_nonexistent(self):
|
||||||
non_exist_identifier = identifier.HeatIdentifier(
|
non_exist_identifier = identifier.HeatIdentifier(
|
||||||
@ -1802,10 +1803,10 @@ class StackServiceTest(HeatTestCase):
|
|||||||
show_deleted=True).AndRaise(stack_not_found_exc)
|
show_deleted=True).AndRaise(stack_not_found_exc)
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.show_stack,
|
self.eng.show_stack,
|
||||||
self.ctx, non_exist_identifier)
|
self.ctx, non_exist_identifier)
|
||||||
self.assertEqual(ex._exc_info[0], exception.StackNotFound)
|
self.assertEqual(ex.exc_info[0], exception.StackNotFound)
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
def test_stack_describe_bad_tenant(self):
|
def test_stack_describe_bad_tenant(self):
|
||||||
@ -1821,10 +1822,10 @@ class StackServiceTest(HeatTestCase):
|
|||||||
show_deleted=True).AndRaise(invalid_tenant_exc)
|
show_deleted=True).AndRaise(invalid_tenant_exc)
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.show_stack,
|
self.eng.show_stack,
|
||||||
self.ctx, non_exist_identifier)
|
self.ctx, non_exist_identifier)
|
||||||
self.assertEqual(ex._exc_info[0], exception.InvalidTenant)
|
self.assertEqual(ex.exc_info[0], exception.InvalidTenant)
|
||||||
|
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
@ -1954,10 +1955,10 @@ class StackServiceTest(HeatTestCase):
|
|||||||
self.ctx, non_exist_identifier).AndRaise(stack_not_found_exc)
|
self.ctx, non_exist_identifier).AndRaise(stack_not_found_exc)
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.describe_stack_resource,
|
self.eng.describe_stack_resource,
|
||||||
self.ctx, non_exist_identifier, 'WebServer')
|
self.ctx, non_exist_identifier, 'WebServer')
|
||||||
self.assertEqual(ex._exc_info[0], exception.StackNotFound)
|
self.assertEqual(ex.exc_info[0], exception.StackNotFound)
|
||||||
|
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
@ -1968,10 +1969,10 @@ class StackServiceTest(HeatTestCase):
|
|||||||
stack=mox.IgnoreArg()).AndReturn(self.stack)
|
stack=mox.IgnoreArg()).AndReturn(self.stack)
|
||||||
|
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.describe_stack_resource,
|
self.eng.describe_stack_resource,
|
||||||
self.ctx, self.stack.identifier(), 'foo')
|
self.ctx, self.stack.identifier(), 'foo')
|
||||||
self.assertEqual(ex._exc_info[0], exception.ResourceNotFound)
|
self.assertEqual(ex.exc_info[0], exception.ResourceNotFound)
|
||||||
|
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
@ -1983,10 +1984,10 @@ class StackServiceTest(HeatTestCase):
|
|||||||
'foo').AndReturn(False)
|
'foo').AndReturn(False)
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.describe_stack_resource,
|
self.eng.describe_stack_resource,
|
||||||
self.ctx, self.stack.identifier(), 'foo')
|
self.ctx, self.stack.identifier(), 'foo')
|
||||||
self.assertEqual(ex._exc_info[0], exception.Forbidden)
|
self.assertEqual(ex.exc_info[0], exception.Forbidden)
|
||||||
|
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
@ -2053,10 +2054,10 @@ class StackServiceTest(HeatTestCase):
|
|||||||
self.ctx.tenant_id, 'wibble',
|
self.ctx.tenant_id, 'wibble',
|
||||||
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
|
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.describe_stack_resources,
|
self.eng.describe_stack_resources,
|
||||||
self.ctx, non_exist_identifier, 'WebServer')
|
self.ctx, non_exist_identifier, 'WebServer')
|
||||||
self.assertEqual(ex._exc_info[0], exception.StackNotFound)
|
self.assertEqual(ex.exc_info[0], exception.StackNotFound)
|
||||||
|
|
||||||
@stack_context('find_phys_res_stack')
|
@stack_context('find_phys_res_stack')
|
||||||
def test_find_physical_resource(self):
|
def test_find_physical_resource(self):
|
||||||
@ -2072,10 +2073,10 @@ class StackServiceTest(HeatTestCase):
|
|||||||
self.assertEqual('WebServer', resource_identity.resource_name)
|
self.assertEqual('WebServer', resource_identity.resource_name)
|
||||||
|
|
||||||
def test_find_physical_resource_nonexist(self):
|
def test_find_physical_resource_nonexist(self):
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.find_physical_resource,
|
self.eng.find_physical_resource,
|
||||||
self.ctx, 'foo')
|
self.ctx, 'foo')
|
||||||
self.assertEqual(ex._exc_info[0], exception.PhysicalResourceNotFound)
|
self.assertEqual(ex.exc_info[0], exception.PhysicalResourceNotFound)
|
||||||
|
|
||||||
@stack_context('service_resources_list_test_stack')
|
@stack_context('service_resources_list_test_stack')
|
||||||
def test_stack_resources_list(self):
|
def test_stack_resources_list(self):
|
||||||
@ -2111,10 +2112,10 @@ class StackServiceTest(HeatTestCase):
|
|||||||
self.ctx, non_exist_identifier).AndRaise(stack_not_found_exc)
|
self.ctx, non_exist_identifier).AndRaise(stack_not_found_exc)
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.list_stack_resources,
|
self.eng.list_stack_resources,
|
||||||
self.ctx, non_exist_identifier)
|
self.ctx, non_exist_identifier)
|
||||||
self.assertEqual(ex._exc_info[0], exception.StackNotFound)
|
self.assertEqual(ex.exc_info[0], exception.StackNotFound)
|
||||||
|
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
@ -2162,12 +2163,12 @@ class StackServiceTest(HeatTestCase):
|
|||||||
self.stack.identifier()).AndReturn(s)
|
self.stack.identifier()).AndReturn(s)
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.resource_signal, self.ctx,
|
self.eng.resource_signal, self.ctx,
|
||||||
dict(self.stack.identifier()),
|
dict(self.stack.identifier()),
|
||||||
'resource_does_not_exist',
|
'resource_does_not_exist',
|
||||||
test_data)
|
test_data)
|
||||||
self.assertEqual(ex._exc_info[0], exception.ResourceNotFound)
|
self.assertEqual(ex.exc_info[0], exception.ResourceNotFound)
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
self.stack.delete()
|
self.stack.delete()
|
||||||
|
|
||||||
@ -2205,11 +2206,11 @@ class StackServiceTest(HeatTestCase):
|
|||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
test_metadata = {'foo': 'bar', 'baz': 'quux', 'blarg': 'wibble'}
|
test_metadata = {'foo': 'bar', 'baz': 'quux', 'blarg': 'wibble'}
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.metadata_update,
|
self.eng.metadata_update,
|
||||||
self.ctx, non_exist_identifier,
|
self.ctx, non_exist_identifier,
|
||||||
'WebServer', test_metadata)
|
'WebServer', test_metadata)
|
||||||
self.assertEqual(ex._exc_info[0], exception.StackNotFound)
|
self.assertEqual(ex.exc_info[0], exception.StackNotFound)
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
@stack_context('service_metadata_err_resource_test_stack', False)
|
@stack_context('service_metadata_err_resource_test_stack', False)
|
||||||
@ -2220,11 +2221,11 @@ class StackServiceTest(HeatTestCase):
|
|||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
test_metadata = {'foo': 'bar', 'baz': 'quux', 'blarg': 'wibble'}
|
test_metadata = {'foo': 'bar', 'baz': 'quux', 'blarg': 'wibble'}
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.metadata_update,
|
self.eng.metadata_update,
|
||||||
self.ctx, dict(self.stack.identifier()),
|
self.ctx, dict(self.stack.identifier()),
|
||||||
'NooServer', test_metadata)
|
'NooServer', test_metadata)
|
||||||
self.assertEqual(ex._exc_info[0], exception.ResourceNotFound)
|
self.assertEqual(ex.exc_info[0], exception.ResourceNotFound)
|
||||||
|
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
@ -2315,10 +2316,10 @@ class StackServiceTest(HeatTestCase):
|
|||||||
self.assertIn('name', result[0])
|
self.assertIn('name', result[0])
|
||||||
self.assertEqual('show_watch_2', result[0]['name'])
|
self.assertEqual('show_watch_2', result[0]['name'])
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.show_watch,
|
self.eng.show_watch,
|
||||||
self.ctx, watch_name="nonexistent")
|
self.ctx, watch_name="nonexistent")
|
||||||
self.assertEqual(ex._exc_info[0], exception.WatchRuleNotFound)
|
self.assertEqual(ex.exc_info[0], exception.WatchRuleNotFound)
|
||||||
|
|
||||||
# Check the response has all keys defined in the engine API
|
# Check the response has all keys defined in the engine API
|
||||||
for key in engine_api.WATCH_KEYS:
|
for key in engine_api.WATCH_KEYS:
|
||||||
@ -2391,7 +2392,8 @@ class StackServiceTest(HeatTestCase):
|
|||||||
self.wr.store()
|
self.wr.store()
|
||||||
|
|
||||||
class DummyAction(object):
|
class DummyAction(object):
|
||||||
signal = "dummyfoo"
|
def signal(self):
|
||||||
|
return "dummyfoo"
|
||||||
|
|
||||||
dummy_action = DummyAction()
|
dummy_action = DummyAction()
|
||||||
self.m.StubOutWithMock(parser.Stack, 'resource_by_refid')
|
self.m.StubOutWithMock(parser.Stack, 'resource_by_refid')
|
||||||
@ -2426,7 +2428,7 @@ class StackServiceTest(HeatTestCase):
|
|||||||
state=state)
|
state=state)
|
||||||
self.assertEqual(state, result[engine_api.WATCH_STATE_VALUE])
|
self.assertEqual(state, result[engine_api.WATCH_STATE_VALUE])
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
[DummyAction.signal],
|
[dummy_action.signal],
|
||||||
self.eng.thread_group_mgr.groups[self.stack.id].threads)
|
self.eng.thread_group_mgr.groups[self.stack.id].threads)
|
||||||
|
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
@ -2473,11 +2475,11 @@ class StackServiceTest(HeatTestCase):
|
|||||||
.AndRaise(exception.WatchRuleNotFound(watch_name='test'))
|
.AndRaise(exception.WatchRuleNotFound(watch_name='test'))
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.eng.set_watch_state,
|
self.eng.set_watch_state,
|
||||||
self.ctx, watch_name="nonexistent",
|
self.ctx, watch_name="nonexistent",
|
||||||
state=state)
|
state=state)
|
||||||
self.assertEqual(ex._exc_info[0], exception.WatchRuleNotFound)
|
self.assertEqual(ex.exc_info[0], exception.WatchRuleNotFound)
|
||||||
self.m.VerifyAll()
|
self.m.VerifyAll()
|
||||||
|
|
||||||
def test_stack_list_all_empty(self):
|
def test_stack_list_all_empty(self):
|
||||||
@ -2569,9 +2571,9 @@ class StackServiceTest(HeatTestCase):
|
|||||||
def test_preview_stack_validates_new_stack(self):
|
def test_preview_stack_validates_new_stack(self):
|
||||||
exc = exception.StackExists(stack_name='Validation Failed')
|
exc = exception.StackExists(stack_name='Validation Failed')
|
||||||
self.eng._validate_new_stack = mock.Mock(side_effect=exc)
|
self.eng._validate_new_stack = mock.Mock(side_effect=exc)
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self._preview_stack)
|
self._preview_stack)
|
||||||
self.assertEqual(ex._exc_info[0], exception.StackExists)
|
self.assertEqual(ex.exc_info[0], exception.StackExists)
|
||||||
|
|
||||||
@mock.patch.object(service.api, 'format_stack_preview', new=mock.Mock())
|
@mock.patch.object(service.api, 'format_stack_preview', new=mock.Mock())
|
||||||
@mock.patch.object(service.parser, 'Stack')
|
@mock.patch.object(service.parser, 'Stack')
|
||||||
@ -2580,9 +2582,9 @@ class StackServiceTest(HeatTestCase):
|
|||||||
mock_parsed_stack = mock.Mock()
|
mock_parsed_stack = mock.Mock()
|
||||||
mock_parsed_stack.validate.side_effect = exc
|
mock_parsed_stack.validate.side_effect = exc
|
||||||
mock_parser.return_value = mock_parsed_stack
|
mock_parser.return_value = mock_parsed_stack
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self._preview_stack)
|
self._preview_stack)
|
||||||
self.assertEqual(ex._exc_info[0], exception.StackValidationFailed)
|
self.assertEqual(ex.exc_info[0], exception.StackValidationFailed)
|
||||||
|
|
||||||
@mock.patch.object(service.db_api, 'stack_get_by_name')
|
@mock.patch.object(service.db_api, 'stack_get_by_name')
|
||||||
def test_validate_new_stack_checks_existing_stack(self, mock_stack_get):
|
def test_validate_new_stack_checks_existing_stack(self, mock_stack_get):
|
||||||
@ -2627,10 +2629,10 @@ class SoftwareConfigServiceTest(HeatTestCase):
|
|||||||
def test_show_software_config(self):
|
def test_show_software_config(self):
|
||||||
config_id = str(uuid.uuid4())
|
config_id = str(uuid.uuid4())
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.engine.show_software_config,
|
self.engine.show_software_config,
|
||||||
self.ctx, config_id)
|
self.ctx, config_id)
|
||||||
self.assertEqual(ex._exc_info[0], exception.NotFound)
|
self.assertEqual(ex.exc_info[0], exception.NotFound)
|
||||||
|
|
||||||
config = self._create_software_config()
|
config = self._create_software_config()
|
||||||
config_id = config['id']
|
config_id = config['id']
|
||||||
@ -2667,10 +2669,10 @@ class SoftwareConfigServiceTest(HeatTestCase):
|
|||||||
config_id = config['id']
|
config_id = config['id']
|
||||||
self.engine.delete_software_config(self.ctx, config_id)
|
self.engine.delete_software_config(self.ctx, config_id)
|
||||||
|
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.engine.show_software_config,
|
self.engine.show_software_config,
|
||||||
self.ctx, config_id)
|
self.ctx, config_id)
|
||||||
self.assertEqual(ex._exc_info[0], exception.NotFound)
|
self.assertEqual(ex.exc_info[0], exception.NotFound)
|
||||||
|
|
||||||
def _create_software_deployment(self, config_id=None, input_values={},
|
def _create_software_deployment(self, config_id=None, input_values={},
|
||||||
action='INIT',
|
action='INIT',
|
||||||
@ -2787,10 +2789,10 @@ class SoftwareConfigServiceTest(HeatTestCase):
|
|||||||
|
|
||||||
def test_show_software_deployment(self):
|
def test_show_software_deployment(self):
|
||||||
deployment_id = str(uuid.uuid4())
|
deployment_id = str(uuid.uuid4())
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.engine.show_software_deployment,
|
self.engine.show_software_deployment,
|
||||||
self.ctx, deployment_id)
|
self.ctx, deployment_id)
|
||||||
self.assertEqual(ex._exc_info[0], exception.NotFound)
|
self.assertEqual(ex.exc_info[0], exception.NotFound)
|
||||||
|
|
||||||
deployment = self._create_software_deployment()
|
deployment = self._create_software_deployment()
|
||||||
self.assertIsNotNone(deployment)
|
self.assertIsNotNone(deployment)
|
||||||
@ -2912,10 +2914,10 @@ class SoftwareConfigServiceTest(HeatTestCase):
|
|||||||
|
|
||||||
def test_delete_software_deployment(self):
|
def test_delete_software_deployment(self):
|
||||||
deployment_id = str(uuid.uuid4())
|
deployment_id = str(uuid.uuid4())
|
||||||
ex = self.assertRaises(rpc_common.ClientException,
|
ex = self.assertRaises(dispatcher.ExpectedException,
|
||||||
self.engine.delete_software_deployment,
|
self.engine.delete_software_deployment,
|
||||||
self.ctx, deployment_id)
|
self.ctx, deployment_id)
|
||||||
self.assertEqual(ex._exc_info[0], exception.NotFound)
|
self.assertEqual(ex.exc_info[0], exception.NotFound)
|
||||||
|
|
||||||
deployment = self._create_software_deployment()
|
deployment = self._create_software_deployment()
|
||||||
self.assertIsNotNone(deployment)
|
self.assertIsNotNone(deployment)
|
||||||
|
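Note on the assertion pattern above: with oslo.messaging, expected engine errors no longer reach the caller as rpc_common.ClientException; they surface as dispatcher.ExpectedException, which carries the original exception as the usual (type, value, traceback) triple in its public exc_info attribute (the old _exc_info was private). A minimal sketch of the pattern, assuming dispatcher is importable from oslo.messaging.rpc and reusing the engine/context fixtures the tests above rely on:

    from oslo.messaging.rpc import dispatcher

    from heat.common import exception

    def assert_raises_stack_not_found(test, engine, ctx, stack_id):
        # ExpectedException wraps the real error raised inside the engine.
        ex = test.assertRaises(dispatcher.ExpectedException,
                               engine.show_stack, ctx, stack_id)
        # exc_info[0] is the original exception class, exc_info[1] the instance.
        test.assertEqual(ex.exc_info[0], exception.StackNotFound)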
@@ -14,9 +14,10 @@
 from oslo.config import cfg
 import six

+from oslo.messaging._drivers import common as rpc_common

 import heat.api.middleware.fault as fault
 from heat.common import exception as heat_exc
-from heat.openstack.common.rpc import common as rpc_common
 from heat.tests.common import HeatTestCase


@@ -77,8 +78,8 @@ class FaultMiddlewareTest(HeatTestCase):
 error = heat_exc.StackNotFound(stack_name='a')
 exc_info = (type(error), error, None)
 serialized = rpc_common.serialize_remote_exception(exc_info)
-remote_error = rpc_common.deserialize_remote_exception(cfg.CONF,
-serialized)
+remote_error = rpc_common.deserialize_remote_exception(
+serialized, ["heat.common.exception"])
 wrapper = fault.FaultWrapper(None)
 msg = wrapper._error(remote_error)
 expected_message, expected_traceback = six.text_type(remote_error).\
@@ -123,14 +124,12 @@ class FaultMiddlewareTest(HeatTestCase):
 def test_should_not_ignore_parent_classes_even_for_remote_ones(self):
 # We want tracebacks
 cfg.CONF.set_override('debug', True)
-cfg.CONF.set_override('allowed_rpc_exception_modules',
-['heat.tests.test_fault_middleware'])

 error = StackNotFoundChild(stack_name='a')
 exc_info = (type(error), error, None)
 serialized = rpc_common.serialize_remote_exception(exc_info)
-remote_error = rpc_common.deserialize_remote_exception(cfg.CONF,
-serialized)
+remote_error = rpc_common.deserialize_remote_exception(
+serialized, ["heat.tests.test_fault_middleware"])

 wrapper = fault.FaultWrapper(None)
 msg = wrapper._error(remote_error)
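The deserialization call also changes shape here: the oslo.messaging variant takes the serialized payload plus an explicit list of modules whose exceptions may be rebuilt, instead of reading allowed_rpc_exception_modules from cfg.CONF. A rough sketch of the round trip, assuming the private oslo.messaging._drivers.common module keeps the signature used in these tests:

    from oslo.messaging._drivers import common as rpc_common

    from heat.common import exception as heat_exc

    error = heat_exc.StackNotFound(stack_name='a')
    # Serialize the exception as it would cross the RPC boundary ...
    serialized = rpc_common.serialize_remote_exception(
        (type(error), error, None))
    # ... then rebuild it, naming the modules allowed to be rehydrated.
    remote_error = rpc_common.deserialize_remote_exception(
        serialized, ["heat.common.exception"])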
@@ -12,7 +12,6 @@
 # under the License.

 import mock
-from oslo.config import cfg

 from heat.common import exception
 from heat.common import template_format
@@ -41,13 +40,6 @@ class NotificationTest(common.HeatTestCase):

 def setUp(self):
 super(NotificationTest, self).setUp()

-cfg.CONF.import_opt('notification_driver',
-'heat.openstack.common.notifier.api')

-cfg.CONF.set_default('notification_driver',
-['heat.openstack.common.notifier.test_notifier'])
-cfg.CONF.set_default('host', 'test_host')
 resource._register_class('GenericResource',
 generic_resource.ResourceWithProps)

@@ -87,9 +79,7 @@ class NotificationTest(common.HeatTestCase):
 stack_arn = self.stack.identifier().arn()
 self.expected[action] = [
 mock.call(self.ctx,
-'orchestration.test_host',
 'orchestration.stack.%s.start' % action,
-'INFO',
 {'state_reason': 'Stack %s started' % action.upper(),
 'user_id': 'test_username',
 'stack_identity': stack_arn,
@@ -97,9 +87,8 @@ class NotificationTest(common.HeatTestCase):
 'create_at': self.create_at,
 'stack_name': self.stack_name,
 'state': '%s_IN_PROGRESS' % action.upper()}),
-mock.call(self.ctx, 'orchestration.test_host',
+mock.call(self.ctx,
 'orchestration.stack.%s.end' % action,
-'INFO',
 {'state_reason':
 'Stack %s completed successfully' % action.upper(),
 'user_id': 'test_username',
@@ -109,60 +98,50 @@ class NotificationTest(common.HeatTestCase):
 'stack_name': self.stack_name,
 'state': '%s_COMPLETE' % action.upper()})]

-def test_create_stack(self):
-with mock.patch('heat.openstack.common.notifier.api.notify') \
-as mock_notify:
-self.create_test_stack()
-self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
-self.stack.state)
+@mock.patch('oslo.messaging.notify.notifier.Notifier.info')
+def test_create_stack(self, mock_notify):
+self.create_test_stack()
+self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
+self.stack.state)

 self.assertEqual(self.expected['create'],
 mock_notify.call_args_list)

-def test_create_and_suspend_stack(self):
-with mock.patch('heat.openstack.common.notifier.api.notify') \
-as mock_notify:
-self.create_test_stack()
-self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
-self.stack.state)
+@mock.patch('oslo.messaging.notify.notifier.Notifier.info')
+def test_create_and_suspend_stack(self, mock_notify):
+self.create_test_stack()
+self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
+self.stack.state)

 self.assertEqual(self.expected['create'],
 mock_notify.call_args_list)
 self.stack.suspend()
 self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
 self.stack.state)

 expected = self.expected['create'] + self.expected['suspend']
 self.assertEqual(expected, mock_notify.call_args_list)

-def test_create_and_delete_stack(self):
-with mock.patch('heat.openstack.common.notifier.api.notify') \
-as mock_notify:
-self.create_test_stack()
-self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
-self.stack.state)
+@mock.patch('oslo.messaging.notify.notifier.Notifier.info')
+def test_create_and_delete_stack(self, mock_notify):
+self.create_test_stack()
+self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
+self.stack.state)

 self.assertEqual(self.expected['create'],
 mock_notify.call_args_list)
 self.stack.delete()
 self.assertEqual((self.stack.DELETE, self.stack.COMPLETE),
 self.stack.state)
 expected = self.expected['create'] + self.expected['delete']

 self.assertEqual(expected, mock_notify.call_args_list)


 class ScaleNotificationTest(common.HeatTestCase):

 def setUp(self):
 super(ScaleNotificationTest, self).setUp()

-cfg.CONF.import_opt('notification_driver',
-'heat.openstack.common.notifier.api')

-cfg.CONF.set_default('notification_driver',
-['heat.openstack.common.notifier.test_notifier'])
-cfg.CONF.set_default('host', 'test_host')
 self.ctx = utils.dummy_context()
 self.ctx.tenant_id = 'test_tenant'

@@ -208,9 +187,7 @@ class ScaleNotificationTest(common.HeatTestCase):

 stack_arn = self.stack.identifier().arn()
 expected = [mock.call(self.ctx,
-'orchestration.test_host',
 'orchestration.autoscaling.start',
-'INFO',
 {'state_reason':
 'Stack CREATE completed successfully',
 'user_id': 'test_username',
@@ -228,9 +205,7 @@ class ScaleNotificationTest(common.HeatTestCase):
 ]
 if with_error:
 expected += [mock.call(self.ctx,
-'orchestration.test_host',
 'orchestration.autoscaling.error',
-'ERROR',
 {'state_reason':
 'Stack CREATE completed successfully',
 'user_id': 'test_username',
@@ -247,9 +222,7 @@ class ScaleNotificationTest(common.HeatTestCase):
 ]
 else:
 expected += [mock.call(self.ctx,
-'orchestration.test_host',
 'orchestration.autoscaling.end',
-'INFO',
 {'state_reason':
 'Stack CREATE completed successfully',
 'user_id': 'test_username',
@@ -268,48 +241,45 @@ class ScaleNotificationTest(common.HeatTestCase):

 return expected

-def test_scale_success(self):
-with mock.patch('heat.engine.notification.stack.send'):
-with mock.patch('heat.openstack.common.notifier.api.notify') \
-as mock_notify:
-self.mock_stack_except_for_group()
-group = self.create_autoscaling_stack_and_get_group()
-expected = self.expected_notifs_calls(group,
-adjust=1,
-start_capacity=1,
-end_capacity=2,
-)
-group.adjust(1)
-self.assertEqual(2, len(group.get_instance_names()))
-mock_notify.assert_has_calls(expected)
-
-expected = self.expected_notifs_calls(group,
-adjust=-1,
-start_capacity=2,
-end_capacity=1,
-)
-group.adjust(-1)
-self.assertEqual(1, len(group.get_instance_names()))
-mock_notify.assert_has_calls(expected)
+@mock.patch('heat.engine.notification.stack.send')
+@mock.patch('oslo.messaging.notify.notifier.Notifier.info')
+def test_scale_success(self, mock_notify, mock_send):
+self.mock_stack_except_for_group()
+group = self.create_autoscaling_stack_and_get_group()
+expected = self.expected_notifs_calls(group,
+adjust=1,
+start_capacity=1,
+end_capacity=2,
+)
+group.adjust(1)
+self.assertEqual(2, len(group.get_instance_names()))
+mock_notify.assert_has_calls(expected)
+
+expected = self.expected_notifs_calls(group,
+adjust=-1,
+start_capacity=2,
+end_capacity=1,
+)
+group.adjust(-1)
+self.assertEqual(1, len(group.get_instance_names()))
+mock_notify.assert_has_calls(expected)

-def test_scaleup_failure(self):
-with mock.patch('heat.engine.notification.stack.send'):
-with mock.patch('heat.openstack.common.notifier.api.notify') \
-as mock_notify:
-
-self.mock_stack_except_for_group()
-group = self.create_autoscaling_stack_and_get_group()
-
-err_message = 'Boooom'
-m_as = self.patchobject(autoscaling.AutoScalingGroup, 'resize')
-m_as.side_effect = exception.Error(err_message)
-
-expected = self.expected_notifs_calls(group,
-adjust=2,
-start_capacity=1,
-with_error=err_message,
-)
-self.assertRaises(exception.Error, group.adjust, 2)
-self.assertEqual(1, len(group.get_instance_names()))
-mock_notify.assert_has_calls(expected)
+@mock.patch('heat.engine.notification.stack.send')
+@mock.patch('oslo.messaging.notify.notifier.Notifier.info')
+@mock.patch('oslo.messaging.notify.notifier.Notifier.error')
+def test_scaleup_failure(self, mock_error, mock_info, mock_send):
+self.mock_stack_except_for_group()
+group = self.create_autoscaling_stack_and_get_group()
+
+err_message = 'Boooom'
+m_as = self.patchobject(autoscaling.AutoScalingGroup, 'resize')
+m_as.side_effect = exception.Error(err_message)
+
+info, error = self.expected_notifs_calls(group,
+adjust=2,
+start_capacity=1,
+with_error=err_message)
+self.assertRaises(exception.Error, group.adjust, 2)
+self.assertEqual(1, len(group.get_instance_names()))
+mock_error.assert_has_calls([error])
+mock_info.assert_has_calls([info])
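Since notifications are now emitted through oslo.messaging's Notifier rather than heat.openstack.common.notifier.api, the tests above patch Notifier.info (and Notifier.error for failures) and drop the notification_driver/host configuration entirely. A minimal sketch of the new style, reusing the fixtures assumed by these tests (create_test_stack, self.ctx); the event type shown follows the 'orchestration.stack.<action>.start' pattern from the expected calls:

    import mock

    @mock.patch('oslo.messaging.notify.notifier.Notifier.info')
    def test_create_stack(self, mock_notify):
        self.create_test_stack()
        # Calls are recorded as (context, event_type, payload); the old
        # host and priority arguments are no longer passed explicitly.
        mock_notify.assert_any_call(
            self.ctx, 'orchestration.stack.create.start', mock.ANY)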
@@ -19,13 +19,11 @@ Unit Tests for heat.rpc.client


 import mock
-from oslo.config import cfg
 import stubout
 import testtools

 from heat.common import identifier
-from heat.openstack.common import rpc
-from heat.rpc import api as rpc_api
+from heat.common import messaging
 from heat.rpc import client as rpc_client
 from heat.tests import utils

@@ -33,11 +31,9 @@ from heat.tests import utils
 class EngineRpcAPITestCase(testtools.TestCase):

 def setUp(self):
+messaging.setup("fake://", optional=True)
+self.addCleanup(messaging.cleanup)
 self.context = utils.dummy_context()
-cfg.CONF.set_default('rpc_backend',
-'heat.openstack.common.rpc.impl_fake')
-cfg.CONF.set_default('verbose', True)
-cfg.CONF.set_default('host', 'host')

 self.stubs = stubout.StubOutForTesting()
 self.identity = dict(identifier.HeatIdentifier('engine_test_tenant',
@@ -55,24 +51,20 @@ class EngineRpcAPITestCase(testtools.TestCase):
 rpcapi = rpcapi_class()
 expected_retval = 'foo' if method == 'call' else None

-expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
+kwargs.pop('version', None)
 expected_msg = rpcapi.make_msg(method, **kwargs)

-expected_msg['version'] = expected_version
-expected_topic = rpc_api.ENGINE_TOPIC

 cast_and_call = ['delete_stack']
 if rpc_method == 'call' and method in cast_and_call:
 kwargs['cast'] = False

-with mock.patch.object(rpc, rpc_method) as mock_rpc_method:
+with mock.patch.object(rpcapi, rpc_method) as mock_rpc_method:
 mock_rpc_method.return_value = expected_retval

 retval = getattr(rpcapi, method)(ctxt, **kwargs)

 self.assertEqual(expected_retval, retval)
-expected_args = [ctxt, expected_topic, expected_msg,
-mock.ANY]
+expected_args = [ctxt, expected_msg, mock.ANY]
 actual_args, _ = mock_rpc_method.call_args
 for expected_arg, actual_arg in zip(expected_args,
 actual_args):
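The setUp above replaces the impl_fake rpc_backend override with heat.common.messaging's helpers, so the client tests run over oslo.messaging's in-memory fake transport. A minimal sketch, assuming messaging.setup()/cleanup() behave as used in the diff:

    import testtools

    from heat.common import messaging

    class ExampleRpcTestCase(testtools.TestCase):

        def setUp(self):
            super(ExampleRpcTestCase, self).setUp()
            # "fake://" selects the in-process fake driver; cleanup()
            # tears the transport down again after each test.
            messaging.setup("fake://", optional=True)
            self.addCleanup(messaging.cleanup)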
@ -13,11 +13,11 @@
|
|||||||
|
|
||||||
import mock
|
import mock
|
||||||
|
|
||||||
|
from oslo import messaging
|
||||||
|
|
||||||
from heat.common import exception
|
from heat.common import exception
|
||||||
from heat.db import api as db_api
|
from heat.db import api as db_api
|
||||||
from heat.engine import stack_lock
|
from heat.engine import stack_lock
|
||||||
from heat.openstack.common.rpc import common as rpc_common
|
|
||||||
from heat.openstack.common.rpc import proxy
|
|
||||||
from heat.tests.common import HeatTestCase
|
from heat.tests.common import HeatTestCase
|
||||||
from heat.tests import utils
|
from heat.tests import utils
|
||||||
|
|
||||||
@ -34,8 +34,8 @@ class StackLockTest(HeatTestCase):
|
|||||||
|
|
||||||
def test_successful_acquire_new_lock(self):
|
def test_successful_acquire_new_lock(self):
|
||||||
self.m.StubOutWithMock(db_api, "stack_lock_create")
|
self.m.StubOutWithMock(db_api, "stack_lock_create")
|
||||||
db_api.stack_lock_create(self.stack.id, self.engine_id).\
|
db_api.stack_lock_create(
|
||||||
AndReturn(None)
|
self.stack.id, self.engine_id).AndReturn(None)
|
||||||
|
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
@ -45,8 +45,8 @@ class StackLockTest(HeatTestCase):
|
|||||||
|
|
||||||
def test_failed_acquire_existing_lock_current_engine(self):
|
def test_failed_acquire_existing_lock_current_engine(self):
|
||||||
self.m.StubOutWithMock(db_api, "stack_lock_create")
|
self.m.StubOutWithMock(db_api, "stack_lock_create")
|
||||||
db_api.stack_lock_create(self.stack.id, self.engine_id).\
|
db_api.stack_lock_create(
|
||||||
AndReturn(self.engine_id)
|
self.stack.id, self.engine_id).AndReturn(self.engine_id)
|
||||||
|
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
@ -56,14 +56,12 @@ class StackLockTest(HeatTestCase):
|
|||||||
|
|
||||||
def test_successful_acquire_existing_lock_engine_dead(self):
|
def test_successful_acquire_existing_lock_engine_dead(self):
|
||||||
self.m.StubOutWithMock(db_api, "stack_lock_create")
|
self.m.StubOutWithMock(db_api, "stack_lock_create")
|
||||||
db_api.stack_lock_create(self.stack.id, self.engine_id).\
|
db_api.stack_lock_create(
|
||||||
AndReturn("fake-engine-id")
|
self.stack.id, self.engine_id).AndReturn("fake-engine-id")
|
||||||
|
|
||||||
topic = self.stack.id
|
self.m.StubOutWithMock(messaging.rpc.client._CallContext, "call")
|
||||||
self.m.StubOutWithMock(proxy.RpcProxy, "call")
|
messaging.rpc.client._CallContext.call(
|
||||||
rpc = proxy.RpcProxy(topic, "1.0")
|
self.context, "listening").AndRaise(messaging.MessagingTimeout)
|
||||||
rpc.call(self.context, rpc.make_msg("listening"), timeout=2,
|
|
||||||
topic="fake-engine-id").AndRaise(rpc_common.Timeout)
|
|
||||||
|
|
||||||
self.m.StubOutWithMock(db_api, "stack_lock_steal")
|
self.m.StubOutWithMock(db_api, "stack_lock_steal")
|
||||||
db_api.stack_lock_steal(self.stack.id, "fake-engine-id",
|
db_api.stack_lock_steal(self.stack.id, "fake-engine-id",
|
||||||
@ -77,14 +75,12 @@ class StackLockTest(HeatTestCase):
|
|||||||
|
|
||||||
def test_failed_acquire_existing_lock_engine_alive(self):
|
def test_failed_acquire_existing_lock_engine_alive(self):
|
||||||
self.m.StubOutWithMock(db_api, "stack_lock_create")
|
self.m.StubOutWithMock(db_api, "stack_lock_create")
|
||||||
db_api.stack_lock_create(self.stack.id, self.engine_id).\
|
db_api.stack_lock_create(
|
||||||
AndReturn("fake-engine-id")
|
self.stack.id, self.engine_id).AndReturn("fake-engine-id")
|
||||||
|
|
||||||
topic = self.stack.id
|
self.m.StubOutWithMock(messaging.rpc.client._CallContext, "call")
|
||||||
self.m.StubOutWithMock(proxy.RpcProxy, "call")
|
messaging.rpc.client._CallContext.call(
|
||||||
rpc = proxy.RpcProxy(topic, "1.0")
|
self.context, "listening").AndReturn(True)
|
||||||
rpc.call(self.context, rpc.make_msg("listening"), timeout=2,
|
|
||||||
topic="fake-engine-id").AndReturn(True)
|
|
||||||
|
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
@ -94,19 +90,17 @@ class StackLockTest(HeatTestCase):
|
|||||||
|
|
||||||
def test_failed_acquire_existing_lock_engine_dead(self):
|
def test_failed_acquire_existing_lock_engine_dead(self):
|
||||||
self.m.StubOutWithMock(db_api, "stack_lock_create")
|
self.m.StubOutWithMock(db_api, "stack_lock_create")
|
||||||
db_api.stack_lock_create(self.stack.id, self.engine_id).\
|
db_api.stack_lock_create(
|
||||||
AndReturn("fake-engine-id")
|
self.stack.id, self.engine_id).AndReturn("fake-engine-id")
|
||||||
|
|
||||||
topic = self.stack.id
|
self.m.StubOutWithMock(messaging.rpc.client._CallContext, "call")
|
||||||
self.m.StubOutWithMock(proxy.RpcProxy, "call")
|
messaging.rpc.client._CallContext.call(
|
||||||
rpc = proxy.RpcProxy(topic, "1.0")
|
self.context, "listening").AndRaise(messaging.MessagingTimeout)
|
||||||
rpc.call(self.context, rpc.make_msg("listening"), timeout=2,
|
|
||||||
topic="fake-engine-id").AndRaise(rpc_common.Timeout)
|
|
||||||
|
|
||||||
self.m.StubOutWithMock(db_api, "stack_lock_steal")
|
self.m.StubOutWithMock(db_api, "stack_lock_steal")
|
||||||
db_api.stack_lock_steal(self.stack.id, "fake-engine-id",
|
db_api.stack_lock_steal(
|
||||||
self.engine_id).\
|
self.stack.id, "fake-engine-id",
|
||||||
AndReturn("fake-engine-id2")
|
self.engine_id).AndReturn("fake-engine-id2")
|
||||||
|
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
@ -116,31 +110,25 @@ class StackLockTest(HeatTestCase):
|
|||||||
|
|
||||||
def test_successful_acquire_with_retry(self):
|
def test_successful_acquire_with_retry(self):
|
||||||
self.m.StubOutWithMock(db_api, "stack_lock_create")
|
self.m.StubOutWithMock(db_api, "stack_lock_create")
|
||||||
db_api.stack_lock_create(self.stack.id, self.engine_id).\
|
db_api.stack_lock_create(
|
||||||
AndReturn("fake-engine-id")
|
self.stack.id, self.engine_id).AndReturn("fake-engine-id")
|
||||||
|
|
||||||
topic = self.stack.id
|
self.m.StubOutWithMock(messaging.rpc.client._CallContext, "call")
|
||||||
self.m.StubOutWithMock(proxy.RpcProxy, "call")
|
messaging.rpc.client._CallContext.call(
|
||||||
rpc = proxy.RpcProxy(topic, "1.0")
|
self.context, "listening").AndRaise(messaging.MessagingTimeout)
|
||||||
rpc.call(self.context, rpc.make_msg("listening"), timeout=2,
|
|
||||||
topic="fake-engine-id").AndRaise(rpc_common.Timeout)
|
|
||||||
|
|
||||||
self.m.StubOutWithMock(db_api, "stack_lock_steal")
|
self.m.StubOutWithMock(db_api, "stack_lock_steal")
|
||||||
db_api.stack_lock_steal(self.stack.id, "fake-engine-id",
|
db_api.stack_lock_steal(
|
||||||
self.engine_id).\
|
self.stack.id, "fake-engine-id", self.engine_id).AndReturn(True)
|
||||||
AndReturn(True)
|
|
||||||
|
|
||||||
db_api.stack_lock_create(self.stack.id, self.engine_id).\
|
db_api.stack_lock_create(
|
||||||
AndReturn("fake-engine-id")
|
self.stack.id, self.engine_id).AndReturn("fake-engine-id")
|
||||||
|
|
||||||
topic = self.stack.id
|
messaging.rpc.client._CallContext.call(
|
||||||
rpc = proxy.RpcProxy(topic, "1.0")
|
self.context, "listening").AndRaise(messaging.MessagingTimeout)
|
||||||
rpc.call(self.context, rpc.make_msg("listening"), timeout=2,
|
|
||||||
topic="fake-engine-id").AndRaise(rpc_common.Timeout)
|
|
||||||
|
|
||||||
db_api.stack_lock_steal(self.stack.id, "fake-engine-id",
|
db_api.stack_lock_steal(
|
||||||
self.engine_id).\
|
self.stack.id, "fake-engine-id", self.engine_id).AndReturn(None)
|
||||||
AndReturn(None)
|
|
||||||
|
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
@ -150,31 +138,25 @@ class StackLockTest(HeatTestCase):
|
|||||||
|
|
||||||
def test_failed_acquire_one_retry_only(self):
|
def test_failed_acquire_one_retry_only(self):
|
||||||
self.m.StubOutWithMock(db_api, "stack_lock_create")
|
self.m.StubOutWithMock(db_api, "stack_lock_create")
|
||||||
db_api.stack_lock_create(self.stack.id, self.engine_id).\
|
db_api.stack_lock_create(
|
||||||
AndReturn("fake-engine-id")
|
self.stack.id, self.engine_id).AndReturn("fake-engine-id")
|
||||||
|
|
||||||
topic = self.stack.id
|
self.m.StubOutWithMock(messaging.rpc.client._CallContext, "call")
|
||||||
self.m.StubOutWithMock(proxy.RpcProxy, "call")
|
messaging.rpc.client._CallContext.call(
|
||||||
rpc = proxy.RpcProxy(topic, "1.0")
|
self.context, "listening").AndRaise(messaging.MessagingTimeout)
|
||||||
rpc.call(self.context, rpc.make_msg("listening"), timeout=2,
|
|
||||||
topic="fake-engine-id").AndRaise(rpc_common.Timeout)
|
|
||||||
|
|
||||||
self.m.StubOutWithMock(db_api, "stack_lock_steal")
|
self.m.StubOutWithMock(db_api, "stack_lock_steal")
|
||||||
db_api.stack_lock_steal(self.stack.id, "fake-engine-id",
|
db_api.stack_lock_steal(
|
||||||
self.engine_id).\
|
self.stack.id, "fake-engine-id", self.engine_id).AndReturn(True)
|
||||||
AndReturn(True)
|
|
||||||
|
|
||||||
db_api.stack_lock_create(self.stack.id, self.engine_id).\
|
db_api.stack_lock_create(
|
||||||
AndReturn("fake-engine-id")
|
self.stack.id, self.engine_id).AndReturn("fake-engine-id")
|
||||||
|
|
||||||
topic = self.stack.id
|
messaging.rpc.client._CallContext.call(
|
||||||
rpc = proxy.RpcProxy(topic, "1.0")
|
self.context, "listening").AndRaise(messaging.MessagingTimeout)
|
||||||
rpc.call(self.context, rpc.make_msg("listening"), timeout=2,
|
|
||||||
topic="fake-engine-id").AndRaise(rpc_common.Timeout)
|
|
||||||
|
|
||||||
db_api.stack_lock_steal(self.stack.id, "fake-engine-id",
|
db_api.stack_lock_steal(
|
||||||
self.engine_id).\
|
self.stack.id, "fake-engine-id", self.engine_id).AndReturn(True)
|
||||||
AndReturn(True)
|
|
||||||
|
|
||||||
self.m.ReplayAll()
|
self.m.ReplayAll()
|
||||||
|
|
||||||
|
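The lock tests above no longer build an RpcProxy with an explicit topic and timeout; they stub the _CallContext.call method that oslo.messaging's RPCClient uses internally, and a dead engine is now signalled by messaging.MessagingTimeout instead of rpc_common.Timeout. A rough mox sketch of that expectation (note that _CallContext is an oslo.messaging internal, so the attribute path is an assumption tied to this library version):

    from oslo import messaging

    def expect_listening_timeout(test):
        # Make the "listening" probe time out, which is how StackLock
        # decides the engine holding the lock is no longer alive.
        test.m.StubOutWithMock(messaging.rpc.client._CallContext, "call")
        messaging.rpc.client._CallContext.call(
            test.context, "listening").AndRaise(messaging.MessagingTimeout)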
@@ -83,11 +83,11 @@ basic_configuration() {
 if detect_rabbit
 then
 echo "rabbitmq detected, configuring $conf_path for rabbit" >&2
-iniset $conf_path DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu
+iniset $conf_path DEFAULT rpc_backend kombu
 iniset $conf_path DEFAULT rabbit_password guest
 else
 echo "qpid detected, configuring $conf_path for qpid" >&2
-iniset $conf_path DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid
+iniset $conf_path DEFAULT rpc_backend qpid
 fi
 fi
 }
@@ -13,13 +13,10 @@ module=importutils
 module=jsonutils
 module=local
 module=log
-module=log_handler
 module=loopingcall
 module=network_utils
-module=notifier
 module=policy
 module=processutils
-module=rpc
 module=service
 module=strutils
 module=threadgroup
@@ -8,6 +8,7 @@ kombu>=2.4.8
 lxml>=2.3
 netaddr>=0.7.6
 oslo.config>=1.2.0
+oslo.messaging>=1.3.0
 PasteDeploy>=1.5.0
 pbr>=0.6,!=0.7,<1.0
 posix_ipc
@@ -1,3 +1,3 @@
 export HEAT_CONFIG_GENERATOR_EXTRA_MODULES=keystoneclient.middleware.auth_token
-export HEAT_CONFIG_GENERATOR_EXTRA_LIBRARIES="heat.common.config heat.common.wsgi"
+export HEAT_CONFIG_GENERATOR_EXTRA_LIBRARIES="heat.common.config heat.common.wsgi oslo.messaging"
 export HEAT_CONFIG_GENERATOR_EXCLUDED_FILES="heat/common/config.py heat/common/wsgi.py heat/openstack/common/sslutils.py"