Switch to oslo.messaging
Change-Id: Ie223ca4c6384902ec006380c68911f57d0ed501e
This commit is contained in:
parent
2d864e111a
commit
0d3a4427d9
|
@ -16,11 +16,10 @@
|
||||||
import os
|
import os
|
||||||
import socket
|
import socket
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
from oslo import messaging
|
||||||
|
|
||||||
|
|
||||||
cfg.CONF.import_opt('default_log_levels', 'designate.openstack.common.log')
|
cfg.CONF.import_opt('default_log_levels', 'designate.openstack.common.log')
|
||||||
cfg.CONF.import_opt('control_exchange', 'designate.openstack.common.rpc')
|
|
||||||
cfg.CONF.import_opt('allowed_rpc_exception_modules',
|
|
||||||
'designate.openstack.common.rpc')
|
|
||||||
|
|
||||||
cfg.CONF.register_opts([
|
cfg.CONF.register_opts([
|
||||||
cfg.StrOpt('host', default=socket.gethostname(),
|
cfg.StrOpt('host', default=socket.gethostname(),
|
||||||
|
@ -32,7 +31,6 @@ cfg.CONF.register_opts([
|
||||||
cfg.StrOpt('state-path', default='/var/lib/designate',
|
cfg.StrOpt('state-path', default='/var/lib/designate',
|
||||||
help='Top-level directory for maintaining designate\'s state'),
|
help='Top-level directory for maintaining designate\'s state'),
|
||||||
|
|
||||||
|
|
||||||
cfg.StrOpt('central-topic', default='central', help='Central Topic'),
|
cfg.StrOpt('central-topic', default='central', help='Central Topic'),
|
||||||
cfg.StrOpt('agent-topic', default='agent', help='Agent Topic'),
|
cfg.StrOpt('agent-topic', default='agent', help='Agent Topic'),
|
||||||
|
|
||||||
|
@ -59,7 +57,4 @@ cfg.CONF.set_default('default_log_levels',
|
||||||
'keystoneclient.middleware.auth_token=INFO'])
|
'keystoneclient.middleware.auth_token=INFO'])
|
||||||
|
|
||||||
# Set some Oslo RPC defaults
|
# Set some Oslo RPC defaults
|
||||||
cfg.CONF.set_default('control_exchange', 'designate')
|
messaging.set_transport_defaults('designate')
|
||||||
cfg.CONF.set_default('allowed_rpc_exception_modules',
|
|
||||||
['designate.exceptions',
|
|
||||||
'designate.openstack.common.exception'])
|
|
||||||
|
|
|
@ -14,13 +14,16 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
from oslo import messaging
|
||||||
|
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common.rpc import proxy as rpc_proxy
|
from designate import rpc
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class AgentAPI(rpc_proxy.RpcProxy):
|
class AgentAPI(object):
|
||||||
"""
|
"""
|
||||||
Client side of the agent Rpc API.
|
Client side of the agent Rpc API.
|
||||||
|
|
||||||
|
@ -28,108 +31,80 @@ class AgentAPI(rpc_proxy.RpcProxy):
|
||||||
|
|
||||||
1.0 - Initial version
|
1.0 - Initial version
|
||||||
"""
|
"""
|
||||||
|
RPC_API_VERSION = '1.0'
|
||||||
|
|
||||||
def __init__(self, topic=None):
|
def __init__(self, topic=None):
|
||||||
topic = topic if topic else cfg.CONF.agent_topic
|
topic = topic if topic else cfg.CONF.agent_topic
|
||||||
super(AgentAPI, self).__init__(topic=topic, default_version='1.0')
|
|
||||||
|
target = messaging.Target(topic=topic, version=self.RPC_API_VERSION)
|
||||||
|
self.client = rpc.get_client(target, version_cap='1.0')
|
||||||
|
|
||||||
# Server Methods
|
# Server Methods
|
||||||
def create_server(self, context, server):
|
def create_server(self, context, server):
|
||||||
msg = self.make_msg('create_server', server=server)
|
return self.client.call(context, 'create_server', server=server)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def update_server(self, context, server):
|
def update_server(self, context, server):
|
||||||
msg = self.make_msg('update_server', server=server)
|
return self.client.call(context, 'update_server', server=server)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def delete_server(self, context, server):
|
def delete_server(self, context, server):
|
||||||
msg = self.make_msg('delete_server', server=server)
|
return self.client.call(context, 'delete_server', server=server)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
# TSIG Key Methods
|
# TSIG Key Methods
|
||||||
def create_tsigkey(self, context, tsigkey):
|
def create_tsigkey(self, context, tsigkey):
|
||||||
msg = self.make_msg('create_tsigkey', tsigkey=tsigkey)
|
return self.client.call(context, 'create_tsigkey', tsigkey=tsigkey)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def update_tsigkey(self, context, tsigkey):
|
def update_tsigkey(self, context, tsigkey):
|
||||||
msg = self.make_msg('update_tsigkey', tsigkey=tsigkey)
|
return self.client.call(context, 'update_tsigkey', tsigkey=tsigkey)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def delete_tsigkey(self, context, tsigkey):
|
def delete_tsigkey(self, context, tsigkey):
|
||||||
msg = self.make_msg('delete_tsigkey', tsigkey=tsigkey)
|
return self.client.call(context, 'delete_tsigkey', tsigkey=tsigkey)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
# Domain Methods
|
# Domain Methods
|
||||||
def create_domain(self, context, domain):
|
def create_domain(self, context, domain):
|
||||||
msg = self.make_msg('create_domain', domain=domain)
|
return self.client.call(context, 'create_domain', domain=domain)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def update_domain(self, context, domain):
|
def update_domain(self, context, domain):
|
||||||
msg = self.make_msg('update_domain', domain=domain)
|
return self.client.call(context, 'update_domain', domain=domain)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def delete_domain(self, context, domain):
|
def delete_domain(self, context, domain):
|
||||||
msg = self.make_msg('delete_domain', domain=domain)
|
return self.client.call(context, 'delete_domain', domain=domain)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
# Record Methods
|
# Record Methods
|
||||||
def update_recordset(self, context, domain, recordset):
|
def update_recordset(self, context, domain, recordset):
|
||||||
msg = self.make_msg('update_recordset',
|
return self.client.call(context, 'update_recordset',
|
||||||
domain=domain,
|
domain=domain,
|
||||||
recordset=recordset)
|
recordset=recordset)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def delete_recordset(self, context, domain, recordset):
|
def delete_recordset(self, context, domain, recordset):
|
||||||
msg = self.make_msg('delete_recordset',
|
return self.client.call(context, 'delete_recordset',
|
||||||
domain=domain,
|
domain=domain,
|
||||||
recordset=recordset)
|
recordset=recordset)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def create_record(self, context, domain, recordset, record):
|
def create_record(self, context, domain, recordset, record):
|
||||||
msg = self.make_msg('create_record',
|
return self.client.call(context, 'create_record',
|
||||||
domain=domain,
|
domain=domain,
|
||||||
recordset=recordset,
|
recordset=recordset,
|
||||||
record=record)
|
record=record)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def update_record(self, context, domain, recordset, record):
|
def update_record(self, context, domain, recordset, record):
|
||||||
msg = self.make_msg('update_record',
|
return self.client.call(context, 'update_record',
|
||||||
domain=domain,
|
domain=domain,
|
||||||
recordset=recordset,
|
recordset=recordset,
|
||||||
record=record)
|
record=record)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def delete_record(self, context, domain, recordset, record):
|
def delete_record(self, context, domain, recordset, record):
|
||||||
msg = self.make_msg('delete_record',
|
return self.client.call(context, 'delete_record',
|
||||||
domain=domain,
|
domain=domain,
|
||||||
recordset=recordset,
|
recordset=recordset,
|
||||||
record=record)
|
record=record)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
# Sync Methods
|
# Sync Methods
|
||||||
def sync_domain(self, context, domain, records):
|
def sync_domain(self, context, domain, records):
|
||||||
msg = self.make_msg('sync_domains',
|
return self.client.call(context, 'sync_domains',
|
||||||
domain=domain,
|
domain=domain,
|
||||||
records=records)
|
records=records)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def sync_record(self, context, domain, record):
|
def sync_record(self, context, domain, record):
|
||||||
msg = self.make_msg('sync_domains',
|
return self.client.call(context, 'sync_domains',
|
||||||
domain=domain,
|
domain=domain,
|
||||||
record=record)
|
record=record)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
|
@ -15,31 +15,33 @@
|
||||||
# under the License.
|
# under the License.
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common.rpc import service as rpc_service
|
|
||||||
from designate import backend
|
from designate import backend
|
||||||
|
from designate import rpc
|
||||||
|
from designate import service
|
||||||
from designate.central import rpcapi as central_rpcapi
|
from designate.central import rpcapi as central_rpcapi
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
|
|
||||||
|
|
||||||
class Service(rpc_service.Service):
|
class Service(service.Service):
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, host, binary, topic, service_name=None, endpoints=None,
|
||||||
manager = backend.get_backend(cfg.CONF['service:agent'].backend_driver,
|
*args, **kwargs):
|
||||||
central_service=central_api)
|
# Central api needs a transport if not it fails. This is normally done
|
||||||
|
# by the service init method.
|
||||||
|
rpc.init(cfg.CONF)
|
||||||
|
central_api = central_rpcapi.CentralAPI()
|
||||||
|
|
||||||
kwargs.update(
|
manager = backend.get_backend(
|
||||||
host=cfg.CONF.host,
|
cfg.CONF['service:agent'].backend_driver,
|
||||||
topic=cfg.CONF.agent_topic,
|
central_service=central_api)
|
||||||
manager=manager
|
|
||||||
)
|
|
||||||
|
|
||||||
super(Service, self).__init__(*args, **kwargs)
|
super(Service, self).__init__(host, binary, topic,
|
||||||
|
endpoints=[manager],
|
||||||
|
*args, **kwargs)
|
||||||
|
|
||||||
def start(self):
|
def start(self):
|
||||||
self.manager.start()
|
self.endpoints[0].start()
|
||||||
super(Service, self).start()
|
|
||||||
|
|
||||||
def stop(self):
|
def stop(self):
|
||||||
super(Service, self).stop()
|
super(Service, self).stop()
|
||||||
self.manager.stop()
|
self.endpoints[0].stop()
|
||||||
|
|
|
@ -14,7 +14,7 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
from designate.central import rpcapi
|
||||||
|
|
||||||
cfg.CONF.register_group(cfg.OptGroup(
|
cfg.CONF.register_group(cfg.OptGroup(
|
||||||
name='service:api', title="Configuration for API Service"
|
name='service:api', title="Configuration for API Service"
|
||||||
|
@ -36,3 +36,20 @@ cfg.CONF.register_opts([
|
||||||
cfg.BoolOpt('enable-api-v1', default=True),
|
cfg.BoolOpt('enable-api-v1', default=True),
|
||||||
cfg.BoolOpt('enable-api-v2', default=False),
|
cfg.BoolOpt('enable-api-v2', default=False),
|
||||||
], group='service:api')
|
], group='service:api')
|
||||||
|
|
||||||
|
|
||||||
|
CENTRAL_API = None
|
||||||
|
|
||||||
|
|
||||||
|
def get_central_api():
|
||||||
|
"""
|
||||||
|
The rpc.get_client() which is called upon the API object initialization
|
||||||
|
will cause a assertion error if the designate.rpc.TRANSPORT isn't setup by
|
||||||
|
rpc.init() before.
|
||||||
|
|
||||||
|
This fixes that by creating the rpcapi when demanded.
|
||||||
|
"""
|
||||||
|
global CENTRAL_API
|
||||||
|
if not CENTRAL_API:
|
||||||
|
CENTRAL_API = rpcapi.CentralAPI()
|
||||||
|
return CENTRAL_API
|
||||||
|
|
|
@ -15,7 +15,10 @@
|
||||||
# under the License.
|
# under the License.
|
||||||
import flask
|
import flask
|
||||||
import webob.dec
|
import webob.dec
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
from oslo import messaging
|
||||||
|
|
||||||
from designate import exceptions
|
from designate import exceptions
|
||||||
from designate import notifications
|
from designate import notifications
|
||||||
from designate import wsgi
|
from designate import wsgi
|
||||||
|
@ -24,7 +27,6 @@ from designate.openstack.common import jsonutils as json
|
||||||
from designate.openstack.common import local
|
from designate.openstack.common import local
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common import strutils
|
from designate.openstack.common import strutils
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
@ -210,7 +212,7 @@ class FaultWrapperMiddleware(wsgi.Middleware):
|
||||||
response['errors'] = e.errors
|
response['errors'] = e.errors
|
||||||
|
|
||||||
return self._handle_exception(request, e, status, response)
|
return self._handle_exception(request, e, status, response)
|
||||||
except rpc_common.Timeout as e:
|
except messaging.MessagingTimeout as e:
|
||||||
# Special case for RPC timeout's
|
# Special case for RPC timeout's
|
||||||
response = {
|
response = {
|
||||||
'code': 504,
|
'code': 504,
|
||||||
|
@ -242,7 +244,8 @@ class FaultWrapperMiddleware(wsgi.Middleware):
|
||||||
if 'context' in request.environ:
|
if 'context' in request.environ:
|
||||||
response['request_id'] = request.environ['context'].request_id
|
response['request_id'] = request.environ['context'].request_id
|
||||||
|
|
||||||
notifications.send_api_fault(url, response['code'], e)
|
notifications.send_api_fault(request.environ['context'], url,
|
||||||
|
response['code'], e)
|
||||||
else:
|
else:
|
||||||
#TODO(ekarlso): Remove after verifying that there's actually a
|
#TODO(ekarlso): Remove after verifying that there's actually a
|
||||||
# context always set
|
# context always set
|
||||||
|
|
|
@ -16,10 +16,9 @@
|
||||||
import flask
|
import flask
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate import schema
|
from designate import schema
|
||||||
from designate.central import rpcapi as central_rpcapi
|
from designate.api import get_central_api
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
blueprint = flask.Blueprint('domains', __name__)
|
blueprint = flask.Blueprint('domains', __name__)
|
||||||
domain_schema = schema.Schema('v1', 'domain')
|
domain_schema = schema.Schema('v1', 'domain')
|
||||||
domains_schema = schema.Schema('v1', 'domains')
|
domains_schema = schema.Schema('v1', 'domains')
|
||||||
|
@ -42,7 +41,7 @@ def create_domain():
|
||||||
values = flask.request.json
|
values = flask.request.json
|
||||||
|
|
||||||
domain_schema.validate(values)
|
domain_schema.validate(values)
|
||||||
domain = central_api.create_domain(context, values)
|
domain = get_central_api().create_domain(context, values)
|
||||||
|
|
||||||
response = flask.jsonify(domain_schema.filter(domain))
|
response = flask.jsonify(domain_schema.filter(domain))
|
||||||
response.status_int = 201
|
response.status_int = 201
|
||||||
|
@ -55,7 +54,7 @@ def create_domain():
|
||||||
def get_domains():
|
def get_domains():
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
|
|
||||||
domains = central_api.find_domains(context)
|
domains = get_central_api().find_domains(context)
|
||||||
|
|
||||||
return flask.jsonify(domains_schema.filter({'domains': domains}))
|
return flask.jsonify(domains_schema.filter({'domains': domains}))
|
||||||
|
|
||||||
|
@ -64,7 +63,7 @@ def get_domains():
|
||||||
def get_domain(domain_id):
|
def get_domain(domain_id):
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
|
|
||||||
domain = central_api.get_domain(context, domain_id)
|
domain = get_central_api().get_domain(context, domain_id)
|
||||||
|
|
||||||
return flask.jsonify(domain_schema.filter(domain))
|
return flask.jsonify(domain_schema.filter(domain))
|
||||||
|
|
||||||
|
@ -74,12 +73,12 @@ def update_domain(domain_id):
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
values = flask.request.json
|
values = flask.request.json
|
||||||
|
|
||||||
domain = central_api.get_domain(context, domain_id)
|
domain = get_central_api().get_domain(context, domain_id)
|
||||||
domain = domain_schema.filter(domain)
|
domain = domain_schema.filter(domain)
|
||||||
domain.update(values)
|
domain.update(values)
|
||||||
|
|
||||||
domain_schema.validate(domain)
|
domain_schema.validate(domain)
|
||||||
domain = central_api.update_domain(context, domain_id, values)
|
domain = get_central_api().update_domain(context, domain_id, values)
|
||||||
|
|
||||||
return flask.jsonify(domain_schema.filter(domain))
|
return flask.jsonify(domain_schema.filter(domain))
|
||||||
|
|
||||||
|
@ -88,7 +87,7 @@ def update_domain(domain_id):
|
||||||
def delete_domain(domain_id):
|
def delete_domain(domain_id):
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
|
|
||||||
central_api.delete_domain(context, domain_id)
|
get_central_api().delete_domain(context, domain_id)
|
||||||
|
|
||||||
return flask.Response(status=200)
|
return flask.Response(status=200)
|
||||||
|
|
||||||
|
@ -97,6 +96,6 @@ def delete_domain(domain_id):
|
||||||
def get_domain_servers(domain_id):
|
def get_domain_servers(domain_id):
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
|
|
||||||
servers = central_api.get_domain_servers(context, domain_id)
|
servers = get_central_api().get_domain_servers(context, domain_id)
|
||||||
|
|
||||||
return flask.jsonify(servers_schema.filter({'servers': servers}))
|
return flask.jsonify(servers_schema.filter({'servers': servers}))
|
||||||
|
|
|
@ -14,8 +14,10 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
import flask
|
import flask
|
||||||
|
from oslo import messaging
|
||||||
|
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common import rpc
|
from designate import rpc
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
blueprint = flask.Blueprint('diagnostics', __name__)
|
blueprint = flask.Blueprint('diagnostics', __name__)
|
||||||
|
@ -24,13 +26,10 @@ blueprint = flask.Blueprint('diagnostics', __name__)
|
||||||
@blueprint.route('/diagnostics/ping/<topic>/<host>', methods=['GET'])
|
@blueprint.route('/diagnostics/ping/<topic>/<host>', methods=['GET'])
|
||||||
def ping_host(topic, host):
|
def ping_host(topic, host):
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
queue = rpc.queue_get_for(context, topic, host)
|
|
||||||
|
|
||||||
msg = {
|
client = rpc.get_client(messaging.Target(topic=topic))
|
||||||
'method': 'ping',
|
cctxt = client.prepare(server=host, timeout=10)
|
||||||
'args': {},
|
|
||||||
}
|
|
||||||
|
|
||||||
pong = rpc.call(context, queue, msg, timeout=10)
|
pong = cctxt.call(context, 'ping')
|
||||||
|
|
||||||
return flask.jsonify(pong)
|
return flask.jsonify(pong)
|
||||||
|
|
|
@ -16,10 +16,9 @@
|
||||||
import flask
|
import flask
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate import schema
|
from designate import schema
|
||||||
from designate.central import rpcapi as central_rpcapi
|
from designate.api import get_central_api
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
blueprint = flask.Blueprint('limits', __name__)
|
blueprint = flask.Blueprint('limits', __name__)
|
||||||
limits_schema = schema.Schema('v1', 'limits')
|
limits_schema = schema.Schema('v1', 'limits')
|
||||||
|
|
||||||
|
@ -33,7 +32,7 @@ def get_limits_schema():
|
||||||
def get_limits():
|
def get_limits():
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
|
|
||||||
absolute_limits = central_api.get_absolute_limits(context)
|
absolute_limits = get_central_api().get_absolute_limits(context)
|
||||||
|
|
||||||
return flask.jsonify(limits_schema.filter({
|
return flask.jsonify(limits_schema.filter({
|
||||||
"limits": {
|
"limits": {
|
||||||
|
|
|
@ -17,17 +17,16 @@ import flask
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate import exceptions
|
from designate import exceptions
|
||||||
from designate import schema
|
from designate import schema
|
||||||
from designate.central import rpcapi as central_rpcapi
|
from designate.api import get_central_api
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
blueprint = flask.Blueprint('records', __name__)
|
blueprint = flask.Blueprint('records', __name__)
|
||||||
record_schema = schema.Schema('v1', 'record')
|
record_schema = schema.Schema('v1', 'record')
|
||||||
records_schema = schema.Schema('v1', 'records')
|
records_schema = schema.Schema('v1', 'records')
|
||||||
|
|
||||||
|
|
||||||
def _find_recordset(context, domain_id, name, type):
|
def _find_recordset(context, domain_id, name, type):
|
||||||
return central_api.find_recordset(context, {
|
return get_central_api().find_recordset(context, {
|
||||||
'domain_id': domain_id,
|
'domain_id': domain_id,
|
||||||
'name': name,
|
'name': name,
|
||||||
'type': type,
|
'type': type,
|
||||||
|
@ -38,7 +37,7 @@ def _find_or_create_recordset(context, domain_id, name, type, ttl):
|
||||||
try:
|
try:
|
||||||
recordset = _find_recordset(context, domain_id, name, type)
|
recordset = _find_recordset(context, domain_id, name, type)
|
||||||
except exceptions.RecordSetNotFound:
|
except exceptions.RecordSetNotFound:
|
||||||
recordset = central_api.create_recordset(context, domain_id, {
|
recordset = get_central_api().create_recordset(context, domain_id, {
|
||||||
'name': name,
|
'name': name,
|
||||||
'type': type,
|
'type': type,
|
||||||
'ttl': ttl,
|
'ttl': ttl,
|
||||||
|
@ -70,7 +69,7 @@ def _format_record_v1(record, recordset):
|
||||||
def _fetch_domain_recordsets(context, domain_id):
|
def _fetch_domain_recordsets(context, domain_id):
|
||||||
criterion = {'domain_id': domain_id}
|
criterion = {'domain_id': domain_id}
|
||||||
|
|
||||||
recordsets = central_api.find_recordsets(context, criterion)
|
recordsets = get_central_api().find_recordsets(context, criterion)
|
||||||
|
|
||||||
return dict((r['id'], r) for r in recordsets)
|
return dict((r['id'], r) for r in recordsets)
|
||||||
|
|
||||||
|
@ -98,8 +97,9 @@ def create_record(domain_id):
|
||||||
values['type'],
|
values['type'],
|
||||||
values.get('ttl', None))
|
values.get('ttl', None))
|
||||||
|
|
||||||
record = central_api.create_record(context, domain_id, recordset['id'],
|
record = get_central_api().create_record(context, domain_id,
|
||||||
_extract_record_values(values))
|
recordset['id'],
|
||||||
|
_extract_record_values(values))
|
||||||
|
|
||||||
record = _format_record_v1(record, recordset)
|
record = _format_record_v1(record, recordset)
|
||||||
|
|
||||||
|
@ -117,9 +117,9 @@ def get_records(domain_id):
|
||||||
|
|
||||||
# NOTE: We need to ensure the domain actually exists, otherwise we may
|
# NOTE: We need to ensure the domain actually exists, otherwise we may
|
||||||
# return an empty records array instead of a domain not found
|
# return an empty records array instead of a domain not found
|
||||||
central_api.get_domain(context, domain_id)
|
get_central_api().get_domain(context, domain_id)
|
||||||
|
|
||||||
records = central_api.find_records(context, {'domain_id': domain_id})
|
records = get_central_api().find_records(context, {'domain_id': domain_id})
|
||||||
|
|
||||||
recordsets = _fetch_domain_recordsets(context, domain_id)
|
recordsets = _fetch_domain_recordsets(context, domain_id)
|
||||||
|
|
||||||
|
@ -139,12 +139,12 @@ def get_record(domain_id, record_id):
|
||||||
|
|
||||||
# NOTE: We need to ensure the domain actually exists, otherwise we may
|
# NOTE: We need to ensure the domain actually exists, otherwise we may
|
||||||
# return an record not found instead of a domain not found
|
# return an record not found instead of a domain not found
|
||||||
central_api.get_domain(context, domain_id)
|
get_central_api().get_domain(context, domain_id)
|
||||||
|
|
||||||
criterion = {'domain_id': domain_id, 'id': record_id}
|
criterion = {'domain_id': domain_id, 'id': record_id}
|
||||||
record = central_api.find_record(context, criterion)
|
record = get_central_api().find_record(context, criterion)
|
||||||
|
|
||||||
recordset = central_api.get_recordset(
|
recordset = get_central_api().get_recordset(
|
||||||
context, domain_id, record['recordset_id'])
|
context, domain_id, record['recordset_id'])
|
||||||
|
|
||||||
record = _format_record_v1(record, recordset)
|
record = _format_record_v1(record, recordset)
|
||||||
|
@ -160,14 +160,14 @@ def update_record(domain_id, record_id):
|
||||||
|
|
||||||
# NOTE: We need to ensure the domain actually exists, otherwise we may
|
# NOTE: We need to ensure the domain actually exists, otherwise we may
|
||||||
# return an record not found instead of a domain not found
|
# return an record not found instead of a domain not found
|
||||||
central_api.get_domain(context, domain_id)
|
get_central_api().get_domain(context, domain_id)
|
||||||
|
|
||||||
# Find the record
|
# Find the record
|
||||||
criterion = {'domain_id': domain_id, 'id': record_id}
|
criterion = {'domain_id': domain_id, 'id': record_id}
|
||||||
record = central_api.find_record(context, criterion)
|
record = get_central_api().find_record(context, criterion)
|
||||||
|
|
||||||
# Find the associated recordset
|
# Find the associated recordset
|
||||||
recordset = central_api.get_recordset(
|
recordset = get_central_api().get_recordset(
|
||||||
context, domain_id, record['recordset_id'])
|
context, domain_id, record['recordset_id'])
|
||||||
|
|
||||||
# Filter out any extra fields from the fetched record
|
# Filter out any extra fields from the fetched record
|
||||||
|
@ -196,13 +196,13 @@ def update_record(domain_id, record_id):
|
||||||
record_schema.validate(record)
|
record_schema.validate(record)
|
||||||
|
|
||||||
# Update the record
|
# Update the record
|
||||||
record = central_api.update_record(
|
record = get_central_api().update_record(
|
||||||
context, domain_id, recordset['id'], record_id,
|
context, domain_id, recordset['id'], record_id,
|
||||||
_extract_record_values(values))
|
_extract_record_values(values))
|
||||||
|
|
||||||
# Update the recordset (if necessary)
|
# Update the recordset (if necessary)
|
||||||
if update_recordset:
|
if update_recordset:
|
||||||
recordset = central_api.update_recordset(
|
recordset = get_central_api().update_recordset(
|
||||||
context, domain_id, recordset['id'],
|
context, domain_id, recordset['id'],
|
||||||
_extract_recordset_values(values))
|
_extract_recordset_values(values))
|
||||||
|
|
||||||
|
@ -219,13 +219,13 @@ def delete_record(domain_id, record_id):
|
||||||
|
|
||||||
# NOTE: We need to ensure the domain actually exists, otherwise we may
|
# NOTE: We need to ensure the domain actually exists, otherwise we may
|
||||||
# return a record not found instead of a domain not found
|
# return a record not found instead of a domain not found
|
||||||
central_api.get_domain(context, domain_id)
|
get_central_api().get_domain(context, domain_id)
|
||||||
|
|
||||||
# Find the record
|
# Find the record
|
||||||
criterion = {'domain_id': domain_id, 'id': record_id}
|
criterion = {'domain_id': domain_id, 'id': record_id}
|
||||||
record = central_api.find_record(context, criterion)
|
record = get_central_api().find_record(context, criterion)
|
||||||
|
|
||||||
central_api.delete_record(
|
get_central_api().delete_record(
|
||||||
context, domain_id, record['recordset_id'], record_id)
|
context, domain_id, record['recordset_id'], record_id)
|
||||||
|
|
||||||
return flask.Response(status=200)
|
return flask.Response(status=200)
|
||||||
|
|
|
@ -16,10 +16,9 @@
|
||||||
import flask
|
import flask
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate import schema
|
from designate import schema
|
||||||
from designate.central import rpcapi as central_rpcapi
|
from designate.api import get_central_api
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
blueprint = flask.Blueprint('servers', __name__)
|
blueprint = flask.Blueprint('servers', __name__)
|
||||||
server_schema = schema.Schema('v1', 'server')
|
server_schema = schema.Schema('v1', 'server')
|
||||||
servers_schema = schema.Schema('v1', 'servers')
|
servers_schema = schema.Schema('v1', 'servers')
|
||||||
|
@ -41,7 +40,8 @@ def create_server():
|
||||||
values = flask.request.json
|
values = flask.request.json
|
||||||
|
|
||||||
server_schema.validate(values)
|
server_schema.validate(values)
|
||||||
server = central_api.create_server(context, values=flask.request.json)
|
server = get_central_api().create_server(context,
|
||||||
|
values=flask.request.json)
|
||||||
|
|
||||||
response = flask.jsonify(server_schema.filter(server))
|
response = flask.jsonify(server_schema.filter(server))
|
||||||
response.status_int = 201
|
response.status_int = 201
|
||||||
|
@ -54,7 +54,7 @@ def create_server():
|
||||||
def get_servers():
|
def get_servers():
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
|
|
||||||
servers = central_api.find_servers(context)
|
servers = get_central_api().find_servers(context)
|
||||||
|
|
||||||
return flask.jsonify(servers_schema.filter({'servers': servers}))
|
return flask.jsonify(servers_schema.filter({'servers': servers}))
|
||||||
|
|
||||||
|
@ -63,7 +63,7 @@ def get_servers():
|
||||||
def get_server(server_id):
|
def get_server(server_id):
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
|
|
||||||
server = central_api.get_server(context, server_id)
|
server = get_central_api().get_server(context, server_id)
|
||||||
|
|
||||||
return flask.jsonify(server_schema.filter(server))
|
return flask.jsonify(server_schema.filter(server))
|
||||||
|
|
||||||
|
@ -73,12 +73,12 @@ def update_server(server_id):
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
values = flask.request.json
|
values = flask.request.json
|
||||||
|
|
||||||
server = central_api.get_server(context, server_id)
|
server = get_central_api().get_server(context, server_id)
|
||||||
server = server_schema.filter(server)
|
server = server_schema.filter(server)
|
||||||
server.update(values)
|
server.update(values)
|
||||||
|
|
||||||
server_schema.validate(server)
|
server_schema.validate(server)
|
||||||
server = central_api.update_server(context, server_id, values=values)
|
server = get_central_api().update_server(context, server_id, values=values)
|
||||||
|
|
||||||
return flask.jsonify(server_schema.filter(server))
|
return flask.jsonify(server_schema.filter(server))
|
||||||
|
|
||||||
|
@ -87,6 +87,6 @@ def update_server(server_id):
|
||||||
def delete_server(server_id):
|
def delete_server(server_id):
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
|
|
||||||
central_api.delete_server(context, server_id)
|
get_central_api().delete_server(context, server_id)
|
||||||
|
|
||||||
return flask.Response(status=200)
|
return flask.Response(status=200)
|
||||||
|
|
|
@ -16,10 +16,9 @@
|
||||||
import flask
|
import flask
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate import schema
|
from designate import schema
|
||||||
from designate.central import rpcapi as central_rpcapi
|
from designate.api import get_central_api
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
blueprint = flask.Blueprint('tsigkeys', __name__)
|
blueprint = flask.Blueprint('tsigkeys', __name__)
|
||||||
tsigkey_schema = schema.Schema('v1', 'tsigkey')
|
tsigkey_schema = schema.Schema('v1', 'tsigkey')
|
||||||
tsigkeys_schema = schema.Schema('v1', 'tsigkeys')
|
tsigkeys_schema = schema.Schema('v1', 'tsigkeys')
|
||||||
|
@ -41,7 +40,8 @@ def create_tsigkey():
|
||||||
values = flask.request.json
|
values = flask.request.json
|
||||||
|
|
||||||
tsigkey_schema.validate(values)
|
tsigkey_schema.validate(values)
|
||||||
tsigkey = central_api.create_tsigkey(context, values=flask.request.json)
|
tsigkey = get_central_api().create_tsigkey(
|
||||||
|
context, values=flask.request.json)
|
||||||
|
|
||||||
response = flask.jsonify(tsigkey_schema.filter(tsigkey))
|
response = flask.jsonify(tsigkey_schema.filter(tsigkey))
|
||||||
response.status_int = 201
|
response.status_int = 201
|
||||||
|
@ -54,7 +54,7 @@ def create_tsigkey():
|
||||||
def get_tsigkeys():
|
def get_tsigkeys():
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
|
|
||||||
tsigkeys = central_api.find_tsigkeys(context)
|
tsigkeys = get_central_api().find_tsigkeys(context)
|
||||||
|
|
||||||
return flask.jsonify(tsigkeys_schema.filter({'tsigkeys': tsigkeys}))
|
return flask.jsonify(tsigkeys_schema.filter({'tsigkeys': tsigkeys}))
|
||||||
|
|
||||||
|
@ -63,7 +63,7 @@ def get_tsigkeys():
|
||||||
def get_tsigkey(tsigkey_id):
|
def get_tsigkey(tsigkey_id):
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
|
|
||||||
tsigkey = central_api.get_tsigkey(context, tsigkey_id)
|
tsigkey = get_central_api().get_tsigkey(context, tsigkey_id)
|
||||||
|
|
||||||
return flask.jsonify(tsigkey_schema.filter(tsigkey))
|
return flask.jsonify(tsigkey_schema.filter(tsigkey))
|
||||||
|
|
||||||
|
@ -73,12 +73,13 @@ def update_tsigkey(tsigkey_id):
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
values = flask.request.json
|
values = flask.request.json
|
||||||
|
|
||||||
tsigkey = central_api.get_tsigkey(context, tsigkey_id)
|
tsigkey = get_central_api().get_tsigkey(context, tsigkey_id)
|
||||||
tsigkey = tsigkey_schema.filter(tsigkey)
|
tsigkey = tsigkey_schema.filter(tsigkey)
|
||||||
tsigkey.update(values)
|
tsigkey.update(values)
|
||||||
|
|
||||||
tsigkey_schema.validate(tsigkey)
|
tsigkey_schema.validate(tsigkey)
|
||||||
tsigkey = central_api.update_tsigkey(context, tsigkey_id, values=values)
|
tsigkey = get_central_api().update_tsigkey(context, tsigkey_id,
|
||||||
|
values=values)
|
||||||
|
|
||||||
return flask.jsonify(tsigkey_schema.filter(tsigkey))
|
return flask.jsonify(tsigkey_schema.filter(tsigkey))
|
||||||
|
|
||||||
|
@ -87,6 +88,6 @@ def update_tsigkey(tsigkey_id):
|
||||||
def delete_tsigkey(tsigkey_id):
|
def delete_tsigkey(tsigkey_id):
|
||||||
context = flask.request.environ.get('context')
|
context = flask.request.environ.get('context')
|
||||||
|
|
||||||
central_api.delete_tsigkey(context, tsigkey_id)
|
get_central_api().delete_tsigkey(context, tsigkey_id)
|
||||||
|
|
||||||
return flask.Response(status=200)
|
return flask.Response(status=200)
|
||||||
|
|
|
@ -15,7 +15,6 @@
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
import pecan
|
import pecan
|
||||||
from designate.central import rpcapi as central_rpcapi
|
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate import schema
|
from designate import schema
|
||||||
from designate import utils
|
from designate import utils
|
||||||
|
@ -23,7 +22,6 @@ from designate.api.v2.controllers import rest
|
||||||
from designate.api.v2.views import blacklists as blacklists_view
|
from designate.api.v2.views import blacklists as blacklists_view
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
|
|
||||||
|
|
||||||
class BlacklistsController(rest.RestController):
|
class BlacklistsController(rest.RestController):
|
||||||
|
@ -40,7 +38,7 @@ class BlacklistsController(rest.RestController):
|
||||||
request = pecan.request
|
request = pecan.request
|
||||||
context = request.environ['context']
|
context = request.environ['context']
|
||||||
|
|
||||||
blacklist = central_api.get_blacklist(context, blacklist_id)
|
blacklist = self.central_api.get_blacklist(context, blacklist_id)
|
||||||
|
|
||||||
return self._view.show(context, request, blacklist)
|
return self._view.show(context, request, blacklist)
|
||||||
|
|
||||||
|
@ -58,7 +56,7 @@ class BlacklistsController(rest.RestController):
|
||||||
criterion = dict((k, params[k]) for k in accepted_filters
|
criterion = dict((k, params[k]) for k in accepted_filters
|
||||||
if k in params)
|
if k in params)
|
||||||
|
|
||||||
blacklist = central_api.find_blacklists(
|
blacklist = self.central_api.find_blacklists(
|
||||||
context, criterion, marker, limit, sort_key, sort_dir)
|
context, criterion, marker, limit, sort_key, sort_dir)
|
||||||
|
|
||||||
return self._view.list(context, request, blacklist)
|
return self._view.list(context, request, blacklist)
|
||||||
|
@ -79,7 +77,7 @@ class BlacklistsController(rest.RestController):
|
||||||
values = self._view.load(context, request, body)
|
values = self._view.load(context, request, body)
|
||||||
|
|
||||||
# Create the blacklist
|
# Create the blacklist
|
||||||
blacklist = central_api.create_blacklist(context, values)
|
blacklist = self.central_api.create_blacklist(context, values)
|
||||||
|
|
||||||
response.status_int = 201
|
response.status_int = 201
|
||||||
|
|
||||||
|
@ -100,7 +98,7 @@ class BlacklistsController(rest.RestController):
|
||||||
response = pecan.response
|
response = pecan.response
|
||||||
|
|
||||||
# Fetch the existing blacklisted zone
|
# Fetch the existing blacklisted zone
|
||||||
blacklist = central_api.get_blacklist(context, blacklist_id)
|
blacklist = self.central_api.get_blacklist(context, blacklist_id)
|
||||||
|
|
||||||
# Convert to APIv2 Format
|
# Convert to APIv2 Format
|
||||||
blacklist = self._view.show(context, request, blacklist)
|
blacklist = self._view.show(context, request, blacklist)
|
||||||
|
@ -115,8 +113,8 @@ class BlacklistsController(rest.RestController):
|
||||||
|
|
||||||
values = self._view.load(context, request, body)
|
values = self._view.load(context, request, body)
|
||||||
|
|
||||||
blacklist = central_api.update_blacklist(context,
|
blacklist = self.central_api.update_blacklist(context,
|
||||||
blacklist_id, values)
|
blacklist_id, values)
|
||||||
|
|
||||||
response.status_int = 200
|
response.status_int = 200
|
||||||
|
|
||||||
|
@ -130,7 +128,7 @@ class BlacklistsController(rest.RestController):
|
||||||
response = pecan.response
|
response = pecan.response
|
||||||
context = request.environ['context']
|
context = request.environ['context']
|
||||||
|
|
||||||
central_api.delete_blacklist(context, blacklist_id)
|
self.central_api.delete_blacklist(context, blacklist_id)
|
||||||
|
|
||||||
response.status_int = 204
|
response.status_int = 204
|
||||||
|
|
||||||
|
|
|
@ -19,10 +19,6 @@ from designate import exceptions
|
||||||
from designate import schema
|
from designate import schema
|
||||||
from designate.api.v2.controllers import rest
|
from designate.api.v2.controllers import rest
|
||||||
from designate.api.v2.views import floatingips as floatingips_views
|
from designate.api.v2.views import floatingips as floatingips_views
|
||||||
from designate.central import rpcapi as central_rpcapi
|
|
||||||
|
|
||||||
|
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
|
|
||||||
|
|
||||||
FIP_REGEX = '^(?P<region>[A-Za-z0-9\\.\\-_]{1,100}):' \
|
FIP_REGEX = '^(?P<region>[A-Za-z0-9\\.\\-_]{1,100}):' \
|
||||||
|
@ -52,7 +48,7 @@ class FloatingIPController(rest.RestController):
|
||||||
request = pecan.request
|
request = pecan.request
|
||||||
context = request.environ['context']
|
context = request.environ['context']
|
||||||
|
|
||||||
fips = central_api.list_floatingips(context)
|
fips = self.central_api.list_floatingips(context)
|
||||||
return self._view.list(context, request, fips)
|
return self._view.list(context, request, fips)
|
||||||
|
|
||||||
@pecan.expose(template='json:', content_type='application/json')
|
@pecan.expose(template='json:', content_type='application/json')
|
||||||
|
@ -69,7 +65,7 @@ class FloatingIPController(rest.RestController):
|
||||||
# Validate the request conforms to the schema
|
# Validate the request conforms to the schema
|
||||||
self._resource_schema.validate(body)
|
self._resource_schema.validate(body)
|
||||||
|
|
||||||
fip = central_api.update_floatingip(
|
fip = self.central_api.update_floatingip(
|
||||||
context, region, id_, body['floatingip'])
|
context, region, id_, body['floatingip'])
|
||||||
|
|
||||||
if fip:
|
if fip:
|
||||||
|
@ -85,6 +81,6 @@ class FloatingIPController(rest.RestController):
|
||||||
|
|
||||||
region, id_ = fip_key_to_data(fip_key)
|
region, id_ = fip_key_to_data(fip_key)
|
||||||
|
|
||||||
fip = central_api.get_floatingip(context, region, id_)
|
fip = self.central_api.get_floatingip(context, region, id_)
|
||||||
|
|
||||||
return self._view.show(context, request, fip)
|
return self._view.show(context, request, fip)
|
||||||
|
|
|
@ -15,13 +15,11 @@
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
import pecan
|
import pecan
|
||||||
from designate.central import rpcapi as central_rpcapi
|
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.api.v2.controllers import rest
|
from designate.api.v2.controllers import rest
|
||||||
from designate.api.v2.views import limits as limits_view
|
from designate.api.v2.views import limits as limits_view
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
|
|
||||||
|
|
||||||
class LimitsController(rest.RestController):
|
class LimitsController(rest.RestController):
|
||||||
|
@ -32,6 +30,6 @@ class LimitsController(rest.RestController):
|
||||||
request = pecan.request
|
request = pecan.request
|
||||||
context = pecan.request.environ['context']
|
context = pecan.request.environ['context']
|
||||||
|
|
||||||
absolute_limits = central_api.get_absolute_limits(context)
|
absolute_limits = self.central_api.get_absolute_limits(context)
|
||||||
|
|
||||||
return self._view.show(context, request, absolute_limits)
|
return self._view.show(context, request, absolute_limits)
|
||||||
|
|
|
@ -16,13 +16,11 @@
|
||||||
|
|
||||||
import pecan
|
import pecan
|
||||||
from designate import utils
|
from designate import utils
|
||||||
from designate.central import rpcapi as central_rpcapi
|
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.api.v2.controllers import rest
|
from designate.api.v2.controllers import rest
|
||||||
from designate.api.v2.views import nameservers as nameservers_view
|
from designate.api.v2.views import nameservers as nameservers_view
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
|
|
||||||
|
|
||||||
class NameServersController(rest.RestController):
|
class NameServersController(rest.RestController):
|
||||||
|
@ -34,6 +32,6 @@ class NameServersController(rest.RestController):
|
||||||
request = pecan.request
|
request = pecan.request
|
||||||
context = pecan.request.environ['context']
|
context = pecan.request.environ['context']
|
||||||
|
|
||||||
servers = central_api.get_domain_servers(context, zone_id)
|
servers = self.central_api.get_domain_servers(context, zone_id)
|
||||||
|
|
||||||
return self._view.list(context, request, servers, [zone_id])
|
return self._view.list(context, request, servers, [zone_id])
|
||||||
|
|
|
@ -14,7 +14,6 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
import pecan
|
import pecan
|
||||||
from designate.central import rpcapi as central_rpcapi
|
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate import schema
|
from designate import schema
|
||||||
from designate import utils
|
from designate import utils
|
||||||
|
@ -22,7 +21,6 @@ from designate.api.v2.controllers import rest
|
||||||
from designate.api.v2.views import records as records_view
|
from designate.api.v2.views import records as records_view
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
|
|
||||||
|
|
||||||
class RecordsController(rest.RestController):
|
class RecordsController(rest.RestController):
|
||||||
|
@ -39,8 +37,8 @@ class RecordsController(rest.RestController):
|
||||||
request = pecan.request
|
request = pecan.request
|
||||||
context = request.environ['context']
|
context = request.environ['context']
|
||||||
|
|
||||||
record = central_api.get_record(context, zone_id, recordset_id,
|
record = self.central_api.get_record(context, zone_id, recordset_id,
|
||||||
record_id)
|
record_id)
|
||||||
|
|
||||||
return self._view.show(context, request, record)
|
return self._view.show(context, request, record)
|
||||||
|
|
||||||
|
@ -62,7 +60,7 @@ class RecordsController(rest.RestController):
|
||||||
criterion['domain_id'] = zone_id
|
criterion['domain_id'] = zone_id
|
||||||
criterion['recordset_id'] = recordset_id
|
criterion['recordset_id'] = recordset_id
|
||||||
|
|
||||||
records = central_api.find_records(
|
records = self.central_api.find_records(
|
||||||
context, criterion, marker, limit, sort_key, sort_dir)
|
context, criterion, marker, limit, sort_key, sort_dir)
|
||||||
|
|
||||||
return self._view.list(context, request, records,
|
return self._view.list(context, request, records,
|
||||||
|
@ -85,8 +83,8 @@ class RecordsController(rest.RestController):
|
||||||
values = self._view.load(context, request, body)
|
values = self._view.load(context, request, body)
|
||||||
|
|
||||||
# Create the records
|
# Create the records
|
||||||
record = central_api.create_record(context, zone_id, recordset_id,
|
record = self.central_api.create_record(context, zone_id, recordset_id,
|
||||||
values)
|
values)
|
||||||
|
|
||||||
# Prepare the response headers
|
# Prepare the response headers
|
||||||
if record['status'] == 'PENDING':
|
if record['status'] == 'PENDING':
|
||||||
|
@ -111,8 +109,8 @@ class RecordsController(rest.RestController):
|
||||||
response = pecan.response
|
response = pecan.response
|
||||||
|
|
||||||
# Fetch the existing record
|
# Fetch the existing record
|
||||||
record = central_api.get_record(context, zone_id, recordset_id,
|
record = self.central_api.get_record(context, zone_id, recordset_id,
|
||||||
record_id)
|
record_id)
|
||||||
|
|
||||||
# Convert to APIv2 Format
|
# Convert to APIv2 Format
|
||||||
record = self._view.show(context, request, record)
|
record = self._view.show(context, request, record)
|
||||||
|
@ -126,7 +124,7 @@ class RecordsController(rest.RestController):
|
||||||
self._resource_schema.validate(record)
|
self._resource_schema.validate(record)
|
||||||
|
|
||||||
values = self._view.load(context, request, body)
|
values = self._view.load(context, request, body)
|
||||||
record = central_api.update_record(
|
record = self.central_api.update_record(
|
||||||
context, zone_id, recordset_id, record_id, values)
|
context, zone_id, recordset_id, record_id, values)
|
||||||
|
|
||||||
if record['status'] == 'PENDING':
|
if record['status'] == 'PENDING':
|
||||||
|
@ -144,8 +142,8 @@ class RecordsController(rest.RestController):
|
||||||
response = pecan.response
|
response = pecan.response
|
||||||
context = request.environ['context']
|
context = request.environ['context']
|
||||||
|
|
||||||
record = central_api.delete_record(context, zone_id, recordset_id,
|
record = self.central_api.delete_record(context, zone_id, recordset_id,
|
||||||
record_id)
|
record_id)
|
||||||
|
|
||||||
if record['status'] == 'DELETING':
|
if record['status'] == 'DELETING':
|
||||||
response.status_int = 202
|
response.status_int = 202
|
||||||
|
|
|
@ -14,7 +14,6 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
import pecan
|
import pecan
|
||||||
from designate.central import rpcapi as central_rpcapi
|
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate import schema
|
from designate import schema
|
||||||
from designate import utils
|
from designate import utils
|
||||||
|
@ -23,7 +22,6 @@ from designate.api.v2.views import recordsets as recordsets_view
|
||||||
from designate.api.v2.controllers import records
|
from designate.api.v2.controllers import records
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
|
|
||||||
|
|
||||||
class RecordSetsController(rest.RestController):
|
class RecordSetsController(rest.RestController):
|
||||||
|
@ -42,7 +40,8 @@ class RecordSetsController(rest.RestController):
|
||||||
request = pecan.request
|
request = pecan.request
|
||||||
context = request.environ['context']
|
context = request.environ['context']
|
||||||
|
|
||||||
recordset = central_api.get_recordset(context, zone_id, recordset_id)
|
recordset = self.central_api.get_recordset(context, zone_id,
|
||||||
|
recordset_id)
|
||||||
|
|
||||||
return self._view.show(context, request, recordset)
|
return self._view.show(context, request, recordset)
|
||||||
|
|
||||||
|
@ -63,7 +62,7 @@ class RecordSetsController(rest.RestController):
|
||||||
|
|
||||||
criterion['domain_id'] = zone_id
|
criterion['domain_id'] = zone_id
|
||||||
|
|
||||||
recordsets = central_api.find_recordsets(
|
recordsets = self.central_api.find_recordsets(
|
||||||
context, criterion, marker, limit, sort_key, sort_dir)
|
context, criterion, marker, limit, sort_key, sort_dir)
|
||||||
|
|
||||||
return self._view.list(context, request, recordsets, [zone_id])
|
return self._view.list(context, request, recordsets, [zone_id])
|
||||||
|
@ -85,7 +84,7 @@ class RecordSetsController(rest.RestController):
|
||||||
values = self._view.load(context, request, body)
|
values = self._view.load(context, request, body)
|
||||||
|
|
||||||
# Create the recordset
|
# Create the recordset
|
||||||
recordset = central_api.create_recordset(context, zone_id, values)
|
recordset = self.central_api.create_recordset(context, zone_id, values)
|
||||||
|
|
||||||
# Prepare the response headers
|
# Prepare the response headers
|
||||||
response.status_int = 201
|
response.status_int = 201
|
||||||
|
@ -106,7 +105,8 @@ class RecordSetsController(rest.RestController):
|
||||||
response = pecan.response
|
response = pecan.response
|
||||||
|
|
||||||
# Fetch the existing recordset
|
# Fetch the existing recordset
|
||||||
recordset = central_api.get_recordset(context, zone_id, recordset_id)
|
recordset = self.central_api.get_recordset(context, zone_id,
|
||||||
|
recordset_id)
|
||||||
|
|
||||||
# Convert to APIv2 Format
|
# Convert to APIv2 Format
|
||||||
recordset = self._view.show(context, request, recordset)
|
recordset = self._view.show(context, request, recordset)
|
||||||
|
@ -120,7 +120,7 @@ class RecordSetsController(rest.RestController):
|
||||||
self._resource_schema.validate(recordset)
|
self._resource_schema.validate(recordset)
|
||||||
|
|
||||||
values = self._view.load(context, request, body)
|
values = self._view.load(context, request, body)
|
||||||
recordset = central_api.update_recordset(
|
recordset = self.central_api.update_recordset(
|
||||||
context, zone_id, recordset_id, values)
|
context, zone_id, recordset_id, values)
|
||||||
|
|
||||||
response.status_int = 200
|
response.status_int = 200
|
||||||
|
@ -135,7 +135,7 @@ class RecordSetsController(rest.RestController):
|
||||||
response = pecan.response
|
response = pecan.response
|
||||||
context = request.environ['context']
|
context = request.environ['context']
|
||||||
|
|
||||||
central_api.delete_recordset(context, zone_id, recordset_id)
|
self.central_api.delete_recordset(context, zone_id, recordset_id)
|
||||||
|
|
||||||
response.status_int = 204
|
response.status_int = 204
|
||||||
|
|
||||||
|
|
|
@ -30,6 +30,7 @@ import pecan
|
||||||
import pecan.rest
|
import pecan.rest
|
||||||
import pecan.routing
|
import pecan.routing
|
||||||
from designate import exceptions
|
from designate import exceptions
|
||||||
|
from designate import api
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common.gettextutils import _
|
from designate.openstack.common.gettextutils import _
|
||||||
|
|
||||||
|
@ -47,6 +48,10 @@ class RestController(pecan.rest.RestController):
|
||||||
# default sort_keys. The Controllers can override this.
|
# default sort_keys. The Controllers can override this.
|
||||||
SORT_KEYS = ['created_at', 'id']
|
SORT_KEYS = ['created_at', 'id']
|
||||||
|
|
||||||
|
@property
|
||||||
|
def central_api(self):
|
||||||
|
return api.get_central_api()
|
||||||
|
|
||||||
def _get_paging_params(self, params):
|
def _get_paging_params(self, params):
|
||||||
"""
|
"""
|
||||||
Extract any paging parameters
|
Extract any paging parameters
|
||||||
|
|
|
@ -18,10 +18,8 @@ from designate import schema
|
||||||
from designate import utils
|
from designate import utils
|
||||||
from designate.api.v2.controllers import rest
|
from designate.api.v2.controllers import rest
|
||||||
from designate.api.v2.views import tlds as tlds_view
|
from designate.api.v2.views import tlds as tlds_view
|
||||||
from designate.central import rpcapi as central_rpcapi
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
|
|
||||||
|
|
||||||
class TldsController(rest.RestController):
|
class TldsController(rest.RestController):
|
||||||
|
@ -38,7 +36,7 @@ class TldsController(rest.RestController):
|
||||||
request = pecan.request
|
request = pecan.request
|
||||||
context = request.environ['context']
|
context = request.environ['context']
|
||||||
|
|
||||||
tld = central_api.get_tld(context, tld_id)
|
tld = self.central_api.get_tld(context, tld_id)
|
||||||
return self._view.show(context, request, tld)
|
return self._view.show(context, request, tld)
|
||||||
|
|
||||||
@pecan.expose(template='json:', content_type='application/json')
|
@pecan.expose(template='json:', content_type='application/json')
|
||||||
|
@ -55,7 +53,7 @@ class TldsController(rest.RestController):
|
||||||
criterion = dict((k, params[k]) for k in accepted_filters
|
criterion = dict((k, params[k]) for k in accepted_filters
|
||||||
if k in params)
|
if k in params)
|
||||||
|
|
||||||
tlds = central_api.find_tlds(
|
tlds = self.central_api.find_tlds(
|
||||||
context, criterion, marker, limit, sort_key, sort_dir)
|
context, criterion, marker, limit, sort_key, sort_dir)
|
||||||
|
|
||||||
return self._view.list(context, request, tlds)
|
return self._view.list(context, request, tlds)
|
||||||
|
@ -75,7 +73,7 @@ class TldsController(rest.RestController):
|
||||||
values = self._view.load(context, request, body)
|
values = self._view.load(context, request, body)
|
||||||
|
|
||||||
# Create the tld
|
# Create the tld
|
||||||
tld = central_api.create_tld(context, values)
|
tld = self.central_api.create_tld(context, values)
|
||||||
response.status_int = 201
|
response.status_int = 201
|
||||||
|
|
||||||
response.headers['Location'] = self._view._get_resource_href(request,
|
response.headers['Location'] = self._view._get_resource_href(request,
|
||||||
|
@ -94,7 +92,7 @@ class TldsController(rest.RestController):
|
||||||
response = pecan.response
|
response = pecan.response
|
||||||
|
|
||||||
# Fetch the existing tld
|
# Fetch the existing tld
|
||||||
tld = central_api.get_tld(context, tld_id)
|
tld = self.central_api.get_tld(context, tld_id)
|
||||||
|
|
||||||
# Convert to APIv2 Format
|
# Convert to APIv2 Format
|
||||||
tld = self._view.show(context, request, tld)
|
tld = self._view.show(context, request, tld)
|
||||||
|
@ -108,7 +106,7 @@ class TldsController(rest.RestController):
|
||||||
self._resource_schema.validate(tld)
|
self._resource_schema.validate(tld)
|
||||||
|
|
||||||
values = self._view.load(context, request, body)
|
values = self._view.load(context, request, body)
|
||||||
tld = central_api.update_tld(context, tld_id, values)
|
tld = self.central_api.update_tld(context, tld_id, values)
|
||||||
|
|
||||||
response.status_int = 200
|
response.status_int = 200
|
||||||
|
|
||||||
|
@ -122,7 +120,7 @@ class TldsController(rest.RestController):
|
||||||
response = pecan.response
|
response = pecan.response
|
||||||
context = request.environ['context']
|
context = request.environ['context']
|
||||||
|
|
||||||
central_api.delete_tld(context, tld_id)
|
self.central_api.delete_tld(context, tld_id)
|
||||||
|
|
||||||
response.status_int = 204
|
response.status_int = 204
|
||||||
|
|
||||||
|
|
|
@ -24,11 +24,9 @@ from designate.api.v2.controllers import rest
|
||||||
from designate.api.v2.controllers import nameservers
|
from designate.api.v2.controllers import nameservers
|
||||||
from designate.api.v2.controllers import recordsets
|
from designate.api.v2.controllers import recordsets
|
||||||
from designate.api.v2.views import zones as zones_view
|
from designate.api.v2.views import zones as zones_view
|
||||||
from designate.central import rpcapi as central_rpcapi
|
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
|
|
||||||
|
|
||||||
class ZonesController(rest.RestController):
|
class ZonesController(rest.RestController):
|
||||||
|
@ -64,17 +62,17 @@ class ZonesController(rest.RestController):
|
||||||
|
|
||||||
def _get_json(self, request, context, zone_id):
|
def _get_json(self, request, context, zone_id):
|
||||||
""" 'Normal' zone get """
|
""" 'Normal' zone get """
|
||||||
zone = central_api.get_domain(context, zone_id)
|
zone = self.central_api.get_domain(context, zone_id)
|
||||||
|
|
||||||
return self._view.show(context, request, zone)
|
return self._view.show(context, request, zone)
|
||||||
|
|
||||||
def _get_zonefile(self, request, context, zone_id):
|
def _get_zonefile(self, request, context, zone_id):
|
||||||
""" Export zonefile """
|
""" Export zonefile """
|
||||||
servers = central_api.get_domain_servers(context, zone_id)
|
servers = self.central_api.get_domain_servers(context, zone_id)
|
||||||
domain = central_api.get_domain(context, zone_id)
|
domain = self.central_api.get_domain(context, zone_id)
|
||||||
|
|
||||||
criterion = {'domain_id': zone_id}
|
criterion = {'domain_id': zone_id}
|
||||||
recordsets = central_api.find_recordsets(context, criterion)
|
recordsets = self.central_api.find_recordsets(context, criterion)
|
||||||
|
|
||||||
records = []
|
records = []
|
||||||
|
|
||||||
|
@ -84,7 +82,7 @@ class ZonesController(rest.RestController):
|
||||||
'recordset_id': recordset['id']
|
'recordset_id': recordset['id']
|
||||||
}
|
}
|
||||||
|
|
||||||
raw_records = central_api.find_records(context, criterion)
|
raw_records = self.central_api.find_records(context, criterion)
|
||||||
|
|
||||||
for record in raw_records:
|
for record in raw_records:
|
||||||
records.append({
|
records.append({
|
||||||
|
@ -113,7 +111,7 @@ class ZonesController(rest.RestController):
|
||||||
criterion = dict((k, params[k]) for k in accepted_filters
|
criterion = dict((k, params[k]) for k in accepted_filters
|
||||||
if k in params)
|
if k in params)
|
||||||
|
|
||||||
zones = central_api.find_domains(
|
zones = self.central_api.find_domains(
|
||||||
context, criterion, marker, limit, sort_key, sort_dir)
|
context, criterion, marker, limit, sort_key, sort_dir)
|
||||||
|
|
||||||
return self._view.list(context, request, zones)
|
return self._view.list(context, request, zones)
|
||||||
|
@ -143,7 +141,7 @@ class ZonesController(rest.RestController):
|
||||||
values = self._view.load(context, request, body)
|
values = self._view.load(context, request, body)
|
||||||
|
|
||||||
# Create the zone
|
# Create the zone
|
||||||
zone = central_api.create_domain(context, values)
|
zone = self.central_api.create_domain(context, values)
|
||||||
|
|
||||||
# Prepare the response headers
|
# Prepare the response headers
|
||||||
# If the zone has been created asynchronously
|
# If the zone has been created asynchronously
|
||||||
|
@ -168,7 +166,7 @@ class ZonesController(rest.RestController):
|
||||||
try:
|
try:
|
||||||
self._create_records(context, zone['id'], dnspython_zone)
|
self._create_records(context, zone['id'], dnspython_zone)
|
||||||
except exceptions.Base as e:
|
except exceptions.Base as e:
|
||||||
central_api.delete_domain(context, zone['id'])
|
self.central_api.delete_domain(context, zone['id'])
|
||||||
raise e
|
raise e
|
||||||
|
|
||||||
if zone['status'] == 'PENDING':
|
if zone['status'] == 'PENDING':
|
||||||
|
@ -194,7 +192,7 @@ class ZonesController(rest.RestController):
|
||||||
# TODO(kiall): Validate we have a sane UUID for zone_id
|
# TODO(kiall): Validate we have a sane UUID for zone_id
|
||||||
|
|
||||||
# Fetch the existing zone
|
# Fetch the existing zone
|
||||||
zone = central_api.get_domain(context, zone_id)
|
zone = self.central_api.get_domain(context, zone_id)
|
||||||
|
|
||||||
# Convert to APIv2 Format
|
# Convert to APIv2 Format
|
||||||
zone = self._view.show(context, request, zone)
|
zone = self._view.show(context, request, zone)
|
||||||
|
@ -220,7 +218,7 @@ class ZonesController(rest.RestController):
|
||||||
self._resource_schema.validate(zone)
|
self._resource_schema.validate(zone)
|
||||||
|
|
||||||
values = self._view.load(context, request, body)
|
values = self._view.load(context, request, body)
|
||||||
zone = central_api.update_domain(context, zone_id, values)
|
zone = self.central_api.update_domain(context, zone_id, values)
|
||||||
|
|
||||||
if zone['status'] == 'PENDING':
|
if zone['status'] == 'PENDING':
|
||||||
response.status_int = 202
|
response.status_int = 202
|
||||||
|
@ -239,7 +237,7 @@ class ZonesController(rest.RestController):
|
||||||
|
|
||||||
# TODO(kiall): Validate we have a sane UUID for zone_id
|
# TODO(kiall): Validate we have a sane UUID for zone_id
|
||||||
|
|
||||||
zone = central_api.delete_domain(context, zone_id)
|
zone = self.central_api.delete_domain(context, zone_id)
|
||||||
|
|
||||||
if zone['status'] == 'DELETING':
|
if zone['status'] == 'DELETING':
|
||||||
response.status_int = 202
|
response.status_int = 202
|
||||||
|
@ -266,7 +264,7 @@ class ZonesController(rest.RestController):
|
||||||
'email': email,
|
'email': email,
|
||||||
'ttl': str(soa.ttl)
|
'ttl': str(soa.ttl)
|
||||||
}
|
}
|
||||||
return central_api.create_domain(context, values)
|
return self.central_api.create_domain(context, values)
|
||||||
|
|
||||||
def _record2json(self, record_type, rdata):
|
def _record2json(self, record_type, rdata):
|
||||||
if record_type == 'MX':
|
if record_type == 'MX':
|
||||||
|
@ -301,7 +299,7 @@ class ZonesController(rest.RestController):
|
||||||
'type': record_type,
|
'type': record_type,
|
||||||
}
|
}
|
||||||
|
|
||||||
recordset = central_api.create_recordset(
|
recordset = self.central_api.create_recordset(
|
||||||
context, zone_id, values)
|
context, zone_id, values)
|
||||||
|
|
||||||
for rdata in rdataset:
|
for rdata in rdataset:
|
||||||
|
@ -315,7 +313,7 @@ class ZonesController(rest.RestController):
|
||||||
# created
|
# created
|
||||||
values = self._record2json(record_type, rdata)
|
values = self._record2json(record_type, rdata)
|
||||||
|
|
||||||
central_api.create_record(
|
self.central_api.create_record(
|
||||||
context, zone_id, recordset['id'], values)
|
context, zone_id, recordset['id'], values)
|
||||||
|
|
||||||
def _parse_zonefile(self, request):
|
def _parse_zonefile(self, request):
|
||||||
|
|
|
@ -160,7 +160,7 @@ class PowerDNSBackend(base.Backend):
|
||||||
'name': domain['name'].rstrip('.'),
|
'name': domain['name'].rstrip('.'),
|
||||||
'master': servers[0]['name'].rstrip('.'),
|
'master': servers[0]['name'].rstrip('.'),
|
||||||
'type': cfg.CONF['backend:powerdns'].domain_type,
|
'type': cfg.CONF['backend:powerdns'].domain_type,
|
||||||
'account': context.tenant_id
|
'account': context.tenant
|
||||||
})
|
})
|
||||||
domain_m.save(self.session)
|
domain_m.save(self.session)
|
||||||
|
|
||||||
|
|
|
@ -14,13 +14,16 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
from oslo import messaging
|
||||||
|
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common.rpc import proxy as rpc_proxy
|
from designate import rpc
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class CentralAPI(rpc_proxy.RpcProxy):
|
class CentralAPI(object):
|
||||||
"""
|
"""
|
||||||
Client side of the central RPC API.
|
Client side of the central RPC API.
|
||||||
|
|
||||||
|
@ -37,421 +40,361 @@ class CentralAPI(rpc_proxy.RpcProxy):
|
||||||
3.2 - TLD Api changes
|
3.2 - TLD Api changes
|
||||||
3.3 - Add methods for blacklisted domains
|
3.3 - Add methods for blacklisted domains
|
||||||
"""
|
"""
|
||||||
|
RPC_API_VERSION = '3.0'
|
||||||
|
|
||||||
def __init__(self, topic=None):
|
def __init__(self, topic=None):
|
||||||
topic = topic if topic else cfg.CONF.central_topic
|
topic = topic if topic else cfg.CONF.central_topic
|
||||||
super(CentralAPI, self).__init__(topic=topic, default_version='3.0')
|
|
||||||
|
target = messaging.Target(topic=topic, version=self.RPC_API_VERSION)
|
||||||
|
self.client = rpc.get_client(target, version_cap='3.3')
|
||||||
|
|
||||||
# Misc Methods
|
# Misc Methods
|
||||||
def get_absolute_limits(self, context):
|
def get_absolute_limits(self, context):
|
||||||
LOG.info("get_absolute_limits: Calling central's get_absolute_limits.")
|
LOG.info("get_absolute_limits: Calling central's get_absolute_limits.")
|
||||||
msg = self.make_msg('get_absolute_limits')
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
return self.client.call(context, 'get_absolute_limits')
|
||||||
|
|
||||||
# Quota Methods
|
# Quota Methods
|
||||||
def get_quotas(self, context, tenant_id):
|
def get_quotas(self, context, tenant_id):
|
||||||
LOG.info("get_quotas: Calling central's get_quotas.")
|
LOG.info("get_quotas: Calling central's get_quotas.")
|
||||||
msg = self.make_msg('get_quotas', tenant_id=tenant_id)
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
return self.client.call(context, 'get_quotas', tenant_id=tenant_id)
|
||||||
|
|
||||||
def get_quota(self, context, tenant_id, resource):
|
def get_quota(self, context, tenant_id, resource):
|
||||||
LOG.info("get_quota: Calling central's get_quota.")
|
LOG.info("get_quota: Calling central's get_quota.")
|
||||||
msg = self.make_msg('get_quota', tenant_id=tenant_id,
|
|
||||||
resource=resource)
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
return self.client.call(context, 'get_quota', tenant_id=tenant_id,
|
||||||
|
resource=resource)
|
||||||
|
|
||||||
def set_quota(self, context, tenant_id, resource, hard_limit):
|
def set_quota(self, context, tenant_id, resource, hard_limit):
|
||||||
LOG.info("set_quota: Calling central's set_quota.")
|
LOG.info("set_quota: Calling central's set_quota.")
|
||||||
msg = self.make_msg('set_quota', tenant_id=tenant_id,
|
|
||||||
resource=resource, hard_limit=hard_limit)
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
return self.client.call(context, 'set_quota', tenant_id=tenant_id,
|
||||||
|
resource=resource, hard_limit=hard_limit)
|
||||||
|
|
||||||
def reset_quotas(self, context, tenant_id):
|
def reset_quotas(self, context, tenant_id):
|
||||||
LOG.info("reset_quotas: Calling central's reset_quotas.")
|
LOG.info("reset_quotas: Calling central's reset_quotas.")
|
||||||
msg = self.make_msg('reset_quotas', tenant_id=tenant_id)
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
return self.client.call(context, 'reset_quotas', tenant_id=tenant_id)
|
||||||
|
|
||||||
# Server Methods
|
# Server Methods
|
||||||
def create_server(self, context, values):
|
def create_server(self, context, values):
|
||||||
LOG.info("create_server: Calling central's create_server.")
|
LOG.info("create_server: Calling central's create_server.")
|
||||||
msg = self.make_msg('create_server', values=values)
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
return self.client.call(context, 'create_server', values=values)
|
||||||
|
|
||||||
def find_servers(self, context, criterion=None, marker=None, limit=None,
|
def find_servers(self, context, criterion=None, marker=None, limit=None,
|
||||||
sort_key=None, sort_dir=None):
|
sort_key=None, sort_dir=None):
|
||||||
LOG.info("find_servers: Calling central's find_servers.")
|
LOG.info("find_servers: Calling central's find_servers.")
|
||||||
msg = self.make_msg('find_servers', criterion=criterion, marker=marker,
|
|
||||||
limit=limit, sort_key=sort_key, sort_dir=sort_dir)
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
return self.client.call(context, 'find_servers', criterion=criterion,
|
||||||
|
marker=marker, limit=limit, sort_key=sort_key,
|
||||||
|
sort_dir=sort_dir)
|
||||||
|
|
||||||
def get_server(self, context, server_id):
|
def get_server(self, context, server_id):
|
||||||
LOG.info("get_server: Calling central's get_server.")
|
LOG.info("get_server: Calling central's get_server.")
|
||||||
msg = self.make_msg('get_server', server_id=server_id)
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
return self.client.call(context, 'get_server', server_id=server_id)
|
||||||
|
|
||||||
def update_server(self, context, server_id, values):
|
def update_server(self, context, server_id, values):
|
||||||
LOG.info("update_server: Calling central's update_server.")
|
LOG.info("update_server: Calling central's update_server.")
|
||||||
msg = self.make_msg('update_server', server_id=server_id,
|
|
||||||
values=values)
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
return self.client.call(context, 'update_server', server_id=server_id,
|
||||||
|
values=values)
|
||||||
|
|
||||||
def delete_server(self, context, server_id):
|
def delete_server(self, context, server_id):
|
||||||
LOG.info("delete_server: Calling central's delete_server.")
|
LOG.info("delete_server: Calling central's delete_server.")
|
||||||
msg = self.make_msg('delete_server', server_id=server_id)
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
return self.client.call(context, 'delete_server', server_id=server_id)
|
||||||
|
|
||||||
# TSIG Key Methods
|
# TSIG Key Methods
|
||||||
def create_tsigkey(self, context, values):
|
def create_tsigkey(self, context, values):
|
||||||
LOG.info("create_tsigkey: Calling central's create_tsigkey.")
|
LOG.info("create_tsigkey: Calling central's create_tsigkey.")
|
||||||
msg = self.make_msg('create_tsigkey', values=values)
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
return self.client.call(context, 'create_tsigkey', values=values)
|
||||||
|
|
||||||
def find_tsigkeys(self, context, criterion=None, marker=None, limit=None,
|
def find_tsigkeys(self, context, criterion=None, marker=None, limit=None,
|
||||||
sort_key=None, sort_dir=None):
|
sort_key=None, sort_dir=None):
|
||||||
LOG.info("find_tsigkeys: Calling central's find_tsigkeys.")
|
LOG.info("find_tsigkeys: Calling central's find_tsigkeys.")
|
||||||
msg = self.make_msg('find_tsigkeys', criterion=criterion,
|
|
||||||
marker=marker, limit=limit, sort_key=sort_key,
|
|
||||||
sort_dir=sort_dir)
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
return self.client.call(context, 'find_tsigkeys', criterion=criterion,
|
||||||
|
marker=marker, limit=limit, sort_key=sort_key,
|
||||||
|
sort_dir=sort_dir)
|
||||||
|
|
||||||
def get_tsigkey(self, context, tsigkey_id):
|
def get_tsigkey(self, context, tsigkey_id):
|
||||||
LOG.info("get_tsigkey: Calling central's get_tsigkey.")
|
LOG.info("get_tsigkey: Calling central's get_tsigkey.")
|
||||||
msg = self.make_msg('get_tsigkey', tsigkey_id=tsigkey_id)
|
return self.client.call(context, 'get_tsigkey', tsigkey_id=tsigkey_id)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def update_tsigkey(self, context, tsigkey_id, values):
|
def update_tsigkey(self, context, tsigkey_id, values):
|
||||||
LOG.info("update_tsigkey: Calling central's update_tsigkey.")
|
LOG.info("update_tsigkey: Calling central's update_tsigkey.")
|
||||||
msg = self.make_msg('update_tsigkey', tsigkey_id=tsigkey_id,
|
return self.client.call(context, 'update_tsigkey',
|
||||||
values=values)
|
tsigkey_id=tsigkey_id,
|
||||||
|
values=values)
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def delete_tsigkey(self, context, tsigkey_id):
|
def delete_tsigkey(self, context, tsigkey_id):
|
||||||
LOG.info("delete_tsigkey: Calling central's delete_tsigkey.")
|
LOG.info("delete_tsigkey: Calling central's delete_tsigkey.")
|
||||||
msg = self.make_msg('delete_tsigkey', tsigkey_id=tsigkey_id)
|
return self.client.call(context, 'delete_tsigkey',
|
||||||
|
tsigkey_id=tsigkey_id)
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
# Tenant Methods
|
# Tenant Methods
|
||||||
def find_tenants(self, context):
|
def find_tenants(self, context):
|
||||||
LOG.info("find_tenants: Calling central's find_tenants.")
|
LOG.info("find_tenants: Calling central's find_tenants.")
|
||||||
msg = self.make_msg('find_tenants')
|
return self.client.call(context, 'find_tenants')
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def get_tenant(self, context, tenant_id):
|
def get_tenant(self, context, tenant_id):
|
||||||
LOG.info("get_tenant: Calling central's get_tenant.")
|
LOG.info("get_tenant: Calling central's get_tenant.")
|
||||||
msg = self.make_msg('get_tenant', tenant_id=tenant_id)
|
return self.client.call(context, 'get_tenant', tenant_id=tenant_id)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def count_tenants(self, context):
|
def count_tenants(self, context):
|
||||||
LOG.info("count_tenants: Calling central's count_tenants.")
|
LOG.info("count_tenants: Calling central's count_tenants.")
|
||||||
msg = self.make_msg('count_tenants')
|
return self.client.call(context, 'count_tenants')
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
# Domain Methods
|
# Domain Methods
|
||||||
def create_domain(self, context, values):
|
def create_domain(self, context, values):
|
||||||
LOG.info("create_domain: Calling central's create_domain.")
|
LOG.info("create_domain: Calling central's create_domain.")
|
||||||
msg = self.make_msg('create_domain', values=values)
|
return self.client.call(context, 'create_domain', values=values)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def get_domain(self, context, domain_id):
|
def get_domain(self, context, domain_id):
|
||||||
LOG.info("get_domain: Calling central's get_domain.")
|
LOG.info("get_domain: Calling central's get_domain.")
|
||||||
msg = self.make_msg('get_domain', domain_id=domain_id)
|
return self.client.call(context, 'get_domain', domain_id=domain_id)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def get_domain_servers(self, context, domain_id):
|
def get_domain_servers(self, context, domain_id):
|
||||||
LOG.info("get_domain_servers: Calling central's get_domain_servers.")
|
LOG.info("get_domain_servers: Calling central's get_domain_servers.")
|
||||||
msg = self.make_msg('get_domain_servers', domain_id=domain_id)
|
return self.client.call(context, 'get_domain_servers',
|
||||||
|
domain_id=domain_id)
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def find_domains(self, context, criterion=None, marker=None, limit=None,
|
def find_domains(self, context, criterion=None, marker=None, limit=None,
|
||||||
sort_key=None, sort_dir=None):
|
sort_key=None, sort_dir=None):
|
||||||
LOG.info("find_domains: Calling central's find_domains.")
|
LOG.info("find_domains: Calling central's find_domains.")
|
||||||
msg = self.make_msg('find_domains', criterion=criterion, marker=marker,
|
return self.client.call(context, 'find_domains', criterion=criterion,
|
||||||
limit=limit, sort_key=sort_key, sort_dir=sort_dir)
|
marker=marker, limit=limit, sort_key=sort_key,
|
||||||
|
sort_dir=sort_dir)
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def find_domain(self, context, criterion=None):
|
def find_domain(self, context, criterion=None):
|
||||||
LOG.info("find_domain: Calling central's find_domain.")
|
LOG.info("find_domain: Calling central's find_domain.")
|
||||||
msg = self.make_msg('find_domain', criterion=criterion)
|
return self.client.call(context, 'find_domain', criterion=criterion)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def update_domain(self, context, domain_id, values, increment_serial=True):
|
def update_domain(self, context, domain_id, values, increment_serial=True):
|
||||||
LOG.info("update_domain: Calling central's update_domain.")
|
LOG.info("update_domain: Calling central's update_domain.")
|
||||||
msg = self.make_msg('update_domain',
|
return self.client.call(
|
||||||
domain_id=domain_id,
|
context, 'update_domain', domain_id=domain_id,
|
||||||
values=values,
|
values=values, increment_serial=increment_serial)
|
||||||
increment_serial=increment_serial)
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def delete_domain(self, context, domain_id):
|
def delete_domain(self, context, domain_id):
|
||||||
LOG.info("delete_domain: Calling central's delete_domain.")
|
LOG.info("delete_domain: Calling central's delete_domain.")
|
||||||
msg = self.make_msg('delete_domain', domain_id=domain_id)
|
return self.client.call(context, 'delete_domain', domain_id=domain_id)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def count_domains(self, context, criterion=None):
|
def count_domains(self, context, criterion=None):
|
||||||
LOG.info("count_domains: Calling central's count_domains.")
|
LOG.info("count_domains: Calling central's count_domains.")
|
||||||
msg = self.make_msg('count_domains', criterion=criterion)
|
return self.client.call(context, 'count_domains', criterion=criterion)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def touch_domain(self, context, domain_id):
|
def touch_domain(self, context, domain_id):
|
||||||
LOG.info("touch_domain: Calling central's touch_domain.")
|
LOG.info("touch_domain: Calling central's touch_domain.")
|
||||||
msg = self.make_msg('touch_domain', domain_id=domain_id)
|
return self.client.call(context, 'touch_domain', domain_id=domain_id)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
# TLD Methods
|
# TLD Methods
|
||||||
def create_tld(self, context, values):
|
def create_tld(self, context, values):
|
||||||
LOG.info("create_tld: Calling central's create_tld.")
|
LOG.info("create_tld: Calling central's create_tld.")
|
||||||
msg = self.make_msg('create_tld', values=values)
|
cctxt = self.client.prepare(version='3.2')
|
||||||
|
return cctxt.call(context, 'create_tld', values=values)
|
||||||
return self.call(context, msg, version='3.2')
|
|
||||||
|
|
||||||
def find_tlds(self, context, criterion=None, marker=None, limit=None,
|
def find_tlds(self, context, criterion=None, marker=None, limit=None,
|
||||||
sort_key=None, sort_dir=None):
|
sort_key=None, sort_dir=None):
|
||||||
LOG.info("find_tlds: Calling central's find_tlds.")
|
LOG.info("find_tlds: Calling central's find_tlds.")
|
||||||
msg = self.make_msg('find_tlds', criterion=criterion, marker=marker,
|
|
||||||
limit=limit, sort_key=sort_key, sort_dir=sort_dir)
|
|
||||||
|
|
||||||
return self.call(context, msg, version='3.2')
|
cctxt = self.client.prepare(version='3.2')
|
||||||
|
return cctxt.call(context, 'find_tlds', criterion=criterion,
|
||||||
|
marker=marker, limit=limit, sort_key=sort_key,
|
||||||
|
sort_dir=sort_dir)
|
||||||
|
|
||||||
def get_tld(self, context, tld_id):
|
def get_tld(self, context, tld_id):
|
||||||
LOG.info("get_tld: Calling central's get_tld.")
|
LOG.info("get_tld: Calling central's get_tld.")
|
||||||
msg = self.make_msg('get_tld', tld_id=tld_id)
|
|
||||||
|
|
||||||
return self.call(context, msg, version='3.2')
|
cctxt = self.client.prepare(version='3.2')
|
||||||
|
return cctxt.call(context, 'get_tld', tld_id=tld_id)
|
||||||
|
|
||||||
def update_tld(self, context, tld_id, values):
|
def update_tld(self, context, tld_id, values):
|
||||||
LOG.info("update_tld: Calling central's update_tld.")
|
LOG.info("update_tld: Calling central's update_tld.")
|
||||||
msg = self.make_msg('update_tld', tld_id=tld_id, values=values)
|
|
||||||
|
|
||||||
return self.call(context, msg, version='3.2')
|
cctxt = self.client.prepare(version='3.2')
|
||||||
|
return cctxt.call(context, 'update_tld', tld_id=tld_id, values=values)
|
||||||
|
|
||||||
def delete_tld(self, context, tld_id):
|
def delete_tld(self, context, tld_id):
|
||||||
LOG.info("delete_tld: Calling central's delete_tld.")
|
LOG.info("delete_tld: Calling central's delete_tld.")
|
||||||
msg = self.make_msg('delete_tld', tld_id=tld_id)
|
|
||||||
|
|
||||||
return self.call(context, msg, version='3.2')
|
cctxt = self.client.prepare(version='3.2')
|
||||||
|
return cctxt.call(context, 'delete_tld', tld_id=tld_id)
|
||||||
|
|
||||||
# RecordSet Methods
|
# RecordSet Methods
|
||||||
def create_recordset(self, context, domain_id, values):
|
def create_recordset(self, context, domain_id, values):
|
||||||
LOG.info("create_recordset: Calling central's create_recordset.")
|
LOG.info("create_recordset: Calling central's create_recordset.")
|
||||||
msg = self.make_msg('create_recordset',
|
return self.client.call(context, 'create_recordset',
|
||||||
domain_id=domain_id,
|
domain_id=domain_id, values=values)
|
||||||
values=values)
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def get_recordset(self, context, domain_id, recordset_id):
|
def get_recordset(self, context, domain_id, recordset_id):
|
||||||
LOG.info("get_recordset: Calling central's get_recordset.")
|
LOG.info("get_recordset: Calling central's get_recordset.")
|
||||||
msg = self.make_msg('get_recordset',
|
return self.client.call(context, 'get_recordset', domain_id=domain_id,
|
||||||
domain_id=domain_id,
|
recordset_id=recordset_id)
|
||||||
recordset_id=recordset_id)
|
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def find_recordsets(self, context, criterion=None, marker=None, limit=None,
|
def find_recordsets(self, context, criterion=None, marker=None, limit=None,
|
||||||
sort_key=None, sort_dir=None):
|
sort_key=None, sort_dir=None):
|
||||||
LOG.info("find_recordsets: Calling central's find_recordsets.")
|
LOG.info("find_recordsets: Calling central's find_recordsets.")
|
||||||
msg = self.make_msg('find_recordsets', criterion=criterion,
|
return self.client.call(context, 'find_recordsets',
|
||||||
marker=marker, limit=limit, sort_key=sort_key,
|
criterion=criterion, marker=marker,
|
||||||
sort_dir=sort_dir)
|
limit=limit, sort_key=sort_key,
|
||||||
|
sort_dir=sort_dir)
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def find_recordset(self, context, criterion=None):
|
def find_recordset(self, context, criterion=None):
|
||||||
LOG.info("find_recordset: Calling central's find_recordset.")
|
LOG.info("find_recordset: Calling central's find_recordset.")
|
||||||
msg = self.make_msg('find_recordset', criterion=criterion)
|
return self.client.call(context, 'find_recordset', criterion=criterion)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def update_recordset(self, context, domain_id, recordset_id, values,
|
def update_recordset(self, context, domain_id, recordset_id, values,
|
||||||
increment_serial=True):
|
increment_serial=True):
|
||||||
LOG.info("update_recordset: Calling central's update_recordset.")
|
LOG.info("update_recordset: Calling central's update_recordset.")
|
||||||
msg = self.make_msg('update_recordset',
|
return self.client.call(context, 'update_recordset',
|
||||||
domain_id=domain_id,
|
domain_id=domain_id,
|
||||||
recordset_id=recordset_id,
|
recordset_id=recordset_id,
|
||||||
values=values,
|
values=values,
|
||||||
increment_serial=increment_serial)
|
increment_serial=increment_serial)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def delete_recordset(self, context, domain_id, recordset_id,
|
def delete_recordset(self, context, domain_id, recordset_id,
|
||||||
increment_serial=True):
|
increment_serial=True):
|
||||||
LOG.info("delete_recordset: Calling central's delete_recordset.")
|
LOG.info("delete_recordset: Calling central's delete_recordset.")
|
||||||
msg = self.make_msg('delete_recordset',
|
return self.client.call(context, 'delete_recordset',
|
||||||
domain_id=domain_id,
|
domain_id=domain_id,
|
||||||
recordset_id=recordset_id,
|
recordset_id=recordset_id,
|
||||||
increment_serial=increment_serial)
|
increment_serial=increment_serial)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def count_recordsets(self, context, criterion=None):
|
def count_recordsets(self, context, criterion=None):
|
||||||
LOG.info("count_recordsets: Calling central's count_recordsets.")
|
LOG.info("count_recordsets: Calling central's count_recordsets.")
|
||||||
msg = self.make_msg('count_recordsets', criterion=criterion)
|
return self.client.call(context, 'count_recordsets',
|
||||||
|
criterion=criterion)
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
# Record Methods
|
# Record Methods
|
||||||
def create_record(self, context, domain_id, recordset_id, values,
|
def create_record(self, context, domain_id, recordset_id, values,
|
||||||
increment_serial=True):
|
increment_serial=True):
|
||||||
LOG.info("create_record: Calling central's create_record.")
|
LOG.info("create_record: Calling central's create_record.")
|
||||||
msg = self.make_msg('create_record',
|
return self.client.call(context, 'create_record',
|
||||||
domain_id=domain_id,
|
domain_id=domain_id,
|
||||||
recordset_id=recordset_id,
|
recordset_id=recordset_id,
|
||||||
values=values,
|
values=values,
|
||||||
increment_serial=increment_serial)
|
increment_serial=increment_serial)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def get_record(self, context, domain_id, recordset_id, record_id):
|
def get_record(self, context, domain_id, recordset_id, record_id):
|
||||||
LOG.info("get_record: Calling central's get_record.")
|
LOG.info("get_record: Calling central's get_record.")
|
||||||
msg = self.make_msg('get_record',
|
return self.client.call(context, 'get_record',
|
||||||
domain_id=domain_id,
|
domain_id=domain_id,
|
||||||
recordset_id=recordset_id,
|
recordset_id=recordset_id,
|
||||||
record_id=record_id)
|
record_id=record_id)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def find_records(self, context, criterion=None, marker=None, limit=None,
|
def find_records(self, context, criterion=None, marker=None, limit=None,
|
||||||
sort_key=None, sort_dir=None):
|
sort_key=None, sort_dir=None):
|
||||||
LOG.info("find_records: Calling central's find_records.")
|
LOG.info("find_records: Calling central's find_records.")
|
||||||
msg = self.make_msg('find_records', criterion=criterion, marker=marker,
|
return self.client.call(context, 'find_records', criterion=criterion,
|
||||||
limit=limit, sort_key=sort_key, sort_dir=sort_dir)
|
marker=marker, limit=limit, sort_key=sort_key,
|
||||||
|
sort_dir=sort_dir)
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def find_record(self, context, criterion=None):
|
def find_record(self, context, criterion=None):
|
||||||
LOG.info("find_record: Calling central's find_record.")
|
LOG.info("find_record: Calling central's find_record.")
|
||||||
msg = self.make_msg('find_record', criterion=criterion)
|
return self.client.call(context, 'find_record', criterion=criterion)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def update_record(self, context, domain_id, recordset_id, record_id,
|
def update_record(self, context, domain_id, recordset_id, record_id,
|
||||||
values, increment_serial=True):
|
values, increment_serial=True):
|
||||||
LOG.info("update_record: Calling central's update_record.")
|
LOG.info("update_record: Calling central's update_record.")
|
||||||
msg = self.make_msg('update_record',
|
return self.client.call(context, 'update_record',
|
||||||
domain_id=domain_id,
|
domain_id=domain_id,
|
||||||
recordset_id=recordset_id,
|
recordset_id=recordset_id,
|
||||||
record_id=record_id,
|
record_id=record_id,
|
||||||
values=values,
|
values=values,
|
||||||
increment_serial=increment_serial)
|
increment_serial=increment_serial)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def delete_record(self, context, domain_id, recordset_id, record_id,
|
def delete_record(self, context, domain_id, recordset_id, record_id,
|
||||||
increment_serial=True):
|
increment_serial=True):
|
||||||
LOG.info("delete_record: Calling central's delete_record.")
|
LOG.info("delete_record: Calling central's delete_record.")
|
||||||
msg = self.make_msg('delete_record',
|
return self.client.call(context, 'delete_record',
|
||||||
domain_id=domain_id,
|
domain_id=domain_id,
|
||||||
recordset_id=recordset_id,
|
recordset_id=recordset_id,
|
||||||
record_id=record_id,
|
record_id=record_id,
|
||||||
increment_serial=increment_serial)
|
increment_serial=increment_serial)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def count_records(self, context, criterion=None):
|
def count_records(self, context, criterion=None):
|
||||||
LOG.info("count_records: Calling central's count_records.")
|
LOG.info("count_records: Calling central's count_records.")
|
||||||
msg = self.make_msg('count_records', criterion=criterion)
|
return self.client.call(context, 'count_records', criterion=criterion)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
# Sync Methods
|
# Sync Methods
|
||||||
def sync_domains(self, context):
|
def sync_domains(self, context):
|
||||||
LOG.info("sync_domains: Calling central's sync_domains.")
|
LOG.info("sync_domains: Calling central's sync_domains.")
|
||||||
msg = self.make_msg('sync_domains')
|
return self.client.call(context, 'sync_domains')
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def sync_domain(self, context, domain_id):
|
def sync_domain(self, context, domain_id):
|
||||||
LOG.info("sync_domain: Calling central's sync_domains.")
|
LOG.info("sync_domain: Calling central's sync_domains.")
|
||||||
msg = self.make_msg('sync_domain', domain_id=domain_id)
|
return self.client.call(context, 'sync_domain', domain_id=domain_id)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def sync_record(self, context, domain_id, recordset_id, record_id):
|
def sync_record(self, context, domain_id, recordset_id, record_id):
|
||||||
LOG.info("sync_record: Calling central's sync_record.")
|
LOG.info("sync_record: Calling central's sync_record.")
|
||||||
msg = self.make_msg('sync_record',
|
return self.client.call(context, 'sync_record',
|
||||||
domain_id=domain_id,
|
domain_id=domain_id,
|
||||||
recordset_id=recordset_id,
|
recordset_id=recordset_id,
|
||||||
record_id=record_id)
|
record_id=record_id)
|
||||||
|
|
||||||
return self.call(context, msg)
|
|
||||||
|
|
||||||
def list_floatingips(self, context):
|
def list_floatingips(self, context):
|
||||||
msg = self.make_msg('list_floatingips')
|
LOG.info("list_floatingips: Calling central's list_floatingips.")
|
||||||
return self.call(context, msg, version="3.1")
|
|
||||||
|
cctxt = self.client.prepare(version='3.1')
|
||||||
|
return cctxt.call(context, 'list_floatingips')
|
||||||
|
|
||||||
def get_floatingip(self, context, region, floatingip_id):
|
def get_floatingip(self, context, region, floatingip_id):
|
||||||
msg = self.make_msg('get_floatingip', region=region,
|
LOG.info("get_floatingip: Calling central's get_floatingip.")
|
||||||
floatingip_id=floatingip_id)
|
|
||||||
return self.call(context, msg, version="3.1")
|
cctxt = self.client.prepare(version='3.1')
|
||||||
|
return cctxt.call(context, 'get_floatingip', region=region,
|
||||||
|
floatingip_id=floatingip_id)
|
||||||
|
|
||||||
def update_floatingip(self, context, region, floatingip_id, values):
|
def update_floatingip(self, context, region, floatingip_id, values):
|
||||||
msg = self.make_msg('update_floatingip', region=region,
|
LOG.info("update_floatingip: Calling central's update_floatingip.")
|
||||||
floatingip_id=floatingip_id, values=values)
|
|
||||||
return self.call(context, msg)
|
cctxt = self.client.prepare(version='3.1')
|
||||||
|
return cctxt.call(context, 'update_floatingip', region=region,
|
||||||
|
floatingip_id=floatingip_id, values=values)
|
||||||
|
|
||||||
# Blacklisted Domain Methods
|
# Blacklisted Domain Methods
|
||||||
def create_blacklist(self, context, values):
|
def create_blacklist(self, context, values):
|
||||||
LOG.info("create_blacklist: Calling central's create_blacklist")
|
LOG.info("create_blacklist: Calling central's create_blacklist")
|
||||||
msg = self.make_msg('create_blacklist', values=values)
|
|
||||||
|
|
||||||
return self.call(context, msg, version='3.3')
|
cctxt = self.client.prepare(version='3.3')
|
||||||
|
return cctxt.call(context, 'create_blacklist', values=values)
|
||||||
|
|
||||||
def get_blacklist(self, context, blacklist_id):
|
def get_blacklist(self, context, blacklist_id):
|
||||||
LOG.info("get_blacklist: Calling central's get_blacklist.")
|
LOG.info("get_blacklist: Calling central's get_blacklist.")
|
||||||
msg = self.make_msg('get_blacklist', blacklist_id=blacklist_id)
|
|
||||||
|
|
||||||
return self.call(context, msg, version='3.3')
|
cctxt = self.client.prepare(version='3.3')
|
||||||
|
return cctxt.call(context, 'get_blacklist', blacklist_id=blacklist_id)
|
||||||
|
|
||||||
def find_blacklists(self, context, criterion=None, marker=None, limit=None,
|
def find_blacklists(self, context, criterion=None, marker=None, limit=None,
|
||||||
sort_key=None, sort_dir=None):
|
sort_key=None, sort_dir=None):
|
||||||
LOG.info("find_blacklists: Calling central's find_blacklists.")
|
LOG.info("find_blacklists: Calling central's find_blacklists.")
|
||||||
msg = self.make_msg('find_blacklists', criterion=criterion,
|
|
||||||
marker=marker, limit=limit, sort_key=sort_key,
|
|
||||||
sort_dir=sort_dir)
|
|
||||||
|
|
||||||
return self.call(context, msg, version='3.3')
|
cctxt = self.client.prepare(version='3.3')
|
||||||
|
return cctxt.call(context, 'find_blacklists', criterion=criterion,
|
||||||
|
marker=marker, limit=limit, sort_key=sort_key,
|
||||||
|
sort_dir=sort_dir)
|
||||||
|
|
||||||
def find_blacklist(self, context, criterion):
|
def find_blacklist(self, context, criterion):
|
||||||
LOG.info("find_blacklist: Calling central's find_blacklist.")
|
LOG.info("find_blacklist: Calling central's find_blacklist.")
|
||||||
msg = self.make_msg('find_blacklist', criterion=criterion)
|
|
||||||
|
|
||||||
return self.call(context, msg, version='3.3')
|
cctxt = self.client.prepare(version='3.3')
|
||||||
|
return cctxt.call(context, 'find_blacklist', criterion=criterion)
|
||||||
|
|
||||||
def update_blacklist(self, context, blacklist_id, values):
|
def update_blacklist(self, context, blacklist_id, values):
|
||||||
LOG.info("update_blacklist: Calling central's update_blacklist.")
|
LOG.info("update_blacklist: Calling central's update_blacklist.")
|
||||||
msg = self.make_msg('update_blacklist', blacklist_id=blacklist_id,
|
|
||||||
values=values)
|
|
||||||
|
|
||||||
return self.call(context, msg, version='3.3')
|
cctxt = self.client.prepare(version='3.3')
|
||||||
|
return cctxt.call(context, 'update_blacklist',
|
||||||
|
blacklist_id=blacklist_id, values=values)
|
||||||
|
|
||||||
def delete_blacklist(self, context, blacklist_id):
|
def delete_blacklist(self, context, blacklist_id):
|
||||||
LOG.info("delete_blacklist: Calling central's delete blacklist.")
|
LOG.info("delete_blacklist: Calling central's delete blacklist.")
|
||||||
msg = self.make_msg('delete_blacklist', blacklist_id=blacklist_id)
|
|
||||||
|
|
||||||
return self.call(context, msg, version='3.3')
|
cctxt = self.client.prepare(version='3.3')
|
||||||
|
return cctxt.call(context, 'delete_blacklist',
|
||||||
|
blacklist_id=blacklist_id)
|
||||||
|
|
|
@ -16,17 +16,19 @@
|
||||||
# under the License.
|
# under the License.
|
||||||
import re
|
import re
|
||||||
import contextlib
|
import contextlib
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
from oslo import messaging
|
||||||
|
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common.rpc import service as rpc_service
|
|
||||||
from designate.openstack.common.notifier import proxy as notifier
|
|
||||||
from designate import backend
|
from designate import backend
|
||||||
from designate import exceptions
|
from designate import exceptions
|
||||||
|
from designate import network_api
|
||||||
from designate import policy
|
from designate import policy
|
||||||
from designate import quota
|
from designate import quota
|
||||||
|
from designate import service
|
||||||
from designate import utils
|
from designate import utils
|
||||||
from designate.storage import api as storage_api
|
from designate.storage import api as storage_api
|
||||||
from designate import network_api
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
@ -44,20 +46,15 @@ def wrap_backend_call():
|
||||||
raise exceptions.Backend('Unknown backend failure: %r' % exc)
|
raise exceptions.Backend('Unknown backend failure: %r' % exc)
|
||||||
|
|
||||||
|
|
||||||
class Service(rpc_service.Service):
|
class Service(service.Service):
|
||||||
RPC_API_VERSION = '3.3'
|
RPC_API_VERSION = '3.3'
|
||||||
|
|
||||||
|
target = messaging.Target(version=RPC_API_VERSION)
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
backend_driver = cfg.CONF['service:central'].backend_driver
|
backend_driver = cfg.CONF['service:central'].backend_driver
|
||||||
self.backend = backend.get_backend(backend_driver, self)
|
self.backend = backend.get_backend(backend_driver, self)
|
||||||
|
|
||||||
kwargs.update(
|
|
||||||
host=cfg.CONF.host,
|
|
||||||
topic=cfg.CONF.central_topic,
|
|
||||||
)
|
|
||||||
|
|
||||||
self.notifier = notifier.get_notifier('central')
|
|
||||||
|
|
||||||
policy.init_policy()
|
policy.init_policy()
|
||||||
|
|
||||||
super(Service, self).__init__(*args, **kwargs)
|
super(Service, self).__init__(*args, **kwargs)
|
||||||
|
@ -271,7 +268,7 @@ class Service(rpc_service.Service):
|
||||||
# Misc Methods
|
# Misc Methods
|
||||||
def get_absolute_limits(self, context):
|
def get_absolute_limits(self, context):
|
||||||
# NOTE(Kiall): Currently, we only have quota based limits..
|
# NOTE(Kiall): Currently, we only have quota based limits..
|
||||||
return self.quota.get_quotas(context, context.tenant_id)
|
return self.quota.get_quotas(context, context.tenant)
|
||||||
|
|
||||||
# Quota Methods
|
# Quota Methods
|
||||||
def get_quotas(self, context, tenant_id):
|
def get_quotas(self, context, tenant_id):
|
||||||
|
@ -474,7 +471,7 @@ class Service(rpc_service.Service):
|
||||||
|
|
||||||
# Default to creating in the current users tenant
|
# Default to creating in the current users tenant
|
||||||
if 'tenant_id' not in values:
|
if 'tenant_id' not in values:
|
||||||
values['tenant_id'] = context.tenant_id
|
values['tenant_id'] = context.tenant
|
||||||
|
|
||||||
target = {
|
target = {
|
||||||
'tenant_id': values['tenant_id'],
|
'tenant_id': values['tenant_id'],
|
||||||
|
@ -552,14 +549,14 @@ class Service(rpc_service.Service):
|
||||||
|
|
||||||
def find_domains(self, context, criterion=None, marker=None, limit=None,
|
def find_domains(self, context, criterion=None, marker=None, limit=None,
|
||||||
sort_key=None, sort_dir=None):
|
sort_key=None, sort_dir=None):
|
||||||
target = {'tenant_id': context.tenant_id}
|
target = {'tenant_id': context.tenant}
|
||||||
policy.check('find_domains', context, target)
|
policy.check('find_domains', context, target)
|
||||||
|
|
||||||
return self.storage_api.find_domains(context, criterion, marker, limit,
|
return self.storage_api.find_domains(context, criterion, marker, limit,
|
||||||
sort_key, sort_dir)
|
sort_key, sort_dir)
|
||||||
|
|
||||||
def find_domain(self, context, criterion=None):
|
def find_domain(self, context, criterion=None):
|
||||||
target = {'tenant_id': context.tenant_id}
|
target = {'tenant_id': context.tenant}
|
||||||
policy.check('find_domain', context, target)
|
policy.check('find_domain', context, target)
|
||||||
|
|
||||||
return self.storage_api.find_domain(context, criterion)
|
return self.storage_api.find_domain(context, criterion)
|
||||||
|
@ -711,14 +708,14 @@ class Service(rpc_service.Service):
|
||||||
|
|
||||||
def find_recordsets(self, context, criterion=None, marker=None, limit=None,
|
def find_recordsets(self, context, criterion=None, marker=None, limit=None,
|
||||||
sort_key=None, sort_dir=None):
|
sort_key=None, sort_dir=None):
|
||||||
target = {'tenant_id': context.tenant_id}
|
target = {'tenant_id': context.tenant}
|
||||||
policy.check('find_recordsets', context, target)
|
policy.check('find_recordsets', context, target)
|
||||||
|
|
||||||
return self.storage_api.find_recordsets(context, criterion, marker,
|
return self.storage_api.find_recordsets(context, criterion, marker,
|
||||||
limit, sort_key, sort_dir)
|
limit, sort_key, sort_dir)
|
||||||
|
|
||||||
def find_recordset(self, context, criterion=None):
|
def find_recordset(self, context, criterion=None):
|
||||||
target = {'tenant_id': context.tenant_id}
|
target = {'tenant_id': context.tenant}
|
||||||
policy.check('find_recordset', context, target)
|
policy.check('find_recordset', context, target)
|
||||||
|
|
||||||
return self.storage_api.find_recordset(context, criterion)
|
return self.storage_api.find_recordset(context, criterion)
|
||||||
|
@ -870,14 +867,14 @@ class Service(rpc_service.Service):
|
||||||
|
|
||||||
def find_records(self, context, criterion=None, marker=None, limit=None,
|
def find_records(self, context, criterion=None, marker=None, limit=None,
|
||||||
sort_key=None, sort_dir=None):
|
sort_key=None, sort_dir=None):
|
||||||
target = {'tenant_id': context.tenant_id}
|
target = {'tenant_id': context.tenant}
|
||||||
policy.check('find_records', context, target)
|
policy.check('find_records', context, target)
|
||||||
|
|
||||||
return self.storage_api.find_records(context, criterion, marker, limit,
|
return self.storage_api.find_records(context, criterion, marker, limit,
|
||||||
sort_key, sort_dir)
|
sort_key, sort_dir)
|
||||||
|
|
||||||
def find_record(self, context, criterion=None):
|
def find_record(self, context, criterion=None):
|
||||||
target = {'tenant_id': context.tenant_id}
|
target = {'tenant_id': context.tenant}
|
||||||
policy.check('find_record', context, target)
|
policy.check('find_record', context, target)
|
||||||
|
|
||||||
return self.storage_api.find_record(context, criterion)
|
return self.storage_api.find_record(context, criterion)
|
||||||
|
@ -1062,7 +1059,7 @@ class Service(rpc_service.Service):
|
||||||
|
|
||||||
Returns a list of tuples with FloatingIPs and it's Record.
|
Returns a list of tuples with FloatingIPs and it's Record.
|
||||||
"""
|
"""
|
||||||
tenant_id = tenant_id or context.tenant_id
|
tenant_id = tenant_id or context.tenant
|
||||||
|
|
||||||
elevated_context = context.elevated()
|
elevated_context = context.elevated()
|
||||||
elevated_context.all_tenants = True
|
elevated_context.all_tenants = True
|
||||||
|
@ -1174,7 +1171,7 @@ class Service(rpc_service.Service):
|
||||||
def _get_floatingip(self, context, region, floatingip_id, fips):
|
def _get_floatingip(self, context, region, floatingip_id, fips):
|
||||||
if (region, floatingip_id) not in fips:
|
if (region, floatingip_id) not in fips:
|
||||||
msg = 'FloatingIP %s in %s is not associated for tenant "%s"' % \
|
msg = 'FloatingIP %s in %s is not associated for tenant "%s"' % \
|
||||||
(floatingip_id, region, context.tenant_id)
|
(floatingip_id, region, context.tenant)
|
||||||
raise exceptions.NotFound(msg)
|
raise exceptions.NotFound(msg)
|
||||||
return fips[region, floatingip_id]
|
return fips[region, floatingip_id]
|
||||||
|
|
||||||
|
@ -1292,7 +1289,7 @@ class Service(rpc_service.Service):
|
||||||
'managed_resource_id': floatingip_id,
|
'managed_resource_id': floatingip_id,
|
||||||
'managed_resource_region': region,
|
'managed_resource_region': region,
|
||||||
'managed_resource_type': 'ptr:floatingip',
|
'managed_resource_type': 'ptr:floatingip',
|
||||||
'managed_tenant_id': context.tenant_id
|
'managed_tenant_id': context.tenant
|
||||||
}
|
}
|
||||||
|
|
||||||
record = self.create_record(
|
record = self.create_record(
|
||||||
|
@ -1318,7 +1315,7 @@ class Service(rpc_service.Service):
|
||||||
|
|
||||||
criterion = {
|
criterion = {
|
||||||
'managed_resource_id': floatingip_id,
|
'managed_resource_id': floatingip_id,
|
||||||
'managed_tenant_id': context.tenant_id
|
'managed_tenant_id': context.tenant
|
||||||
}
|
}
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
|
|
@ -16,7 +16,7 @@
|
||||||
import sys
|
import sys
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common import service
|
from designate import service
|
||||||
from designate import utils
|
from designate import utils
|
||||||
from designate.agent import service as agent_service
|
from designate.agent import service as agent_service
|
||||||
|
|
||||||
|
@ -27,6 +27,8 @@ CONF.import_opt('workers', 'designate.agent', group='service:agent')
|
||||||
def main():
|
def main():
|
||||||
utils.read_config('designate', sys.argv)
|
utils.read_config('designate', sys.argv)
|
||||||
logging.setup('designate')
|
logging.setup('designate')
|
||||||
launcher = service.launch(agent_service.Service(),
|
|
||||||
CONF['service:agent'].workers)
|
server = agent_service.Service.create(
|
||||||
launcher.wait()
|
binary='designate-agent')
|
||||||
|
service.serve(server, workers=CONF['service:agent'].workers)
|
||||||
|
service.wait()
|
||||||
|
|
|
@ -17,6 +17,7 @@ import sys
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common import service
|
from designate.openstack.common import service
|
||||||
|
from designate import rpc
|
||||||
from designate import utils
|
from designate import utils
|
||||||
from designate.api import service as api_service
|
from designate.api import service as api_service
|
||||||
|
|
||||||
|
@ -27,6 +28,9 @@ CONF.import_opt('workers', 'designate.api', group='service:api')
|
||||||
def main():
|
def main():
|
||||||
utils.read_config('designate', sys.argv)
|
utils.read_config('designate', sys.argv)
|
||||||
logging.setup('designate')
|
logging.setup('designate')
|
||||||
|
|
||||||
|
rpc.init(CONF)
|
||||||
|
|
||||||
launcher = service.launch(api_service.Service(),
|
launcher = service.launch(api_service.Service(),
|
||||||
CONF['service:api'].workers)
|
CONF['service:api'].workers)
|
||||||
launcher.wait()
|
launcher.wait()
|
||||||
|
|
|
@ -16,9 +16,9 @@
|
||||||
import sys
|
import sys
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common import service
|
from designate import service
|
||||||
from designate import utils
|
from designate import utils
|
||||||
from designate.central import service as central_service
|
from designate.central import service as central
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
CONF.import_opt('workers', 'designate.central', group='service:central')
|
CONF.import_opt('workers', 'designate.central', group='service:central')
|
||||||
|
@ -27,6 +27,8 @@ CONF.import_opt('workers', 'designate.central', group='service:central')
|
||||||
def main():
|
def main():
|
||||||
utils.read_config('designate', sys.argv)
|
utils.read_config('designate', sys.argv)
|
||||||
logging.setup('designate')
|
logging.setup('designate')
|
||||||
launcher = service.launch(central_service.Service(),
|
|
||||||
CONF['service:central'].workers)
|
server = central.Service.create(binary='designate-central',
|
||||||
launcher.wait()
|
service_name='central')
|
||||||
|
service.serve(server, workers=CONF['service:central'].workers)
|
||||||
|
service.wait()
|
||||||
|
|
|
@ -16,7 +16,7 @@
|
||||||
import sys
|
import sys
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common import service
|
from designate import service
|
||||||
from designate import utils
|
from designate import utils
|
||||||
from designate.sink import service as sink_service
|
from designate.sink import service as sink_service
|
||||||
|
|
||||||
|
@ -27,6 +27,7 @@ CONF.import_opt('workers', 'designate.sink', group='service:sink')
|
||||||
def main():
|
def main():
|
||||||
utils.read_config('designate', sys.argv)
|
utils.read_config('designate', sys.argv)
|
||||||
logging.setup('designate')
|
logging.setup('designate')
|
||||||
launcher = service.launch(sink_service.Service(),
|
|
||||||
CONF['service:sink'].workers)
|
server = sink_service.Service()
|
||||||
launcher.wait()
|
service.serve(server, workers=CONF['service:sink'].workers)
|
||||||
|
service.wait()
|
||||||
|
|
|
@ -16,6 +16,7 @@
|
||||||
import itertools
|
import itertools
|
||||||
from designate.openstack.common import context
|
from designate.openstack.common import context
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
|
from designate.openstack.common.gettextutils import _
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
@ -25,7 +26,10 @@ class DesignateContext(context.RequestContext):
|
||||||
user_domain=None, project_domain=None, is_admin=False,
|
user_domain=None, project_domain=None, is_admin=False,
|
||||||
read_only=False, show_deleted=False, request_id=None,
|
read_only=False, show_deleted=False, request_id=None,
|
||||||
instance_uuid=None, roles=[], service_catalog=None,
|
instance_uuid=None, roles=[], service_catalog=None,
|
||||||
all_tenants=False):
|
all_tenants=False, **kwargs):
|
||||||
|
if kwargs:
|
||||||
|
LOG.warn(_('Arguments dropped when creating context: %s') %
|
||||||
|
str(kwargs))
|
||||||
super(DesignateContext, self).__init__(
|
super(DesignateContext, self).__init__(
|
||||||
auth_token=auth_token,
|
auth_token=auth_token,
|
||||||
user=user,
|
user=user,
|
||||||
|
@ -46,31 +50,28 @@ class DesignateContext(context.RequestContext):
|
||||||
def deepcopy(self):
|
def deepcopy(self):
|
||||||
d = self.to_dict()
|
d = self.to_dict()
|
||||||
|
|
||||||
# Remove the user and tenant id fields, this map to user and tenant
|
|
||||||
d.pop('user_id')
|
|
||||||
d.pop('tenant_id')
|
|
||||||
d.pop('user_identity')
|
|
||||||
|
|
||||||
return self.from_dict(d)
|
return self.from_dict(d)
|
||||||
|
|
||||||
def to_dict(self):
|
def to_dict(self):
|
||||||
d = super(DesignateContext, self).to_dict()
|
d = super(DesignateContext, self).to_dict()
|
||||||
|
|
||||||
|
user_idt = (
|
||||||
|
self.user_idt_format.format(user=self.user or '-',
|
||||||
|
tenant=self.tenant or '-',
|
||||||
|
domain=self.domain or '-',
|
||||||
|
user_domain=self.user_domain or '-',
|
||||||
|
p_domain=self.project_domain or '-'))
|
||||||
d.update({
|
d.update({
|
||||||
'user_id': self.user_id,
|
|
||||||
'tenant_id': self.tenant_id,
|
|
||||||
'roles': self.roles,
|
'roles': self.roles,
|
||||||
'service_catalog': self.service_catalog,
|
'service_catalog': self.service_catalog,
|
||||||
'all_tenants': self.all_tenants,
|
'all_tenants': self.all_tenants,
|
||||||
|
'user_identity': user_idt
|
||||||
})
|
})
|
||||||
|
|
||||||
return d
|
return d
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def from_dict(cls, values):
|
def from_dict(cls, values):
|
||||||
if 'user_identity' in values:
|
|
||||||
values.pop('user_identity')
|
|
||||||
|
|
||||||
return cls(**values)
|
return cls(**values)
|
||||||
|
|
||||||
def elevated(self, show_deleted=None):
|
def elevated(self, show_deleted=None):
|
||||||
|
@ -86,22 +87,6 @@ class DesignateContext(context.RequestContext):
|
||||||
|
|
||||||
return context
|
return context
|
||||||
|
|
||||||
@property
|
|
||||||
def user_id(self):
|
|
||||||
return self.user
|
|
||||||
|
|
||||||
@user_id.setter
|
|
||||||
def user_id(self, value):
|
|
||||||
self.user = value
|
|
||||||
|
|
||||||
@property
|
|
||||||
def tenant_id(self):
|
|
||||||
return self.tenant
|
|
||||||
|
|
||||||
@tenant_id.setter
|
|
||||||
def tenant_id(self, value):
|
|
||||||
self.tenant = value
|
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def get_admin_context(cls, **kwargs):
|
def get_admin_context(cls, **kwargs):
|
||||||
# TODO(kiall): Remove Me
|
# TODO(kiall): Remove Me
|
||||||
|
|
|
@ -14,7 +14,11 @@
|
||||||
# under the License.
|
# under the License.
|
||||||
import csv
|
import csv
|
||||||
import os
|
import os
|
||||||
|
|
||||||
|
from oslo.config import cfg
|
||||||
|
|
||||||
from designate import exceptions
|
from designate import exceptions
|
||||||
|
from designate import rpc
|
||||||
from designate.central import rpcapi as central_rpcapi
|
from designate.central import rpcapi as central_rpcapi
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.manage import base
|
from designate.manage import base
|
||||||
|
@ -49,8 +53,9 @@ class TLDCommands(base.Commands):
|
||||||
InvalidLine - This occurs if the line contains more than 2 fields.
|
InvalidLine - This occurs if the line contains more than 2 fields.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self,):
|
def __init__(self):
|
||||||
super(TLDCommands, self).__init__()
|
super(TLDCommands, self).__init__()
|
||||||
|
rpc.init(cfg.CONF)
|
||||||
self.central_api = central_rpcapi.CentralAPI()
|
self.central_api = central_rpcapi.CentralAPI()
|
||||||
|
|
||||||
# The dictionary function __str__() does not list the fields in any
|
# The dictionary function __str__() does not list the fields in any
|
||||||
|
|
|
@ -75,7 +75,7 @@ class FakeNetworkAPI(NetworkAPI):
|
||||||
for tenant_id, allocated in ALLOCATIONS.items():
|
for tenant_id, allocated in ALLOCATIONS.items():
|
||||||
data.extend(allocated.items())
|
data.extend(allocated.items())
|
||||||
else:
|
else:
|
||||||
data = ALLOCATIONS.get(context.tenant_id, {}).items()
|
data = ALLOCATIONS.get(context.tenant, {}).items()
|
||||||
|
|
||||||
formatted = [_format_floatingip(k, v) for k, v in data]
|
formatted = [_format_floatingip(k, v) for k, v in data]
|
||||||
LOG.debug('Returning %i FloatingIPs: %s', len(formatted), formatted)
|
LOG.debug('Returning %i FloatingIPs: %s', len(formatted), formatted)
|
||||||
|
|
|
@ -139,7 +139,7 @@ class NeutronNetworkAPI(NetworkAPI):
|
||||||
|
|
||||||
for endpoint, region in endpoints:
|
for endpoint, region in endpoints:
|
||||||
tg.add_thread(_call, endpoint, region,
|
tg.add_thread(_call, endpoint, region,
|
||||||
tenant_id=context.tenant_id)
|
tenant_id=context.tenant)
|
||||||
tg.wait()
|
tg.wait()
|
||||||
|
|
||||||
# NOTE: Sadly tg code doesn't give us a good way to handle failures.
|
# NOTE: Sadly tg code doesn't give us a good way to handle failures.
|
||||||
|
|
|
@ -24,7 +24,6 @@ from designate.plugin import ExtensionPlugin
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
central_api = central_rpcapi.CentralAPI()
|
|
||||||
|
|
||||||
|
|
||||||
def get_ip_data(addr_dict):
|
def get_ip_data(addr_dict):
|
||||||
|
@ -49,6 +48,10 @@ class NotificationHandler(ExtensionPlugin):
|
||||||
__plugin_ns__ = 'designate.notification.handler'
|
__plugin_ns__ = 'designate.notification.handler'
|
||||||
__plugin_type__ = 'handler'
|
__plugin_type__ = 'handler'
|
||||||
|
|
||||||
|
def __init__(self, *args, **kw):
|
||||||
|
super(NotificationHandler, self).__init__(*args, **kw)
|
||||||
|
self.central_api = central_rpcapi.CentralAPI()
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def get_exchange_topics(self):
|
def get_exchange_topics(self):
|
||||||
"""
|
"""
|
||||||
|
@ -71,18 +74,18 @@ class NotificationHandler(ExtensionPlugin):
|
||||||
Return the domain for this context
|
Return the domain for this context
|
||||||
"""
|
"""
|
||||||
context = DesignateContext.get_admin_context(all_tenants=True)
|
context = DesignateContext.get_admin_context(all_tenants=True)
|
||||||
return central_api.get_domain(context, domain_id)
|
return self.central_api.get_domain(context, domain_id)
|
||||||
|
|
||||||
def _find_or_create_recordset(self, context, domain_id, name, type,
|
def _find_or_create_recordset(self, context, domain_id, name, type,
|
||||||
ttl=None):
|
ttl=None):
|
||||||
try:
|
try:
|
||||||
recordset = central_api.find_recordset(context, {
|
recordset = self.central_api.find_recordset(context, {
|
||||||
'domain_id': domain_id,
|
'domain_id': domain_id,
|
||||||
'name': name,
|
'name': name,
|
||||||
'type': type,
|
'type': type,
|
||||||
})
|
})
|
||||||
except exceptions.RecordSetNotFound:
|
except exceptions.RecordSetNotFound:
|
||||||
recordset = central_api.create_recordset(context, domain_id, {
|
recordset = self.central_api.create_recordset(context, domain_id, {
|
||||||
'name': name,
|
'name': name,
|
||||||
'type': type,
|
'type': type,
|
||||||
'ttl': ttl,
|
'ttl': ttl,
|
||||||
|
@ -144,8 +147,9 @@ class BaseAddressHandler(NotificationHandler):
|
||||||
|
|
||||||
LOG.debug('Creating record in %s / %s with values %r',
|
LOG.debug('Creating record in %s / %s with values %r',
|
||||||
domain['id'], recordset['id'], record_values)
|
domain['id'], recordset['id'], record_values)
|
||||||
central_api.create_record(context, domain['id'], recordset['id'],
|
self.central_api.create_record(context, domain['id'],
|
||||||
record_values)
|
recordset['id'],
|
||||||
|
record_values)
|
||||||
|
|
||||||
def _delete(self, managed=True, resource_id=None, resource_type='instance',
|
def _delete(self, managed=True, resource_id=None, resource_type='instance',
|
||||||
criterion={}):
|
criterion={}):
|
||||||
|
@ -167,10 +171,12 @@ class BaseAddressHandler(NotificationHandler):
|
||||||
'managed_resource_type': resource_type
|
'managed_resource_type': resource_type
|
||||||
})
|
})
|
||||||
|
|
||||||
records = central_api.find_records(context, criterion)
|
records = self.central_api.find_records(context, criterion)
|
||||||
|
|
||||||
for record in records:
|
for record in records:
|
||||||
LOG.debug('Deleting record %s' % record['id'])
|
LOG.debug('Deleting record %s' % record['id'])
|
||||||
|
|
||||||
central_api.delete_record(context, cfg.CONF[self.name].domain_id,
|
self.central_api.delete_record(context,
|
||||||
record['recordset_id'], record['id'])
|
cfg.CONF[self.name].domain_id,
|
||||||
|
record['recordset_id'],
|
||||||
|
record['id'])
|
||||||
|
|
|
@ -39,8 +39,7 @@ class NeutronFloatingHandler(BaseAddressHandler):
|
||||||
def get_exchange_topics(self):
|
def get_exchange_topics(self):
|
||||||
exchange = cfg.CONF[self.name].control_exchange
|
exchange = cfg.CONF[self.name].control_exchange
|
||||||
|
|
||||||
topics = [topic + ".info"
|
topics = [topic for topic in cfg.CONF[self.name].notification_topics]
|
||||||
for topic in cfg.CONF[self.name].notification_topics]
|
|
||||||
|
|
||||||
return (exchange, topics)
|
return (exchange, topics)
|
||||||
|
|
||||||
|
|
|
@ -39,8 +39,7 @@ class NovaFixedHandler(BaseAddressHandler):
|
||||||
def get_exchange_topics(self):
|
def get_exchange_topics(self):
|
||||||
exchange = cfg.CONF[self.name].control_exchange
|
exchange = cfg.CONF[self.name].control_exchange
|
||||||
|
|
||||||
topics = [topic + ".info"
|
topics = [topic for topic in cfg.CONF[self.name].notification_topics]
|
||||||
for topic in cfg.CONF[self.name].notification_topics]
|
|
||||||
|
|
||||||
return (exchange, topics)
|
return (exchange, topics)
|
||||||
|
|
||||||
|
|
|
@ -19,7 +19,7 @@
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common.notifier import proxy as notifier
|
from designate import rpc
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
@ -30,13 +30,9 @@ notify_opts = [
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
CONF.register_opts(notify_opts)
|
CONF.register_opts(notify_opts)
|
||||||
CONF.import_opt('default_notification_level',
|
|
||||||
'designate.openstack.common.notifier.api')
|
|
||||||
CONF.import_opt('default_publisher_id',
|
|
||||||
'designate.openstack.common.notifier.api')
|
|
||||||
|
|
||||||
|
|
||||||
def send_api_fault(url, status, exception):
|
def send_api_fault(context, url, status, exception):
|
||||||
"""Send an api.fault notification."""
|
"""Send an api.fault notification."""
|
||||||
|
|
||||||
if not CONF.notify_api_faults:
|
if not CONF.notify_api_faults:
|
||||||
|
@ -44,4 +40,4 @@ def send_api_fault(url, status, exception):
|
||||||
|
|
||||||
payload = {'url': url, 'exception': str(exception), 'status': status}
|
payload = {'url': url, 'exception': str(exception), 'status': status}
|
||||||
|
|
||||||
notifier.get_notifier('api').error(None, 'dns.api.fault', payload)
|
rpc.get_notifier('api').error(context, 'dns.api.fault', payload)
|
||||||
|
|
|
@ -1,173 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import socket
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from designate.openstack.common import context
|
|
||||||
from designate.openstack.common.gettextutils import _
|
|
||||||
from designate.openstack.common import importutils
|
|
||||||
from designate.openstack.common import jsonutils
|
|
||||||
from designate.openstack.common import log as logging
|
|
||||||
from designate.openstack.common import timeutils
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
notifier_opts = [
|
|
||||||
cfg.MultiStrOpt('notification_driver',
|
|
||||||
default=[],
|
|
||||||
help='Driver or drivers to handle sending notifications'),
|
|
||||||
cfg.StrOpt('default_notification_level',
|
|
||||||
default='INFO',
|
|
||||||
help='Default notification level for outgoing notifications'),
|
|
||||||
cfg.StrOpt('default_publisher_id',
|
|
||||||
default=None,
|
|
||||||
help='Default publisher_id for outgoing notifications'),
|
|
||||||
]
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
CONF.register_opts(notifier_opts)
|
|
||||||
|
|
||||||
WARN = 'WARN'
|
|
||||||
INFO = 'INFO'
|
|
||||||
ERROR = 'ERROR'
|
|
||||||
CRITICAL = 'CRITICAL'
|
|
||||||
DEBUG = 'DEBUG'
|
|
||||||
|
|
||||||
log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
|
|
||||||
|
|
||||||
|
|
||||||
class BadPriorityException(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def notify_decorator(name, fn):
|
|
||||||
"""Decorator for notify which is used from utils.monkey_patch().
|
|
||||||
|
|
||||||
:param name: name of the function
|
|
||||||
:param function: - object of the function
|
|
||||||
:returns: function -- decorated function
|
|
||||||
|
|
||||||
"""
|
|
||||||
def wrapped_func(*args, **kwarg):
|
|
||||||
body = {}
|
|
||||||
body['args'] = []
|
|
||||||
body['kwarg'] = {}
|
|
||||||
for arg in args:
|
|
||||||
body['args'].append(arg)
|
|
||||||
for key in kwarg:
|
|
||||||
body['kwarg'][key] = kwarg[key]
|
|
||||||
|
|
||||||
ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
|
|
||||||
notify(ctxt,
|
|
||||||
CONF.default_publisher_id or socket.gethostname(),
|
|
||||||
name,
|
|
||||||
CONF.default_notification_level,
|
|
||||||
body)
|
|
||||||
return fn(*args, **kwarg)
|
|
||||||
return wrapped_func
|
|
||||||
|
|
||||||
|
|
||||||
def publisher_id(service, host=None):
|
|
||||||
if not host:
|
|
||||||
try:
|
|
||||||
host = CONF.host
|
|
||||||
except AttributeError:
|
|
||||||
host = CONF.default_publisher_id or socket.gethostname()
|
|
||||||
return "%s.%s" % (service, host)
|
|
||||||
|
|
||||||
|
|
||||||
def notify(context, publisher_id, event_type, priority, payload):
|
|
||||||
"""Sends a notification using the specified driver
|
|
||||||
|
|
||||||
:param publisher_id: the source worker_type.host of the message
|
|
||||||
:param event_type: the literal type of event (ex. Instance Creation)
|
|
||||||
:param priority: patterned after the enumeration of Python logging
|
|
||||||
levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
|
|
||||||
:param payload: A python dictionary of attributes
|
|
||||||
|
|
||||||
Outgoing message format includes the above parameters, and appends the
|
|
||||||
following:
|
|
||||||
|
|
||||||
message_id
|
|
||||||
a UUID representing the id for this notification
|
|
||||||
|
|
||||||
timestamp
|
|
||||||
the GMT timestamp the notification was sent at
|
|
||||||
|
|
||||||
The composite message will be constructed as a dictionary of the above
|
|
||||||
attributes, which will then be sent via the transport mechanism defined
|
|
||||||
by the driver.
|
|
||||||
|
|
||||||
Message example::
|
|
||||||
|
|
||||||
{'message_id': str(uuid.uuid4()),
|
|
||||||
'publisher_id': 'compute.host1',
|
|
||||||
'timestamp': timeutils.utcnow(),
|
|
||||||
'priority': 'WARN',
|
|
||||||
'event_type': 'compute.create_instance',
|
|
||||||
'payload': {'instance_id': 12, ... }}
|
|
||||||
|
|
||||||
"""
|
|
||||||
if priority not in log_levels:
|
|
||||||
raise BadPriorityException(
|
|
||||||
_('%s not in valid priorities') % priority)
|
|
||||||
|
|
||||||
# Ensure everything is JSON serializable.
|
|
||||||
payload = jsonutils.to_primitive(payload, convert_instances=True)
|
|
||||||
|
|
||||||
msg = dict(message_id=str(uuid.uuid4()),
|
|
||||||
publisher_id=publisher_id,
|
|
||||||
event_type=event_type,
|
|
||||||
priority=priority,
|
|
||||||
payload=payload,
|
|
||||||
timestamp=str(timeutils.utcnow()))
|
|
||||||
|
|
||||||
for driver in _get_drivers():
|
|
||||||
try:
|
|
||||||
driver.notify(context, msg)
|
|
||||||
except Exception as e:
|
|
||||||
LOG.exception(_("Problem '%(e)s' attempting to "
|
|
||||||
"send to notification system. "
|
|
||||||
"Payload=%(payload)s")
|
|
||||||
% dict(e=e, payload=payload))
|
|
||||||
|
|
||||||
|
|
||||||
_drivers = None
|
|
||||||
|
|
||||||
|
|
||||||
def _get_drivers():
|
|
||||||
"""Instantiate, cache, and return drivers based on the CONF."""
|
|
||||||
global _drivers
|
|
||||||
if _drivers is None:
|
|
||||||
_drivers = {}
|
|
||||||
for notification_driver in CONF.notification_driver:
|
|
||||||
try:
|
|
||||||
driver = importutils.import_module(notification_driver)
|
|
||||||
_drivers[notification_driver] = driver
|
|
||||||
except ImportError:
|
|
||||||
LOG.exception(_("Failed to load notifier %s. "
|
|
||||||
"These notifications will not be sent.") %
|
|
||||||
notification_driver)
|
|
||||||
return _drivers.values()
|
|
||||||
|
|
||||||
|
|
||||||
def _reset_drivers():
|
|
||||||
"""Used by unit tests to reset the drivers."""
|
|
||||||
global _drivers
|
|
||||||
_drivers = None
|
|
|
@ -1,37 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from designate.openstack.common import jsonutils
|
|
||||||
from designate.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
|
|
||||||
|
|
||||||
def notify(_context, message):
|
|
||||||
"""Notifies the recipient of the desired event given the model.
|
|
||||||
|
|
||||||
Log notifications using OpenStack's default logging system.
|
|
||||||
"""
|
|
||||||
|
|
||||||
priority = message.get('priority',
|
|
||||||
CONF.default_notification_level)
|
|
||||||
priority = priority.lower()
|
|
||||||
logger = logging.getLogger(
|
|
||||||
'designate.openstack.common.notification.%s' %
|
|
||||||
message['event_type'])
|
|
||||||
getattr(logger, priority)(jsonutils.dumps(message))
|
|
|
@ -1,19 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
|
|
||||||
def notify(_context, message):
|
|
||||||
"""Notifies the recipient of the desired event given the model."""
|
|
||||||
pass
|
|
|
@ -1,77 +0,0 @@
|
||||||
# Copyright 2013 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
A temporary helper which emulates designate.messaging.Notifier.
|
|
||||||
|
|
||||||
This helper method allows us to do the tedious porting to the new Notifier API
|
|
||||||
as a standalone commit so that the commit which switches us to designate.messaging
|
|
||||||
is smaller and easier to review. This file will be removed as part of that
|
|
||||||
commit.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from designate.openstack.common.notifier import api as notifier_api
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
|
|
||||||
|
|
||||||
class Notifier(object):
|
|
||||||
|
|
||||||
def __init__(self, publisher_id):
|
|
||||||
super(Notifier, self).__init__()
|
|
||||||
self.publisher_id = publisher_id
|
|
||||||
|
|
||||||
_marker = object()
|
|
||||||
|
|
||||||
def prepare(self, publisher_id=_marker):
|
|
||||||
ret = self.__class__(self.publisher_id)
|
|
||||||
if publisher_id is not self._marker:
|
|
||||||
ret.publisher_id = publisher_id
|
|
||||||
return ret
|
|
||||||
|
|
||||||
def _notify(self, ctxt, event_type, payload, priority):
|
|
||||||
notifier_api.notify(ctxt,
|
|
||||||
self.publisher_id,
|
|
||||||
event_type,
|
|
||||||
priority,
|
|
||||||
payload)
|
|
||||||
|
|
||||||
def audit(self, ctxt, event_type, payload):
|
|
||||||
# No audit in old notifier.
|
|
||||||
self._notify(ctxt, event_type, payload, 'INFO')
|
|
||||||
|
|
||||||
def debug(self, ctxt, event_type, payload):
|
|
||||||
self._notify(ctxt, event_type, payload, 'DEBUG')
|
|
||||||
|
|
||||||
def info(self, ctxt, event_type, payload):
|
|
||||||
self._notify(ctxt, event_type, payload, 'INFO')
|
|
||||||
|
|
||||||
def warn(self, ctxt, event_type, payload):
|
|
||||||
self._notify(ctxt, event_type, payload, 'WARN')
|
|
||||||
|
|
||||||
warning = warn
|
|
||||||
|
|
||||||
def error(self, ctxt, event_type, payload):
|
|
||||||
self._notify(ctxt, event_type, payload, 'ERROR')
|
|
||||||
|
|
||||||
def critical(self, ctxt, event_type, payload):
|
|
||||||
self._notify(ctxt, event_type, payload, 'CRITICAL')
|
|
||||||
|
|
||||||
|
|
||||||
def get_notifier(service=None, host=None, publisher_id=None):
|
|
||||||
if not publisher_id:
|
|
||||||
publisher_id = "%s.%s" % (service, host or CONF.host)
|
|
||||||
return Notifier(publisher_id)
|
|
|
@ -1,47 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from designate.openstack.common import context as req_context
|
|
||||||
from designate.openstack.common.gettextutils import _
|
|
||||||
from designate.openstack.common import log as logging
|
|
||||||
from designate.openstack.common import rpc
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
notification_topic_opt = cfg.ListOpt(
|
|
||||||
'notification_topics', default=['notifications', ],
|
|
||||||
help='AMQP topic used for OpenStack notifications')
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
CONF.register_opt(notification_topic_opt)
|
|
||||||
|
|
||||||
|
|
||||||
def notify(context, message):
|
|
||||||
"""Sends a notification via RPC."""
|
|
||||||
if not context:
|
|
||||||
context = req_context.get_admin_context()
|
|
||||||
priority = message.get('priority',
|
|
||||||
CONF.default_notification_level)
|
|
||||||
priority = priority.lower()
|
|
||||||
for topic in CONF.notification_topics:
|
|
||||||
topic = '%s.%s' % (topic, priority)
|
|
||||||
try:
|
|
||||||
rpc.notify(context, topic, message)
|
|
||||||
except Exception:
|
|
||||||
LOG.exception(_("Could not send notification to %(topic)s. "
|
|
||||||
"Payload=%(message)s"),
|
|
||||||
{"topic": topic, "message": message})
|
|
|
@ -1,53 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
'''messaging based notification driver, with message envelopes'''
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from designate.openstack.common import context as req_context
|
|
||||||
from designate.openstack.common.gettextutils import _
|
|
||||||
from designate.openstack.common import log as logging
|
|
||||||
from designate.openstack.common import rpc
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
notification_topic_opt = cfg.ListOpt(
|
|
||||||
'topics', default=['notifications', ],
|
|
||||||
help='AMQP topic(s) used for OpenStack notifications')
|
|
||||||
|
|
||||||
opt_group = cfg.OptGroup(name='rpc_notifier2',
|
|
||||||
title='Options for rpc_notifier2')
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
CONF.register_group(opt_group)
|
|
||||||
CONF.register_opt(notification_topic_opt, opt_group)
|
|
||||||
|
|
||||||
|
|
||||||
def notify(context, message):
|
|
||||||
"""Sends a notification via RPC."""
|
|
||||||
if not context:
|
|
||||||
context = req_context.get_admin_context()
|
|
||||||
priority = message.get('priority',
|
|
||||||
CONF.default_notification_level)
|
|
||||||
priority = priority.lower()
|
|
||||||
for topic in CONF.rpc_notifier2.topics:
|
|
||||||
topic = '%s.%s' % (topic, priority)
|
|
||||||
try:
|
|
||||||
rpc.notify(context, topic, message, envelope=True)
|
|
||||||
except Exception:
|
|
||||||
LOG.exception(_("Could not send notification to %(topic)s. "
|
|
||||||
"Payload=%(message)s"),
|
|
||||||
{"topic": topic, "message": message})
|
|
|
@ -1,21 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
NOTIFICATIONS = []
|
|
||||||
|
|
||||||
|
|
||||||
def notify(_context, message):
|
|
||||||
"""Test notifier, stores notifications in memory for unittests."""
|
|
||||||
NOTIFICATIONS.append(message)
|
|
|
@ -1,275 +0,0 @@
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
# Copyright 2011 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
A remote procedure call (rpc) abstraction.
|
|
||||||
|
|
||||||
For some wrappers that add message versioning to rpc, see:
|
|
||||||
rpc.dispatcher
|
|
||||||
rpc.proxy
|
|
||||||
"""
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from designate.openstack.common import importutils
|
|
||||||
from designate.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
rpc_opts = [
|
|
||||||
cfg.StrOpt('rpc_backend',
|
|
||||||
default='%s.impl_kombu' % __package__,
|
|
||||||
help="The messaging module to use, defaults to kombu."),
|
|
||||||
cfg.IntOpt('rpc_thread_pool_size',
|
|
||||||
default=64,
|
|
||||||
help='Size of RPC thread pool'),
|
|
||||||
cfg.IntOpt('rpc_conn_pool_size',
|
|
||||||
default=30,
|
|
||||||
help='Size of RPC connection pool'),
|
|
||||||
cfg.IntOpt('rpc_response_timeout',
|
|
||||||
default=60,
|
|
||||||
help='Seconds to wait for a response from call or multicall'),
|
|
||||||
cfg.IntOpt('rpc_cast_timeout',
|
|
||||||
default=30,
|
|
||||||
help='Seconds to wait before a cast expires (TTL). '
|
|
||||||
'Only supported by impl_zmq.'),
|
|
||||||
cfg.ListOpt('allowed_rpc_exception_modules',
|
|
||||||
default=['nova.exception',
|
|
||||||
'cinder.exception',
|
|
||||||
'exceptions',
|
|
||||||
],
|
|
||||||
help='Modules of exceptions that are permitted to be recreated'
|
|
||||||
' upon receiving exception data from an rpc call.'),
|
|
||||||
cfg.BoolOpt('fake_rabbit',
|
|
||||||
default=False,
|
|
||||||
help='If passed, use a fake RabbitMQ provider'),
|
|
||||||
cfg.StrOpt('control_exchange',
|
|
||||||
default='openstack',
|
|
||||||
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
|
|
||||||
]
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
CONF.register_opts(rpc_opts)
|
|
||||||
|
|
||||||
|
|
||||||
def set_defaults(control_exchange):
|
|
||||||
cfg.set_defaults(rpc_opts,
|
|
||||||
control_exchange=control_exchange)
|
|
||||||
|
|
||||||
|
|
||||||
def create_connection(new=True):
|
|
||||||
"""Create a connection to the message bus used for rpc.
|
|
||||||
|
|
||||||
For some example usage of creating a connection and some consumers on that
|
|
||||||
connection, see nova.service.
|
|
||||||
|
|
||||||
:param new: Whether or not to create a new connection. A new connection
|
|
||||||
will be created by default. If new is False, the
|
|
||||||
implementation is free to return an existing connection from a
|
|
||||||
pool.
|
|
||||||
|
|
||||||
:returns: An instance of openstack.common.rpc.common.Connection
|
|
||||||
"""
|
|
||||||
return _get_impl().create_connection(CONF, new=new)
|
|
||||||
|
|
||||||
|
|
||||||
def call(context, topic, msg, timeout=None):
|
|
||||||
"""Invoke a remote method that returns something.
|
|
||||||
|
|
||||||
:param context: Information that identifies the user that has made this
|
|
||||||
request.
|
|
||||||
:param topic: The topic to send the rpc message to. This correlates to the
|
|
||||||
topic argument of
|
|
||||||
openstack.common.rpc.common.Connection.create_consumer()
|
|
||||||
and only applies when the consumer was created with
|
|
||||||
fanout=False.
|
|
||||||
:param msg: This is a dict in the form { "method" : "method_to_invoke",
|
|
||||||
"args" : dict_of_kwargs }
|
|
||||||
:param timeout: int, number of seconds to use for a response timeout.
|
|
||||||
If set, this overrides the rpc_response_timeout option.
|
|
||||||
|
|
||||||
:returns: A dict from the remote method.
|
|
||||||
|
|
||||||
:raises: openstack.common.rpc.common.Timeout if a complete response
|
|
||||||
is not received before the timeout is reached.
|
|
||||||
"""
|
|
||||||
return _get_impl().call(CONF, context, topic, msg, timeout)
|
|
||||||
|
|
||||||
|
|
||||||
def cast(context, topic, msg):
|
|
||||||
"""Invoke a remote method that does not return anything.
|
|
||||||
|
|
||||||
:param context: Information that identifies the user that has made this
|
|
||||||
request.
|
|
||||||
:param topic: The topic to send the rpc message to. This correlates to the
|
|
||||||
topic argument of
|
|
||||||
openstack.common.rpc.common.Connection.create_consumer()
|
|
||||||
and only applies when the consumer was created with
|
|
||||||
fanout=False.
|
|
||||||
:param msg: This is a dict in the form { "method" : "method_to_invoke",
|
|
||||||
"args" : dict_of_kwargs }
|
|
||||||
|
|
||||||
:returns: None
|
|
||||||
"""
|
|
||||||
return _get_impl().cast(CONF, context, topic, msg)
|
|
||||||
|
|
||||||
|
|
||||||
def fanout_cast(context, topic, msg):
|
|
||||||
"""Broadcast a remote method invocation with no return.
|
|
||||||
|
|
||||||
This method will get invoked on all consumers that were set up with this
|
|
||||||
topic name and fanout=True.
|
|
||||||
|
|
||||||
:param context: Information that identifies the user that has made this
|
|
||||||
request.
|
|
||||||
:param topic: The topic to send the rpc message to. This correlates to the
|
|
||||||
topic argument of
|
|
||||||
openstack.common.rpc.common.Connection.create_consumer()
|
|
||||||
and only applies when the consumer was created with
|
|
||||||
fanout=True.
|
|
||||||
:param msg: This is a dict in the form { "method" : "method_to_invoke",
|
|
||||||
"args" : dict_of_kwargs }
|
|
||||||
|
|
||||||
:returns: None
|
|
||||||
"""
|
|
||||||
return _get_impl().fanout_cast(CONF, context, topic, msg)
|
|
||||||
|
|
||||||
|
|
||||||
def multicall(context, topic, msg, timeout=None):
|
|
||||||
"""Invoke a remote method and get back an iterator.
|
|
||||||
|
|
||||||
In this case, the remote method will be returning multiple values in
|
|
||||||
separate messages, so the return values can be processed as the come in via
|
|
||||||
an iterator.
|
|
||||||
|
|
||||||
:param context: Information that identifies the user that has made this
|
|
||||||
request.
|
|
||||||
:param topic: The topic to send the rpc message to. This correlates to the
|
|
||||||
topic argument of
|
|
||||||
openstack.common.rpc.common.Connection.create_consumer()
|
|
||||||
and only applies when the consumer was created with
|
|
||||||
fanout=False.
|
|
||||||
:param msg: This is a dict in the form { "method" : "method_to_invoke",
|
|
||||||
"args" : dict_of_kwargs }
|
|
||||||
:param timeout: int, number of seconds to use for a response timeout.
|
|
||||||
If set, this overrides the rpc_response_timeout option.
|
|
||||||
|
|
||||||
:returns: An iterator. The iterator will yield a tuple (N, X) where N is
|
|
||||||
an index that starts at 0 and increases by one for each value
|
|
||||||
returned and X is the Nth value that was returned by the remote
|
|
||||||
method.
|
|
||||||
|
|
||||||
:raises: openstack.common.rpc.common.Timeout if a complete response
|
|
||||||
is not received before the timeout is reached.
|
|
||||||
"""
|
|
||||||
return _get_impl().multicall(CONF, context, topic, msg, timeout)
|
|
||||||
|
|
||||||
|
|
||||||
def notify(context, topic, msg, envelope=False):
|
|
||||||
"""Send notification event.
|
|
||||||
|
|
||||||
:param context: Information that identifies the user that has made this
|
|
||||||
request.
|
|
||||||
:param topic: The topic to send the notification to.
|
|
||||||
:param msg: This is a dict of content of event.
|
|
||||||
:param envelope: Set to True to enable message envelope for notifications.
|
|
||||||
|
|
||||||
:returns: None
|
|
||||||
"""
|
|
||||||
return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup():
|
|
||||||
"""Clean up resources in use by implementation.
|
|
||||||
|
|
||||||
Clean up any resources that have been allocated by the RPC implementation.
|
|
||||||
This is typically open connections to a messaging service. This function
|
|
||||||
would get called before an application using this API exits to allow
|
|
||||||
connections to get torn down cleanly.
|
|
||||||
|
|
||||||
:returns: None
|
|
||||||
"""
|
|
||||||
return _get_impl().cleanup()
|
|
||||||
|
|
||||||
|
|
||||||
def cast_to_server(context, server_params, topic, msg):
|
|
||||||
"""Invoke a remote method that does not return anything.
|
|
||||||
|
|
||||||
:param context: Information that identifies the user that has made this
|
|
||||||
request.
|
|
||||||
:param server_params: Connection information
|
|
||||||
:param topic: The topic to send the notification to.
|
|
||||||
:param msg: This is a dict in the form { "method" : "method_to_invoke",
|
|
||||||
"args" : dict_of_kwargs }
|
|
||||||
|
|
||||||
:returns: None
|
|
||||||
"""
|
|
||||||
return _get_impl().cast_to_server(CONF, context, server_params, topic,
|
|
||||||
msg)
|
|
||||||
|
|
||||||
|
|
||||||
def fanout_cast_to_server(context, server_params, topic, msg):
|
|
||||||
"""Broadcast to a remote method invocation with no return.
|
|
||||||
|
|
||||||
:param context: Information that identifies the user that has made this
|
|
||||||
request.
|
|
||||||
:param server_params: Connection information
|
|
||||||
:param topic: The topic to send the notification to.
|
|
||||||
:param msg: This is a dict in the form { "method" : "method_to_invoke",
|
|
||||||
"args" : dict_of_kwargs }
|
|
||||||
|
|
||||||
:returns: None
|
|
||||||
"""
|
|
||||||
return _get_impl().fanout_cast_to_server(CONF, context, server_params,
|
|
||||||
topic, msg)
|
|
||||||
|
|
||||||
|
|
||||||
def queue_get_for(context, topic, host):
|
|
||||||
"""Get a queue name for a given topic + host.
|
|
||||||
|
|
||||||
This function only works if this naming convention is followed on the
|
|
||||||
consumer side, as well. For example, in nova, every instance of the
|
|
||||||
nova-foo service calls create_consumer() for two topics:
|
|
||||||
|
|
||||||
foo
|
|
||||||
foo.<host>
|
|
||||||
|
|
||||||
Messages sent to the 'foo' topic are distributed to exactly one instance of
|
|
||||||
the nova-foo service. The services are chosen in a round-robin fashion.
|
|
||||||
Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
|
|
||||||
<host>.
|
|
||||||
"""
|
|
||||||
return '%s.%s' % (topic, host) if host else topic
|
|
||||||
|
|
||||||
|
|
||||||
_RPCIMPL = None
|
|
||||||
|
|
||||||
|
|
||||||
def _get_impl():
|
|
||||||
"""Delay import of rpc_backend until configuration is loaded."""
|
|
||||||
global _RPCIMPL
|
|
||||||
if _RPCIMPL is None:
|
|
||||||
try:
|
|
||||||
_RPCIMPL = importutils.import_module(CONF.rpc_backend)
|
|
||||||
except ImportError:
|
|
||||||
# For backwards compatibility with older nova config.
|
|
||||||
impl = CONF.rpc_backend.replace('nova.rpc',
|
|
||||||
'nova.openstack.common.rpc')
|
|
||||||
_RPCIMPL = importutils.import_module(impl)
|
|
||||||
return _RPCIMPL
|
|
|
@ -1,637 +0,0 @@
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
# Copyright 2011 - 2012, Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
Shared code between AMQP based openstack.common.rpc implementations.
|
|
||||||
|
|
||||||
The code in this module is shared between the rpc implementations based on
|
|
||||||
AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also
|
|
||||||
uses AMQP, but is deprecated and predates this code.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import collections
|
|
||||||
import inspect
|
|
||||||
import sys
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
from eventlet import greenpool
|
|
||||||
from eventlet import pools
|
|
||||||
from eventlet import queue
|
|
||||||
from eventlet import semaphore
|
|
||||||
from oslo.config import cfg
|
|
||||||
import six
|
|
||||||
|
|
||||||
|
|
||||||
from designate.openstack.common import excutils
|
|
||||||
from designate.openstack.common.gettextutils import _
|
|
||||||
from designate.openstack.common import local
|
|
||||||
from designate.openstack.common import log as logging
|
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
|
|
||||||
|
|
||||||
amqp_opts = [
|
|
||||||
cfg.BoolOpt('amqp_durable_queues',
|
|
||||||
default=False,
|
|
||||||
deprecated_name='rabbit_durable_queues',
|
|
||||||
deprecated_group='DEFAULT',
|
|
||||||
help='Use durable queues in amqp.'),
|
|
||||||
cfg.BoolOpt('amqp_auto_delete',
|
|
||||||
default=False,
|
|
||||||
help='Auto-delete queues in amqp.'),
|
|
||||||
]
|
|
||||||
|
|
||||||
cfg.CONF.register_opts(amqp_opts)
|
|
||||||
|
|
||||||
UNIQUE_ID = '_unique_id'
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class Pool(pools.Pool):
|
|
||||||
"""Class that implements a Pool of Connections."""
|
|
||||||
def __init__(self, conf, connection_cls, *args, **kwargs):
|
|
||||||
self.connection_cls = connection_cls
|
|
||||||
self.conf = conf
|
|
||||||
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
|
|
||||||
kwargs.setdefault("order_as_stack", True)
|
|
||||||
super(Pool, self).__init__(*args, **kwargs)
|
|
||||||
self.reply_proxy = None
|
|
||||||
|
|
||||||
# TODO(comstud): Timeout connections not used in a while
|
|
||||||
def create(self):
|
|
||||||
LOG.debug(_('Pool creating new connection'))
|
|
||||||
return self.connection_cls(self.conf)
|
|
||||||
|
|
||||||
def empty(self):
|
|
||||||
while self.free_items:
|
|
||||||
self.get().close()
|
|
||||||
# Force a new connection pool to be created.
|
|
||||||
# Note that this was added due to failing unit test cases. The issue
|
|
||||||
# is the above "while loop" gets all the cached connections from the
|
|
||||||
# pool and closes them, but never returns them to the pool, a pool
|
|
||||||
# leak. The unit tests hang waiting for an item to be returned to the
|
|
||||||
# pool. The unit tests get here via the tearDown() method. In the run
|
|
||||||
# time code, it gets here via cleanup() and only appears in service.py
|
|
||||||
# just before doing a sys.exit(), so cleanup() only happens once and
|
|
||||||
# the leakage is not a problem.
|
|
||||||
self.connection_cls.pool = None
|
|
||||||
|
|
||||||
|
|
||||||
_pool_create_sem = semaphore.Semaphore()
|
|
||||||
|
|
||||||
|
|
||||||
def get_connection_pool(conf, connection_cls):
    """Return the shared Pool for *connection_cls*, creating it on first use.

    The pool is cached on the connection class itself, and creation is
    serialized by a semaphore so concurrent callers build at most one pool.
    """
    with _pool_create_sem:
        # Make sure only one thread tries to create the connection pool.
        pool = connection_cls.pool
        if not pool:
            pool = Pool(conf, connection_cls)
            connection_cls.pool = pool
        return pool
|
|
||||||
|
|
||||||
|
|
||||||
class ConnectionContext(rpc_common.Connection):
    """The class that is actually returned to the create_connection() caller.

    This is essentially a wrapper around Connection that supports 'with'.
    It can also return a new Connection, or one from a pool.

    The function will also catch when an instance of this class is to be
    deleted. With that we can return Connections to the pool on exceptions
    and so forth without making the caller be responsible for catching them.
    If possible the function makes sure to return a connection to the pool.
    """

    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
        """Create a new connection, or get one from the pool."""
        self.connection = None
        self.conf = conf
        self.connection_pool = connection_pool
        if pooled:
            self.connection = connection_pool.get()
        else:
            # Unpooled connections are built directly; server_params lets
            # the caller target a specific broker.
            self.connection = connection_pool.connection_cls(
                conf,
                server_params=server_params)
        self.pooled = pooled

    def __enter__(self):
        """When with ConnectionContext() is used, return self."""
        return self

    def _done(self):
        """If the connection came from a pool, clean it up and put it back.
        If it did not come from a pool, close it.
        """
        if self.connection:
            if self.pooled:
                # Reset the connection so it's ready for the next caller
                # to grab from the pool
                self.connection.reset()
                self.connection_pool.put(self.connection)
            else:
                try:
                    self.connection.close()
                except Exception:
                    # Best-effort close; the connection is being discarded.
                    pass
            self.connection = None

    def __exit__(self, exc_type, exc_value, tb):
        """End of 'with' statement. We're done here."""
        self._done()

    def __del__(self):
        """Caller is done with this connection. Make sure we cleaned up."""
        self._done()

    def close(self):
        """Caller is done with this connection."""
        self._done()

    def create_consumer(self, topic, proxy, fanout=False):
        # Delegate straight to the wrapped Connection.
        self.connection.create_consumer(topic, proxy, fanout)

    def create_worker(self, topic, proxy, pool_name):
        self.connection.create_worker(topic, proxy, pool_name)

    def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
                           ack_on_error=True):
        self.connection.join_consumer_pool(callback,
                                           pool_name,
                                           topic,
                                           exchange_name,
                                           ack_on_error)

    def consume_in_thread(self):
        self.connection.consume_in_thread()

    def __getattr__(self, key):
        """Proxy all other calls to the Connection instance."""
        if self.connection:
            return getattr(self.connection, key)
        else:
            # Attribute access after close()/_done() is a programming error.
            raise rpc_common.InvalidRPCConnectionReuse()
|
|
||||||
|
|
||||||
|
|
||||||
class ReplyProxy(ConnectionContext):
    """Connection class for RPC replies / callbacks."""
    def __init__(self, conf, connection_pool):
        # msg_id -> waiter mapping for in-flight calls.
        self._call_waiters = {}
        self._num_call_waiters = 0
        # Warning threshold doubles each time it fires, to limit log spam.
        self._num_call_waiters_wrn_threshold = 10
        # One direct reply queue shared by every call from this process.
        self._reply_q = 'reply_' + uuid.uuid4().hex
        super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
        self.declare_direct_consumer(self._reply_q, self._process_data)
        self.consume_in_thread()

    def _process_data(self, message_data):
        # Route an incoming reply to the waiter registered for its msg_id.
        msg_id = message_data.pop('_msg_id', None)
        waiter = self._call_waiters.get(msg_id)
        if not waiter:
            LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
                       ', message : %(data)s'), {'msg_id': msg_id,
                                                 'data': message_data})
            LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
        else:
            waiter.put(message_data)

    def add_call_waiter(self, waiter, msg_id):
        """Register *waiter* to receive replies tagged with *msg_id*."""
        self._num_call_waiters += 1
        if self._num_call_waiters > self._num_call_waiters_wrn_threshold:
            LOG.warn(_('Number of call waiters is greater than warning '
                       'threshold: %d. There could be a MulticallProxyWaiter '
                       'leak.') % self._num_call_waiters_wrn_threshold)
            self._num_call_waiters_wrn_threshold *= 2
        self._call_waiters[msg_id] = waiter

    def del_call_waiter(self, msg_id):
        """Unregister the waiter for *msg_id*."""
        self._num_call_waiters -= 1
        del self._call_waiters[msg_id]

    def get_reply_q(self):
        """Name of the direct queue peers should send replies to."""
        return self._reply_q
|
|
||||||
|
|
||||||
|
|
||||||
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
              failure=None, ending=False, log_failure=True):
    """Sends a reply or an error on the channel signified by msg_id.

    Failure should be a sys.exc_info() tuple.

    """
    with ConnectionContext(conf, connection_pool) as conn:
        if failure:
            # Convert the exc_info tuple into a wire-serializable form.
            failure = rpc_common.serialize_remote_exception(failure,
                                                            log_failure)

        msg = {'result': reply, 'failure': failure}
        if ending:
            # Marks the final message of a multicall reply stream.
            msg['ending'] = True
        _add_unique_id(msg)
        # If a reply_q exists, add the msg_id to the reply and pass the
        # reply_q to direct_send() to use it as the response queue.
        # Otherwise use the msg_id for backward compatibility.
        if reply_q:
            msg['_msg_id'] = msg_id
            conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
        else:
            conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
|
|
||||||
|
|
||||||
|
|
||||||
class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call."""
    def __init__(self, **kwargs):
        # msg_id/reply_q identify where replies for this request go; popped
        # so the base class never sees them.
        self.msg_id = kwargs.pop('msg_id', None)
        self.reply_q = kwargs.pop('reply_q', None)
        self.conf = kwargs.pop('conf')
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        """Return a new RpcContext carrying the same values."""
        values = self.to_dict()
        values['conf'] = self.conf
        values['msg_id'] = self.msg_id
        values['reply_q'] = self.reply_q
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False,
              connection_pool=None, log_failure=True):
        """Send a reply for this request, if one is expected (msg_id set)."""
        if self.msg_id:
            msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
                      reply, failure, ending, log_failure)
            if ending:
                # No further replies may be sent after the 'ending' marker.
                self.msg_id = None
|
|
||||||
|
|
||||||
|
|
||||||
def unpack_context(conf, msg):
    """Strip RPC context keys out of *msg* and build an RpcContext.

    Every '_context_*' entry is removed from the message and becomes a
    context attribute; '_msg_id' and '_reply_q' (reply routing) are popped
    as well.
    """
    prefix = '_context_'
    ctx_values = {}
    # NOTE(vish): Some versions of python don't like unicode keys
    # in kwargs, hence the str() conversion.
    for raw_key in list(msg.keys()):
        key = str(raw_key)
        if key.startswith(prefix):
            ctx_values[key[len(prefix):]] = msg.pop(key)
    ctx_values['msg_id'] = msg.pop('_msg_id', None)
    ctx_values['reply_q'] = msg.pop('_reply_q', None)
    ctx_values['conf'] = conf
    ctx = RpcContext.from_dict(ctx_values)
    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
    return ctx
|
|
||||||
|
|
||||||
|
|
||||||
def pack_context(msg, context):
    """Pack context into msg.

    Values for message keys need to be less than 255 chars, so we pull
    context out into a bunch of separate keys. If we want to support
    more arguments in rabbit messages, we may want to do the same
    for args at some point.
    """
    # Accept either a plain dict or an object exposing to_dict().
    if isinstance(context, dict):
        pairs = context
    else:
        pairs = context.to_dict()
    msg.update(('_context_%s' % key, value)
               for key, value in pairs.items())
|
|
||||||
|
|
||||||
|
|
||||||
class _MsgIdCache(object):
    """This class checks any duplicate messages."""

    # NOTE: This value is considered can be a configuration item, but
    # it is not necessary to change its value in most cases,
    # so let this value as static for now.
    DUP_MSG_CHECK_SIZE = 16

    def __init__(self, **kwargs):
        # Bounded deque: once full, the oldest id is evicted automatically,
        # keeping memory use constant.
        self.prev_msgids = collections.deque(
            [], maxlen=self.DUP_MSG_CHECK_SIZE)

    def check_duplicate_message(self, message_data):
        """AMQP consumers may read same message twice when exceptions occur
        before ack is returned. This method prevents doing it.
        """
        if UNIQUE_ID not in message_data:
            # Message was not stamped with a unique id; nothing to check.
            return
        msg_id = message_data[UNIQUE_ID]
        if msg_id in self.prev_msgids:
            raise rpc_common.DuplicateMessageError(msg_id=msg_id)
        self.prev_msgids.append(msg_id)
|
|
||||||
|
|
||||||
|
|
||||||
def _add_unique_id(msg):
    """Add unique_id for checking duplicate messages."""
    unique_id = uuid.uuid4().hex
    # Stamp the message in place; consumers check this via _MsgIdCache.
    msg[UNIQUE_ID] = unique_id
    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
|
|
||||||
|
|
||||||
|
|
||||||
class _ThreadPoolWithWait(object):
    """Base class for a delayed invocation manager.

    Used by the Connection class to start up green threads
    to handle incoming messages.
    """

    def __init__(self, conf, connection_pool):
        self.conf = conf
        self.connection_pool = connection_pool
        # Green thread pool sized from configuration; subclasses spawn
        # message handlers into it.
        self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)

    def wait(self):
        """Wait for all callback threads to exit."""
        self.pool.waitall()
|
|
||||||
|
|
||||||
|
|
||||||
class CallbackWrapper(_ThreadPoolWithWait):
    """Wraps a straight callback.

    Allows it to be invoked in a green thread.
    """

    def __init__(self, conf, callback, connection_pool,
                 wait_for_consumers=False):
        """Initiates CallbackWrapper object.

        :param conf: cfg.CONF instance
        :param callback: a callable (probably a function)
        :param connection_pool: connection pool as returned by
                                get_connection_pool()
        :param wait_for_consumers: wait for all green threads to
                                   complete and raise the last
                                   caught exception, if any.

        """
        super(CallbackWrapper, self).__init__(
            conf=conf,
            connection_pool=connection_pool,
        )
        self.callback = callback
        self.wait_for_consumers = wait_for_consumers
        # exc_info of the most recent failure inside a callback thread.
        self.exc_info = None

    def _wrap(self, message_data, **kwargs):
        """Wrap the callback invocation to catch exceptions.
        """
        try:
            self.callback(message_data, **kwargs)
        except Exception:
            # Stored rather than raised: this runs in a green thread, so
            # the exception is surfaced (if requested) from __call__.
            self.exc_info = sys.exc_info()

    def __call__(self, message_data):
        self.exc_info = None
        self.pool.spawn_n(self._wrap, message_data)

        if self.wait_for_consumers:
            self.pool.waitall()
            if self.exc_info:
                six.reraise(self.exc_info[1], None, self.exc_info[2])
|
|
||||||
|
|
||||||
|
|
||||||
class ProxyCallback(_ThreadPoolWithWait):
    """Calls methods on a proxy object based on method and args."""

    def __init__(self, conf, proxy, connection_pool):
        super(ProxyCallback, self).__init__(
            conf=conf,
            connection_pool=connection_pool,
        )
        self.proxy = proxy
        # Per-consumer duplicate-delivery detector.
        self.msg_id_cache = _MsgIdCache()

    def __call__(self, message_data):
        """Consumer callback to call a method on a proxy object.

        Parses the message for validity and fires off a thread to call the
        proxy object method.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}

        """
        # It is important to clear the context here, because at this point
        # the previous context is stored in local.store.context
        if hasattr(local.store, 'context'):
            del local.store.context
        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
        self.msg_id_cache.check_duplicate_message(message_data)
        ctxt = unpack_context(self.conf, message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        version = message_data.get('version')
        namespace = message_data.get('namespace')
        if not method:
            LOG.warn(_('no method for message: %s') % message_data)
            ctxt.reply(_('No method for message: %s') % message_data,
                       connection_pool=self.connection_pool)
            return
        # Dispatch in a green thread so the consumer loop is not blocked.
        self.pool.spawn_n(self._process_data, ctxt, version, method,
                          namespace, args)

    def _process_data(self, ctxt, version, method, namespace, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate. If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, namespace,
                                       **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except rpc_common.ClientException as e:
            # Expected (whitelisted) exception: reply without logging a
            # full traceback.
            LOG.debug(_('Expected exception during message handling (%s)') %
                      e._exc_info[1])
            ctxt.reply(None, e._exc_info,
                       connection_pool=self.connection_pool,
                       log_failure=False)
        except Exception:
            # sys.exc_info() is deleted by LOG.exception().
            exc_info = sys.exc_info()
            LOG.error(_('Exception during message handling'),
                      exc_info=exc_info)
            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
|
|
||||||
|
|
||||||
|
|
||||||
class MulticallProxyWaiter(object):
    """Iterator over the stream of replies for a single multicall().

    Instances register with the shared ReplyProxy and yield each reply as
    it arrives, finishing when the remote side sends the 'ending' marker
    or the timeout expires.
    """

    def __init__(self, conf, msg_id, timeout, connection_pool):
        self._msg_id = msg_id
        self._timeout = timeout or conf.rpc_response_timeout
        self._reply_proxy = connection_pool.reply_proxy
        self._done = False
        self._got_ending = False
        self._conf = conf
        self._dataqueue = queue.LightQueue()
        # Add this caller to the reply proxy's call_waiters
        self._reply_proxy.add_call_waiter(self, self._msg_id)
        self.msg_id_cache = _MsgIdCache()

    def put(self, data):
        """Called by the ReplyProxy when a reply for our msg_id arrives."""
        self._dataqueue.put(data)

    def done(self):
        """Deregister from the reply proxy; safe to call more than once."""
        if self._done:
            return
        self._done = True
        # Remove this caller from reply proxy's call_waiters
        self._reply_proxy.del_call_waiter(self._msg_id)

    def _process_data(self, data):
        """Decode one reply: a result, a remote failure, or the end marker."""
        result = None
        self.msg_id_cache.check_duplicate_message(data)
        if data['failure']:
            failure = data['failure']
            result = rpc_common.deserialize_remote_exception(self._conf,
                                                             failure)
        elif data.get('ending', False):
            self._got_ending = True
        else:
            result = data['result']
        return result

    def __iter__(self):
        """Return a result until we get a reply with an 'ending' flag."""
        # NOTE: use plain 'return' to finish, never 'raise StopIteration':
        # under PEP 479 (Python 3.7+) raising StopIteration inside a
        # generator is converted to RuntimeError.
        if self._done:
            return
        while True:
            try:
                data = self._dataqueue.get(timeout=self._timeout)
                result = self._process_data(data)
            except queue.Empty:
                self.done()
                raise rpc_common.Timeout()
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.done()
            if self._got_ending:
                self.done()
                return
            if isinstance(result, Exception):
                self.done()
                raise result
            yield result
|
|
||||||
|
|
||||||
|
|
||||||
def create_connection(conf, new, connection_pool):
    """Create a connection.

    A 'new' connection bypasses the pool; otherwise one is borrowed from
    *connection_pool*.
    """
    pooled = not new
    return ConnectionContext(conf, connection_pool, pooled=pooled)
|
|
||||||
|
|
||||||
|
|
||||||
# Guards lazy creation of the pool-wide ReplyProxy in multicall().
_reply_proxy_create_sem = semaphore.Semaphore()
|
|
||||||
|
|
||||||
|
|
||||||
def multicall(conf, context, topic, msg, timeout, connection_pool):
    """Make a call that returns multiple times."""
    LOG.debug(_('Making synchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    LOG.debug(_('MSG_ID is %s') % (msg_id))
    _add_unique_id(msg)
    pack_context(msg, context)

    # Lazily create the process-wide ReplyProxy on the first call; the
    # semaphore ensures only one greenthread creates it.
    with _reply_proxy_create_sem:
        if not connection_pool.reply_proxy:
            connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
    # Tell the remote side which queue replies should be sent to.
    msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
    # Register the waiter before sending so no reply can be missed.
    wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    return wait_msg
|
|
||||||
|
|
||||||
|
|
||||||
def call(conf, context, topic, msg, timeout, connection_pool):
    """Sends a message on a topic and wait for a response."""
    # NOTE(vish): return the last result from the multicall
    replies = list(multicall(conf, context, topic, msg, timeout,
                             connection_pool))
    if not replies:
        return None
    return replies[-1]
|
|
||||||
|
|
||||||
|
|
||||||
def cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a topic without waiting for a response."""
    LOG.debug(_('Making asynchronous cast on %s...'), topic)
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        payload = rpc_common.serialize_msg(msg)
        conn.topic_send(topic, payload)
|
|
||||||
|
|
||||||
|
|
||||||
def fanout_cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a fanout exchange without waiting for a response."""
    LOG.debug(_('Making asynchronous fanout cast...'))
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        payload = rpc_common.serialize_msg(msg)
        conn.fanout_send(topic, payload)
|
|
||||||
|
|
||||||
|
|
||||||
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
    """Sends a message on a topic to a specific server."""
    _add_unique_id(msg)
    pack_context(msg, context)
    # Unpooled connection so the message goes to the specified broker.
    target = ConnectionContext(conf, connection_pool, pooled=False,
                               server_params=server_params)
    with target as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg))
|
|
||||||
|
|
||||||
|
|
||||||
def fanout_cast_to_server(conf, context, server_params, topic, msg,
                          connection_pool):
    """Sends a message on a fanout exchange to a specific server."""
    _add_unique_id(msg)
    pack_context(msg, context)
    # Unpooled connection so the message goes to the specified broker.
    target = ConnectionContext(conf, connection_pool, pooled=False,
                               server_params=server_params)
    with target as conn:
        conn.fanout_send(topic, rpc_common.serialize_msg(msg))
|
|
||||||
|
|
||||||
|
|
||||||
def notify(conf, context, topic, msg, connection_pool, envelope):
    """Sends a notification event on a topic."""
    LOG.debug(_('Sending %(event_type)s on %(topic)s'),
              dict(event_type=msg.get('event_type'),
                   topic=topic))
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        # Only wrap in the oslo envelope when the caller asked for it.
        payload = rpc_common.serialize_msg(msg) if envelope else msg
        conn.notify_send(topic, payload)
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup(connection_pool):
    """Drain and close every pooled connection, if a pool exists."""
    if not connection_pool:
        return
    connection_pool.empty()
|
|
||||||
|
|
||||||
|
|
||||||
def get_control_exchange(conf):
    """Return the configured AMQP control exchange name.

    :param conf: configuration object exposing ``control_exchange``
    """
    return getattr(conf, 'control_exchange')
|
|
|
@ -1,508 +0,0 @@
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
# Copyright 2011 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import copy
|
|
||||||
import sys
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
import six
|
|
||||||
|
|
||||||
from designate.openstack.common.gettextutils import _
|
|
||||||
from designate.openstack.common import importutils
|
|
||||||
from designate.openstack.common import jsonutils
|
|
||||||
from designate.openstack.common import local
|
|
||||||
from designate.openstack.common import log as logging
|
|
||||||
from designate.openstack.common import versionutils
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

# Version of the on-the-wire message envelope; described in detail by the
# string literal that follows.
_RPC_ENVELOPE_VERSION = '2.0'
|
|
||||||
'''RPC Envelope Version.
|
|
||||||
|
|
||||||
This version number applies to the top level structure of messages sent out.
|
|
||||||
It does *not* apply to the message payload, which must be versioned
|
|
||||||
independently. For example, when using rpc APIs, a version number is applied
|
|
||||||
for changes to the API being exposed over rpc. This version number is handled
|
|
||||||
in the rpc proxy and dispatcher modules.
|
|
||||||
|
|
||||||
This version number applies to the message envelope that is used in the
|
|
||||||
serialization done inside the rpc layer. See serialize_msg() and
|
|
||||||
deserialize_msg().
|
|
||||||
|
|
||||||
The current message format (version 2.0) is very simple. It is::
|
|
||||||
|
|
||||||
{
|
|
||||||
'oslo.version': <RPC Envelope Version as a String>,
|
|
||||||
'oslo.message': <Application Message Payload, JSON encoded>
|
|
||||||
}
|
|
||||||
|
|
||||||
Message format version '1.0' is just considered to be the messages we sent
|
|
||||||
without a message envelope.
|
|
||||||
|
|
||||||
So, the current message envelope just includes the envelope version. It may
|
|
||||||
eventually contain additional information, such as a signature for the message
|
|
||||||
payload.
|
|
||||||
|
|
||||||
We will JSON encode the application message payload. The message envelope,
|
|
||||||
which includes the JSON encoded application message body, will be passed down
|
|
||||||
to the messaging libraries as a dict.
|
|
||||||
'''
|
|
||||||
|
|
||||||
# Keys of the message-envelope dict (see the envelope description above).
_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'

# NOTE(review): presumably appended to remote exception type names during
# serialization; confirm in serialize/deserialize_remote_exception.
_REMOTE_POSTFIX = '_Remote'
|
|
||||||
|
|
||||||
|
|
||||||
class RPCException(Exception):
    """Base class for RPC-layer exceptions.

    Subclasses define *msg_fmt*, a format string interpolated with the
    keyword arguments given at construction time.
    """
    msg_fmt = _("An unknown RPC related exception occurred.")

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        if not message:
            try:
                message = self.msg_fmt % kwargs

            except Exception:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
                for name, value in six.iteritems(kwargs):
                    LOG.error("%s: %s" % (name, value))
                # at least get the core message out if something happened
                message = self.msg_fmt

        super(RPCException, self).__init__(message)
|
|
||||||
|
|
||||||
|
|
||||||
class RemoteError(RPCException):
    """Signifies that a remote class has raised an exception.

    Contains a string representation of the type of the original exception,
    the value of the original exception, and the traceback. These are
    sent to the parent as a joined string so printing the exception
    contains all of the relevant info.

    """
    msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")

    def __init__(self, exc_type=None, value=None, traceback=None):
        # All three are string representations produced on the remote side.
        self.exc_type = exc_type
        self.value = value
        self.traceback = traceback
        super(RemoteError, self).__init__(exc_type=exc_type,
                                          value=value,
                                          traceback=traceback)
|
|
||||||
|
|
||||||
|
|
||||||
class Timeout(RPCException):
    """Signifies that a timeout has occurred.

    This exception is raised if the rpc_response_timeout is reached while
    waiting for a response from the remote side.
    """
    msg_fmt = _('Timeout while waiting on RPC response - '
                'topic: "%(topic)s", RPC method: "%(method)s" '
                'info: "%(info)s"')

    def __init__(self, info=None, topic=None, method=None):
        """Initiates Timeout object.

        :param info: Extra info to convey to the user
        :param topic: The topic that the rpc call was sent to
        :param method: The name of the rpc method being called
        """
        self.info = info
        self.topic = topic
        self.method = method
        super(Timeout, self).__init__(
            None,
            info=info or _('<unknown>'),
            topic=topic or _('<unknown>'),
            method=method or _('<unknown>'))
|
|
||||||
|
|
||||||
|
|
||||||
class DuplicateMessageError(RPCException):
    """Raised when a message's unique id has already been seen."""
    msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
|
|
||||||
|
|
||||||
|
|
||||||
class InvalidRPCConnectionReuse(RPCException):
    """Raised when a connection is used after it has been released."""
    msg_fmt = _("Invalid reuse of an RPC connection.")
|
|
||||||
|
|
||||||
|
|
||||||
class UnsupportedRpcVersion(RPCException):
    """Raised when a message's API version is not handled by the endpoint."""
    msg_fmt = _("Specified RPC version, %(version)s, not supported by "
                "this endpoint.")
|
|
||||||
|
|
||||||
|
|
||||||
class UnsupportedRpcEnvelopeVersion(RPCException):
    """Raised when a message's envelope version cannot be deserialized."""
    msg_fmt = _("Specified RPC envelope version, %(version)s, "
                "not supported by this endpoint.")
|
|
||||||
|
|
||||||
|
|
||||||
class RpcVersionCapError(RPCException):
    """Raised when a configured version cap is lower than required."""
    msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
|
|
||||||
|
|
||||||
|
|
||||||
class Connection(object):
|
|
||||||
"""A connection, returned by rpc.create_connection().
|
|
||||||
|
|
||||||
This class represents a connection to the message bus used for rpc.
|
|
||||||
An instance of this class should never be created by users of the rpc API.
|
|
||||||
Use rpc.create_connection() instead.
|
|
||||||
"""
|
|
||||||
def close(self):
|
|
||||||
"""Close the connection.
|
|
||||||
|
|
||||||
This method must be called when the connection will no longer be used.
|
|
||||||
It will ensure that any resources associated with the connection, such
|
|
||||||
as a network connection, and cleaned up.
|
|
||||||
"""
|
|
||||||
raise NotImplementedError()
|
|
||||||
|
|
||||||
def create_consumer(self, topic, proxy, fanout=False):
|
|
||||||
"""Create a consumer on this connection.
|
|
||||||
|
|
||||||
A consumer is associated with a message queue on the backend message
|
|
||||||
bus. The consumer will read messages from the queue, unpack them, and
|
|
||||||
dispatch them to the proxy object. The contents of the message pulled
|
|
||||||
off of the queue will determine which method gets called on the proxy
|
|
||||||
object.
|
|
||||||
|
|
||||||
:param topic: This is a name associated with what to consume from.
|
|
||||||
Multiple instances of a service may consume from the same
|
|
||||||
topic. For example, all instances of nova-compute consume
|
|
||||||
from a queue called "compute". In that case, the
|
|
||||||
messages will get distributed amongst the consumers in a
|
|
||||||
round-robin fashion if fanout=False. If fanout=True,
|
|
||||||
every consumer associated with this topic will get a
|
|
||||||
copy of every message.
|
|
||||||
:param proxy: The object that will handle all incoming messages.
|
|
||||||
:param fanout: Whether or not this is a fanout topic. See the
|
|
||||||
documentation for the topic parameter for some
|
|
||||||
additional comments on this.
|
|
||||||
"""
|
|
||||||
raise NotImplementedError()
|
|
||||||
|
|
||||||
def create_worker(self, topic, proxy, pool_name):
|
|
||||||
"""Create a worker on this connection.
|
|
||||||
|
|
||||||
A worker is like a regular consumer of messages directed to a
|
|
||||||
topic, except that it is part of a set of such consumers (the
|
|
||||||
"pool") which may run in parallel. Every pool of workers will
|
|
||||||
receive a given message, but only one worker in the pool will
|
|
||||||
be asked to process it. Load is distributed across the members
|
|
||||||
of the pool in round-robin fashion.
|
|
||||||
|
|
||||||
:param topic: This is a name associated with what to consume from.
|
|
||||||
Multiple instances of a service may consume from the same
|
|
||||||
topic.
|
|
||||||
:param proxy: The object that will handle all incoming messages.
|
|
||||||
:param pool_name: String containing the name of the pool of workers
|
|
||||||
"""
|
|
||||||
raise NotImplementedError()
|
|
||||||
|
|
||||||
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
    """Register as a member of a group of consumers.

    Consumes the given topic from the specified exchange. Exactly one
    member of a given pool receives each message; if more than one
    pool was created, a message is delivered to every pool.

    :param callback: Callable invoked for each message.
    :type callback: callable accepting one argument
    :param pool_name: Name of the consumer pool to join.
    :type pool_name: str
    :param topic: Routing topic for desired messages.
    :type topic: str
    :param exchange_name: Message exchange where the client should
        attach; defaults to the configured exchange.
    :type exchange_name: str
    """
    raise NotImplementedError()
|
|
||||||
|
|
||||||
def consume_in_thread(self):
    """Spawn a thread to handle incoming messages.

    The spawned thread is responsible for handling all incoming
    messages for consumers that were set up on this connection.
    Implementations are expected to dispatch in a non-blocking
    manner -- for example one thread pulling messages for all
    consumers while a thread pool invokes the proxy objects.
    """
    raise NotImplementedError()
|
|
||||||
|
|
||||||
|
|
||||||
def _safe_log(log_func, msg, msg_data):
    """Strip credentials out of msg_data before handing it to log_func."""
    sensitive_keys = ['_context_auth_token', 'auth_token', 'new_pass']

    def _scrub(mapping):
        """Recursively mask password-like values inside a dictionary."""
        for key in mapping:
            lowered = key.lower()
            if 'password' in lowered or lowered in sensitive_keys:
                mapping[key] = '<SANITIZED>'
            elif isinstance(mapping[key], list):
                # Lists may hold nested dicts that also need scrubbing.
                for item in mapping[key]:
                    if isinstance(item, dict):
                        _scrub(item)
            elif isinstance(mapping[key], dict):
                _scrub(mapping[key])
        return mapping

    # Work on a deep copy so the caller's data is left untouched.
    return log_func(msg, _scrub(copy.deepcopy(msg_data)))
|
|
||||||
|
|
||||||
|
|
||||||
def serialize_remote_exception(failure_info, log_failure=True):
    """Convert a sys.exc_info() tuple into JSON for rpc transport.

    :param failure_info: sys.exc_info() tuple describing the failure.
    :param log_failure: When True, log the exception before serializing.
    :returns: JSON string describing the exception.
    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]

    if log_failure:
        LOG.error(_("Returning exception %s to caller"),
                  six.text_type(failure))
        LOG.error(tb)

    kwargs = getattr(failure, 'kwargs', {})

    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Lets turn it back into the original exception type.
    cls_name = str(failure.__class__.__name__)
    mod_name = str(failure.__class__.__module__)
    suffix_len = len(_REMOTE_POSTFIX)
    if (cls_name.endswith(_REMOTE_POSTFIX) and
            mod_name.endswith(_REMOTE_POSTFIX)):
        cls_name = cls_name[:-suffix_len]
        mod_name = mod_name[:-suffix_len]

    return jsonutils.dumps({
        'class': cls_name,
        'module': mod_name,
        'message': six.text_type(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs,
    })
|
|
||||||
|
|
||||||
|
|
||||||
def deserialize_remote_exception(conf, data):
    """Rebuild an exception object from its rpc wire representation.

    :param conf: Config holding the allowed_rpc_exception_modules
        whitelist of importable exception modules.
    :param data: JSON produced by serialize_remote_exception().
    :returns: An exception instance, or a RemoteError fallback when the
        original type cannot (or must not) be reconstructed.
    """
    failure = jsonutils.loads(str(data))

    trace = failure.get('tb', [])
    message = failure.get('message', "") + "\n" + "\n".join(trace)
    name = failure.get('class')
    module = failure.get('module')

    # NOTE(ameade): We DO NOT want to allow just any module to be imported,
    # in order to prevent arbitrary code execution.
    if module not in conf.allowed_rpc_exception_modules:
        return RemoteError(name, failure.get('message'), trace)

    try:
        mod = importutils.import_module(module)
        klass = getattr(mod, name)
        if not issubclass(klass, Exception):
            raise TypeError("Can only deserialize Exceptions")

        failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
    except (AttributeError, TypeError, ImportError):
        return RemoteError(name, failure.get('message'), trace)

    ex_type = type(failure)

    def str_override(self):
        # Present the remote message + traceback instead of the local one.
        return message

    new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
                       {'__str__': str_override,
                        '__unicode__': str_override})
    new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
    try:
        # NOTE(ameade): Dynamically create a new exception type and swap
        # it in as the new type for the exception. This only works on
        # user defined Exceptions and not core python exceptions. This is
        # important because we cannot necessarily change an exception
        # message so we must override the __str__ method.
        failure.__class__ = new_ex_type
    except TypeError:
        # NOTE(ameade): If a core exception then just add the traceback
        # to the first exception argument.
        failure.args = (message,) + failure.args[1:]
    return failure
|
|
||||||
|
|
||||||
|
|
||||||
class CommonRpcContext(object):
    """Dictionary-backed request context shared by the rpc drivers."""

    def __init__(self, **kwargs):
        # All context data lives in this single dict; attribute access
        # is routed through __getattr__ below.
        self.values = kwargs

    def __getattr__(self, key):
        # Only invoked for attributes not found normally: surface
        # context values as attributes, translating misses into the
        # AttributeError normal attribute access would raise.
        if key in self.values:
            return self.values[key]
        raise AttributeError(key)

    def to_dict(self):
        """Return an independent deep copy of the context data."""
        return copy.deepcopy(self.values)

    @classmethod
    def from_dict(cls, values):
        """Build a context from a plain dict of values."""
        return cls(**values)

    def deepcopy(self):
        """Return a fully independent copy of this context."""
        return self.from_dict(self.to_dict())

    def update_store(self):
        # Store this context on the driver's local store for later
        # retrieval by other code.
        local.store.context = self

    def elevated(self, read_deleted=None, overwrite=False):
        """Return a version of this context with admin flag set."""
        # TODO(russellb) This method is a bit of a nova-ism. It makes
        # some assumptions about the data in the request context sent
        # across rpc, while the rest of this class does not. We could get
        # rid of this if we changed the nova code that uses this to
        # convert the RpcContext back to its native RequestContext doing
        # something like nova.context.RequestContext.from_dict(ctxt.to_dict())
        clone = self.deepcopy()
        vals = clone.values
        vals['is_admin'] = True
        roles = vals.setdefault('roles', [])
        if 'admin' not in roles:
            roles.append('admin')
        if read_deleted is not None:
            vals['read_deleted'] = read_deleted
        return clone
|
|
||||||
|
|
||||||
|
|
||||||
class ClientException(Exception):
    """Wrapper for an exception that an RPC proxy is expected to hit.

    Merely constructing an instance captures the active sys.exc_info(),
    which is later handed back to the RPC client without the usual
    error logging.
    """

    def __init__(self):
        # Snapshot the in-flight exception at construction time.
        self._exc_info = sys.exc_info()
|
|
||||||
|
|
||||||
|
|
||||||
def catch_client_exception(exceptions, func, *args, **kwargs):
    """Invoke func, wrapping any listed exception in ClientException.

    :param exceptions: Collection of exception classes considered
        "expected"; an exact type match triggers wrapping.
    :param func: Callable to invoke with the remaining arguments.
    :returns: Whatever func returns.
    """
    try:
        return func(*args, **kwargs)
    except Exception as exc:
        # Only exact type matches are wrapped; everything else
        # (including subclasses) propagates unchanged.
        if type(exc) in exceptions:
            raise ClientException()
        raise
|
|
||||||
|
|
||||||
|
|
||||||
def client_exceptions(*exceptions):
    """Decorator for manager methods that raise expected exceptions.

    Marking a Manager method with this decorator allows the declaration
    of expected exceptions that the RPC layer should not consider fatal,
    and not log as if they were generated in a real error scenario. Note
    that this will cause listed exceptions to be wrapped in a
    ClientException, which is used internally by the RPC layer.

    :param exceptions: Exception classes that the decorated method is
        expected to raise.
    :returns: Decorator wrapping the method via catch_client_exception.
    """
    # Local import keeps this block self-contained; functools is stdlib.
    import functools

    def outer(func):
        # functools.wraps preserves the wrapped function's metadata
        # (__name__, __doc__, ...) so introspection and debugging keep
        # pointing at the real manager method instead of 'inner'.
        @functools.wraps(func)
        def inner(*args, **kwargs):
            return catch_client_exception(exceptions, func, *args, **kwargs)
        return inner
    return outer
|
|
||||||
|
|
||||||
|
|
||||||
# TODO(sirp): we should deprecate this in favor of
# using `versionutils.is_compatible` directly
def version_is_compatible(imp_version, version):
    """Check whether an incoming message version can be handled.

    :param imp_version: The version implemented.
    :param version: The version requested by an incoming message.
    :returns: True when the implemented version can serve the request.
    """
    # Note the argument order swap: versionutils takes (requested,
    # current).
    return versionutils.is_compatible(version, imp_version)
|
|
||||||
|
|
||||||
|
|
||||||
def serialize_msg(raw_msg):
    """Wrap raw_msg in the rpc message envelope.

    See the docstring for _RPC_ENVELOPE_VERSION for more information
    about the envelope format.
    """
    return {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
            _MESSAGE_KEY: jsonutils.dumps(raw_msg)}
|
|
||||||
|
|
||||||
|
|
||||||
def deserialize_msg(msg):
    """Return the payload of msg, unwrapping the rpc envelope if present.

    Robustness Principle: "Be strict in what you send, liberal in what
    you accept."  The incoming object may be one of:

    1) A dict carrying the standard message envelope keys -- verify the
       envelope version and unpack the payload.
    2) A dict without the envelope keys -- either a notification or a
       message from before envelopes existed (version 1.0); return it
       untouched.
    3) Any non-dict value -- e.g. a return value from rpc.call() from
       before message envelopes were used; return it untouched.

    :raises UnsupportedRpcEnvelopeVersion: The envelope version is not
        compatible with _RPC_ENVELOPE_VERSION.
    """
    if not isinstance(msg, dict):
        # Case 3: nothing to unwrap, hope for the best.
        return msg

    if not all(key in msg for key in (_VERSION_KEY, _MESSAGE_KEY)):
        # Case 2: no envelope present.
        return msg

    # At this point we think we have the envelope format we expect
    # (case 1).
    if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
        raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])

    return jsonutils.loads(msg[_MESSAGE_KEY])
|
|
|
@ -1,178 +0,0 @@
|
||||||
# Copyright 2012 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
Code for rpc message dispatching.
|
|
||||||
|
|
||||||
Messages that come in have a version number associated with them. RPC API
|
|
||||||
version numbers are in the form:
|
|
||||||
|
|
||||||
Major.Minor
|
|
||||||
|
|
||||||
For a given message with version X.Y, the receiver must be marked as able to
|
|
||||||
handle messages of version A.B, where:
|
|
||||||
|
|
||||||
A = X
|
|
||||||
|
|
||||||
B >= Y
|
|
||||||
|
|
||||||
The Major version number would be incremented for an almost completely new API.
|
|
||||||
The Minor version number would be incremented for backwards compatible changes
|
|
||||||
to an existing API. A backwards compatible change could be something like
|
|
||||||
adding a new method, adding an argument to an existing method (but not
|
|
||||||
requiring it), or changing the type for an existing argument (but still
|
|
||||||
handling the old type as well).
|
|
||||||
|
|
||||||
The conversion over to a versioned API must be done on both the client side and
|
|
||||||
server side of the API at the same time. However, as the code stands today,
|
|
||||||
there can be both versioned and unversioned APIs implemented in the same code
|
|
||||||
base.
|
|
||||||
|
|
||||||
EXAMPLES
|
|
||||||
========
|
|
||||||
|
|
||||||
Nova was the first project to use versioned rpc APIs. Consider the compute rpc
|
|
||||||
API as an example. The client side is in nova/compute/rpcapi.py and the server
|
|
||||||
side is in nova/compute/manager.py.
|
|
||||||
|
|
||||||
|
|
||||||
Example 1) Adding a new method.
|
|
||||||
-------------------------------
|
|
||||||
|
|
||||||
Adding a new method is a backwards compatible change. It should be added to
|
|
||||||
nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to
|
|
||||||
X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should
|
|
||||||
have a specific version specified to indicate the minimum API version that must
|
|
||||||
be implemented for the method to be supported. For example::
|
|
||||||
|
|
||||||
def get_host_uptime(self, ctxt, host):
|
|
||||||
topic = _compute_topic(self.topic, ctxt, host, None)
|
|
||||||
return self.call(ctxt, self.make_msg('get_host_uptime'), topic,
|
|
||||||
version='1.1')
|
|
||||||
|
|
||||||
In this case, version '1.1' is the first version that supported the
|
|
||||||
get_host_uptime() method.
|
|
||||||
|
|
||||||
|
|
||||||
Example 2) Adding a new parameter.
|
|
||||||
----------------------------------
|
|
||||||
|
|
||||||
Adding a new parameter to an rpc method can be made backwards compatible. The
|
|
||||||
RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped.
|
|
||||||
The implementation of the method must not expect the parameter to be present.::
|
|
||||||
|
|
||||||
def some_remote_method(self, arg1, arg2, newarg=None):
|
|
||||||
# The code needs to deal with newarg=None for cases
|
|
||||||
# where an older client sends a message without it.
|
|
||||||
pass
|
|
||||||
|
|
||||||
On the client side, the same changes should be made as in example 1. The
|
|
||||||
minimum version that supports the new parameter should be specified.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
from designate.openstack.common.rpc import serializer as rpc_serializer
|
|
||||||
|
|
||||||
|
|
||||||
class RpcDispatcher(object):
    """Dispatch rpc messages according to the requested API version.

    This class can be used as the top level 'manager' for a service. It
    contains a list of underlying managers that have an API_VERSION
    attribute.
    """

    def __init__(self, callbacks, serializer=None):
        """Initialize the rpc dispatcher.

        :param callbacks: Proxy objects exposing rpc methods; each
            should carry an RPC_API_VERSION attribute.
        :param serializer: Serializer used to deserialize arguments
            before the method call and to serialize its result.
            Defaults to a no-op serializer when omitted.
        """
        super(RpcDispatcher, self).__init__()
        self.callbacks = callbacks
        if serializer is None:
            serializer = rpc_serializer.NoOpSerializer()
        self.serializer = serializer

    def _deserialize_args(self, context, kwargs):
        """Run every keyword argument through the serializer.

        :param context: The request context.
        :param kwargs: Mapping of arguments to deserialize.
        :returns: A new mapping of deserialized arguments.
        """
        deserialize = self.serializer.deserialize_entity
        return dict((name, deserialize(context, value))
                    for name, value in six.iteritems(kwargs))

    def dispatch(self, ctxt, version, method, namespace, **kwargs):
        """Dispatch a message based on a requested version.

        :param ctxt: The request context.
        :param version: Requested API version from the incoming
            message; a falsy value means '1.0'.
        :param method: Name of the method requested by the message.
        :param namespace: Namespace of the requested method; None
            matches callbacks that declare no namespace.
        :param kwargs: Keyword arguments passed to the method.
        :returns: Whatever the matched callback method returns,
            serialized through the configured serializer.
        :raises AttributeError: A version-compatible callback exists
            but none defines the requested method.
        :raises rpc_common.UnsupportedRpcVersion: No callback is
            compatible with the requested version.
        """
        version = version or '1.0'

        had_compatible = False
        for callback in self.callbacks:
            # Skip callbacks registered under a different namespace.
            if namespace != getattr(callback, 'RPC_API_NAMESPACE', None):
                continue

            # Callbacks without an explicit version are treated as 1.0.
            api_version = getattr(callback, 'RPC_API_VERSION', '1.0')
            is_compatible = rpc_common.version_is_compatible(api_version,
                                                             version)
            had_compatible = had_compatible or is_compatible

            if is_compatible and hasattr(callback, method):
                kwargs = self._deserialize_args(ctxt, kwargs)
                result = getattr(callback, method)(ctxt, **kwargs)
                return self.serializer.serialize_entity(ctxt, result)

        if had_compatible:
            raise AttributeError("No such RPC function '%s'" % method)
        raise rpc_common.UnsupportedRpcVersion(version=version)
|
|
|
@ -1,195 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""Fake RPC implementation which calls proxy methods directly with no
|
|
||||||
queues. Casts will block, but this is very useful for tests.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import inspect
|
|
||||||
# NOTE(russellb): We specifically want to use json, not our own jsonutils.
|
|
||||||
# jsonutils has some extra logic to automatically convert objects to primitive
|
|
||||||
# types so that they can be serialized. We want to catch all cases where
|
|
||||||
# non-primitive types make it into this code and treat it as an error.
|
|
||||||
import json
|
|
||||||
import time
|
|
||||||
|
|
||||||
import eventlet
|
|
||||||
import six
|
|
||||||
|
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
|
|
||||||
# Module-level registry used by the fake driver: maps topic name to the
# list of Consumer objects currently subscribed to that topic.
CONSUMERS = {}
|
|
||||||
|
|
||||||
|
|
||||||
class RpcContext(rpc_common.CommonRpcContext):
    """Context for the fake driver that records in-process replies."""

    def __init__(self, **kwargs):
        super(RpcContext, self).__init__(**kwargs)
        # Accumulated (reply, failure) pairs and an end-of-stream flag.
        self._response = []
        self._done = False

    def deepcopy(self):
        # Copy the base values but share the reply list with the
        # original so replies recorded on the copy stay visible.
        clone = self.__class__(**self.to_dict())
        clone._response = self._response
        clone._done = self._done
        return clone

    def reply(self, reply=None, failure=None, ending=False):
        if ending:
            self._done = True
        # An 'ending' reply marks completion without recording data.
        if not self._done:
            self._response.append((reply, failure))
|
|
||||||
|
|
||||||
|
|
||||||
class Consumer(object):
    """Fake in-process consumer: dispatches calls straight to a proxy."""

    def __init__(self, topic, proxy):
        # proxy is the dispatcher object whose dispatch() method handles
        # incoming calls for this topic.
        self.topic = topic
        self.proxy = proxy

    def call(self, context, version, method, namespace, args, timeout):
        """Dispatch one call in a greenthread and gather its replies.

        :param context: Caller's context; a fresh RpcContext copy is
            created for reply bookkeeping.
        :param version: Requested API version for the dispatcher.
        :param method: Name of the proxy method to invoke.
        :param namespace: Namespace of the requested method.
        :param args: Dict of keyword arguments for the method.
        :param timeout: Seconds to wait before killing the worker
            thread and raising rpc_common.Timeout; falsy waits forever.
        :returns: List of reply values produced by the call.
        """
        done = eventlet.event.Event()

        def _inner():
            # NOTE(review): dispatch() is handed the original context
            # while replies are read from this copy -- presumably they
            # share reply state via from_dict; confirm intent.
            ctxt = RpcContext.from_dict(context.to_dict())
            try:
                rval = self.proxy.dispatch(context, version, method,
                                           namespace, **args)
                res = []
                # Caller might have called ctxt.reply() manually
                for (reply, failure) in ctxt._response:
                    if failure:
                        six.reraise(failure[0], failure[1], failure[2])
                    res.append(reply)
                # if ending not 'sent'...we might have more data to
                # return from the function itself
                if not ctxt._done:
                    if inspect.isgenerator(rval):
                        for val in rval:
                            res.append(val)
                    else:
                        res.append(rval)
                done.send(res)
            except rpc_common.ClientException as e:
                # Expected exception: hand the original error back to
                # the caller without wrapping it.
                done.send_exception(e._exc_info[1])
            except Exception as e:
                done.send_exception(e)

        thread = eventlet.greenthread.spawn(_inner)

        if timeout:
            # Poll once per second until the worker finishes or the
            # timeout budget runs out.
            start_time = time.time()
            while not done.ready():
                eventlet.greenthread.sleep(1)
                cur_time = time.time()
                if (cur_time - start_time) > timeout:
                    thread.kill()
                    raise rpc_common.Timeout()

        return done.wait()
|
|
||||||
|
|
||||||
|
|
||||||
class Connection(object):
    """Fake connection that tracks consumers in the global registry."""

    def __init__(self):
        self.consumers = []

    def create_consumer(self, topic, proxy, fanout=False):
        # Track the consumer locally and register it under its topic in
        # the global registry; fanout is accepted but has no extra
        # meaning for the fake driver.
        consumer = Consumer(topic, proxy)
        self.consumers.append(consumer)
        CONSUMERS.setdefault(topic, []).append(consumer)

    def close(self):
        # Deregister everything this connection created.
        for consumer in self.consumers:
            CONSUMERS[consumer.topic].remove(consumer)
        self.consumers = []

    def consume_in_thread(self):
        # The fake driver dispatches synchronously; nothing to spawn.
        pass
|
|
||||||
|
|
||||||
|
|
||||||
def create_connection(conf, new=True):
    """Create a connection.

    Both parameters are accepted for driver-interface compatibility
    and are unused by the fake implementation.
    """
    return Connection()
|
|
||||||
|
|
||||||
|
|
||||||
def check_serialize(msg):
    """Make sure a message intended for rpc can be serialized.

    Raises the json.dumps error (TypeError) when msg contains values a
    real driver could not put on the wire; the serialized form itself
    is discarded.
    """
    json.dumps(msg)
|
|
||||||
|
|
||||||
|
|
||||||
def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    check_serialize(msg)

    method = msg.get('method')
    if not method:
        # Nothing to dispatch without a method name.
        return

    try:
        # The fake driver always delivers to the first registered
        # consumer for the topic.
        consumer = CONSUMERS[topic][0]
    except (KeyError, IndexError):
        raise rpc_common.Timeout("No consumers available")

    return consumer.call(context,
                         msg.get('version', None),
                         method,
                         msg.get('namespace', None),
                         msg.get('args', {}),
                         timeout)
|
|
||||||
|
|
||||||
|
|
||||||
def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    replies = list(multicall(conf, context, topic, msg, timeout))
    # NOTE(vish): return the last result from the multicall
    return replies[-1] if replies else None
|
|
||||||
|
|
||||||
|
|
||||||
def cast(conf, context, topic, msg):
    """Fire-and-forget delivery: any processing error is swallowed."""
    check_serialize(msg)
    try:
        call(conf, context, topic, msg)
    except Exception:
        # Casts deliberately never report failures back to the sender.
        pass
|
|
||||||
|
|
||||||
|
|
||||||
def notify(conf, context, topic, msg, envelope):
    """Drop the notification; only validate that msg is serializable."""
    check_serialize(msg)
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup():
    """No resources to release in the fake driver; do nothing."""
    pass
|
|
||||||
|
|
||||||
|
|
||||||
def fanout_cast(conf, context, topic, msg):
    """Cast to all consumers of a topic."""
    check_serialize(msg)

    method = msg.get('method')
    if not method:
        return

    version = msg.get('version', None)
    namespace = msg.get('namespace', None)
    args = msg.get('args', {})

    # Deliver to every consumer on the topic; individual failures are
    # ignored, matching cast semantics.
    for consumer in CONSUMERS.get(topic, []):
        try:
            consumer.call(context, version, method, namespace, args, None)
        except Exception:
            pass
|
|
|
@ -1,855 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import functools
|
|
||||||
import itertools
|
|
||||||
import socket
|
|
||||||
import ssl
|
|
||||||
import time
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
import eventlet
|
|
||||||
import greenlet
|
|
||||||
import kombu
|
|
||||||
import kombu.connection
|
|
||||||
import kombu.entity
|
|
||||||
import kombu.messaging
|
|
||||||
from oslo.config import cfg
|
|
||||||
import six
|
|
||||||
|
|
||||||
from designate.openstack.common import excutils
|
|
||||||
from designate.openstack.common.gettextutils import _
|
|
||||||
from designate.openstack.common import network_utils
|
|
||||||
from designate.openstack.common.rpc import amqp as rpc_amqp
|
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
from designate.openstack.common import sslutils
|
|
||||||
|
|
||||||
# Configuration options for the kombu (RabbitMQ) rpc driver: SSL
# settings, broker location/credentials, retry behaviour and HA queues.
kombu_opts = [
    cfg.StrOpt('kombu_ssl_version',
               default='',
               help='If SSL is enabled, the SSL version to use. Valid '
                    'values are TLSv1, SSLv23 and SSLv3. SSLv2 might '
                    'be available on some distributions.'
               ),
    cfg.StrOpt('kombu_ssl_keyfile',
               default='',
               help='SSL key file (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_certfile',
               default='',
               help='SSL cert file (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_ca_certs',
               default='',
               help=('SSL certification authority file '
                     '(valid only if SSL enabled)')),
    cfg.StrOpt('rabbit_host',
               default='localhost',
               help='The RabbitMQ broker address where a single node is used'),
    cfg.IntOpt('rabbit_port',
               default=5672,
               help='The RabbitMQ broker port where a single node is used'),
    cfg.ListOpt('rabbit_hosts',
                default=['$rabbit_host:$rabbit_port'],
                help='RabbitMQ HA cluster host:port pairs'),
    cfg.BoolOpt('rabbit_use_ssl',
                default=False,
                help='Connect over SSL for RabbitMQ'),
    cfg.StrOpt('rabbit_userid',
               default='guest',
               help='The RabbitMQ userid'),
    cfg.StrOpt('rabbit_password',
               default='guest',
               help='The RabbitMQ password',
               secret=True),
    cfg.StrOpt('rabbit_virtual_host',
               default='/',
               help='The RabbitMQ virtual host'),
    cfg.IntOpt('rabbit_retry_interval',
               default=1,
               help='How frequently to retry connecting with RabbitMQ'),
    cfg.IntOpt('rabbit_retry_backoff',
               default=2,
               help='How long to backoff for between retries when connecting '
                    'to RabbitMQ'),
    cfg.IntOpt('rabbit_max_retries',
               default=0,
               help='Maximum number of RabbitMQ connection retries. '
                    'Default is 0 (infinite retry count)'),
    cfg.BoolOpt('rabbit_ha_queues',
                default=False,
                help='Use HA queues in RabbitMQ (x-ha-policy: all). '
                     'If you change this option, you must wipe the '
                     'RabbitMQ database.'),

]

# Register the driver options on the global config object at import time.
cfg.CONF.register_opts(kombu_opts)

# Reuse the shared rpc logger so driver logging stays in one namespace.
LOG = rpc_common.LOG
|
|
||||||
|
|
||||||
|
|
||||||
def _get_queue_arguments(conf):
|
|
||||||
"""Construct the arguments for declaring a queue.
|
|
||||||
|
|
||||||
If the rabbit_ha_queues option is set, we declare a mirrored queue
|
|
||||||
as described here:
|
|
||||||
|
|
||||||
http://www.rabbitmq.com/ha.html
|
|
||||||
|
|
||||||
Setting x-ha-policy to all means that the queue will be mirrored
|
|
||||||
to all nodes in the cluster.
|
|
||||||
"""
|
|
||||||
return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
|
|
||||||
|
|
||||||
|
|
||||||
class ConsumerBase(object):
|
|
||||||
"""Consumer base class."""
|
|
||||||
|
|
||||||
def __init__(self, channel, callback, tag, **kwargs):
    """Declare a queue on an amqp channel.

    'channel' is the amqp channel to use
    'callback' is the callback to call when messages are received
    'tag' is a unique ID for the consumer on the channel

    queue name, exchange name, and other kombu options are
    passed in here as a dictionary.
    """
    self.callback = callback
    # Consumer tags are passed to kombu as strings.
    self.tag = str(tag)
    self.kwargs = kwargs
    self.queue = None
    # When True (the default), a message whose processing raises is
    # acked (dropped); when False it is requeued for redelivery.
    self.ack_on_error = kwargs.get('ack_on_error', True)
    # The first queue declaration happens via the reconnect path.
    self.reconnect(channel)
|
|
||||||
|
|
||||||
def reconnect(self, channel):
    """Re-declare the queue after a rabbit reconnect."""
    self.channel = channel
    # Bind the queue options to the new channel before redeclaring.
    self.kwargs['channel'] = channel
    self.queue = kombu.entity.Queue(**self.kwargs)
    self.queue.declare()
|
|
||||||
|
|
||||||
def _callback_handler(self, message, callback):
|
|
||||||
"""Call callback with deserialized message.
|
|
||||||
|
|
||||||
Messages that are processed without exception are ack'ed.
|
|
||||||
|
|
||||||
If the message processing generates an exception, it will be
|
|
||||||
ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
|
|
||||||
"""
|
|
||||||
|
|
||||||
try:
|
|
||||||
msg = rpc_common.deserialize_msg(message.payload)
|
|
||||||
callback(msg)
|
|
||||||
except Exception:
|
|
||||||
if self.ack_on_error:
|
|
||||||
LOG.exception(_("Failed to process message"
|
|
||||||
" ... skipping it."))
|
|
||||||
message.ack()
|
|
||||||
else:
|
|
||||||
LOG.exception(_("Failed to process message"
|
|
||||||
" ... will requeue."))
|
|
||||||
message.requeue()
|
|
||||||
else:
|
|
||||||
message.ack()
|
|
||||||
|
|
||||||
def consume(self, *args, **kwargs):
|
|
||||||
"""Actually declare the consumer on the amqp channel. This will
|
|
||||||
start the flow of messages from the queue. Using the
|
|
||||||
Connection.iterconsume() iterator will process the messages,
|
|
||||||
calling the appropriate callback.
|
|
||||||
|
|
||||||
If a callback is specified in kwargs, use that. Otherwise,
|
|
||||||
use the callback passed during __init__()
|
|
||||||
|
|
||||||
If kwargs['nowait'] is True, then this call will block until
|
|
||||||
a message is read.
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
options = {'consumer_tag': self.tag}
|
|
||||||
options['nowait'] = kwargs.get('nowait', False)
|
|
||||||
callback = kwargs.get('callback', self.callback)
|
|
||||||
if not callback:
|
|
||||||
raise ValueError("No callback defined")
|
|
||||||
|
|
||||||
def _callback(raw_message):
|
|
||||||
message = self.channel.message_to_python(raw_message)
|
|
||||||
self._callback_handler(message, callback)
|
|
||||||
|
|
||||||
self.queue.consume(*args, callback=_callback, **options)
|
|
||||||
|
|
||||||
def cancel(self):
|
|
||||||
"""Cancel the consuming from the queue, if it has started."""
|
|
||||||
try:
|
|
||||||
self.queue.cancel(self.tag)
|
|
||||||
except KeyError as e:
|
|
||||||
# NOTE(comstud): Kludge to get around a amqplib bug
|
|
||||||
if str(e) != "u'%s'" % self.tag:
|
|
||||||
raise
|
|
||||||
self.queue = None
|
|
||||||
|
|
||||||
|
|
||||||
class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'."""

    def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
        """Init a 'direct' queue.

        'channel' is the amqp channel to use
        'msg_id' is the msg_id to listen on
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        Other kombu options may be passed
        """
        # Defaults for a short-lived reply queue; caller kwargs win.
        options = dict(durable=False,
                       queue_arguments=_get_queue_arguments(conf),
                       auto_delete=True,
                       exclusive=False)
        options.update(kwargs)
        exchange = kombu.entity.Exchange(
            name=msg_id,
            type='direct',
            durable=options['durable'],
            auto_delete=options['auto_delete'])
        super(DirectConsumer, self).__init__(
            channel, callback, tag,
            name=msg_id,
            exchange=exchange,
            routing_key=msg_id,
            **options)
|
|
||||||
class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'."""

    def __init__(self, conf, channel, topic, callback, tag, name=None,
                 exchange_name=None, **kwargs):
        """Init a 'topic' queue.

        :param channel: the amqp channel to use
        :param topic: the topic to listen on
        :paramtype topic: str
        :param callback: the callback to call when messages are received
        :param tag: a unique ID for the consumer on the channel
        :param name: optional queue name, defaults to topic
        :paramtype name: str

        Other kombu options may be passed as keyword arguments
        """
        # Durability follows the service-wide amqp_* settings; caller
        # kwargs take precedence.
        options = dict(durable=conf.amqp_durable_queues,
                       queue_arguments=_get_queue_arguments(conf),
                       auto_delete=conf.amqp_auto_delete,
                       exclusive=False)
        options.update(kwargs)
        if not exchange_name:
            exchange_name = rpc_amqp.get_control_exchange(conf)
        exchange = kombu.entity.Exchange(
            name=exchange_name,
            type='topic',
            durable=options['durable'],
            auto_delete=options['auto_delete'])
        super(TopicConsumer, self).__init__(
            channel, callback, tag,
            name=name or topic,
            exchange=exchange,
            routing_key=topic,
            **options)
||||||
|
|
||||||
class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'."""

    def __init__(self, conf, channel, topic, callback, tag, **kwargs):
        """Init a 'fanout' queue.

        'channel' is the amqp channel to use
        'topic' is the topic to listen on
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        Other kombu options may be passed
        """
        # Each consumer binds its own uniquely-named queue to the shared
        # fanout exchange so that every consumer sees every message.
        unique = uuid.uuid4().hex
        exchange_name = '%s_fanout' % topic
        queue_name = '%s_fanout_%s' % (topic, unique)

        options = dict(durable=False,
                       queue_arguments=_get_queue_arguments(conf),
                       auto_delete=True,
                       exclusive=False)
        options.update(kwargs)
        exchange = kombu.entity.Exchange(
            name=exchange_name,
            type='fanout',
            durable=options['durable'],
            auto_delete=options['auto_delete'])
        super(FanoutConsumer, self).__init__(
            channel, callback, tag,
            name=queue_name,
            exchange=exchange,
            routing_key=topic,
            **options)
|
||||||
|
|
||||||
class Publisher(object):
    """Base Publisher class."""

    def __init__(self, channel, exchange_name, routing_key, **kwargs):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.exchange_name = exchange_name
        self.routing_key = routing_key
        # Saved so the exchange can be rebuilt after a reconnect.
        self.kwargs = kwargs
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-establish the Producer after a rabbit reconnection."""
        exchange = kombu.entity.Exchange(name=self.exchange_name,
                                         **self.kwargs)
        self.exchange = exchange
        self.producer = kombu.messaging.Producer(
            exchange=exchange,
            channel=channel,
            routing_key=self.routing_key)

    def send(self, msg, timeout=None):
        """Send a message."""
        if not timeout:
            self.producer.publish(msg)
        else:
            #
            # AMQP TTL is in milliseconds when set in the header.
            #
            self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
||||||
|
|
||||||
class DirectPublisher(Publisher):
    """Publisher class for 'direct'."""

    def __init__(self, conf, channel, msg_id, **kwargs):
        """init a 'direct' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        # Reply exchanges are transient: not durable, auto-deleted.
        options = dict(durable=False, auto_delete=True, exclusive=False)
        options.update(kwargs)
        super(DirectPublisher, self).__init__(
            channel, msg_id, msg_id, type='direct', **options)
||||||
|
|
||||||
class TopicPublisher(Publisher):
    """Publisher class for 'topic'."""

    def __init__(self, conf, channel, topic, **kwargs):
        """init a 'topic' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        # Durability mirrors the service-wide amqp_* settings.
        options = dict(durable=conf.amqp_durable_queues,
                       auto_delete=conf.amqp_auto_delete,
                       exclusive=False)
        options.update(kwargs)
        super(TopicPublisher, self).__init__(
            channel,
            rpc_amqp.get_control_exchange(conf),
            topic,
            type='topic',
            **options)
||||||
|
|
||||||
class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'."""

    def __init__(self, conf, channel, topic, **kwargs):
        """init a 'fanout' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        options = dict(durable=False, auto_delete=True, exclusive=False)
        options.update(kwargs)
        # Fanout publishes carry no routing key; the exchange name alone
        # selects the recipients.
        super(FanoutPublisher, self).__init__(
            channel, '%s_fanout' % topic, None, type='fanout', **options)
||||||
|
|
||||||
class NotifyPublisher(TopicPublisher):
    """Publisher class for 'notify'."""

    def __init__(self, conf, channel, topic, **kwargs):
        # Pop 'durable' before TopicPublisher sees kwargs so callers can
        # override the configured default just for notifications.
        self.durable = kwargs.pop('durable', conf.amqp_durable_queues)
        self.queue_arguments = _get_queue_arguments(conf)
        super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)

    def reconnect(self, channel):
        super(NotifyPublisher, self).reconnect(channel)

        # NOTE(jerdfelt): Normally the consumer would create the queue, but
        # we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        queue = kombu.entity.Queue(
            channel=channel,
            exchange=self.exchange,
            durable=self.durable,
            name=self.routing_key,
            routing_key=self.routing_key,
            queue_arguments=self.queue_arguments)
        queue.declare()
||||||
|
|
||||||
class Connection(object):
    """Connection object.

    Owns a kombu broker connection along with the consumers and proxy
    callbacks attached to it, and implements the reconnect/retry policy.
    """

    # Class-level connection pool; populated externally via
    # rpc_amqp.get_connection_pool(conf, Connection).
    pool = None

    def __init__(self, conf, server_params=None):
        self.consumers = []
        self.consumer_thread = None
        self.proxy_callbacks = []
        self.conf = conf
        self.max_retries = self.conf.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None
        self.interval_start = self.conf.rabbit_retry_interval
        self.interval_stepping = self.conf.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30
        self.memory_transport = False

        if server_params is None:
            server_params = {}
        # Keys to translate from server_params to kombu params
        server_params_to_kombu_params = {'username': 'userid'}

        ssl_params = self._fetch_ssl_params()
        # Build one kombu parameter dict per configured broker address.
        params_list = []
        for adr in self.conf.rabbit_hosts:
            hostname, port = network_utils.parse_host_port(
                adr, default_port=self.conf.rabbit_port)

            params = {
                'hostname': hostname,
                'port': port,
                'userid': self.conf.rabbit_userid,
                'password': self.conf.rabbit_password,
                'virtual_host': self.conf.rabbit_virtual_host,
            }

            # Explicit server_params override the configured defaults.
            for sp_key, value in six.iteritems(server_params):
                p_key = server_params_to_kombu_params.get(sp_key, sp_key)
                params[p_key] = value

            if self.conf.fake_rabbit:
                params['transport'] = 'memory'
            if self.conf.rabbit_use_ssl:
                params['ssl'] = ssl_params

            params_list.append(params)

        self.params_list = params_list

        self.memory_transport = self.conf.fake_rabbit

        self.connection = None
        self.reconnect()

    def _fetch_ssl_params(self):
        """Handles fetching what ssl params should be used for the connection
        (if any).
        """
        ssl_params = dict()

        # http://docs.python.org/library/ssl.html - ssl.wrap_socket
        if self.conf.kombu_ssl_version:
            ssl_params['ssl_version'] = sslutils.validate_ssl_version(
                self.conf.kombu_ssl_version)
        if self.conf.kombu_ssl_keyfile:
            ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
        if self.conf.kombu_ssl_certfile:
            ssl_params['certfile'] = self.conf.kombu_ssl_certfile
        if self.conf.kombu_ssl_ca_certs:
            ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
            # We might want to allow variations in the
            # future with this?
            ssl_params['cert_reqs'] = ssl.CERT_REQUIRED

        # Return the extended behavior or just have the default behavior
        return ssl_params or True

    def _connect(self, params):
        """Connect to rabbit.  Re-establish any queues that may have
        been declared before if we are reconnecting.  Exceptions should
        be handled by the caller.
        """
        if self.connection:
            LOG.info(_("Reconnecting to AMQP server on "
                       "%(hostname)s:%(port)d") % params)
            try:
                self.connection.release()
            except self.connection_errors:
                pass
            # Setting this in case the next statement fails, though
            # it shouldn't be doing any network operations, yet.
            self.connection = None
        self.connection = kombu.connection.BrokerConnection(**params)
        self.connection_errors = self.connection.connection_errors
        if self.memory_transport:
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
        self.consumer_num = itertools.count(1)
        self.connection.connect()
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        # Re-declare existing consumers on the fresh channel.
        for consumer in self.consumers:
            consumer.reconnect(self.channel)
        LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
                 params)

    def reconnect(self):
        """Handles reconnecting and re-establishing queues.
        Will retry up to self.max_retries number of times.
        self.max_retries = 0 means to retry forever.
        Sleep between tries, starting at self.interval_start
        seconds, backing off self.interval_stepping number of seconds
        each attempt.
        """

        attempt = 0
        while True:
            # Round-robin over the configured broker addresses.
            params = self.params_list[attempt % len(self.params_list)]
            attempt += 1
            try:
                self._connect(params)
                return
            except (IOError, self.connection_errors) as e:
                pass
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise

            log_info = {}
            log_info['err_str'] = str(e)
            log_info['max_retries'] = self.max_retries
            log_info.update(params)

            if self.max_retries and attempt == self.max_retries:
                msg = _('Unable to connect to AMQP server on '
                        '%(hostname)s:%(port)d after %(max_retries)d '
                        'tries: %(err_str)s') % log_info
                LOG.error(msg)
                raise rpc_common.RPCException(msg)

            # Linear backoff capped at interval_max.
            if attempt == 1:
                sleep_time = self.interval_start or 1
            elif attempt > 1:
                sleep_time += self.interval_stepping
            if self.interval_max:
                sleep_time = min(sleep_time, self.interval_max)

            log_info['sleep_time'] = sleep_time
            LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
                        'unreachable: %(err_str)s. Trying again in '
                        '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)

    def ensure(self, error_callback, method, *args, **kwargs):
        # Run 'method' until it succeeds, reconnecting on any
        # connection-level failure; 'error_callback' (if given) is invoked
        # with the exception before each reconnect attempt.
        while True:
            try:
                return method(*args, **kwargs)
            except (self.connection_errors, socket.timeout, IOError) as e:
                if error_callback:
                    error_callback(e)
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise
                if error_callback:
                    error_callback(e)
            self.reconnect()

    def get_channel(self):
        """Convenience call for bin/clear_rabbit_queues."""
        return self.channel

    def close(self):
        """Close/release this connection."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.connection.release()
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.channel.close()
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        self.consumers = []

    def declare_consumer(self, consumer_cls, topic, callback):
        """Create a Consumer using the class that was passed in and
        add it to our list of consumers
        """

        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
                        "%(err_str)s") % log_info)

        def _declare_consumer():
            consumer = consumer_cls(self.conf, self.channel, topic, callback,
                                    six.next(self.consumer_num))
            self.consumers.append(consumer)
            return consumer

        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers."""

        info = {'do_consume': True}

        def _error_callback(exc):
            if isinstance(exc, socket.timeout):
                LOG.debug(_('Timed out waiting for RPC response: %s') %
                          str(exc))
                raise rpc_common.Timeout()
            else:
                LOG.exception(_('Failed to consume message from queue: %s') %
                              str(exc))
                # Force re-declaration on the next pass; the channel may
                # have been replaced by a reconnect.
                info['do_consume'] = True

        def _consume():
            if info['do_consume']:
                queues_head = self.consumers[:-1]  # not fanout.
                queues_tail = self.consumers[-1]  # fanout
                for queue in queues_head:
                    queue.consume(nowait=True)
                queues_tail.consume(nowait=False)
                info['do_consume'] = False
            return self.connection.drain_events(timeout=timeout)

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                raise StopIteration
            yield self.ensure(_error_callback, _consume)

    def cancel_consumer_thread(self):
        """Cancel a consumer thread."""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
                self.consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self.consumer_thread = None

    def wait_on_proxy_callbacks(self):
        """Wait for all proxy callback threads to exit."""
        for proxy_cb in self.proxy_callbacks:
            proxy_cb.wait()

    def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
        """Send to a publisher based on the publisher class."""

        def _error_callback(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.exception(_("Failed to publish message to topic "
                            "'%(topic)s': %(err_str)s") % log_info)

        def _publish():
            publisher = cls(self.conf, self.channel, topic, **kwargs)
            publisher.send(msg, timeout)

        self.ensure(_error_callback, _publish)

    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
                               exchange_name=None, ack_on_error=True):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ack_on_error=ack_on_error,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg, timeout=None):
        """Send a 'topic' message."""
        self.publisher_send(TopicPublisher, topic, msg, timeout)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)

    def consume(self, limit=None):
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit)
        while True:
            try:
                six.next(it)
            except StopIteration:
                return

    def consume_in_thread(self):
        """Consumer from all queues/consumers in a greenthread."""
        @excutils.forever_retry_uncaught_exceptions
        def _consumer_thread():
            try:
                self.consume()
            except greenlet.GreenletExit:
                return
        if self.consumer_thread is None:
            self.consumer_thread = eventlet.spawn(_consumer_thread)
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        if fanout:
            self.declare_fanout_consumer(topic, proxy_cb)
        else:
            self.declare_topic_consumer(topic, proxy_cb)

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)
        self.declare_topic_consumer(topic, proxy_cb, pool_name)

    def join_consumer_pool(self, callback, pool_name, topic,
                           exchange_name=None, ack_on_error=True):
        """Register as a member of a group of consumers for a given topic from
        the specified exchange.

        Exactly one member of a given pool will receive each message.

        A message will be delivered to multiple pools, if more than
        one is created.
        """
        callback_wrapper = rpc_amqp.CallbackWrapper(
            conf=self.conf,
            callback=callback,
            connection_pool=rpc_amqp.get_connection_pool(self.conf,
                                                         Connection),
            wait_for_consumers=not ack_on_error
        )
        self.proxy_callbacks.append(callback_wrapper)
        self.declare_topic_consumer(
            queue_name=pool_name,
            topic=topic,
            exchange_name=exchange_name,
            callback=callback_wrapper,
            ack_on_error=ack_on_error,
        )
|
|
||||||
|
|
||||||
def create_connection(conf, new=True):
    """Create a connection."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.create_connection(conf, new, pool)
||||||
|
|
||||||
|
|
||||||
def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.multicall(conf, context, topic, msg, timeout, pool)
||||||
|
|
||||||
|
|
||||||
def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.call(conf, context, topic, msg, timeout, pool)
||||||
|
|
||||||
|
|
||||||
def cast(conf, context, topic, msg):
    """Sends a message on a topic without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast(conf, context, topic, msg, pool)
||||||
|
|
||||||
|
|
||||||
def fanout_cast(conf, context, topic, msg):
    """Sends a message on a fanout exchange without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast(conf, context, topic, msg, pool)
||||||
|
|
||||||
|
|
||||||
def cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a topic to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast_to_server(conf, context, server_params, topic, msg,
                                   pool)
||||||
|
|
||||||
|
|
||||||
def fanout_cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a fanout exchange to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast_to_server(conf, context, server_params, topic,
                                          msg, pool)
||||||
|
|
||||||
|
|
||||||
def notify(conf, context, topic, msg, envelope):
    """Sends a notification event on a topic."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.notify(conf, context, topic, msg, pool, envelope)
||||||
|
|
||||||
|
|
||||||
def cleanup():
    """Release all connections held in this driver's class-level pool."""
    return rpc_amqp.cleanup(Connection.pool)
|
|
@ -1,821 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation
|
|
||||||
# Copyright 2011 - 2012, Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import functools
|
|
||||||
import itertools
|
|
||||||
import time
|
|
||||||
|
|
||||||
import eventlet
|
|
||||||
import greenlet
|
|
||||||
from oslo.config import cfg
|
|
||||||
import six
|
|
||||||
|
|
||||||
from designate.openstack.common import excutils
|
|
||||||
from designate.openstack.common.gettextutils import _
|
|
||||||
from designate.openstack.common import importutils
|
|
||||||
from designate.openstack.common import jsonutils
|
|
||||||
from designate.openstack.common import log as logging
|
|
||||||
from designate.openstack.common.rpc import amqp as rpc_amqp
|
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
|
|
||||||
# The qpid bindings are optional; try_import() returns None when the
# package is not installed, so consumers must check before use.
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")

LOG = logging.getLogger(__name__)

# Configuration options for the qpid RPC driver.
qpid_opts = [
    cfg.StrOpt('qpid_hostname',
               default='localhost',
               help='Qpid broker hostname'),
    cfg.IntOpt('qpid_port',
               default=5672,
               help='Qpid broker port'),
    cfg.ListOpt('qpid_hosts',
                default=['$qpid_hostname:$qpid_port'],
                help='Qpid HA cluster host:port pairs'),
    cfg.StrOpt('qpid_username',
               default='',
               help='Username for qpid connection'),
    cfg.StrOpt('qpid_password',
               default='',
               help='Password for qpid connection',
               secret=True),
    cfg.StrOpt('qpid_sasl_mechanisms',
               default='',
               help='Space separated list of SASL mechanisms to use for auth'),
    cfg.IntOpt('qpid_heartbeat',
               default=60,
               help='Seconds between connection keepalive heartbeats'),
    cfg.StrOpt('qpid_protocol',
               default='tcp',
               help="Transport to use, either 'tcp' or 'ssl'"),
    cfg.BoolOpt('qpid_tcp_nodelay',
                default=True,
                help='Disable Nagle algorithm'),
    # NOTE(russellb) If any additional versions are added (beyond 1 and 2),
    # this file could probably use some additional refactoring so that the
    # differences between each version are split into different classes.
    cfg.IntOpt('qpid_topology_version',
               default=1,
               help="The qpid topology version to use.  Version 1 is what "
                    "was originally used by impl_qpid.  Version 2 includes "
                    "some backwards-incompatible changes that allow broker "
                    "federation to work.  Users should update to version 2 "
                    "when they are able to take everything down, as it "
                    "requires a clean break."),
]

cfg.CONF.register_opts(qpid_opts)

# Content-type marker attached to JSON-serialized message bodies.
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
||||||
|
|
||||||
|
|
||||||
def raise_invalid_topology_version(conf=None):
    """Log and raise an error for an unsupported qpid_topology_version.

    :param conf: optional config object carrying ``qpid_topology_version``;
                 defaults to the global ``cfg.CONF``.
    :raises Exception: always, with a message naming the bad value.

    NOTE: every call site in this module invokes this helper with no
    arguments, which previously raised ``TypeError`` (missing required
    argument) instead of the intended error message.  Making ``conf``
    optional keeps the original signature working while fixing those calls.
    """
    if conf is None:
        conf = cfg.CONF
    msg = (_("Invalid value for qpid_topology_version: %d") %
           conf.qpid_topology_version)
    LOG.error(msg)
    raise Exception(msg)
|
|
||||||
|
|
||||||
|
|
||||||
class ConsumerBase(object):
    """Consumer base class."""

    def __init__(self, conf, session, callback, node_name, node_opts,
                 link_name, link_opts):
        """Declare a queue on an amqp session.

        'session' is the amqp session to use
        'callback' is the callback to call when messages are received
        'node_name' is the first part of the Qpid address string, before ';'
        'node_opts' will be applied to the "x-declare" section of "node"
                    in the address string.
        'link_name' goes into the "name" field of the "link" in the address
                    string
        'link_opts' will be applied to the "x-declare" section of "link"
                    in the address string.
        """
        self.callback = callback
        self.receiver = None
        self.session = None

        if conf.qpid_topology_version == 1:
            addr_opts = {
                "create": "always",
                "node": {
                    "type": "topic",
                    "x-declare": {
                        "durable": True,
                        "auto-delete": True,
                    },
                },
                "link": {
                    "durable": True,
                    "x-declare": {
                        "durable": False,
                        "auto-delete": True,
                        "exclusive": False,
                    },
                },
            }
            addr_opts["node"]["x-declare"].update(node_opts)
        elif conf.qpid_topology_version == 2:
            addr_opts = {
                "link": {
                    "x-declare": {
                        "auto-delete": True,
                        "exclusive": False,
                    },
                },
            }
        else:
            # Pass conf explicitly: the helper takes a required argument,
            # so calling it with no args raised TypeError instead of the
            # intended error message.
            raise_invalid_topology_version(conf)

        addr_opts["link"]["x-declare"].update(link_opts)
        if link_name:
            addr_opts["link"]["name"] = link_name

        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))

        self.connect(session)

    def connect(self, session):
        """Declare the receiver on connect."""
        self._declare_receiver(session)

    def reconnect(self, session):
        """Re-declare the receiver after a qpid reconnect."""
        self._declare_receiver(session)

    def _declare_receiver(self, session):
        self.session = session
        self.receiver = session.receiver(self.address)
        # Prefetch one message at a time so unacknowledged work stays small.
        self.receiver.capacity = 1

    def _unpack_json_msg(self, msg):
        """Load the JSON data in msg if msg.content_type indicates that it
        is necessary.  Put the loaded data back into msg.content and
        update msg.content_type appropriately.

        A Qpid Message containing a dict will have a content_type of
        'amqp/map', whereas one containing a string that needs to be converted
        back from JSON will have a content_type of JSON_CONTENT_TYPE.

        :param msg: a Qpid Message object
        :returns: None
        """
        if msg.content_type == JSON_CONTENT_TYPE:
            msg.content = jsonutils.loads(msg.content)
            msg.content_type = 'amqp/map'

    def consume(self):
        """Fetch the message and pass it to the callback object."""
        message = self.receiver.fetch()
        try:
            self._unpack_json_msg(message)
            msg = rpc_common.deserialize_msg(message.content)
            self.callback(msg)
        except Exception:
            LOG.exception(_("Failed to process message... skipping it."))
        finally:
            # TODO(sandy): Need support for optional ack_on_error.
            self.session.acknowledge(message)

    def get_receiver(self):
        return self.receiver

    def get_node_name(self):
        # The node name is the portion of the address before the options.
        return self.address.split(';')[0]
|
|
||||||
|
|
||||||
|
|
||||||
class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'."""

    def __init__(self, conf, session, msg_id, callback):
        """Init a 'direct' queue.

        'session' is the amqp session to use
        'msg_id' is the msg_id to listen on
        'callback' is the callback to call when messages are received
        """

        link_opts = {
            "auto-delete": conf.amqp_auto_delete,
            "exclusive": True,
            "durable": conf.amqp_durable_queues,
        }

        if conf.qpid_topology_version == 1:
            node_name = "%s/%s" % (msg_id, msg_id)
            node_opts = {"type": "direct"}
            link_name = msg_id
        elif conf.qpid_topology_version == 2:
            node_name = "amq.direct/%s" % msg_id
            node_opts = {}
            link_name = None
        else:
            # Pass conf: calling with no args raised TypeError rather than
            # the intended invalid-topology error.
            raise_invalid_topology_version(conf)

        super(DirectConsumer, self).__init__(conf, session, callback,
                                             node_name, node_opts, link_name,
                                             link_opts)
|
|
||||||
|
|
||||||
|
|
||||||
class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'."""

    def __init__(self, conf, session, topic, callback, name=None,
                 exchange_name=None):
        """Init a 'topic' queue.

        :param session: the amqp session to use
        :param topic: is the topic to listen on
        :paramtype topic: str
        :param callback: the callback to call when messages are received
        :param name: optional queue name, defaults to topic
        :param exchange_name: optional exchange, defaults to the configured
            control exchange
        """

        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
        link_opts = {
            "auto-delete": conf.amqp_auto_delete,
            "durable": conf.amqp_durable_queues,
        }

        if conf.qpid_topology_version == 1:
            node_name = "%s/%s" % (exchange_name, topic)
        elif conf.qpid_topology_version == 2:
            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            # Pass conf: calling with no args raised TypeError rather than
            # the intended invalid-topology error.
            raise_invalid_topology_version(conf)

        super(TopicConsumer, self).__init__(conf, session, callback, node_name,
                                            {}, name or topic, link_opts)
|
|
||||||
|
|
||||||
|
|
||||||
class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'."""

    def __init__(self, conf, session, topic, callback):
        """Init a 'fanout' queue.

        'session' is the amqp session to use
        'topic' is the topic to listen on
        'callback' is the callback to call when messages are received
        """
        self.conf = conf

        link_opts = {"exclusive": True}

        if conf.qpid_topology_version == 1:
            node_name = "%s_fanout" % topic
            node_opts = {"durable": False, "type": "fanout"}
        elif conf.qpid_topology_version == 2:
            node_name = "amq.topic/fanout/%s" % topic
            node_opts = {}
        else:
            # Pass conf: calling with no args raised TypeError rather than
            # the intended invalid-topology error.
            raise_invalid_topology_version(conf)

        super(FanoutConsumer, self).__init__(conf, session, callback,
                                             node_name, node_opts, None,
                                             link_opts)
|
|
||||||
|
|
||||||
|
|
||||||
class Publisher(object):
    """Base Publisher class."""

    def __init__(self, conf, session, node_name, node_opts=None):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.sender = None
        self.session = session

        if conf.qpid_topology_version == 1:
            addr_opts = {
                "create": "always",
                "node": {
                    "type": "topic",
                    "x-declare": {
                        "durable": False,
                        # auto-delete isn't implemented for exchanges in qpid,
                        # but put in here anyway
                        "auto-delete": True,
                    },
                },
            }
            if node_opts:
                addr_opts["node"]["x-declare"].update(node_opts)

            self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
        elif conf.qpid_topology_version == 2:
            self.address = node_name
        else:
            # Pass conf: calling with no args raised TypeError rather than
            # the intended invalid-topology error.
            raise_invalid_topology_version(conf)

        self.reconnect(session)

    def reconnect(self, session):
        """Re-establish the Sender after a reconnection."""
        self.sender = session.sender(self.address)

    def _pack_json_msg(self, msg):
        """Qpid cannot serialize dicts containing strings longer than 65535
        characters.  This function dumps the message content to a JSON
        string, which Qpid is able to handle.

        :param msg: May be either a Qpid Message object or a bare dict.
        :returns: A Qpid Message with its content field JSON encoded.
        """
        try:
            msg.content = jsonutils.dumps(msg.content)
        except AttributeError:
            # Need to have a Qpid message so we can set the content_type.
            msg = qpid_messaging.Message(jsonutils.dumps(msg))
        msg.content_type = JSON_CONTENT_TYPE
        return msg

    def send(self, msg):
        """Send a message."""
        try:
            # Check if Qpid can encode the message
            check_msg = msg
            if not hasattr(check_msg, 'content_type'):
                check_msg = qpid_messaging.Message(msg)
            content_type = check_msg.content_type
            enc, dec = qpid_messaging.message.get_codec(content_type)
            enc(check_msg.content)
        except qpid_codec.CodecException:
            # This means the message couldn't be serialized as a dict.
            msg = self._pack_json_msg(msg)
        self.sender.send(msg)
|
|
||||||
|
|
||||||
|
|
||||||
class DirectPublisher(Publisher):
    """Publisher class for 'direct'."""
    def __init__(self, conf, session, msg_id):
        """Init a 'direct' publisher."""

        if conf.qpid_topology_version == 1:
            node_name = msg_id
            node_opts = {"type": "direct"}
        elif conf.qpid_topology_version == 2:
            node_name = "amq.direct/%s" % msg_id
            node_opts = {}
        else:
            # Pass conf: calling with no args raised TypeError rather than
            # the intended invalid-topology error.
            raise_invalid_topology_version(conf)

        super(DirectPublisher, self).__init__(conf, session, node_name,
                                              node_opts)
|
|
||||||
|
|
||||||
|
|
||||||
class TopicPublisher(Publisher):
    """Publisher class for 'topic'."""
    def __init__(self, conf, session, topic):
        """Init a 'topic' publisher.
        """
        exchange_name = rpc_amqp.get_control_exchange(conf)

        if conf.qpid_topology_version == 1:
            node_name = "%s/%s" % (exchange_name, topic)
        elif conf.qpid_topology_version == 2:
            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            # Pass conf: calling with no args raised TypeError rather than
            # the intended invalid-topology error.
            raise_invalid_topology_version(conf)

        super(TopicPublisher, self).__init__(conf, session, node_name)
|
|
||||||
|
|
||||||
|
|
||||||
class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'."""
    def __init__(self, conf, session, topic):
        """Init a 'fanout' publisher.
        """

        if conf.qpid_topology_version == 1:
            node_name = "%s_fanout" % topic
            node_opts = {"type": "fanout"}
        elif conf.qpid_topology_version == 2:
            node_name = "amq.topic/fanout/%s" % topic
            node_opts = {}
        else:
            # Pass conf: calling with no args raised TypeError rather than
            # the intended invalid-topology error.
            raise_invalid_topology_version(conf)

        super(FanoutPublisher, self).__init__(conf, session, node_name,
                                              node_opts)
|
|
||||||
|
|
||||||
|
|
||||||
class NotifyPublisher(Publisher):
    """Publisher class for notifications."""
    def __init__(self, conf, session, topic):
        """Init a 'topic' publisher.
        """
        exchange_name = rpc_amqp.get_control_exchange(conf)
        # Notifications must survive broker restarts, so declare durable.
        node_opts = {"durable": True}

        if conf.qpid_topology_version == 1:
            node_name = "%s/%s" % (exchange_name, topic)
        elif conf.qpid_topology_version == 2:
            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            # Pass conf: calling with no args raised TypeError rather than
            # the intended invalid-topology error.
            raise_invalid_topology_version(conf)

        super(NotifyPublisher, self).__init__(conf, session, node_name,
                                              node_opts)
|
|
||||||
|
|
||||||
|
|
||||||
class Connection(object):
    """Connection object wrapping a qpid.messaging Connection.

    Manages broker failover, consumer registration, and publisher helpers
    for the Qpid RPC driver.
    """

    # Shared connection pool, populated by rpc_amqp.get_connection_pool().
    pool = None

    def __init__(self, conf, server_params=None):
        if not qpid_messaging:
            raise ImportError("Failed to import qpid.messaging")

        self.session = None
        self.consumers = {}
        self.consumer_thread = None
        self.proxy_callbacks = []
        self.conf = conf

        if server_params and 'hostname' in server_params:
            # NOTE(russellb) This enables support for cast_to_server.
            server_params['qpid_hosts'] = [
                '%s:%d' % (server_params['hostname'],
                           server_params.get('port', 5672))
            ]

        params = {
            'qpid_hosts': self.conf.qpid_hosts,
            'username': self.conf.qpid_username,
            'password': self.conf.qpid_password,
        }
        params.update(server_params or {})

        self.brokers = params['qpid_hosts']
        self.username = params['username']
        self.password = params['password']
        self.connection_create(self.brokers[0])
        self.reconnect()

    def connection_create(self, broker):
        # Create the connection - this does not open the connection
        self.connection = qpid_messaging.Connection(broker)

        # Check if flags are set and if so set them for the connection
        # before we call open
        self.connection.username = self.username
        self.connection.password = self.password

        self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
        # Reconnection is done by self.reconnect()
        self.connection.reconnect = False
        self.connection.heartbeat = self.conf.qpid_heartbeat
        self.connection.transport = self.conf.qpid_protocol
        self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay

    def _register_consumer(self, consumer):
        self.consumers[str(consumer.get_receiver())] = consumer

    def _lookup_consumer(self, receiver):
        return self.consumers[str(receiver)]

    def reconnect(self):
        """Handles reconnecting and re-establishing sessions and queues."""
        attempt = 0
        delay = 1
        while True:
            # Close the session if necessary
            if self.connection.opened():
                try:
                    self.connection.close()
                except qpid_exceptions.ConnectionError:
                    pass

            # Round-robin across the configured brokers.
            broker = self.brokers[attempt % len(self.brokers)]
            attempt += 1

            try:
                self.connection_create(broker)
                self.connection.open()
            except qpid_exceptions.ConnectionError as e:
                msg_dict = dict(e=e, delay=delay)
                msg = _("Unable to connect to AMQP server: %(e)s. "
                        "Sleeping %(delay)s seconds") % msg_dict
                LOG.error(msg)
                time.sleep(delay)
                # Exponential backoff, capped at 60 seconds.
                delay = min(2 * delay, 60)
            else:
                LOG.info(_('Connected to AMQP server on %s'), broker)
                break

        self.session = self.connection.session()

        if self.consumers:
            consumers = self.consumers
            self.consumers = {}

            for consumer in six.itervalues(consumers):
                consumer.reconnect(self.session)
                self._register_consumer(consumer)

            LOG.debug(_("Re-established AMQP queues"))

    def ensure(self, error_callback, method, *args, **kwargs):
        """Run method, reconnecting and retrying on connection errors."""
        while True:
            try:
                return method(*args, **kwargs)
            except (qpid_exceptions.Empty,
                    qpid_exceptions.ConnectionError) as e:
                if error_callback:
                    error_callback(e)
                self.reconnect()

    def close(self):
        """Close/release this connection."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        try:
            self.connection.close()
        except Exception:
            # NOTE(dripton) Logging exceptions that happen during cleanup just
            # causes confusion; there's really nothing useful we can do with
            # them.
            pass
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.session.close()
        self.session = self.connection.session()
        self.consumers = {}

    def declare_consumer(self, consumer_cls, topic, callback):
        """Create a Consumer using the class that was passed in and
        add it to our list of consumers
        """
        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
                      "%(err_str)s") % log_info)

        def _declare_consumer():
            consumer = consumer_cls(self.conf, self.session, topic, callback)
            self._register_consumer(consumer)
            return consumer

        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers."""

        def _error_callback(exc):
            if isinstance(exc, qpid_exceptions.Empty):
                LOG.debug(_('Timed out waiting for RPC response: %s') %
                          str(exc))
                raise rpc_common.Timeout()
            else:
                LOG.exception(_('Failed to consume message from queue: %s') %
                              str(exc))

        def _consume():
            nxt_receiver = self.session.next_receiver(timeout=timeout)
            try:
                self._lookup_consumer(nxt_receiver).consume()
            except Exception:
                LOG.exception(_("Error processing message.  Skipping it."))

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                # NOTE: 'return' ends a generator.  Raising StopIteration
                # here is forbidden by PEP 479 and becomes a RuntimeError
                # on Python 3.7+.
                return
            yield self.ensure(_error_callback, _consume)

    def cancel_consumer_thread(self):
        """Cancel a consumer thread."""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
                self.consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self.consumer_thread = None

    def wait_on_proxy_callbacks(self):
        """Wait for all proxy callback threads to exit."""
        for proxy_cb in self.proxy_callbacks:
            proxy_cb.wait()

    def publisher_send(self, cls, topic, msg):
        """Send to a publisher based on the publisher class."""

        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.exception(_("Failed to publish message to topic "
                          "'%(topic)s': %(err_str)s") % log_info)

        def _publisher_send():
            publisher = cls(self.conf, self.session, topic)
            publisher.send(msg)

        return self.ensure(_connect_error, _publisher_send)

    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
                               exchange_name=None):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg, timeout=None):
        """Send a 'topic' message."""
        #
        # We want to create a message with attributes, e.g. a TTL. We
        # don't really need to keep 'msg' in its JSON format any longer
        # so let's create an actual qpid message here and get some
        # value-add on the go.
        #
        # WARNING: Request timeout happens to be in the same units as
        # qpid's TTL (seconds). If this changes in the future, then this
        # will need to be altered accordingly.
        #
        qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
        self.publisher_send(TopicPublisher, topic, qpid_message)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic, msg)

    def consume(self, limit=None):
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit)
        while True:
            try:
                six.next(it)
            except StopIteration:
                return

    def consume_in_thread(self):
        """Consumer from all queues/consumers in a greenthread."""
        @excutils.forever_retry_uncaught_exceptions
        def _consumer_thread():
            try:
                self.consume()
            except greenlet.GreenletExit:
                return
        if self.consumer_thread is None:
            self.consumer_thread = eventlet.spawn(_consumer_thread)
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        if fanout:
            consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
        else:
            consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)

        self._register_consumer(consumer)

        return consumer

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
                                 name=pool_name)

        self._register_consumer(consumer)

        return consumer

    def join_consumer_pool(self, callback, pool_name, topic,
                           exchange_name=None, ack_on_error=True):
        """Register as a member of a group of consumers for a given topic from
        the specified exchange.

        Exactly one member of a given pool will receive each message.

        A message will be delivered to multiple pools, if more than
        one is created.
        """
        callback_wrapper = rpc_amqp.CallbackWrapper(
            conf=self.conf,
            callback=callback,
            connection_pool=rpc_amqp.get_connection_pool(self.conf,
                                                         Connection),
            wait_for_consumers=not ack_on_error
        )
        self.proxy_callbacks.append(callback_wrapper)

        consumer = TopicConsumer(conf=self.conf,
                                 session=self.session,
                                 topic=topic,
                                 callback=callback_wrapper,
                                 name=pool_name,
                                 exchange_name=exchange_name)

        self._register_consumer(consumer)
        return consumer
|
|
||||||
|
|
||||||
|
|
||||||
def create_connection(conf, new=True):
    """Create a connection."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.create_connection(conf, new, pool)
|
|
||||||
|
|
||||||
|
|
||||||
def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.multicall(conf, context, topic, msg, timeout, pool)
|
|
||||||
|
|
||||||
|
|
||||||
def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.call(conf, context, topic, msg, timeout, pool)
|
|
||||||
|
|
||||||
|
|
||||||
def cast(conf, context, topic, msg):
    """Sends a message on a topic without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast(conf, context, topic, msg, pool)
|
|
||||||
|
|
||||||
|
|
||||||
def fanout_cast(conf, context, topic, msg):
    """Sends a message on a fanout exchange without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast(conf, context, topic, msg, pool)
|
|
||||||
|
|
||||||
|
|
||||||
def cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a topic to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast_to_server(conf, context, server_params, topic, msg,
                                   pool)
|
|
||||||
|
|
||||||
|
|
||||||
def fanout_cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a fanout exchange to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast_to_server(conf, context, server_params, topic,
                                          msg, pool)
|
|
||||||
|
|
||||||
|
|
||||||
def notify(conf, context, topic, msg, envelope):
    """Sends a notification event on a topic."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.notify(conf, context, topic, msg, pool, envelope)
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup():
    """Release the module-wide pooled RPC connections."""
    pool = Connection.pool
    return rpc_amqp.cleanup(pool)
|
|
|
@ -1,818 +0,0 @@
|
||||||
# Copyright 2011 Cloudscaling Group, Inc
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import pprint
|
|
||||||
import re
|
|
||||||
import socket
|
|
||||||
import sys
|
|
||||||
import types
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
import eventlet
|
|
||||||
import greenlet
|
|
||||||
from oslo.config import cfg
|
|
||||||
import six
|
|
||||||
from six import moves
|
|
||||||
|
|
||||||
from designate.openstack.common import excutils
|
|
||||||
from designate.openstack.common.gettextutils import _
|
|
||||||
from designate.openstack.common import importutils
|
|
||||||
from designate.openstack.common import jsonutils
|
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
|
|
||||||
# Optional dependency: None when pyzmq (green variant) is not installed.
zmq = importutils.try_import('eventlet.green.zmq')

# for convenience, are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException

# Configuration options for the ZeroMQ RPC driver.
zmq_opts = [
    cfg.StrOpt('rpc_zmq_bind_address', default='*',
               help='ZeroMQ bind address. Should be a wildcard (*), '
                    'an ethernet interface, or IP. '
                    'The "host" option should point or resolve to this '
                    'address.'),

    # The module.Class to use for matchmaking.
    cfg.StrOpt(
        'rpc_zmq_matchmaker',
        default=('designate.openstack.common.rpc.'
                 'matchmaker.MatchMakerLocalhost'),
        help='MatchMaker driver',
    ),

    # The following port is unassigned by IANA as of 2012-05-21
    cfg.IntOpt('rpc_zmq_port', default=9501,
               help='ZeroMQ receiver listening port'),

    cfg.IntOpt('rpc_zmq_contexts', default=1,
               help='Number of ZeroMQ contexts, defaults to 1'),

    cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
               help='Maximum number of ingress messages to locally buffer '
                    'per topic. Default is unlimited.'),

    cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
               help='Directory for holding IPC sockets'),

    cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
               help='Name of this node. Must be a valid hostname, FQDN, or '
                    'IP address. Must match "host" option, if running Nova.')
]


CONF = cfg.CONF
CONF.register_opts(zmq_opts)

ZMQ_CTX = None  # ZeroMQ Context, must be global.
matchmaker = None  # memorized matchmaker object
|
|
||||||
|
|
||||||
|
|
||||||
def _serialize(data):
    """Serialization wrapper.

    We prefer using JSON, but it cannot encode all types.
    Error if a developer passes us bad data.
    """
    try:
        serialized = jsonutils.dumps(data, ensure_ascii=True)
    except TypeError:
        with excutils.save_and_reraise_exception():
            LOG.error(_("JSON serialization failed."))
    else:
        return serialized
|
|
||||||
|
|
||||||
|
|
||||||
def _deserialize(data):
    """Deserialization wrapper."""
    # Log the raw payload before decoding to aid debugging of bad messages.
    LOG.debug(_("Deserializing: %s"), data)
    result = jsonutils.loads(data)
    return result
|
|
||||||
|
|
||||||
|
|
||||||
class ZmqSocket(object):
    """A tiny wrapper around ZeroMQ.

    Simplifies the send/recv protocol and connection management.
    Can be used as a Context (supports the 'with' statement).
    """
    # NOTE(review): no __enter__/__exit__ is defined below -- the
    # context-manager claim in this docstring looks stale; confirm.

    def __init__(self, addr, zmq_type, bind=True, subscribe=None):
        # Socket comes from the lazily-created process-global context.
        self.sock = _get_ctxt().socket(zmq_type)
        self.addr = addr
        self.type = zmq_type
        self.subscriptions = []

        # Support failures on sending/receiving on wrong socket type.
        self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
        self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
        self.can_sub = zmq_type in (zmq.SUB, )

        # Support list, str, & None for subscribe arg (cast to list)
        do_sub = {
            list: subscribe,
            str: [subscribe],
            type(None): []
        }[type(subscribe)]

        for f in do_sub:
            self.subscribe(f)

        str_data = {'addr': addr, 'type': self.socket_s(),
                    'subscribe': subscribe, 'bind': bind}

        LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
        LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
        LOG.debug(_("-> bind: %(bind)s"), str_data)

        try:
            if bind:
                self.sock.bind(addr)
            else:
                self.sock.connect(addr)
        except Exception:
            # Any zmq failure (bad address, port in use, permissions...)
            # is surfaced uniformly as an RPCException.
            raise RPCException(_("Could not open socket."))

    def socket_s(self):
        """Get socket type as string."""
        t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
                  'DEALER')
        return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]

    def subscribe(self, msg_filter):
        """Subscribe."""
        if not self.can_sub:
            raise RPCException("Cannot subscribe on this socket.")
        LOG.debug(_("Subscribing to %s"), msg_filter)

        try:
            self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
        except Exception:
            # NOTE(review): subscription failures are silently swallowed
            # and the filter is NOT recorded -- confirm this best-effort
            # behavior is intended.
            return

        self.subscriptions.append(msg_filter)

    def unsubscribe(self, msg_filter):
        """Unsubscribe."""
        if msg_filter not in self.subscriptions:
            return
        self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
        self.subscriptions.remove(msg_filter)

    def close(self):
        """Close the socket, dropping all subscriptions first; idempotent."""
        if self.sock is None or self.sock.closed:
            return

        # We must unsubscribe, or we'll leak descriptors.
        if self.subscriptions:
            for f in self.subscriptions:
                try:
                    self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
                except Exception:
                    pass
            self.subscriptions = []

        try:
            # Default is to linger
            self.sock.close()
        except Exception:
            # While this is a bad thing to happen,
            # it would be much worse if some of the code calling this
            # were to fail. For now, lets log, and later evaluate
            # if we can safely raise here.
            LOG.error(_("ZeroMQ socket could not be closed."))
        self.sock = None

    def recv(self, **kwargs):
        """Receive a multipart message; only valid on PULL/SUB sockets."""
        if not self.can_recv:
            raise RPCException(_("You cannot recv on this socket."))
        return self.sock.recv_multipart(**kwargs)

    def send(self, data, **kwargs):
        """Send a multipart message; only valid on PUSH/PUB sockets."""
        if not self.can_send:
            raise RPCException(_("You cannot send on this socket."))
        self.sock.send_multipart(data, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
class ZmqClient(object):
    """Client for ZMQ sockets."""

    def __init__(self, addr):
        # Single outbound PUSH socket, connected (not bound) to the peer.
        self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)

    def cast(self, msg_id, topic, data, envelope):
        """Send *data* to *topic* without waiting for a reply.

        When *envelope* is falsy the legacy 4-frame 'cast' wire format
        is used; otherwise the payload is serialized via
        rpc_common.serialize_msg and flattened into the 'impl_zmq_v2'
        frame layout (unflatten_envelope is the inverse).
        """
        msg_id = msg_id or 0

        if not envelope:
            self.outq.send(map(bytes,
                           (msg_id, topic, 'cast', _serialize(data))))
            return

        # data is the [marshalled-context, msg] pair built by _cast():
        # data[0] is the context, data[1] the actual payload.
        rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
        # Flatten the envelope dict into alternating key/value frames.
        zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items())
        self.outq.send(map(bytes,
                       (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))

    def close(self):
        """Close the underlying PUSH socket."""
        self.outq.close()
|
|
||||||
|
|
||||||
|
|
||||||
class RpcContext(rpc_common.CommonRpcContext):
    """RPC context able to accumulate replies for a rpc.call."""

    def __init__(self, **kwargs):
        # Replies recorded by reply(); drained once the call completes.
        self.replies = []
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        """Return a copy sharing the same reply accumulator."""
        state = self.to_dict()
        state['replies'] = self.replies
        return self.__class__(**state)

    def reply(self, reply=None, failure=None, ending=False):
        """Record one reply; the terminating 'ending' marker is dropped."""
        if not ending:
            self.replies.append(reply)

    @classmethod
    def marshal(cls, ctx):
        """Serialize *ctx* to a JSON string."""
        return _serialize(ctx.to_dict())

    @classmethod
    def unmarshal(cls, data):
        """Rebuild an RpcContext from a JSON string."""
        return RpcContext.from_dict(_deserialize(data))
|
|
||||||
|
|
||||||
|
|
||||||
class InternalContext(object):
    """Used by ConsumerBase as a private context for - methods.

    Runs internal ('-'-prefixed) methods such as '-reply' with a
    trusted, locally-created context rather than the caller's.
    """

    def __init__(self, proxy):
        self.proxy = proxy
        self.msg_waiter = None

    def _get_response(self, ctx, proxy, topic, data):
        """Process a curried message and cast the result to topic.

        Returns a normalized reply list, or an {'exc': ...} mapping when
        dispatch raised; GreenletExit (shutdown) yields None.
        """
        LOG.debug(_("Running func with context: %s"), ctx.to_dict())
        data.setdefault('version', None)
        data.setdefault('args', {})

        try:
            result = proxy.dispatch(
                ctx, data['version'], data['method'],
                data.get('namespace'), **data['args'])
            return ConsumerBase.normalize_reply(result, ctx.replies)
        except greenlet.GreenletExit:
            # ignore these since they are just from shutdowns
            pass
        except rpc_common.ClientException as e:
            # Expected (caller-declared) exceptions travel back
            # serialized, without being logged as failures here.
            LOG.debug(_("Expected exception during message handling (%s)") %
                      e._exc_info[1])
            return {'exc':
                    rpc_common.serialize_remote_exception(e._exc_info,
                                                          log_failure=False)}
        except Exception:
            LOG.error(_("Exception during message handling"))
            return {'exc':
                    rpc_common.serialize_remote_exception(sys.exc_info())}

    def reply(self, ctx, proxy,
              msg_id=None, context=None, topic=None, msg=None):
        """Reply to a casted call."""
        # NOTE(ewindisch): context kwarg exists for Grizzly compat.
        # this may be able to be removed earlier than
        # 'I' if ConsumerBase.process were refactored.
        if type(msg) is list:
            payload = msg[-1]
        else:
            payload = msg

        response = ConsumerBase.normalize_reply(
            self._get_response(ctx, proxy, topic, payload),
            ctx.replies)

        LOG.debug(_("Sending reply"))
        # Route the response back via a '-process_reply' cast on the
        # caller-supplied reply topic.
        _multi_send(_cast, ctx, topic, {
            'method': '-process_reply',
            'args': {
                'msg_id': msg_id,  # Include for Folsom compat.
                'response': response
            }
        }, _msg_id=msg_id)
|
|
||||||
|
|
||||||
|
|
||||||
class ConsumerBase(object):
    """Base Consumer."""

    def __init__(self):
        # Trusted context used to execute internal ('-') methods.
        self.private_ctx = InternalContext(None)

    @classmethod
    def normalize_reply(self, result, replies):
        """Normalize a dispatch result into a list of replies.

        Generators are drained into a list; otherwise replies
        accumulated on the context win over the bare return value.
        """
        #TODO(ewindisch): re-evaluate and document this method.
        if isinstance(result, types.GeneratorType):
            return list(result)
        elif replies:
            return replies
        else:
            return [result]

    def process(self, proxy, ctx, data):
        """Dispatch one decoded message *data* to *proxy*."""
        data.setdefault('version', None)
        data.setdefault('args', {})

        # Method starting with - are
        # processed internally. (non-valid method name)
        method = data.get('method')
        if not method:
            LOG.error(_("RPC message did not include method."))
            return

        # Internal method
        # uses internal context for safety.
        if method == '-reply':
            self.private_ctx.reply(ctx, proxy, **data['args'])
            return

        proxy.dispatch(ctx, data['version'],
                       data['method'], data.get('namespace'), **data['args'])
|
|
||||||
|
|
||||||
|
|
||||||
class ZmqBaseReactor(ConsumerBase):
    """A consumer class implementing a centralized casting broker (PULL-PUSH).

    Used for RoundRobin requests.
    """

    def __init__(self, conf):
        super(ZmqBaseReactor, self).__init__()

        # Input socket -> proxy object serving messages from it.
        self.proxies = {}
        self.threads = []
        self.sockets = []
        self.subscribe = {}

        self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)

    def register(self, proxy, in_addr, zmq_type_in,
                 in_bind=True, subscribe=None):
        """Attach an input socket at *in_addr* served by *proxy*."""

        LOG.info(_("Registering reactor"))

        # Only receiving socket types are accepted for input.
        if zmq_type_in not in (zmq.PULL, zmq.SUB):
            raise RPCException("Bad input socktype")

        # Items push in.
        inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                        subscribe=subscribe)

        self.proxies[inq] = proxy
        self.sockets.append(inq)

        LOG.info(_("In reactor registered"))

    def consume_in_thread(self):
        """Spawn one greenthread per registered socket, looping consume()."""
        @excutils.forever_retry_uncaught_exceptions
        def _consume(sock):
            LOG.info(_("Consuming socket"))
            while True:
                self.consume(sock)

        for k in self.proxies.keys():
            self.threads.append(
                self.pool.spawn(_consume, k)
            )

    def wait(self):
        """Block until all consumer greenthreads finish."""
        for t in self.threads:
            t.wait()

    def close(self):
        """Close all sockets and kill the consumer greenthreads."""
        for s in self.sockets:
            s.close()

        for t in self.threads:
            t.kill()
|
|
||||||
|
|
||||||
|
|
||||||
class ZmqProxy(ZmqBaseReactor):
    """A consumer class implementing a topic-based proxy.

    Forwards to IPC sockets.
    """

    def __init__(self, conf):
        super(ZmqProxy, self).__init__(conf)
        # Characters that could escape the IPC directory when embedded
        # in a topic name (path separators on any platform).
        pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
        self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))

        # topic -> LightQueue feeding that topic's outbound IPC socket.
        self.topic_proxy = {}

    def consume(self, sock):
        """Forward one inbound message to its per-topic IPC publisher."""
        ipc_dir = CONF.rpc_zmq_ipc_dir

        data = sock.recv(copy=False)
        topic = data[1].bytes

        # fanout and reply topics publish (PUB); everything else is
        # round-robined over PUSH.
        if topic.startswith('fanout~'):
            sock_type = zmq.PUB
            topic = topic.split('.', 1)[0]
        elif topic.startswith('zmq_replies'):
            sock_type = zmq.PUB
        else:
            sock_type = zmq.PUSH

        if topic not in self.topic_proxy:
            def publisher(waiter):
                LOG.info(_("Creating proxy for topic: %s"), topic)

                try:
                    # The topic is received over the network,
                    # don't trust this input.
                    if self.badchars.search(topic) is not None:
                        emsg = _("Topic contained dangerous characters.")
                        LOG.warn(emsg)
                        raise RPCException(emsg)

                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type, bind=True)
                except RPCException:
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open,
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                waiter.send(True)

                # Drain the queue forever; this greenthread owns out_sock.
                while(True):
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data, copy=False)

            wait_sock_creation = eventlet.event.Event()
            eventlet.spawn(publisher, wait_sock_creation)

            try:
                wait_sock_creation.wait()
            except RPCException:
                LOG.error(_("Topic socket file creation failed."))
                return

        try:
            # Non-blocking: a full backlog drops the message rather than
            # stalling the inbound TCP receiver.
            self.topic_proxy[topic].put_nowait(data)
        except eventlet.queue.Full:
            LOG.error(_("Local per-topic backlog buffer full for topic "
                        "%(topic)s. Dropping message.") % {'topic': topic})

    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            # Pre-existing directory is fine; anything else is fatal.
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Required IPC directory does not exist at"
                                " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Permission denied to IPC directory at"
                                " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
|
|
||||||
|
|
||||||
|
|
||||||
def unflatten_envelope(packenv):
    """Unflatten the RPC envelope.

    Takes a flat sequence of alternating keys and values and returns a
    dictionary, i.e. [1, 2, 3, 4] => {1: 2, 3: 4}.  A trailing unpaired
    key is silently dropped, matching the streaming behavior of the
    original implementation.
    """
    # Use the builtin next() (Python 2.6+) instead of six.next -- no
    # compatibility shim is needed for this call.
    it = iter(packenv)
    result = {}
    try:
        while True:
            key = next(it)
            result[key] = next(it)
    except StopIteration:
        return result
|
|
||||||
|
|
||||||
|
|
||||||
class ZmqReactor(ZmqBaseReactor):
    """A consumer class implementing a consumer for messages.

    Can also be used as a 1:1 proxy
    """

    def __init__(self, conf):
        super(ZmqReactor, self).__init__(conf)

    def consume(self, sock):
        """Receive one message from *sock*, decode it, and dispatch it."""
        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)

        proxy = self.proxies[sock]

        # Frame 2 carries the wire-format discriminator.
        if data[2] == 'cast':  # Legacy protocol
            packenv = data[3]

            ctx, msg = _deserialize(packenv)
            request = rpc_common.deserialize_msg(msg)
            ctx = RpcContext.unmarshal(ctx)
        elif data[2] == 'impl_zmq_v2':
            # Remaining frames are the flattened key/value envelope.
            packenv = data[4:]

            msg = unflatten_envelope(packenv)
            request = rpc_common.deserialize_msg(msg)

            # Unmarshal only after verifying the message.
            ctx = RpcContext.unmarshal(data[3])
        else:
            LOG.error(_("ZMQ Envelope version unsupported or unknown."))
            return

        self.pool.spawn_n(self.process, proxy, ctx, request)
|
|
||||||
|
|
||||||
|
|
||||||
class Connection(rpc_common.Connection):
    """Manages connections and threads."""

    def __init__(self, conf):
        # Topics already registered on the reactor (deduplication).
        self.topics = []
        self.reactor = ZmqReactor(conf)

    def create_consumer(self, topic, proxy, fanout=False):
        """Register *proxy* as a consumer of *topic*.

        Fanout consumers subscribe (SUB) to 'fanout~<base-topic>';
        point-to-point consumers PULL from '<base-topic>.<host>'.
        """
        # Register with matchmaker.
        _get_matchmaker().register(topic, CONF.rpc_zmq_host)

        # Subscription scenarios
        if fanout:
            sock_type = zmq.SUB
            # A string fanout value doubles as the subscription filter.
            subscribe = ('', fanout)[type(fanout) == str]
            topic = 'fanout~' + topic.split('.', 1)[0]
        else:
            sock_type = zmq.PULL
            subscribe = None
            topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))

        if topic in self.topics:
            LOG.info(_("Skipping topic registration. Already registered."))
            return

        # Receive messages from (local) proxy
        inaddr = "ipc://%s/zmq_topic_%s" % \
            (CONF.rpc_zmq_ipc_dir, topic)

        LOG.debug(_("Consumer is a zmq.%s"),
                  ['PULL', 'SUB'][sock_type == zmq.SUB])

        self.reactor.register(proxy, inaddr, sock_type,
                              subscribe=subscribe, in_bind=False)
        self.topics.append(topic)

    def close(self):
        """Unregister all topics and shut down the reactor."""
        _get_matchmaker().stop_heartbeat()
        for topic in self.topics:
            _get_matchmaker().unregister(topic, CONF.rpc_zmq_host)

        self.reactor.close()
        self.topics = []

    def wait(self):
        """Block until the reactor's consumer threads finish."""
        self.reactor.wait()

    def consume_in_thread(self):
        """Start heartbeating and begin consuming in greenthreads."""
        _get_matchmaker().start_heartbeat()
        self.reactor.consume_in_thread()
|
|
||||||
|
|
||||||
|
|
||||||
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
          _msg_id=None):
    """Deliver one cast to *addr* within a timeout; no reply expected.

    Raises rpc_common.Timeout if delivery exceeds the cast timeout and
    RPCException on ZMQ socket errors.
    """
    timeout_cast = timeout or CONF.rpc_cast_timeout
    payload = [RpcContext.marshal(context), msg]

    with Timeout(timeout_cast, exception=rpc_common.Timeout):
        try:
            conn = ZmqClient(addr)

            # assumes cast can't return an exception
            conn.cast(_msg_id, topic, payload, envelope)
        except zmq.ZMQError:
            raise RPCException("Cast failed. ZMQ Socket Exception")
        finally:
            # 'conn' may be unbound if ZmqClient() itself raised; only
            # close when it was actually created.
            if 'conn' in vars():
                conn.close()
|
|
||||||
|
|
||||||
|
|
||||||
def _call(addr, context, topic, msg, timeout=None,
          envelope=False):
    """Send a call to *addr* and block until the reply arrives.

    Wraps the request in a '-reply' curried message carrying a unique
    msg_id, subscribes to the local reply socket filtered on that id,
    casts the request, and returns the last response.  Raises
    rpc_common.Timeout, RPCException, or a deserialized remote
    exception.
    """
    # timeout_response is how long we wait for a response
    timeout = timeout or CONF.rpc_response_timeout

    # The msg_id is used to track replies.
    msg_id = uuid.uuid4().hex

    # Replies always come into the reply service.
    reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host

    LOG.debug(_("Creating payload"))
    # Curry the original request into a reply method.
    mcontext = RpcContext.marshal(context)
    payload = {
        'method': '-reply',
        'args': {
            'msg_id': msg_id,
            'topic': reply_topic,
            # TODO(ewindisch): safe to remove mcontext in I.
            'msg': [mcontext, msg]
        }
    }

    LOG.debug(_("Creating queue socket for reply waiter"))

    # Messages arriving async.
    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
    with Timeout(timeout, exception=rpc_common.Timeout):
        try:
            # SUB socket filtered on msg_id so we only see our reply.
            msg_waiter = ZmqSocket(
                "ipc://%s/zmq_topic_zmq_replies.%s" %
                (CONF.rpc_zmq_ipc_dir,
                 CONF.rpc_zmq_host),
                zmq.SUB, subscribe=msg_id, bind=False
            )

            LOG.debug(_("Sending cast"))
            _cast(addr, context, topic, payload, envelope)

            LOG.debug(_("Cast sent; Waiting reply"))
            # Blocks until receives reply
            msg = msg_waiter.recv()
            LOG.debug(_("Received message: %s"), msg)
            LOG.debug(_("Unpacking response"))

            if msg[2] == 'cast':  # Legacy version
                raw_msg = _deserialize(msg[-1])[-1]
            elif msg[2] == 'impl_zmq_v2':
                rpc_envelope = unflatten_envelope(msg[4:])
                raw_msg = rpc_common.deserialize_msg(rpc_envelope)
            else:
                raise rpc_common.UnsupportedRpcEnvelopeVersion(
                    _("Unsupported or unknown ZMQ envelope returned."))

            responses = raw_msg['args']['response']
        # ZMQError trumps the Timeout error.
        except zmq.ZMQError:
            raise RPCException("ZMQ Socket Error")
        except (IndexError, KeyError):
            raise RPCException(_("RPC Message Invalid."))
        finally:
            # 'msg_waiter' may be unbound if ZmqSocket() raised.
            if 'msg_waiter' in vars():
                msg_waiter.close()

    # It seems we don't need to do all of the following,
    # but perhaps it would be useful for multicall?
    # One effect of this is that we're checking all
    # responses for Exceptions.
    for resp in responses:
        if isinstance(resp, types.DictType) and 'exc' in resp:
            raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])

    return responses[-1]
|
|
||||||
|
|
||||||
|
|
||||||
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """Wraps the sending of messages.

    Dispatches to the matchmaker and sends message to all relevant hosts.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = _get_matchmaker().queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if not queues:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            # Casts are fired asynchronously in a greenthread.
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope,
                             _msg_id)
            # NOTE(review): this returns after dispatching to the FIRST
            # queue only, which appears to contradict the fanout comment
            # above -- confirm multi-host fanout delivery is handled
            # elsewhere (e.g. by a PUB topic) before relying on it.
            return
        return method(_addr, context, _topic, msg, timeout,
                      envelope)
|
|
||||||
|
|
||||||
|
|
||||||
def create_connection(conf, new=True):
    """Build a fresh Connection; *new* is accepted for API parity only."""
    connection = Connection(conf)
    return connection
|
|
||||||
|
|
||||||
|
|
||||||
def multicall(conf, *args, **kwargs):
    """Issue a call and return every response; *conf* is unused."""
    responses = _multi_send(_call, *args, **kwargs)
    return responses
|
|
||||||
|
|
||||||
|
|
||||||
def call(conf, *args, **kwargs):
    """Send a message and return only the final response received."""
    return _multi_send(_call, *args, **kwargs)[-1]
|
|
||||||
|
|
||||||
|
|
||||||
def cast(conf, *args, **kwargs):
    """Fire-and-forget: deliver a message without awaiting a reply.

    *conf* is part of the driver API but unused by this implementation.
    """
    sender = _cast
    _multi_send(sender, *args, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def fanout_cast(conf, context, topic, msg, **kwargs):
    """Broadcast *msg* to every listener on *topic*; no replies expected.

    The 'fanout~' prefix (instead of a '.'-joined form) prevents the
    topic from being split on '.' and signals fanout delivery to the
    matchmaker and ZmqProxy.
    """
    fanout_topic = 'fanout~' + str(topic)
    _multi_send(_cast, context, fanout_topic, msg, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def notify(conf, context, topic, msg, envelope):
    """Send a notification event.

    Notifications go to "topic-priority" rather than the
    "topic.priority" form used by the AMQP drivers, because this driver
    treats '.' as a host separator.
    """
    safe_topic = topic.replace('.', '-')
    cast(conf, context, safe_topic, msg, envelope=envelope)
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup():
    """Release implementation resources: the ZMQ context and matchmaker."""
    global ZMQ_CTX, matchmaker
    if ZMQ_CTX:
        ZMQ_CTX.term()
    ZMQ_CTX = None
    matchmaker = None
|
|
||||||
|
|
||||||
|
|
||||||
def _get_ctxt():
    """Return the process-global ZeroMQ context, creating it on first use."""
    if not zmq:
        # zmq is imported optionally at module load; fail loudly here.
        raise ImportError("Failed to import eventlet.green.zmq")

    global ZMQ_CTX
    if not ZMQ_CTX:
        ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
    return ZMQ_CTX
|
|
||||||
|
|
||||||
|
|
||||||
def _get_matchmaker(*args, **kwargs):
    """Return the module-global matchmaker, importing it on first use.

    The driver class path comes from CONF.rpc_zmq_matchmaker; the
    deprecated 'matchmaker.MatchMakerRing' path is rewritten to the
    'matchmaker_ring' module (with a deprecation warning) before import.
    """
    global matchmaker
    if not matchmaker:
        mm = CONF.rpc_zmq_matchmaker
        if mm.endswith('matchmaker.MatchMakerRing'):
            # str.replace() returns a new string; the previous code
            # discarded the result, so the deprecated path was imported
            # unchanged. Rebind to actually apply the rewrite.
            mm = mm.replace('matchmaker', 'matchmaker_ring')
            LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
                       ' %(new)s instead') % dict(
                orig=CONF.rpc_zmq_matchmaker, new=mm))
        matchmaker = importutils.import_object(mm, *args, **kwargs)
    return matchmaker
|
|
|
@ -1,323 +0,0 @@
|
||||||
# Copyright 2011 Cloudscaling Group, Inc
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
The MatchMaker classes should accept a Topic or Fanout exchange key and
|
|
||||||
return keys for direct exchanges, per (approximate) AMQP parlance.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import contextlib
|
|
||||||
|
|
||||||
import eventlet
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from designate.openstack.common.gettextutils import _
|
|
||||||
from designate.openstack.common import log as logging
|
|
||||||
|
|
||||||
|
|
||||||
# Tunables controlling how often matchmaker registrations are refreshed
# and how long a registration stays valid without a heartbeat.
matchmaker_opts = [
    cfg.IntOpt('matchmaker_heartbeat_freq',
               default=300,
               help='Heartbeat frequency'),
    cfg.IntOpt('matchmaker_heartbeat_ttl',
               default=600,
               help='Heartbeat time-to-live.'),
]

CONF = cfg.CONF
CONF.register_opts(matchmaker_opts)
LOG = logging.getLogger(__name__)
# Convenience alias kept for backwards compatibility with older callers.
contextmanager = contextlib.contextmanager
|
|
||||||
|
|
||||||
|
|
||||||
class MatchMakerException(Exception):
    """Signals that a match could not be found by the MatchMaker."""
    # Default (translated) message for this exception family.
    message = _("Match not found by MatchMaker.")
|
|
||||||
|
|
||||||
|
|
||||||
class Exchange(object):
    """Lookup-strategy interface.

    Concrete subclasses implement run() on top of hashtables, DNS, or
    any other host-resolution backend.
    """

    def __init__(self):
        pass

    def run(self, key):
        """Resolve *key* to worker queues; subclasses must override."""
        raise NotImplementedError()
|
|
||||||
|
|
||||||
|
|
||||||
class Binding(object):
    """A predicate deciding whether an Exchange applies to a key."""

    def __init__(self):
        pass

    def test(self, key):
        """Return whether this binding matches *key*; must be overridden."""
        raise NotImplementedError()
|
|
||||||
|
|
||||||
|
|
||||||
class MatchMakerBase(object):
    """Match Maker Base Class.

    Build off HeartbeatMatchMakerBase if building a heartbeat-capable
    MatchMaker.
    """
    def __init__(self):
        # Array of tuples. Index [2] toggles negation, [3] is last-if-true
        self.bindings = []

        self.no_heartbeat_msg = _('Matchmaker does not implement '
                                  'registration or heartbeat.')

    def register(self, key, host):
        """Register a host on a backend.

        Heartbeats, if applicable, may keepalive registration.
        """
        pass

    def ack_alive(self, key, host):
        """Acknowledge that a key.host is alive.

        Used internally for updating heartbeats, but may also be used
        publicly to acknowledge a system is alive (i.e. rpc message
        successfully sent to host)
        """
        pass

    def is_alive(self, topic, host):
        """Checks if a host is alive."""
        pass

    def expire(self, topic, host):
        """Explicitly expire a host's registration."""
        pass

    def send_heartbeats(self):
        """Send all heartbeats.

        Use start_heartbeat to spawn a heartbeat greenthread,
        which loops this method.
        """
        pass

    def unregister(self, key, host):
        """Unregister a topic."""
        pass

    def start_heartbeat(self):
        """Spawn heartbeat greenthread."""
        pass

    def stop_heartbeat(self):
        """Destroys the heartbeat greenthread."""
        pass

    def add_binding(self, binding, rule, last=True):
        """Append a (binding, exchange) pair; *last* stops rule
        processing after a successful match in queues()."""
        self.bindings.append((binding, rule, False, last))

    #NOTE(ewindisch): kept the following method in case we implement the
    #                 underlying support.
    #def add_negate_binding(self, binding, rule, last=True):
    #    self.bindings.append((binding, rule, True, last))

    def queues(self, key):
        """Resolve *key* against all bindings, collecting worker queues."""
        workers = []

        # bit is for negate bindings - if we choose to implement it.
        # last stops processing rules if this matches.
        for (binding, exchange, bit, last) in self.bindings:
            if binding.test(key):
                workers.extend(exchange.run(key))

                # Support last.
                if last:
                    return workers
        return workers
|
|
||||||
|
|
||||||
|
|
||||||
class HeartbeatMatchMakerBase(MatchMakerBase):
    """Base for a heart-beat capable MatchMaker.

    Provides common methods for registering, unregistering, and maintaining
    heartbeats.
    """
    def __init__(self):
        # Registered hosts; host_topic maps (key, host) -> host.
        self.hosts = set()
        self._heart = None
        self.host_topic = {}

        super(HeartbeatMatchMakerBase, self).__init__()

    def send_heartbeats(self):
        """Send all heartbeats.

        Use start_heartbeat to spawn a heartbeat greenthread,
        which loops this method.
        """
        # Iterating the dict yields its (key, host) tuple keys.
        for key, host in self.host_topic:
            self.ack_alive(key, host)

    def ack_alive(self, key, host):
        """Acknowledge that a host.topic is alive.

        Used internally for updating heartbeats, but may also be used
        publicly to acknowledge a system is alive (i.e. rpc message
        successfully sent to host)
        """
        raise NotImplementedError("Must implement ack_alive")

    def backend_register(self, key, host):
        """Implements registration logic.

        Called by register(self,key,host)
        """
        raise NotImplementedError("Must implement backend_register")

    def backend_unregister(self, key, key_host):
        """Implements de-registration logic.

        Called by unregister(self,key,host)
        """
        raise NotImplementedError("Must implement backend_unregister")

    def register(self, key, host):
        """Register a host on a backend.

        Heartbeats, if applicable, may keepalive registration.
        """
        self.hosts.add(host)
        self.host_topic[(key, host)] = host
        # Backends store the combined "key.host" form.
        key_host = '.'.join((key, host))

        self.backend_register(key, key_host)

        self.ack_alive(key, host)

    def unregister(self, key, host):
        """Unregister a topic."""
        if (key, host) in self.host_topic:
            del self.host_topic[(key, host)]

        self.hosts.discard(host)
        self.backend_unregister(key, '.'.join((key, host)))

        LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
                 {'key': key, 'host': host})

    def start_heartbeat(self):
        """Implementation of MatchMakerBase.start_heartbeat.

        Launches greenthread looping send_heartbeats(),
        yielding for CONF.matchmaker_heartbeat_freq seconds
        between iterations.
        """
        if not self.hosts:
            raise MatchMakerException(
                _("Register before starting heartbeat."))

        def do_heartbeat():
            while True:
                self.send_heartbeats()
                eventlet.sleep(CONF.matchmaker_heartbeat_freq)

        self._heart = eventlet.spawn(do_heartbeat)

    def stop_heartbeat(self):
        """Destroys the heartbeat greenthread."""
        if self._heart:
            self._heart.kill()
|
|
||||||
|
|
||||||
|
|
||||||
class DirectBinding(Binding):
    """Specifies a host in the key via a '.' character.

    Although dots are used in the key, the behavior here is that it
    maps directly to a host — thus "direct".
    """

    def test(self, key):
        # Any dotted key is treated as host-addressed.
        return '.' in key
|
|
||||||
|
|
||||||
|
|
||||||
class TopicBinding(Binding):
    """Matches a 'bare' key, one without dots.

    AMQP generally considers topic exchanges to be those *with* dots,
    but we deviate here in terminology: the behavior here matches that
    of a topic exchange, whereas keys containing dots behave like a
    direct exchange.
    """

    def test(self, key):
        # Undotted keys are topic-style; dotted ones are direct.
        return '.' not in key
|
|
||||||
|
|
||||||
|
|
||||||
class FanoutBinding(Binding):
    """Match on fanout keys, where key starts with the 'fanout~' string."""
    # Doc fix: the original docstring claimed the prefix was 'fanout.',
    # but the code has always tested for 'fanout~' (a dot would make the
    # key match DirectBinding instead).
    def test(self, key):
        return key.startswith('fanout~')
|
|
||||||
|
|
||||||
|
|
||||||
class StubExchange(Exchange):
    """Exchange that does nothing: the key resolves to itself."""

    def run(self, key):
        # No host resolution is performed; callers get the key back
        # with no host attached.
        return [(key, None)]
|
|
||||||
|
|
||||||
|
|
||||||
class LocalhostExchange(Exchange):
    """Exchange where all direct topics are local.

    :param host: the host name every topic resolves to.
    """
    def __init__(self, host='localhost'):
        self.host = host
        # Bug fix: the original called super(Exchange, self).__init__(),
        # which names the *base* class and therefore skips
        # Exchange.__init__ in the MRO; name the current class instead.
        super(LocalhostExchange, self).__init__()

    def run(self, key):
        """Resolve *key* to '<bare-topic>.<host>' on the configured host."""
        return [('.'.join((key.split('.')[0], self.host)), self.host)]
|
|
||||||
|
|
||||||
|
|
||||||
class DirectExchange(Exchange):
    """Exchange where all topic keys are split, sending to second half.

    i.e. "compute.host" sends a message to "compute.host" running on "host".
    """
    def __init__(self):
        # Bug fix: the original called super(Exchange, self).__init__(),
        # which names the *base* class and therefore skips
        # Exchange.__init__ in the MRO; name the current class instead.
        super(DirectExchange, self).__init__()

    def run(self, key):
        """Return [(key, host)] where host is everything after the first dot."""
        host = key.split('.', 1)[1]
        return [(key, host)]
|
|
||||||
|
|
||||||
|
|
||||||
class MatchMakerLocalhost(MatchMakerBase):
    """Match Maker where all bare topics resolve to localhost.

    Useful for testing.
    """

    def __init__(self, host='localhost'):
        super(MatchMakerLocalhost, self).__init__()
        # Binding order matters: fanout first, then direct, then topic.
        rules = (
            (FanoutBinding(), LocalhostExchange(host)),
            (DirectBinding(), DirectExchange()),
            (TopicBinding(), LocalhostExchange(host)),
        )
        for binding, exchange in rules:
            self.add_binding(binding, exchange)
|
|
||||||
|
|
||||||
|
|
||||||
class MatchMakerStub(MatchMakerBase):
    """Match Maker where topics are untouched.

    Useful for testing, or for AMQP/brokered queues.
    Will not work where knowledge of hosts is required (i.e. zeromq).
    """

    def __init__(self):
        super(MatchMakerStub, self).__init__()
        # Every binding kind resolves through a no-op exchange, in the
        # same precedence order as the other matchmakers.
        for binding_cls in (FanoutBinding, DirectBinding, TopicBinding):
            self.add_binding(binding_cls(), StubExchange())
|
|
|
@ -1,144 +0,0 @@
|
||||||
# Copyright 2013 Cloudscaling Group, Inc
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
The MatchMaker classes should accept a Topic or Fanout exchange key and
|
|
||||||
return keys for direct exchanges, per (approximate) AMQP parlance.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from designate.openstack.common import importutils
|
|
||||||
from designate.openstack.common import log as logging
|
|
||||||
from designate.openstack.common.rpc import matchmaker as mm_common
|
|
||||||
|
|
||||||
redis = importutils.try_import('redis')
|
|
||||||
|
|
||||||
|
|
||||||
matchmaker_redis_opts = [
|
|
||||||
cfg.StrOpt('host',
|
|
||||||
default='127.0.0.1',
|
|
||||||
help='Host to locate redis'),
|
|
||||||
cfg.IntOpt('port',
|
|
||||||
default=6379,
|
|
||||||
help='Use this port to connect to redis host.'),
|
|
||||||
cfg.StrOpt('password',
|
|
||||||
default=None,
|
|
||||||
help='Password for Redis server. (optional)'),
|
|
||||||
]
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
opt_group = cfg.OptGroup(name='matchmaker_redis',
|
|
||||||
title='Options for Redis-based MatchMaker')
|
|
||||||
CONF.register_group(opt_group)
|
|
||||||
CONF.register_opts(matchmaker_redis_opts, opt_group)
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class RedisExchange(mm_common.Exchange):
    """Base exchange holding a reference to the Redis matchmaker."""

    def __init__(self, matchmaker):
        # Keep both the matchmaker (for liveness checks) and a direct
        # handle on its Redis connection.
        self.matchmaker = matchmaker
        self.redis = matchmaker.redis
        super(RedisExchange, self).__init__()
|
|
||||||
|
|
||||||
|
|
||||||
class RedisTopicExchange(RedisExchange):
    """Exchange where all topic keys are split, sending to second half.

    i.e. "compute.host" sends a message to "compute" running on "host".
    """

    def run(self, topic):
        """Pick one live member registered under *topic* at random."""
        while True:
            candidate = self.redis.srandmember(topic)
            if not candidate:
                # Set is empty: there are no longer any members.
                return []
            if not self.matchmaker.is_alive(topic, candidate):
                # Dead member (is_alive prunes it); draw again.
                continue
            # Member names are "<topic>.<host>"; extract the host part.
            return [(candidate, candidate.split('.', 1)[1])]
|
|
||||||
|
|
||||||
|
|
||||||
class RedisFanoutExchange(RedisExchange):
    """Return a list of all live hosts for a fanout topic."""

    def run(self, topic):
        # Strip the 'fanout~' prefix to get the bare topic.
        topic = topic.split('~', 1)[1]
        members = self.redis.smembers(topic)
        # Keep only members whose heartbeat is still valid.
        alive = [m for m in members
                 if self.matchmaker.is_alive(topic, m)]
        return [(m, m.split('.', 1)[1]) for m in alive]
|
|
||||||
|
|
||||||
|
|
||||||
class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
    """MatchMaker registering and looking-up hosts with a Redis server."""

    def __init__(self):
        super(MatchMakerRedis, self).__init__()

        if not redis:
            raise ImportError("Failed to import module redis.")

        self.redis = redis.Redis(
            host=CONF.matchmaker_redis.host,
            port=CONF.matchmaker_redis.port,
            password=CONF.matchmaker_redis.password)

        # Binding order matters: fanout first, then direct, then topic.
        self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self))
        self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange())
        self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self))

    def ack_alive(self, key, host):
        """Refresh the heartbeat TTL for key.host, re-registering if pruned."""
        topic = "%s.%s" % (key, host)
        if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl):
            # If we could not update the expiration, the key might have
            # been pruned.  Re-register, creating a new key in Redis.
            # Bug fix: the original read self.topic_host[host], an
            # attribute that does not exist (the base class keeps
            # host_topic, keyed by (key, host)), so pruned entries
            # raised AttributeError instead of re-registering.
            self.register(key, host)

    def is_alive(self, topic, host):
        """Return True if *host* still has a heartbeat; prune it otherwise."""
        # A TTL of -1 means the heartbeat key has no expiry set, i.e. it
        # was never (re-)acknowledged: treat the member as dead.
        if self.redis.ttl(host) == -1:
            self.expire(topic, host)
            return False
        return True

    def expire(self, topic, host):
        """Atomically remove a dead member and its heartbeat key."""
        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.delete(host)
            pipe.srem(topic, host)
            pipe.execute()

    def backend_register(self, key, key_host):
        """Add key_host to the topic set and create its heartbeat key."""
        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.sadd(key, key_host)

            # No value is needed, we just care if it exists.  Sets
            # aren't viable because only keys can expire.
            pipe.set(key_host, '')

            pipe.execute()

    def backend_unregister(self, key, key_host):
        """Remove key_host from the topic set and drop its heartbeat key."""
        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.srem(key, key_host)
            pipe.delete(key_host)
            pipe.execute()
|
|
|
@ -1,106 +0,0 @@
|
||||||
# Copyright 2011-2013 Cloudscaling Group, Inc
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
The MatchMaker classes should except a Topic or Fanout exchange key and
|
|
||||||
return keys for direct exchanges, per (approximate) AMQP parlance.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import itertools
|
|
||||||
import json
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from designate.openstack.common.gettextutils import _
|
|
||||||
from designate.openstack.common import log as logging
|
|
||||||
from designate.openstack.common.rpc import matchmaker as mm
|
|
||||||
|
|
||||||
|
|
||||||
matchmaker_opts = [
|
|
||||||
# Matchmaker ring file
|
|
||||||
cfg.StrOpt('ringfile',
|
|
||||||
deprecated_name='matchmaker_ringfile',
|
|
||||||
deprecated_group='DEFAULT',
|
|
||||||
default='/etc/oslo/matchmaker_ring.json',
|
|
||||||
help='Matchmaker ring file (JSON)'),
|
|
||||||
]
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class RingExchange(mm.Exchange):
    """Match Maker where hosts are loaded from a static JSON formatted file.

    ``__init__`` takes an optional ring dictionary argument; otherwise
    the ring is loaded from CONF.matchmaker_ring.ringfile.
    """

    def __init__(self, ring=None):
        super(RingExchange, self).__init__()

        if ring:
            self.ring = ring
        else:
            with open(CONF.matchmaker_ring.ringfile, 'r') as fh:
                self.ring = json.load(fh)

        # Per-topic round-robin iterators over the configured hosts.
        self.ring0 = dict(
            (topic, itertools.cycle(self.ring[topic]))
            for topic in self.ring.keys())

    def _ring_has(self, key):
        """Return True when *key* has hosts configured in the ring."""
        return key in self.ring0
|
|
||||||
|
|
||||||
|
|
||||||
class RoundRobinRingExchange(RingExchange):
    """A Topic Exchange based on a hashmap."""

    def __init__(self, ring=None):
        super(RoundRobinRingExchange, self).__init__(ring)

    def run(self, key):
        """Resolve *key* to the next host in its round-robin cycle."""
        if not self._ring_has(key):
            LOG.warn(
                _("No key defining hosts for topic '%s', "
                  "see ringfile") % (key, )
            )
            return []
        chosen = next(self.ring0[key])
        return [('.'.join((key, chosen)), chosen)]
|
|
||||||
|
|
||||||
|
|
||||||
class FanoutRingExchange(RingExchange):
    """Fanout Exchange based on a hashmap."""

    def __init__(self, ring=None):
        super(FanoutRingExchange, self).__init__(ring)

    def run(self, key):
        """Return every configured host for a 'fanout~'-prefixed key."""
        # Assume the key starts with "fanout~"; strip it for lookup.
        nkey = key.split('fanout~', 1)[1]
        if not self._ring_has(nkey):
            LOG.warn(
                _("No key defining hosts for topic '%s', "
                  "see ringfile") % (nkey, )
            )
            return []
        return [('.'.join((key, host)), host) for host in self.ring[nkey]]
|
|
||||||
|
|
||||||
|
|
||||||
class MatchMakerRing(mm.MatchMakerBase):
    """Match Maker where hosts are loaded from a static hashmap."""

    def __init__(self, ring=None):
        super(MatchMakerRing, self).__init__()
        # Binding order matters: fanout first, then direct, then topic.
        rules = (
            (mm.FanoutBinding(), FanoutRingExchange(ring)),
            (mm.DirectBinding(), mm.DirectExchange()),
            (mm.TopicBinding(), RoundRobinRingExchange(ring)),
        )
        for binding, exchange in rules:
            self.add_binding(binding, exchange)
|
|
|
@ -1,225 +0,0 @@
|
||||||
# Copyright 2012-2013 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
A helper class for proxy objects to remote APIs.
|
|
||||||
|
|
||||||
For more information about rpc API version numbers, see:
|
|
||||||
rpc/dispatcher.py
|
|
||||||
"""
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from designate.openstack.common import rpc
|
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
from designate.openstack.common.rpc import serializer as rpc_serializer
|
|
||||||
|
|
||||||
|
|
||||||
class RpcProxy(object):
    """A helper class for rpc clients.

    This class is a wrapper around the RPC client API.  It allows you
    to specify the topic and API version in a single place, and is
    intended to be used as a base class for a class implementing the
    client side of an rpc API.
    """

    # The default namespace, which can be overridden in a subclass.
    RPC_API_NAMESPACE = None

    def __init__(self, topic, default_version, version_cap=None,
                 serializer=None):
        """Initialize an RpcProxy.

        :param topic: The topic to use for all messages.
        :param default_version: The default API version to request in
            all outgoing messages; may be overridden per message.
        :param version_cap: Optionally cap the maximum version used for
            sent messages.
        :param serializer: Optionally (de-)serialize entities with a
            provided helper.
        """
        self.topic = topic
        self.default_version = default_version
        self.version_cap = version_cap
        self.serializer = (rpc_serializer.NoOpSerializer()
                           if serializer is None else serializer)
        super(RpcProxy, self).__init__()

    def _set_version(self, msg, vers):
        """Stamp *msg* with the requested (or default) API version.

        :param msg: The message having a version added to it.
        :param vers: The version number to add, or a false value to use
            the proxy's default.
        :raises: rpc_common.RpcVersionCapError when the chosen version
            is not compatible with the configured cap.
        """
        chosen = vers if vers else self.default_version
        cap = self.version_cap
        if cap and not rpc_common.version_is_compatible(cap, chosen):
            raise rpc_common.RpcVersionCapError(version_cap=cap)
        msg['version'] = chosen

    def _get_topic(self, topic):
        """Return the topic to use for a message."""
        return topic if topic else self.topic

    def can_send_version(self, version):
        """Check to see if a version is compatible with the version cap."""
        if not self.version_cap:
            return True
        return rpc_common.version_is_compatible(self.version_cap, version)

    @staticmethod
    def make_namespaced_msg(method, namespace, **kwargs):
        """Build a message dict carrying method, namespace, and args."""
        return {'method': method, 'namespace': namespace, 'args': kwargs}

    def make_msg(self, method, **kwargs):
        """Build a message dict in this proxy's default namespace."""
        return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
                                        **kwargs)

    def _serialize_msg_args(self, context, kwargs):
        """Serialize each message argument with the configured serializer.

        :param context: The request context
        :param kwargs: The arguments to serialize
        :returns: A new dict of serialized arguments
        """
        return dict(
            (name, self.serializer.serialize_entity(context, value))
            for name, value in six.iteritems(kwargs))

    def _prepare(self, context, msg, version):
        """Stamp the version on *msg* and serialize its args in place.

        Shared preamble for every send path (call/cast variants).
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])

    def call(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.call() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version.
        :param timeout: (Optional) Timeout while waiting for the
            response; a default is used when not given.
        :returns: The return value from the remote method.
        """
        self._prepare(context, msg, version)
        real_topic = self._get_topic(topic)
        try:
            result = rpc.call(context, real_topic, msg, timeout)
            return self.serializer.deserialize_entity(context, result)
        except rpc.common.Timeout as exc:
            # Re-raise with the topic and method attached for context.
            raise rpc.common.Timeout(
                exc.info, real_topic, msg.get('method'))

    def multicall(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.multicall() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version.
        :param timeout: (Optional) Timeout while waiting for responses.
        :returns: An iterator over the values returned by the remote
            method as they arrive.
        """
        self._prepare(context, msg, version)
        real_topic = self._get_topic(topic)
        try:
            result = rpc.multicall(context, real_topic, msg, timeout)
            return self.serializer.deserialize_entity(context, result)
        except rpc.common.Timeout as exc:
            # Re-raise with the topic and method attached for context.
            raise rpc.common.Timeout(
                exc.info, real_topic, msg.get('method'))

    def cast(self, context, msg, topic=None, version=None):
        """rpc.cast() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version.
        :returns: None.  rpc.cast() does not wait on any return value.
        """
        self._prepare(context, msg, version)
        rpc.cast(context, self._get_topic(topic), msg)

    def fanout_cast(self, context, msg, topic=None, version=None):
        """rpc.fanout_cast() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version.
        :returns: None.  rpc.fanout_cast() does not wait on any return
            value.
        """
        self._prepare(context, msg, version)
        rpc.fanout_cast(context, self._get_topic(topic), msg)

    def cast_to_server(self, context, server_params, msg, topic=None,
                       version=None):
        """rpc.cast_to_server() a remote method.

        :param context: The request context
        :param server_params: Server parameters; see rpc.cast_to_server().
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version.
        :returns: None.  rpc.cast_to_server() does not wait on any
            return values.
        """
        self._prepare(context, msg, version)
        rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)

    def fanout_cast_to_server(self, context, server_params, msg, topic=None,
                              version=None):
        """rpc.fanout_cast_to_server() a remote method.

        :param context: The request context
        :param server_params: Server parameters; see rpc.cast_to_server().
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version.
        :returns: None.  rpc.fanout_cast_to_server() does not wait on
            any return values.
        """
        self._prepare(context, msg, version)
        rpc.fanout_cast_to_server(context, server_params,
                                  self._get_topic(topic), msg)
|
|
|
@ -1,54 +0,0 @@
|
||||||
# Copyright 2013 IBM Corp.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""Provides the definition of an RPC serialization handler"""
|
|
||||||
|
|
||||||
import abc
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
|
|
||||||
@six.add_metaclass(abc.ABCMeta)
class Serializer(object):
    """Generic (de-)serialization definition base class."""

    @abc.abstractmethod
    def serialize_entity(self, context, entity):
        """Serialize something to primitive form.

        :param context: Security context
        :param entity: Entity to be serialized
        :returns: Serialized form of entity
        """

    @abc.abstractmethod
    def deserialize_entity(self, context, entity):
        """Deserialize something from primitive form.

        :param context: Security context
        :param entity: Primitive to be deserialized
        :returns: Deserialized form of entity
        """
|
|
||||||
|
|
||||||
|
|
||||||
class NoOpSerializer(Serializer):
    """A serializer that does nothing: entities pass through unchanged."""

    def serialize_entity(self, context, entity):
        # Identity transform — entities are assumed already primitive.
        return entity

    def deserialize_entity(self, context, entity):
        # Identity transform.
        return entity
|
|
|
@ -1,76 +0,0 @@
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
# Copyright 2011 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
from designate.openstack.common.gettextutils import _
|
|
||||||
from designate.openstack.common import log as logging
|
|
||||||
from designate.openstack.common import rpc
|
|
||||||
from designate.openstack.common.rpc import dispatcher as rpc_dispatcher
|
|
||||||
from designate.openstack.common import service
|
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class Service(service.Service):
    """Service object for binaries running on hosts.

    A service enables rpc by listening to queues based on topic and host.
    """

    def __init__(self, host, topic, manager=None, serializer=None):
        super(Service, self).__init__()
        self.host = host
        self.topic = topic
        self.serializer = serializer
        # With no manager given, dispatch rpc calls onto ourselves.
        self.manager = self if manager is None else manager

    def start(self):
        super(Service, self).start()

        self.conn = rpc.create_connection(new=True)
        LOG.debug(_("Creating Consumer connection for Service %s") %
                  self.topic)

        dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
                                                  self.serializer)

        # One shared connection feeds three consumers: the bare topic,
        # the host-qualified topic, and the fanout topic.
        self.conn.create_consumer(self.topic, dispatcher, fanout=False)
        self.conn.create_consumer('%s.%s' % (self.topic, self.host),
                                  dispatcher, fanout=False)
        self.conn.create_consumer(self.topic, dispatcher, fanout=True)

        # Hook to allow the manager to do other initializations after
        # the rpc connection is created.
        hook = getattr(self.manager, 'initialize_service_hook', None)
        if callable(hook):
            hook(self)

        # Consume from all consumers in a thread.
        self.conn.consume_in_thread()

    def stop(self):
        # Try to shut the connection down, but ignore any errors —
        # we're shutting down anyway.
        try:
            self.conn.close()
        except Exception:
            pass
        super(Service, self).stop()
|
|
|
@ -1,38 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import eventlet
|
|
||||||
eventlet.monkey_patch()
|
|
||||||
|
|
||||||
import contextlib
|
|
||||||
import sys
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from designate.openstack.common import log as logging
|
|
||||||
from designate.openstack.common import rpc
|
|
||||||
from designate.openstack.common.rpc import impl_zmq
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
CONF.register_opts(rpc.rpc_opts)
|
|
||||||
CONF.register_opts(impl_zmq.zmq_opts)
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Entry point: run a standalone ZeroMQ rpc proxy/receiver."""
    CONF(sys.argv[1:], project='oslo')
    logging.setup("oslo")

    # closing() guarantees the proxy is shut down on exit.
    with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as proxy:
        proxy.consume_in_thread()
        proxy.wait()
|
|
|
@ -0,0 +1,172 @@
|
||||||
|
# Copyright 2013 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'init',
|
||||||
|
'cleanup',
|
||||||
|
'set_defaults',
|
||||||
|
'add_extra_exmods',
|
||||||
|
'clear_extra_exmods',
|
||||||
|
'get_allowed_exmods',
|
||||||
|
'RequestContextSerializer',
|
||||||
|
'get_client',
|
||||||
|
'get_server',
|
||||||
|
'get_notifier',
|
||||||
|
'TRANSPORT_ALIASES',
|
||||||
|
]
|
||||||
|
|
||||||
|
from oslo.config import cfg
|
||||||
|
from oslo import messaging
|
||||||
|
|
||||||
|
import designate.context
|
||||||
|
import designate.exceptions
|
||||||
|
from designate.openstack.common import jsonutils
|
||||||
|
|
||||||
|
CONF = cfg.CONF
|
||||||
|
TRANSPORT = None
|
||||||
|
NOTIFIER = None
|
||||||
|
|
||||||
|
|
||||||
|
# NOTE: Additional entries to designate.exceptions goes here.
|
||||||
|
CONF.register_opts([
|
||||||
|
cfg.ListOpt(
|
||||||
|
'allowed_remote_exmods',
|
||||||
|
default=[],
|
||||||
|
help="Additional modules that contains allowed RPC exceptions.",
|
||||||
|
deprecated_name='allowed_rpc_exception_modules')
|
||||||
|
])
|
||||||
|
ALLOWED_EXMODS = [
|
||||||
|
designate.exceptions.__name__,
|
||||||
|
]
|
||||||
|
EXTRA_EXMODS = []
|
||||||
|
|
||||||
|
|
||||||
|
# NOTE(flaper87): The designate.openstack.common.rpc entries are
|
||||||
|
# for backwards compat with Havana rpc_backend configuration
|
||||||
|
# values. The designate.rpc entries are for compat with Folsom values.
|
||||||
|
TRANSPORT_ALIASES = {
|
||||||
|
'designate.openstack.common.rpc.impl_kombu': 'rabbit',
|
||||||
|
'designate.openstack.common.rpc.impl_qpid': 'qpid',
|
||||||
|
'designate.openstack.common.rpc.impl_zmq': 'zmq',
|
||||||
|
'designate.rpc.impl_kombu': 'rabbit',
|
||||||
|
'designate.rpc.impl_qpid': 'qpid',
|
||||||
|
'designate.rpc.impl_zmq': 'zmq',
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def init(conf):
|
||||||
|
global TRANSPORT, NOTIFIER
|
||||||
|
exmods = get_allowed_exmods()
|
||||||
|
TRANSPORT = messaging.get_transport(conf,
|
||||||
|
allowed_remote_exmods=exmods,
|
||||||
|
aliases=TRANSPORT_ALIASES)
|
||||||
|
|
||||||
|
serializer = RequestContextSerializer(JsonPayloadSerializer())
|
||||||
|
NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)
|
||||||
|
|
||||||
|
|
||||||
|
def initialized():
|
||||||
|
return None not in [TRANSPORT, NOTIFIER]
|
||||||
|
|
||||||
|
|
||||||
|
def cleanup():
|
||||||
|
global TRANSPORT, NOTIFIER
|
||||||
|
assert TRANSPORT is not None
|
||||||
|
assert NOTIFIER is not None
|
||||||
|
TRANSPORT.cleanup()
|
||||||
|
TRANSPORT = NOTIFIER = None
|
||||||
|
|
||||||
|
|
||||||
|
def set_defaults(control_exchange):
|
||||||
|
messaging.set_transport_defaults(control_exchange)
|
||||||
|
|
||||||
|
|
||||||
|
def add_extra_exmods(*args):
|
||||||
|
EXTRA_EXMODS.extend(args)
|
||||||
|
|
||||||
|
|
||||||
|
def clear_extra_exmods():
|
||||||
|
del EXTRA_EXMODS[:]
|
||||||
|
|
||||||
|
|
||||||
|
def get_allowed_exmods():
|
||||||
|
return ALLOWED_EXMODS + EXTRA_EXMODS + CONF.allowed_remote_exmods
|
||||||
|
|
||||||
|
|
||||||
|
class JsonPayloadSerializer(messaging.NoOpSerializer):
|
||||||
|
@staticmethod
|
||||||
|
def serialize_entity(context, entity):
|
||||||
|
return jsonutils.to_primitive(entity, convert_instances=True)
|
||||||
|
|
||||||
|
|
||||||
|
class RequestContextSerializer(messaging.Serializer):
|
||||||
|
|
||||||
|
def __init__(self, base):
|
||||||
|
self._base = base
|
||||||
|
|
||||||
|
def serialize_entity(self, context, entity):
|
||||||
|
if not self._base:
|
||||||
|
return entity
|
||||||
|
return self._base.serialize_entity(context, entity)
|
||||||
|
|
||||||
|
def deserialize_entity(self, context, entity):
|
||||||
|
if not self._base:
|
||||||
|
return entity
|
||||||
|
return self._base.deserialize_entity(context, entity)
|
||||||
|
|
||||||
|
def serialize_context(self, context):
|
||||||
|
return context.to_dict()
|
||||||
|
|
||||||
|
def deserialize_context(self, context):
|
||||||
|
return designate.context.DesignateContext.from_dict(context)
|
||||||
|
|
||||||
|
|
||||||
|
def get_transport_url(url_str=None):
|
||||||
|
return messaging.TransportURL.parse(CONF, url_str, TRANSPORT_ALIASES)
|
||||||
|
|
||||||
|
|
||||||
|
def get_client(target, version_cap=None, serializer=None):
|
||||||
|
assert TRANSPORT is not None
|
||||||
|
serializer = RequestContextSerializer(serializer)
|
||||||
|
return messaging.RPCClient(TRANSPORT,
|
||||||
|
target,
|
||||||
|
version_cap=version_cap,
|
||||||
|
serializer=serializer)
|
||||||
|
|
||||||
|
|
||||||
|
def get_server(target, endpoints, serializer=None):
|
||||||
|
assert TRANSPORT is not None
|
||||||
|
serializer = RequestContextSerializer(serializer)
|
||||||
|
return messaging.get_rpc_server(TRANSPORT,
|
||||||
|
target,
|
||||||
|
endpoints,
|
||||||
|
executor='eventlet',
|
||||||
|
serializer=serializer)
|
||||||
|
|
||||||
|
|
||||||
|
def get_listener(targets, endpoints, serializer=None):
|
||||||
|
assert TRANSPORT is not None
|
||||||
|
serializer = RequestContextSerializer(serializer)
|
||||||
|
return messaging.get_notification_listener(TRANSPORT,
|
||||||
|
targets,
|
||||||
|
endpoints,
|
||||||
|
executor='eventlet',
|
||||||
|
serializer=serializer)
|
||||||
|
|
||||||
|
|
||||||
|
def get_notifier(service=None, host=None, publisher_id=None):
|
||||||
|
assert NOTIFIER is not None
|
||||||
|
if not publisher_id:
|
||||||
|
publisher_id = "%s.%s" % (service, host or CONF.host)
|
||||||
|
return NOTIFIER.prepare(publisher_id=publisher_id)
|
|
@ -0,0 +1,118 @@
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# Copyright 2011 Justin Santa Barbara
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
import os
|
||||||
|
import inspect
|
||||||
|
|
||||||
|
from oslo import messaging
|
||||||
|
from oslo.config import cfg
|
||||||
|
|
||||||
|
from designate.openstack.common import service
|
||||||
|
from designate.openstack.common import log as logging
|
||||||
|
from designate.openstack.common.gettextutils import _
|
||||||
|
from designate import rpc
|
||||||
|
from designate import version
|
||||||
|
|
||||||
|
CONF = cfg.CONF
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class Service(service.Service):
|
||||||
|
"""
|
||||||
|
Service class to be shared among the diverse service inside of Designate.
|
||||||
|
|
||||||
|
Partially inspired by the code at cinder.service but for now without
|
||||||
|
support for loading so called "endpoints" or "managers".
|
||||||
|
"""
|
||||||
|
def __init__(self, host, binary, topic, service_name=None, endpoints=None,
|
||||||
|
*args, **kwargs):
|
||||||
|
super(Service, self).__init__()
|
||||||
|
|
||||||
|
if not rpc.initialized():
|
||||||
|
rpc.init(CONF)
|
||||||
|
|
||||||
|
self.host = host
|
||||||
|
self.binary = binary
|
||||||
|
self.topic = topic
|
||||||
|
self.service_name = service_name
|
||||||
|
|
||||||
|
# TODO(ekarls): change this to be loadable via mod import or stevedore?
|
||||||
|
self.endpoints = endpoints or [self]
|
||||||
|
|
||||||
|
self.saved_args, self.saved_kwargs = args, kwargs
|
||||||
|
|
||||||
|
def start(self):
|
||||||
|
version_string = version.version_info.version_string()
|
||||||
|
LOG.audit(_('Starting %(topic)s node (version %(version_string)s)'),
|
||||||
|
{'topic': self.topic, 'version_string': version_string})
|
||||||
|
|
||||||
|
LOG.debug(_("Creating RPC server on topic '%s'") % self.topic)
|
||||||
|
|
||||||
|
target = messaging.Target(topic=self.topic, server=self.host)
|
||||||
|
|
||||||
|
self.rpcserver = rpc.get_server(target, self.endpoints)
|
||||||
|
self.rpcserver.start()
|
||||||
|
|
||||||
|
self.notifier = rpc.get_notifier(self.service_name)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def create(cls, host=None, binary=None, topic=None, service_name=None,
|
||||||
|
endpoints=None):
|
||||||
|
"""Instantiates class and passes back application object.
|
||||||
|
|
||||||
|
:param host: defaults to CONF.host
|
||||||
|
:param binary: defaults to basename of executable
|
||||||
|
:param topic: defaults to bin_name - 'cinder-' part
|
||||||
|
"""
|
||||||
|
if not host:
|
||||||
|
host = CONF.host
|
||||||
|
if not binary:
|
||||||
|
binary = os.path.basename(inspect.stack()[-1][1])
|
||||||
|
if not topic:
|
||||||
|
name = "_".join(binary.split('-')[1:]) + '_topic'
|
||||||
|
topic = CONF.get(name)
|
||||||
|
|
||||||
|
service_obj = cls(host, binary, topic, service_name, endpoints)
|
||||||
|
return service_obj
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
# Try to shut the connection down, but if we get any sort of
|
||||||
|
# errors, go ahead and ignore them.. as we're shutting down anyway
|
||||||
|
try:
|
||||||
|
self.rpcserver.stop()
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
super(Service, self).stop()
|
||||||
|
|
||||||
|
|
||||||
|
_launcher = None
|
||||||
|
|
||||||
|
|
||||||
|
def serve(server, workers=None):
|
||||||
|
global _launcher
|
||||||
|
if _launcher:
|
||||||
|
raise RuntimeError(_('serve() can only be called once'))
|
||||||
|
|
||||||
|
_launcher = service.launch(server, workers=workers)
|
||||||
|
|
||||||
|
|
||||||
|
def wait():
|
||||||
|
try:
|
||||||
|
_launcher.wait()
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
_launcher.stop()
|
||||||
|
rpc.cleanup()
|
|
@ -15,12 +15,13 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
from designate.context import DesignateContext
|
from oslo import messaging
|
||||||
|
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common import rpc
|
|
||||||
from designate.openstack.common import service
|
from designate.openstack.common import service
|
||||||
from designate import exceptions
|
from designate import exceptions
|
||||||
from designate import notification_handler
|
from designate import notification_handler
|
||||||
|
from designate import rpc
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
@ -29,11 +30,11 @@ class Service(service.Service):
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
super(Service, self).__init__(*args, **kwargs)
|
super(Service, self).__init__(*args, **kwargs)
|
||||||
|
|
||||||
|
rpc.init(cfg.CONF)
|
||||||
|
|
||||||
# Initialize extensions
|
# Initialize extensions
|
||||||
self.handlers = self._init_extensions()
|
self.handlers = self._init_extensions()
|
||||||
|
self.subscribers = self._get_subscribers()
|
||||||
# Get a rpc connection
|
|
||||||
self.rpc_conn = rpc.create_connection()
|
|
||||||
|
|
||||||
def _init_extensions(self):
|
def _init_extensions(self):
|
||||||
""" Loads and prepares all enabled extensions """
|
""" Loads and prepares all enabled extensions """
|
||||||
|
@ -51,78 +52,57 @@ class Service(service.Service):
|
||||||
|
|
||||||
return notification_handlers
|
return notification_handlers
|
||||||
|
|
||||||
|
def _get_subscribers(self):
|
||||||
|
subscriptions = {}
|
||||||
|
for handler in self.handlers:
|
||||||
|
for et in handler.get_event_types():
|
||||||
|
subscriptions.setdefault(et, [])
|
||||||
|
subscriptions[et].append(handler)
|
||||||
|
return subscriptions
|
||||||
|
|
||||||
def start(self):
|
def start(self):
|
||||||
super(Service, self).start()
|
super(Service, self).start()
|
||||||
|
|
||||||
# Setup notification subscriptions and start consuming
|
# Setup notification subscriptions and start consuming
|
||||||
self._setup_subscriptions()
|
targets = self._get_targets()
|
||||||
self.rpc_conn.consume_in_thread()
|
|
||||||
|
# TODO(ekarlso): Change this is to endpoint objects rather then
|
||||||
|
# ourselves?
|
||||||
|
self._server = rpc.get_listener(targets, [self])
|
||||||
|
self._server.start()
|
||||||
|
|
||||||
def stop(self):
|
def stop(self):
|
||||||
# Try to shut the connection down, but if we get any sort of
|
# Try to shut the connection down, but if we get any sort of
|
||||||
# errors, go ahead and ignore them.. as we're shutting down anyway
|
# errors, go ahead and ignore them.. as we're shutting down anyway
|
||||||
try:
|
try:
|
||||||
self.rpc_conn.close()
|
self._server.stop()
|
||||||
except Exception:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
super(Service, self).stop()
|
super(Service, self).stop()
|
||||||
|
|
||||||
def _setup_subscriptions(self):
|
def _get_targets(self):
|
||||||
"""
|
"""
|
||||||
Set's up subscriptions for the various exchange+topic combinations that
|
Set's up subscriptions for the various exchange+topic combinations that
|
||||||
we have a handler for.
|
we have a handler for.
|
||||||
"""
|
"""
|
||||||
|
targets = []
|
||||||
for handler in self.handlers:
|
for handler in self.handlers:
|
||||||
exchange, topics = handler.get_exchange_topics()
|
exchange, topics = handler.get_exchange_topics()
|
||||||
|
|
||||||
for topic in topics:
|
for topic in topics:
|
||||||
queue_name = "designate.notifications.%s.%s.%s" % (
|
target = messaging.Target(exchange=exchange, topic=topic)
|
||||||
handler.get_canonical_name(), exchange, topic)
|
targets.append(target)
|
||||||
|
return targets
|
||||||
|
|
||||||
self.rpc_conn.join_consumer_pool(
|
def info(self, context, publisher_id, event_type, payload, metadata):
|
||||||
self._process_notification,
|
|
||||||
queue_name,
|
|
||||||
topic,
|
|
||||||
exchange_name=exchange)
|
|
||||||
|
|
||||||
def _get_handler_event_types(self):
|
|
||||||
event_types = set()
|
|
||||||
for handler in self.handlers:
|
|
||||||
for et in handler.get_event_types():
|
|
||||||
event_types.add(et)
|
|
||||||
return event_types
|
|
||||||
|
|
||||||
def _process_notification(self, notification):
|
|
||||||
"""
|
"""
|
||||||
Processes an incoming notification, offering each extension the
|
Processes an incoming notification, offering each extension the
|
||||||
opportunity to handle it.
|
opportunity to handle it.
|
||||||
"""
|
"""
|
||||||
event_type = notification.get('event_type')
|
|
||||||
|
|
||||||
# NOTE(zykes): Only bother to actually do processing if there's any
|
# NOTE(zykes): Only bother to actually do processing if there's any
|
||||||
# matching events, skips logging of things like compute.exists etc.
|
# matching events, skips logging of things like compute.exists etc.
|
||||||
if event_type in self._get_handler_event_types():
|
if event_type in self._get_handler_event_types():
|
||||||
for handler in self.handlers:
|
for handler in self.handlers:
|
||||||
self._process_notification_for_handler(handler, notification)
|
LOG.debug('Found handler for: %s' % event_type)
|
||||||
|
handler.process_notification(context, event_type, payload)
|
||||||
def _process_notification_for_handler(self, handler, notification):
|
|
||||||
"""
|
|
||||||
Processes an incoming notification for a specific handler, checking
|
|
||||||
to see if the handler is interested in the notification before
|
|
||||||
handing it over.
|
|
||||||
"""
|
|
||||||
context = DesignateContext(
|
|
||||||
auth_token=notification.get('_context_auth_token', None),
|
|
||||||
user=notification.get('_context_user', None),
|
|
||||||
tenant=notification.get('_context_tenant', None),
|
|
||||||
roles=notification.get('_context_roles', []),
|
|
||||||
service_catalog=notification.get('_context_service_catalog', []),
|
|
||||||
is_admin=notification.get('_context_is_admin', False)
|
|
||||||
)
|
|
||||||
event_type = notification['event_type']
|
|
||||||
payload = notification['payload']
|
|
||||||
|
|
||||||
if event_type in handler.get_event_types():
|
|
||||||
LOG.debug('Found handler for: %s' % event_type)
|
|
||||||
handler.process_notification(context, event_type, payload)
|
|
||||||
|
|
|
@ -83,7 +83,7 @@ class SQLAlchemyStorage(base.Storage):
|
||||||
if context.all_tenants:
|
if context.all_tenants:
|
||||||
LOG.debug('Including all tenants items in query results')
|
LOG.debug('Including all tenants items in query results')
|
||||||
else:
|
else:
|
||||||
query = query.filter(model.tenant_id == context.tenant_id)
|
query = query.filter(model.tenant_id == context.tenant)
|
||||||
|
|
||||||
return query
|
return query
|
||||||
|
|
||||||
|
|
|
@ -24,9 +24,12 @@ import sqlalchemy
|
||||||
import tempfile
|
import tempfile
|
||||||
from migrate.versioning import api as versioning_api
|
from migrate.versioning import api as versioning_api
|
||||||
from testtools import testcase
|
from testtools import testcase
|
||||||
|
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
from oslo.messaging import conffixture as messaging_fixture
|
||||||
|
from oslo.messaging.notify import _impl_test as test_notifier
|
||||||
|
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common.notifier import test_notifier
|
|
||||||
from designate.openstack.common.fixture import config
|
from designate.openstack.common.fixture import config
|
||||||
from designate.openstack.common import importutils
|
from designate.openstack.common import importutils
|
||||||
from designate.openstack.common import policy
|
from designate.openstack.common import policy
|
||||||
|
@ -38,6 +41,10 @@ from designate import exceptions
|
||||||
from designate.network_api import fake as fake_network_api
|
from designate.network_api import fake as fake_network_api
|
||||||
from designate import network_api
|
from designate import network_api
|
||||||
|
|
||||||
|
# NOTE: If eventlet isn't patched and there's a exc tests block
|
||||||
|
import eventlet
|
||||||
|
eventlet.monkey_patch(os=False)
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
cfg.CONF.import_opt('storage_driver', 'designate.central',
|
cfg.CONF.import_opt('storage_driver', 'designate.central',
|
||||||
|
@ -48,29 +55,25 @@ cfg.CONF.import_opt('auth_strategy', 'designate.api',
|
||||||
group='service:api')
|
group='service:api')
|
||||||
cfg.CONF.import_opt('database_connection', 'designate.storage.impl_sqlalchemy',
|
cfg.CONF.import_opt('database_connection', 'designate.storage.impl_sqlalchemy',
|
||||||
group='storage:sqlalchemy')
|
group='storage:sqlalchemy')
|
||||||
# NOTE: Since we're importing service classes in start_service this breaks
|
|
||||||
# if not here.
|
|
||||||
cfg.CONF.import_opt(
|
|
||||||
'notification_driver', 'designate.openstack.common.notifier.api')
|
|
||||||
|
|
||||||
|
|
||||||
class NotifierFixture(fixtures.Fixture):
|
class NotifierFixture(fixtures.Fixture):
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
super(NotifierFixture, self).setUp()
|
super(NotifierFixture, self).setUp()
|
||||||
self.addCleanup(self.clear)
|
self.addCleanup(test_notifier.reset)
|
||||||
|
|
||||||
def get(self):
|
def get(self):
|
||||||
return test_notifier.NOTIFICATIONS
|
return test_notifier.NOTIFICATIONS
|
||||||
|
|
||||||
def clear(self):
|
def clear(self):
|
||||||
test_notifier.NOTIFICATIONS = []
|
return test_notifier.reset()
|
||||||
|
|
||||||
|
|
||||||
class ServiceFixture(fixtures.Fixture):
|
class ServiceFixture(fixtures.Fixture):
|
||||||
def __init__(self, svc_name, *args, **kw):
|
def __init__(self, svc_name, *args, **kw):
|
||||||
cls = importutils.import_class(
|
cls = importutils.import_class(
|
||||||
'designate.%s.service.Service' % svc_name)
|
'designate.%s.service.Service' % svc_name)
|
||||||
self.svc = cls(*args, **kw)
|
self.svc = cls.create(binary='desgignate-' + svc_name, *args, **kw)
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
super(ServiceFixture, self).setUp()
|
super(ServiceFixture, self).setUp()
|
||||||
|
@ -236,12 +239,13 @@ class TestCase(test.BaseTestCase):
|
||||||
self.useFixture(fixtures.FakeLogger('designate', level='DEBUG'))
|
self.useFixture(fixtures.FakeLogger('designate', level='DEBUG'))
|
||||||
self.CONF = self.useFixture(config.Config(cfg.CONF)).conf
|
self.CONF = self.useFixture(config.Config(cfg.CONF)).conf
|
||||||
|
|
||||||
self.config(
|
self.messaging_conf = self.useFixture(
|
||||||
notification_driver=[
|
messaging_fixture.ConfFixture(cfg.CONF))
|
||||||
'designate.openstack.common.notifier.test_notifier',
|
self.messaging_conf.transport_driver = 'fake'
|
||||||
],
|
|
||||||
rpc_backend='designate.openstack.common.rpc.impl_fake',
|
self.config(notification_driver='test')
|
||||||
)
|
|
||||||
|
self.notifications = self.useFixture(NotifierFixture())
|
||||||
|
|
||||||
self.config(
|
self.config(
|
||||||
storage_driver='sqlalchemy',
|
storage_driver='sqlalchemy',
|
||||||
|
@ -279,9 +283,6 @@ class TestCase(test.BaseTestCase):
|
||||||
|
|
||||||
self.CONF([], project='designate')
|
self.CONF([], project='designate')
|
||||||
|
|
||||||
self.notifications = NotifierFixture()
|
|
||||||
self.useFixture(self.notifications)
|
|
||||||
|
|
||||||
self.useFixture(PolicyFixture())
|
self.useFixture(PolicyFixture())
|
||||||
|
|
||||||
self.network_api = NetworkAPIFixture()
|
self.network_api = NetworkAPIFixture()
|
||||||
|
@ -454,7 +455,7 @@ class TestCase(test.BaseTestCase):
|
||||||
values = self.get_domain_fixture(fixture=fixture, values=kwargs)
|
values = self.get_domain_fixture(fixture=fixture, values=kwargs)
|
||||||
|
|
||||||
if 'tenant_id' not in values:
|
if 'tenant_id' not in values:
|
||||||
values['tenant_id'] = context.tenant_id
|
values['tenant_id'] = context.tenant
|
||||||
|
|
||||||
return self.central_service.create_domain(context, values=values)
|
return self.central_service.create_domain(context, values=values)
|
||||||
|
|
||||||
|
|
|
@ -14,16 +14,14 @@
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
from oslo.config import cfg
|
||||||
from designate.tests.test_api import ApiTestCase
|
from designate.tests.test_api import ApiTestCase
|
||||||
|
from designate import context
|
||||||
from designate import exceptions
|
from designate import exceptions
|
||||||
|
from designate import rpc
|
||||||
from designate.api import middleware
|
from designate.api import middleware
|
||||||
|
|
||||||
|
|
||||||
class FakeContext(object):
|
|
||||||
def __init__(self, roles=[]):
|
|
||||||
self.roles = roles
|
|
||||||
|
|
||||||
|
|
||||||
class FakeRequest(object):
|
class FakeRequest(object):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.headers = {}
|
self.headers = {}
|
||||||
|
@ -51,7 +49,7 @@ class MaintenanceMiddlewareTest(ApiTestCase):
|
||||||
group='service:api')
|
group='service:api')
|
||||||
|
|
||||||
request = FakeRequest()
|
request = FakeRequest()
|
||||||
request.environ['context'] = FakeContext(roles=['user'])
|
request.environ['context'] = context.DesignateContext(roles=['user'])
|
||||||
|
|
||||||
app = middleware.MaintenanceMiddleware({})
|
app = middleware.MaintenanceMiddleware({})
|
||||||
|
|
||||||
|
@ -66,7 +64,7 @@ class MaintenanceMiddlewareTest(ApiTestCase):
|
||||||
group='service:api')
|
group='service:api')
|
||||||
|
|
||||||
request = FakeRequest()
|
request = FakeRequest()
|
||||||
request.environ['context'] = FakeContext(roles=[])
|
request.environ['context'] = context.DesignateContext(roles=[])
|
||||||
|
|
||||||
app = middleware.MaintenanceMiddleware({})
|
app = middleware.MaintenanceMiddleware({})
|
||||||
|
|
||||||
|
@ -94,7 +92,7 @@ class MaintenanceMiddlewareTest(ApiTestCase):
|
||||||
group='service:api')
|
group='service:api')
|
||||||
|
|
||||||
request = FakeRequest()
|
request = FakeRequest()
|
||||||
request.environ['context'] = FakeContext(roles=['admin'])
|
request.environ['context'] = context.DesignateContext(roles=['admin'])
|
||||||
|
|
||||||
app = middleware.MaintenanceMiddleware({})
|
app = middleware.MaintenanceMiddleware({})
|
||||||
|
|
||||||
|
@ -127,8 +125,8 @@ class KeystoneContextMiddlewareTest(ApiTestCase):
|
||||||
|
|
||||||
self.assertFalse(context.is_admin)
|
self.assertFalse(context.is_admin)
|
||||||
self.assertEqual('AuthToken', context.auth_token)
|
self.assertEqual('AuthToken', context.auth_token)
|
||||||
self.assertEqual('UserID', context.user_id)
|
self.assertEqual('UserID', context.user)
|
||||||
self.assertEqual('TenantID', context.tenant_id)
|
self.assertEqual('TenantID', context.tenant)
|
||||||
self.assertEqual(['admin', 'Member'], context.roles)
|
self.assertEqual(['admin', 'Member'], context.roles)
|
||||||
|
|
||||||
def test_process_request_invalid_keystone_token(self):
|
def test_process_request_invalid_keystone_token(self):
|
||||||
|
@ -161,20 +159,19 @@ class NoAuthContextMiddlewareTest(ApiTestCase):
|
||||||
|
|
||||||
self.assertIn('context', request.environ)
|
self.assertIn('context', request.environ)
|
||||||
|
|
||||||
context = request.environ['context']
|
ctxt = request.environ['context']
|
||||||
|
|
||||||
self.assertTrue(context.is_admin)
|
self.assertTrue(ctxt.is_admin)
|
||||||
self.assertIsNone(context.auth_token)
|
self.assertIsNone(ctxt.auth_token)
|
||||||
self.assertEqual('noauth-user', context.user_id)
|
self.assertEqual('noauth-user', ctxt.user)
|
||||||
self.assertEqual('noauth-project', context.tenant_id)
|
self.assertEqual('noauth-project', ctxt.tenant)
|
||||||
self.assertEqual([], context.roles)
|
self.assertEqual([], ctxt.roles)
|
||||||
|
|
||||||
|
|
||||||
class FaultMiddlewareTest(ApiTestCase):
|
class FaultMiddlewareTest(ApiTestCase):
|
||||||
__test__ = True
|
|
||||||
|
|
||||||
def test_notify_of_fault(self):
|
def test_notify_of_fault(self):
|
||||||
self.config(notify_api_faults=True)
|
self.config(notify_api_faults=True)
|
||||||
|
rpc.init(cfg.CONF)
|
||||||
app = middleware.FaultWrapperMiddleware({})
|
app = middleware.FaultWrapperMiddleware({})
|
||||||
|
|
||||||
class RaisingRequest(FakeRequest):
|
class RaisingRequest(FakeRequest):
|
||||||
|
@ -182,9 +179,9 @@ class FaultMiddlewareTest(ApiTestCase):
|
||||||
raise exceptions.DuplicateDomain()
|
raise exceptions.DuplicateDomain()
|
||||||
|
|
||||||
request = RaisingRequest()
|
request = RaisingRequest()
|
||||||
context = FakeContext()
|
ctxt = context.DesignateContext()
|
||||||
context.request_id = 'one'
|
ctxt.request_id = 'one'
|
||||||
request.environ['context'] = context
|
request.environ['context'] = ctxt
|
||||||
|
|
||||||
# Process the request
|
# Process the request
|
||||||
app(request)
|
app(request)
|
||||||
|
@ -192,7 +189,9 @@ class FaultMiddlewareTest(ApiTestCase):
|
||||||
notifications = self.get_notifications()
|
notifications = self.get_notifications()
|
||||||
self.assertEqual(1, len(notifications))
|
self.assertEqual(1, len(notifications))
|
||||||
|
|
||||||
self.assertEqual('ERROR', notifications[0]['priority'])
|
ctxt, message, priority = notifications.pop()
|
||||||
self.assertEqual('dns.api.fault', notifications[0]['event_type'])
|
|
||||||
self.assertIn('timestamp', notifications[0])
|
self.assertEqual('ERROR', message['priority'])
|
||||||
self.assertIn('publisher_id', notifications[0])
|
self.assertEqual('dns.api.fault', message['event_type'])
|
||||||
|
self.assertIn('timestamp', message)
|
||||||
|
self.assertIn('publisher_id', message)
|
||||||
|
|
|
@ -14,6 +14,7 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
from designate.tests.test_api import ApiTestCase
|
from designate.tests.test_api import ApiTestCase
|
||||||
|
from designate.api import service
|
||||||
|
|
||||||
|
|
||||||
class ApiServiceTest(ApiTestCase):
|
class ApiServiceTest(ApiTestCase):
|
||||||
|
@ -23,8 +24,13 @@ class ApiServiceTest(ApiTestCase):
|
||||||
# Use a random port for the API
|
# Use a random port for the API
|
||||||
self.config(api_port=0, group='service:api')
|
self.config(api_port=0, group='service:api')
|
||||||
|
|
||||||
self.service = self.start_service('api')
|
# Bring up the Central service as if not the rpc will go into
|
||||||
|
# AssertError since TRANSPORT is None
|
||||||
|
self.start_service('central')
|
||||||
|
|
||||||
|
self.service = service.Service()
|
||||||
|
|
||||||
def test_start_and_stop(self):
|
def test_start_and_stop(self):
|
||||||
# NOTE: Start is already done by the fixture in start_service()
|
# NOTE: Start is already done by the fixture in start_service()
|
||||||
|
self.service.start()
|
||||||
self.service.stop()
|
self.service.stop()
|
||||||
|
|
|
@ -30,6 +30,8 @@ class ApiV1Test(ApiTestCase):
|
||||||
# Ensure the v1 API is enabled
|
# Ensure the v1 API is enabled
|
||||||
self.config(enable_api_v1=True, group='service:api')
|
self.config(enable_api_v1=True, group='service:api')
|
||||||
|
|
||||||
|
self.central_service = self.start_service('central')
|
||||||
|
|
||||||
# Create the application
|
# Create the application
|
||||||
self.app = api_v1.factory({})
|
self.app = api_v1.factory({})
|
||||||
|
|
||||||
|
@ -39,14 +41,12 @@ class ApiV1Test(ApiTestCase):
|
||||||
|
|
||||||
# Inject the TestAuth middleware
|
# Inject the TestAuth middleware
|
||||||
self.app.wsgi_app = middleware.TestContextMiddleware(
|
self.app.wsgi_app = middleware.TestContextMiddleware(
|
||||||
self.app.wsgi_app, self.admin_context.tenant_id,
|
self.app.wsgi_app, self.admin_context.tenant,
|
||||||
self.admin_context.user_id)
|
self.admin_context.user)
|
||||||
|
|
||||||
# Obtain a test client
|
# Obtain a test client
|
||||||
self.client = self.app.test_client()
|
self.client = self.app.test_client()
|
||||||
|
|
||||||
self.central_service = self.start_service('central')
|
|
||||||
|
|
||||||
def get(self, path, **kw):
|
def get(self, path, **kw):
|
||||||
expected_status_code = kw.pop('status_code', 200)
|
expected_status_code = kw.pop('status_code', 200)
|
||||||
|
|
||||||
|
|
|
@ -15,8 +15,8 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
from mock import patch
|
from mock import patch
|
||||||
|
from oslo import messaging
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
from designate import exceptions
|
from designate import exceptions
|
||||||
from designate.central import service as central_service
|
from designate.central import service as central_service
|
||||||
from designate.tests.test_api.test_v1 import ApiV1Test
|
from designate.tests.test_api.test_v1 import ApiV1Test
|
||||||
|
@ -68,7 +68,7 @@ class ApiV1DomainsTest(ApiV1Test):
|
||||||
self.post('domains', data=fixture, status_code=500)
|
self.post('domains', data=fixture, status_code=500)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'create_domain',
|
@patch.object(central_service.Service, 'create_domain',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_create_domain_timeout(self, _):
|
def test_create_domain_timeout(self, _):
|
||||||
# Create a domain
|
# Create a domain
|
||||||
fixture = self.get_domain_fixture(0)
|
fixture = self.get_domain_fixture(0)
|
||||||
|
@ -188,7 +188,7 @@ class ApiV1DomainsTest(ApiV1Test):
|
||||||
self.assertTrue(mock.called)
|
self.assertTrue(mock.called)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'find_domains',
|
@patch.object(central_service.Service, 'find_domains',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_get_domains_timeout(self, _):
|
def test_get_domains_timeout(self, _):
|
||||||
self.get('domains', status_code=504)
|
self.get('domains', status_code=504)
|
||||||
|
|
||||||
|
@ -212,7 +212,7 @@ class ApiV1DomainsTest(ApiV1Test):
|
||||||
self.assertTrue(mock.called)
|
self.assertTrue(mock.called)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'get_domain',
|
@patch.object(central_service.Service, 'get_domain',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_get_domain_timeout(self, _):
|
def test_get_domain_timeout(self, _):
|
||||||
# Create a domain
|
# Create a domain
|
||||||
domain = self.create_domain()
|
domain = self.create_domain()
|
||||||
|
@ -281,7 +281,7 @@ class ApiV1DomainsTest(ApiV1Test):
|
||||||
self.put('domains/%s' % domain['id'], data=data, status_code=400)
|
self.put('domains/%s' % domain['id'], data=data, status_code=400)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'update_domain',
|
@patch.object(central_service.Service, 'update_domain',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_update_domain_timeout(self, _):
|
def test_update_domain_timeout(self, _):
|
||||||
# Create a domain
|
# Create a domain
|
||||||
domain = self.create_domain()
|
domain = self.create_domain()
|
||||||
|
@ -336,7 +336,7 @@ class ApiV1DomainsTest(ApiV1Test):
|
||||||
self.assertTrue(mock.called)
|
self.assertTrue(mock.called)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'delete_domain',
|
@patch.object(central_service.Service, 'delete_domain',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_delete_domain_timeout(self, _):
|
def test_delete_domain_timeout(self, _):
|
||||||
# Create a domain
|
# Create a domain
|
||||||
domain = self.create_domain()
|
domain = self.create_domain()
|
||||||
|
|
|
@ -15,8 +15,8 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
from mock import patch
|
from mock import patch
|
||||||
|
from oslo import messaging
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
from designate.central import service as central_service
|
from designate.central import service as central_service
|
||||||
from designate.tests.test_api.test_v1 import ApiV1Test
|
from designate.tests.test_api.test_v1 import ApiV1Test
|
||||||
|
|
||||||
|
@ -136,7 +136,7 @@ class ApiV1RecordsTest(ApiV1Test):
|
||||||
status_code=400)
|
status_code=400)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'create_record',
|
@patch.object(central_service.Service, 'create_record',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_create_record_timeout(self, _):
|
def test_create_record_timeout(self, _):
|
||||||
fixture = self.get_record_fixture(self.recordset['type'])
|
fixture = self.get_record_fixture(self.recordset['type'])
|
||||||
fixture.update({
|
fixture.update({
|
||||||
|
@ -289,7 +289,7 @@ class ApiV1RecordsTest(ApiV1Test):
|
||||||
self.assertTrue(mock.called)
|
self.assertTrue(mock.called)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'find_records',
|
@patch.object(central_service.Service, 'find_records',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_get_records_timeout(self, _):
|
def test_get_records_timeout(self, _):
|
||||||
self.get('domains/%s/records' % self.domain['id'],
|
self.get('domains/%s/records' % self.domain['id'],
|
||||||
status_code=504)
|
status_code=504)
|
||||||
|
@ -392,7 +392,7 @@ class ApiV1RecordsTest(ApiV1Test):
|
||||||
data=data, status_code=400)
|
data=data, status_code=400)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'get_domain',
|
@patch.object(central_service.Service, 'get_domain',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_update_record_timeout(self, _):
|
def test_update_record_timeout(self, _):
|
||||||
# Create a record
|
# Create a record
|
||||||
record = self.create_record(self.domain, self.recordset)
|
record = self.create_record(self.domain, self.recordset)
|
||||||
|
@ -458,7 +458,7 @@ class ApiV1RecordsTest(ApiV1Test):
|
||||||
self.assertTrue(mock.called)
|
self.assertTrue(mock.called)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'get_domain',
|
@patch.object(central_service.Service, 'get_domain',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_delete_record_timeout(self, _):
|
def test_delete_record_timeout(self, _):
|
||||||
# Create a record
|
# Create a record
|
||||||
record = self.create_record(self.domain, self.recordset)
|
record = self.create_record(self.domain, self.recordset)
|
||||||
|
|
|
@ -14,8 +14,8 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
from mock import patch
|
from mock import patch
|
||||||
|
from oslo import messaging
|
||||||
from designate.openstack.common import log as logging
|
from designate.openstack.common import log as logging
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
from designate import exceptions
|
from designate import exceptions
|
||||||
from designate.central import service as central_service
|
from designate.central import service as central_service
|
||||||
from designate.tests.test_api.test_v1 import ApiV1Test
|
from designate.tests.test_api.test_v1 import ApiV1Test
|
||||||
|
@ -61,7 +61,7 @@ class ApiV1ServersTest(ApiV1Test):
|
||||||
self.post('servers', data=fixture, status_code=400)
|
self.post('servers', data=fixture, status_code=400)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'create_server',
|
@patch.object(central_service.Service, 'create_server',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_create_server_timeout(self, _):
|
def test_create_server_timeout(self, _):
|
||||||
# Create a server
|
# Create a server
|
||||||
fixture = self.get_server_fixture(0)
|
fixture = self.get_server_fixture(0)
|
||||||
|
@ -106,7 +106,7 @@ class ApiV1ServersTest(ApiV1Test):
|
||||||
self.assertTrue(mock.called)
|
self.assertTrue(mock.called)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'find_servers',
|
@patch.object(central_service.Service, 'find_servers',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_get_servers_timeout(self, _):
|
def test_get_servers_timeout(self, _):
|
||||||
self.get('servers', status_code=504)
|
self.get('servers', status_code=504)
|
||||||
|
|
||||||
|
@ -130,7 +130,7 @@ class ApiV1ServersTest(ApiV1Test):
|
||||||
self.assertTrue(mock.called)
|
self.assertTrue(mock.called)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'get_server',
|
@patch.object(central_service.Service, 'get_server',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_get_server_timeout(self, _):
|
def test_get_server_timeout(self, _):
|
||||||
# Create a server
|
# Create a server
|
||||||
server = self.create_server()
|
server = self.create_server()
|
||||||
|
@ -176,7 +176,7 @@ class ApiV1ServersTest(ApiV1Test):
|
||||||
self.put('servers/%s' % server['id'], data=data, status_code=400)
|
self.put('servers/%s' % server['id'], data=data, status_code=400)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'update_server',
|
@patch.object(central_service.Service, 'update_server',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_update_server_timeout(self, _):
|
def test_update_server_timeout(self, _):
|
||||||
# Create a server
|
# Create a server
|
||||||
server = self.create_server()
|
server = self.create_server()
|
||||||
|
@ -228,7 +228,7 @@ class ApiV1ServersTest(ApiV1Test):
|
||||||
self.assertTrue(mock.called)
|
self.assertTrue(mock.called)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'delete_server',
|
@patch.object(central_service.Service, 'delete_server',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_delete_server_timeout(self, _):
|
def test_delete_server_timeout(self, _):
|
||||||
# Create a server
|
# Create a server
|
||||||
server = self.create_server()
|
server = self.create_server()
|
||||||
|
|
|
@ -38,6 +38,9 @@ class ApiV2TestCase(ApiTestCase):
|
||||||
# Ensure the v2 API is enabled
|
# Ensure the v2 API is enabled
|
||||||
self.config(enable_api_v2=True, group='service:api')
|
self.config(enable_api_v2=True, group='service:api')
|
||||||
|
|
||||||
|
# Create and start an instance of the central service
|
||||||
|
self.central_service = self.start_service('central')
|
||||||
|
|
||||||
# Create the application
|
# Create the application
|
||||||
self.app = api_v2.factory({})
|
self.app = api_v2.factory({})
|
||||||
|
|
||||||
|
@ -46,15 +49,12 @@ class ApiV2TestCase(ApiTestCase):
|
||||||
|
|
||||||
# Inject the TestContext middleware
|
# Inject the TestContext middleware
|
||||||
self.app = middleware.TestContextMiddleware(
|
self.app = middleware.TestContextMiddleware(
|
||||||
self.app, self.admin_context.tenant_id,
|
self.app, self.admin_context.tenant,
|
||||||
self.admin_context.tenant_id)
|
self.admin_context.tenant)
|
||||||
|
|
||||||
# Obtain a test client
|
# Obtain a test client
|
||||||
self.client = TestApp(self.app)
|
self.client = TestApp(self.app)
|
||||||
|
|
||||||
# Create and start an instance of the central service
|
|
||||||
self.central_service = self.start_service('central')
|
|
||||||
|
|
||||||
def tearDown(self):
|
def tearDown(self):
|
||||||
self.app = None
|
self.app = None
|
||||||
self.client = None
|
self.client = None
|
||||||
|
|
|
@ -29,7 +29,7 @@ class ApiV2ReverseFloatingIPTest(ApiV2TestCase):
|
||||||
|
|
||||||
response = self.client.get(
|
response = self.client.get(
|
||||||
'/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']]),
|
'/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']]),
|
||||||
headers={'X-Test-Tenant-Id': context.tenant_id})
|
headers={'X-Test-Tenant-Id': context.tenant})
|
||||||
|
|
||||||
self.assertEqual(200, response.status_int)
|
self.assertEqual(200, response.status_int)
|
||||||
self.assertEqual('application/json', response.content_type)
|
self.assertEqual('application/json', response.content_type)
|
||||||
|
@ -58,7 +58,7 @@ class ApiV2ReverseFloatingIPTest(ApiV2TestCase):
|
||||||
|
|
||||||
response = self.client.get(
|
response = self.client.get(
|
||||||
'/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']]),
|
'/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']]),
|
||||||
headers={'X-Test-Tenant-Id': context.tenant_id})
|
headers={'X-Test-Tenant-Id': context.tenant})
|
||||||
|
|
||||||
self.assertEqual(200, response.status_int)
|
self.assertEqual(200, response.status_int)
|
||||||
self.assertEqual('application/json', response.content_type)
|
self.assertEqual('application/json', response.content_type)
|
||||||
|
@ -92,11 +92,11 @@ class ApiV2ReverseFloatingIPTest(ApiV2TestCase):
|
||||||
def test_list_floatingip_no_record(self):
|
def test_list_floatingip_no_record(self):
|
||||||
context = self.get_context(tenant='a')
|
context = self.get_context(tenant='a')
|
||||||
|
|
||||||
fip = self.network_api.fake.allocate_floatingip(context.tenant_id)
|
fip = self.network_api.fake.allocate_floatingip(context.tenant)
|
||||||
|
|
||||||
response = self.client.get(
|
response = self.client.get(
|
||||||
'/reverse/floatingips',
|
'/reverse/floatingips',
|
||||||
headers={'X-Test-Tenant-Id': context.tenant_id})
|
headers={'X-Test-Tenant-Id': context.tenant})
|
||||||
|
|
||||||
self.assertIn('floatingips', response.json)
|
self.assertIn('floatingips', response.json)
|
||||||
self.assertIn('links', response.json)
|
self.assertIn('links', response.json)
|
||||||
|
@ -116,14 +116,14 @@ class ApiV2ReverseFloatingIPTest(ApiV2TestCase):
|
||||||
|
|
||||||
context = self.get_context(tenant='a')
|
context = self.get_context(tenant='a')
|
||||||
|
|
||||||
fip = self.network_api.fake.allocate_floatingip(context.tenant_id)
|
fip = self.network_api.fake.allocate_floatingip(context.tenant)
|
||||||
|
|
||||||
self.central_service.update_floatingip(
|
self.central_service.update_floatingip(
|
||||||
context, fip['region'], fip['id'], fixture)
|
context, fip['region'], fip['id'], fixture)
|
||||||
|
|
||||||
response = self.client.get(
|
response = self.client.get(
|
||||||
'/reverse/floatingips',
|
'/reverse/floatingips',
|
||||||
headers={'X-Test-Tenant-Id': context.tenant_id})
|
headers={'X-Test-Tenant-Id': context.tenant})
|
||||||
|
|
||||||
self.assertIn('floatingips', response.json)
|
self.assertIn('floatingips', response.json)
|
||||||
self.assertIn('links', response.json)
|
self.assertIn('links', response.json)
|
||||||
|
@ -189,7 +189,7 @@ class ApiV2ReverseFloatingIPTest(ApiV2TestCase):
|
||||||
fixture = self.get_ptr_fixture()
|
fixture = self.get_ptr_fixture()
|
||||||
context = self.get_context(tenant='a')
|
context = self.get_context(tenant='a')
|
||||||
|
|
||||||
fip = self.network_api.fake.allocate_floatingip(context.tenant_id)
|
fip = self.network_api.fake.allocate_floatingip(context.tenant)
|
||||||
|
|
||||||
# Unsetting via "None"
|
# Unsetting via "None"
|
||||||
self.central_service.update_floatingip(
|
self.central_service.update_floatingip(
|
||||||
|
|
|
@ -14,8 +14,8 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
from mock import patch
|
from mock import patch
|
||||||
|
from oslo import messaging
|
||||||
from designate.central import service as central_service
|
from designate.central import service as central_service
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
from designate.tests.test_api.test_v2 import ApiV2TestCase
|
from designate.tests.test_api.test_v2 import ApiV2TestCase
|
||||||
|
|
||||||
|
|
||||||
|
@ -61,7 +61,7 @@ class ApiV2NameServersTest(ApiV2TestCase):
|
||||||
self._assert_invalid_uuid(self.client.get, '/zones/%s/nameservers')
|
self._assert_invalid_uuid(self.client.get, '/zones/%s/nameservers')
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'get_domain_servers',
|
@patch.object(central_service.Service, 'get_domain_servers',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_get_nameservers_timeout(self, _):
|
def test_get_nameservers_timeout(self, _):
|
||||||
url = '/zones/ba751950-6193-11e3-949a-0800200c9a66/nameservers'
|
url = '/zones/ba751950-6193-11e3-949a-0800200c9a66/nameservers'
|
||||||
|
|
||||||
|
|
|
@ -14,9 +14,9 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
from mock import patch
|
from mock import patch
|
||||||
|
from oslo import messaging
|
||||||
from designate import exceptions
|
from designate import exceptions
|
||||||
from designate.central import service as central_service
|
from designate.central import service as central_service
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
from designate.tests.test_api.test_v2 import ApiV2TestCase
|
from designate.tests.test_api.test_v2 import ApiV2TestCase
|
||||||
|
|
||||||
|
|
||||||
|
@ -72,7 +72,7 @@ class ApiV2RecordsTest(ApiV2TestCase):
|
||||||
url, body)
|
url, body)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'create_record',
|
@patch.object(central_service.Service, 'create_record',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_create_recordset_timeout(self, _):
|
def test_create_recordset_timeout(self, _):
|
||||||
fixture = self.get_record_fixture(self.rrset['type'], fixture=0)
|
fixture = self.get_record_fixture(self.rrset['type'], fixture=0)
|
||||||
|
|
||||||
|
@ -144,7 +144,7 @@ class ApiV2RecordsTest(ApiV2TestCase):
|
||||||
self._assert_invalid_paging(data, url, key='records')
|
self._assert_invalid_paging(data, url, key='records')
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'find_records',
|
@patch.object(central_service.Service, 'find_records',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_get_records_timeout(self, _):
|
def test_get_records_timeout(self, _):
|
||||||
url = '/zones/ba751950-6193-11e3-949a-0800200c9a66/recordsets/' \
|
url = '/zones/ba751950-6193-11e3-949a-0800200c9a66/recordsets/' \
|
||||||
'ba751950-6193-11e3-949a-0800200c9a66/records'
|
'ba751950-6193-11e3-949a-0800200c9a66/records'
|
||||||
|
@ -176,7 +176,7 @@ class ApiV2RecordsTest(ApiV2TestCase):
|
||||||
self.assertEqual(record['data'], response.json['record']['data'])
|
self.assertEqual(record['data'], response.json['record']['data'])
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'get_record',
|
@patch.object(central_service.Service, 'get_record',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_get_record_timeout(self, _):
|
def test_get_record_timeout(self, _):
|
||||||
url = '/zones/%s/recordsets/%s/records/' \
|
url = '/zones/%s/recordsets/%s/records/' \
|
||||||
'ba751950-6193-11e3-949a-0800200c9a66' % (
|
'ba751950-6193-11e3-949a-0800200c9a66' % (
|
||||||
|
@ -264,7 +264,7 @@ class ApiV2RecordsTest(ApiV2TestCase):
|
||||||
headers={'Accept': 'application/json'})
|
headers={'Accept': 'application/json'})
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'get_record',
|
@patch.object(central_service.Service, 'get_record',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_update_record_timeout(self, _):
|
def test_update_record_timeout(self, _):
|
||||||
url = '/zones/%s/recordsets/%s/records/' \
|
url = '/zones/%s/recordsets/%s/records/' \
|
||||||
'ba751950-6193-11e3-949a-0800200c9a66' % (
|
'ba751950-6193-11e3-949a-0800200c9a66' % (
|
||||||
|
@ -304,7 +304,7 @@ class ApiV2RecordsTest(ApiV2TestCase):
|
||||||
self.client.delete(url, status=204)
|
self.client.delete(url, status=204)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'delete_record',
|
@patch.object(central_service.Service, 'delete_record',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_delete_record_timeout(self, _):
|
def test_delete_record_timeout(self, _):
|
||||||
url = '/zones/%s/recordsets/%s/records/' \
|
url = '/zones/%s/recordsets/%s/records/' \
|
||||||
'ba751950-6193-11e3-949a-0800200c9a66' % (
|
'ba751950-6193-11e3-949a-0800200c9a66' % (
|
||||||
|
|
|
@ -14,9 +14,9 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
from mock import patch
|
from mock import patch
|
||||||
|
from oslo import messaging
|
||||||
from designate import exceptions
|
from designate import exceptions
|
||||||
from designate.central import service as central_service
|
from designate.central import service as central_service
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
from designate.tests.test_api.test_v2 import ApiV2TestCase
|
from designate.tests.test_api.test_v2 import ApiV2TestCase
|
||||||
|
|
||||||
|
|
||||||
|
@ -77,7 +77,7 @@ class ApiV2RecordSetsTest(ApiV2TestCase):
|
||||||
'invalid_object', 400, self.client.post_json, url, body)
|
'invalid_object', 400, self.client.post_json, url, body)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'create_recordset',
|
@patch.object(central_service.Service, 'create_recordset',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_create_recordset_timeout(self, _):
|
def test_create_recordset_timeout(self, _):
|
||||||
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
|
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
|
||||||
|
|
||||||
|
@ -151,7 +151,7 @@ class ApiV2RecordSetsTest(ApiV2TestCase):
|
||||||
self._assert_invalid_uuid(self.client.get, '/zones/%s/recordsets')
|
self._assert_invalid_uuid(self.client.get, '/zones/%s/recordsets')
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'find_recordsets',
|
@patch.object(central_service.Service, 'find_recordsets',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_get_recordsets_timeout(self, _):
|
def test_get_recordsets_timeout(self, _):
|
||||||
url = '/zones/ba751950-6193-11e3-949a-0800200c9a66/recordsets'
|
url = '/zones/ba751950-6193-11e3-949a-0800200c9a66/recordsets'
|
||||||
|
|
||||||
|
@ -184,7 +184,7 @@ class ApiV2RecordSetsTest(ApiV2TestCase):
|
||||||
self._assert_invalid_uuid(self.client.get, '/zones/%s/recordsets/%s')
|
self._assert_invalid_uuid(self.client.get, '/zones/%s/recordsets/%s')
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'get_recordset',
|
@patch.object(central_service.Service, 'get_recordset',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_get_recordset_timeout(self, _):
|
def test_get_recordset_timeout(self, _):
|
||||||
url = '/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66' % (
|
url = '/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66' % (
|
||||||
self.domain['id'])
|
self.domain['id'])
|
||||||
|
@ -268,7 +268,7 @@ class ApiV2RecordSetsTest(ApiV2TestCase):
|
||||||
self.client.patch_json, url, body)
|
self.client.patch_json, url, body)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'get_recordset',
|
@patch.object(central_service.Service, 'get_recordset',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_update_recordset_timeout(self, _):
|
def test_update_recordset_timeout(self, _):
|
||||||
# Prepare an update body
|
# Prepare an update body
|
||||||
body = {'recordset': {'description': 'Tester'}}
|
body = {'recordset': {'description': 'Tester'}}
|
||||||
|
@ -301,7 +301,7 @@ class ApiV2RecordSetsTest(ApiV2TestCase):
|
||||||
self.client.delete(url, status=204)
|
self.client.delete(url, status=204)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'delete_recordset',
|
@patch.object(central_service.Service, 'delete_recordset',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_delete_recordset_timeout(self, _):
|
def test_delete_recordset_timeout(self, _):
|
||||||
url = ('/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66'
|
url = ('/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66'
|
||||||
% (self.domain['id']))
|
% (self.domain['id']))
|
||||||
|
|
|
@ -15,9 +15,9 @@
|
||||||
# under the License.
|
# under the License.
|
||||||
from dns import zone as dnszone
|
from dns import zone as dnszone
|
||||||
from mock import patch
|
from mock import patch
|
||||||
|
from oslo import messaging
|
||||||
from designate import exceptions
|
from designate import exceptions
|
||||||
from designate.central import service as central_service
|
from designate.central import service as central_service
|
||||||
from designate.openstack.common.rpc import common as rpc_common
|
|
||||||
from designate.tests.test_api.test_v2 import ApiV2TestCase
|
from designate.tests.test_api.test_v2 import ApiV2TestCase
|
||||||
|
|
||||||
|
|
||||||
|
@ -102,7 +102,7 @@ class ApiV2ZonesTest(ApiV2TestCase):
|
||||||
'/zones', {'zone': fixture})
|
'/zones', {'zone': fixture})
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'create_domain',
|
@patch.object(central_service.Service, 'create_domain',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_create_zone_timeout(self, _):
|
def test_create_zone_timeout(self, _):
|
||||||
fixture = self.get_domain_fixture(0)
|
fixture = self.get_domain_fixture(0)
|
||||||
|
|
||||||
|
@ -166,7 +166,7 @@ class ApiV2ZonesTest(ApiV2TestCase):
|
||||||
self._assert_invalid_paging(data, '/zones', key='zones')
|
self._assert_invalid_paging(data, '/zones', key='zones')
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'find_domains',
|
@patch.object(central_service.Service, 'find_domains',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_get_zones_timeout(self, _):
|
def test_get_zones_timeout(self, _):
|
||||||
self._assert_exception('timeout', 504, self.client.get, '/zones/')
|
self._assert_exception('timeout', 504, self.client.get, '/zones/')
|
||||||
|
|
||||||
|
@ -198,7 +198,7 @@ class ApiV2ZonesTest(ApiV2TestCase):
|
||||||
self._assert_invalid_uuid(self.client.get, '/zones/%s')
|
self._assert_invalid_uuid(self.client.get, '/zones/%s')
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'get_domain',
|
@patch.object(central_service.Service, 'get_domain',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_get_zone_timeout(self, _):
|
def test_get_zone_timeout(self, _):
|
||||||
url = '/zones/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980'
|
url = '/zones/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980'
|
||||||
self._assert_exception('timeout', 504, self.client.get, url,
|
self._assert_exception('timeout', 504, self.client.get, url,
|
||||||
|
@ -287,7 +287,7 @@ class ApiV2ZonesTest(ApiV2TestCase):
|
||||||
url, body)
|
url, body)
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'get_domain',
|
@patch.object(central_service.Service, 'get_domain',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_update_zone_timeout(self, _):
|
def test_update_zone_timeout(self, _):
|
||||||
# Prepare an update body
|
# Prepare an update body
|
||||||
body = {'zone': {'email': 'example@example.org'}}
|
body = {'zone': {'email': 'example@example.org'}}
|
||||||
|
@ -319,7 +319,7 @@ class ApiV2ZonesTest(ApiV2TestCase):
|
||||||
self._assert_invalid_uuid(self.client.delete, '/zones/%s')
|
self._assert_invalid_uuid(self.client.delete, '/zones/%s')
|
||||||
|
|
||||||
@patch.object(central_service.Service, 'delete_domain',
|
@patch.object(central_service.Service, 'delete_domain',
|
||||||
side_effect=rpc_common.Timeout())
|
side_effect=messaging.MessagingTimeout())
|
||||||
def test_delete_zone_timeout(self, _):
|
def test_delete_zone_timeout(self, _):
|
||||||
url = '/zones/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980'
|
url = '/zones/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980'
|
||||||
|
|
||||||
|
|
|
@ -440,14 +440,14 @@ class CentralServiceTest(CentralTestCase):
|
||||||
self.assertEqual(len(notifications), 1)
|
self.assertEqual(len(notifications), 1)
|
||||||
|
|
||||||
# Ensure the notification wrapper contains the correct info
|
# Ensure the notification wrapper contains the correct info
|
||||||
notification = notifications.pop()
|
ctxt, message, priority = notifications.pop()
|
||||||
self.assertEqual(notification['event_type'], 'dns.domain.create')
|
self.assertEqual(message['event_type'], 'dns.domain.create')
|
||||||
self.assertEqual(notification['priority'], 'INFO')
|
self.assertEqual(message['priority'], 'INFO')
|
||||||
self.assertIsNotNone(notification['timestamp'])
|
self.assertIsNotNone(message['timestamp'])
|
||||||
self.assertIsNotNone(notification['message_id'])
|
self.assertIsNotNone(message['message_id'])
|
||||||
|
|
||||||
# Ensure the notification payload contains the correct info
|
# Ensure the notification payload contains the correct info
|
||||||
payload = notification['payload']
|
payload = message['payload']
|
||||||
self.assertEqual(payload['id'], domain['id'])
|
self.assertEqual(payload['id'], domain['id'])
|
||||||
self.assertEqual(payload['name'], domain['name'])
|
self.assertEqual(payload['name'], domain['name'])
|
||||||
self.assertEqual(payload['tenant_id'], domain['tenant_id'])
|
self.assertEqual(payload['tenant_id'], domain['tenant_id'])
|
||||||
|
@ -502,7 +502,7 @@ class CentralServiceTest(CentralTestCase):
|
||||||
context = self.get_admin_context()
|
context = self.get_admin_context()
|
||||||
|
|
||||||
# Explicitly set a tenant_id
|
# Explicitly set a tenant_id
|
||||||
context.tenant_id = '1'
|
context.tenant = '1'
|
||||||
|
|
||||||
# Create the Parent Domain using fixture 0
|
# Create the Parent Domain using fixture 0
|
||||||
parent_domain = self.create_domain(fixture=0, context=context)
|
parent_domain = self.create_domain(fixture=0, context=context)
|
||||||
|
@ -510,7 +510,7 @@ class CentralServiceTest(CentralTestCase):
|
||||||
context = self.get_admin_context()
|
context = self.get_admin_context()
|
||||||
|
|
||||||
# Explicitly use a different tenant_id
|
# Explicitly use a different tenant_id
|
||||||
context.tenant_id = '2'
|
context.tenant = '2'
|
||||||
|
|
||||||
# Prepare values for the subdomain using fixture 1 as a base
|
# Prepare values for the subdomain using fixture 1 as a base
|
||||||
values = self.get_domain_fixture(1)
|
values = self.get_domain_fixture(1)
|
||||||
|
@ -723,14 +723,14 @@ class CentralServiceTest(CentralTestCase):
|
||||||
self.assertEqual(len(notifications), 1)
|
self.assertEqual(len(notifications), 1)
|
||||||
|
|
||||||
# Ensure the notification wrapper contains the correct info
|
# Ensure the notification wrapper contains the correct info
|
||||||
notification = notifications.pop()
|
ctxt, message, priority = notifications.pop()
|
||||||
self.assertEqual(notification['event_type'], 'dns.domain.update')
|
self.assertEqual(message['event_type'], 'dns.domain.update')
|
||||||
self.assertEqual(notification['priority'], 'INFO')
|
self.assertEqual(message['priority'], 'INFO')
|
||||||
self.assertIsNotNone(notification['timestamp'])
|
self.assertIsNotNone(message['timestamp'])
|
||||||
self.assertIsNotNone(notification['message_id'])
|
self.assertIsNotNone(message['message_id'])
|
||||||
|
|
||||||
# Ensure the notification payload contains the correct info
|
# Ensure the notification payload contains the correct info
|
||||||
payload = notification['payload']
|
payload = message['payload']
|
||||||
self.assertEqual(payload['id'], domain['id'])
|
self.assertEqual(payload['id'], domain['id'])
|
||||||
self.assertEqual(payload['name'], domain['name'])
|
self.assertEqual(payload['name'], domain['name'])
|
||||||
self.assertEqual(payload['tenant_id'], domain['tenant_id'])
|
self.assertEqual(payload['tenant_id'], domain['tenant_id'])
|
||||||
|
@ -784,14 +784,14 @@ class CentralServiceTest(CentralTestCase):
|
||||||
self.assertEqual(len(notifications), 1)
|
self.assertEqual(len(notifications), 1)
|
||||||
|
|
||||||
# Ensure the notification wrapper contains the correct info
|
# Ensure the notification wrapper contains the correct info
|
||||||
notification = notifications.pop()
|
ctxt, message, priority = notifications.pop()
|
||||||
self.assertEqual(notification['event_type'], 'dns.domain.delete')
|
self.assertEqual(message['event_type'], 'dns.domain.delete')
|
||||||
self.assertEqual(notification['priority'], 'INFO')
|
self.assertEqual(message['priority'], 'INFO')
|
||||||
self.assertIsNotNone(notification['timestamp'])
|
self.assertIsNotNone(message['timestamp'])
|
||||||
self.assertIsNotNone(notification['message_id'])
|
self.assertIsNotNone(message['message_id'])
|
||||||
|
|
||||||
# Ensure the notification payload contains the correct info
|
# Ensure the notification payload contains the correct info
|
||||||
payload = notification['payload']
|
payload = message['payload']
|
||||||
self.assertEqual(payload['id'], domain['id'])
|
self.assertEqual(payload['id'], domain['id'])
|
||||||
self.assertEqual(payload['name'], domain['name'])
|
self.assertEqual(payload['name'], domain['name'])
|
||||||
self.assertEqual(payload['tenant_id'], domain['tenant_id'])
|
self.assertEqual(payload['tenant_id'], domain['tenant_id'])
|
||||||
|
@ -1458,7 +1458,7 @@ class CentralServiceTest(CentralTestCase):
|
||||||
|
|
||||||
context = self.get_context(tenant='a')
|
context = self.get_context(tenant='a')
|
||||||
|
|
||||||
fip = self.network_api.fake.allocate_floatingip(context.tenant_id)
|
fip = self.network_api.fake.allocate_floatingip(context.tenant)
|
||||||
|
|
||||||
fip_ptr = self.central_service.get_floatingip(
|
fip_ptr = self.central_service.get_floatingip(
|
||||||
context, fip['region'], fip['id'])
|
context, fip['region'], fip['id'])
|
||||||
|
@ -1475,7 +1475,7 @@ class CentralServiceTest(CentralTestCase):
|
||||||
|
|
||||||
fixture = self.get_ptr_fixture()
|
fixture = self.get_ptr_fixture()
|
||||||
|
|
||||||
fip = self.network_api.fake.allocate_floatingip(context.tenant_id)
|
fip = self.network_api.fake.allocate_floatingip(context.tenant)
|
||||||
|
|
||||||
expected = self.central_service.update_floatingip(
|
expected = self.central_service.update_floatingip(
|
||||||
context, fip['region'], fip['id'], fixture)
|
context, fip['region'], fip['id'], fixture)
|
||||||
|
@ -1489,7 +1489,7 @@ class CentralServiceTest(CentralTestCase):
|
||||||
def test_get_floatingip_not_allocated(self):
|
def test_get_floatingip_not_allocated(self):
|
||||||
context = self.get_context(tenant='a')
|
context = self.get_context(tenant='a')
|
||||||
|
|
||||||
fip = self.network_api.fake.allocate_floatingip(context.tenant_id)
|
fip = self.network_api.fake.allocate_floatingip(context.tenant)
|
||||||
self.network_api.fake.deallocate_floatingip(fip['id'])
|
self.network_api.fake.deallocate_floatingip(fip['id'])
|
||||||
|
|
||||||
with testtools.ExpectedException(exceptions.NotFound):
|
with testtools.ExpectedException(exceptions.NotFound):
|
||||||
|
@ -1508,7 +1508,7 @@ class CentralServiceTest(CentralTestCase):
|
||||||
fixture = self.get_ptr_fixture()
|
fixture = self.get_ptr_fixture()
|
||||||
|
|
||||||
# First allocate and create a FIP as tenant a
|
# First allocate and create a FIP as tenant a
|
||||||
fip = self.network_api.fake.allocate_floatingip(context_a.tenant_id)
|
fip = self.network_api.fake.allocate_floatingip(context_a.tenant)
|
||||||
|
|
||||||
self.central_service.update_floatingip(
|
self.central_service.update_floatingip(
|
||||||
context_a, fip['region'], fip['id'], fixture)
|
context_a, fip['region'], fip['id'], fixture)
|
||||||
|
@ -1522,12 +1522,12 @@ class CentralServiceTest(CentralTestCase):
|
||||||
# Ensure that the record is still in DB (No invalidation)
|
# Ensure that the record is still in DB (No invalidation)
|
||||||
criterion = {
|
criterion = {
|
||||||
'managed_resource_id': fip['id'],
|
'managed_resource_id': fip['id'],
|
||||||
'managed_tenant_id': context_a.tenant_id}
|
'managed_tenant_id': context_a.tenant}
|
||||||
self.central_service.find_record(elevated_a, criterion)
|
self.central_service.find_record(elevated_a, criterion)
|
||||||
|
|
||||||
# Now give the fip id to tenant 'b' and see that it get's deleted
|
# Now give the fip id to tenant 'b' and see that it get's deleted
|
||||||
self.network_api.fake.allocate_floatingip(
|
self.network_api.fake.allocate_floatingip(
|
||||||
context_b.tenant_id, fip['id'])
|
context_b.tenant, fip['id'])
|
||||||
|
|
||||||
# There should be a fip returned with ptrdname of None
|
# There should be a fip returned with ptrdname of None
|
||||||
fip_ptr = self.central_service.get_floatingip(
|
fip_ptr = self.central_service.get_floatingip(
|
||||||
|
@ -1549,7 +1549,7 @@ class CentralServiceTest(CentralTestCase):
|
||||||
def test_list_floatingips_no_record(self):
|
def test_list_floatingips_no_record(self):
|
||||||
context = self.get_context(tenant='a')
|
context = self.get_context(tenant='a')
|
||||||
|
|
||||||
fip = self.network_api.fake.allocate_floatingip(context.tenant_id)
|
fip = self.network_api.fake.allocate_floatingip(context.tenant)
|
||||||
|
|
||||||
fips = self.central_service.list_floatingips(context)
|
fips = self.central_service.list_floatingips(context)
|
||||||
|
|
||||||
|
@ -1567,7 +1567,7 @@ class CentralServiceTest(CentralTestCase):
|
||||||
|
|
||||||
fixture = self.get_ptr_fixture()
|
fixture = self.get_ptr_fixture()
|
||||||
|
|
||||||
fip = self.network_api.fake.allocate_floatingip(context.tenant_id)
|
fip = self.network_api.fake.allocate_floatingip(context.tenant)
|
||||||
|
|
||||||
fip_ptr = self.central_service.update_floatingip(
|
fip_ptr = self.central_service.update_floatingip(
|
||||||
context, fip['region'], fip['id'], fixture)
|
context, fip['region'], fip['id'], fixture)
|
||||||
|
@ -1593,7 +1593,7 @@ class CentralServiceTest(CentralTestCase):
|
||||||
fixture = self.get_ptr_fixture()
|
fixture = self.get_ptr_fixture()
|
||||||
|
|
||||||
# First allocate and create a FIP as tenant a
|
# First allocate and create a FIP as tenant a
|
||||||
fip = self.network_api.fake.allocate_floatingip(context_a.tenant_id)
|
fip = self.network_api.fake.allocate_floatingip(context_a.tenant)
|
||||||
|
|
||||||
self.central_service.update_floatingip(
|
self.central_service.update_floatingip(
|
||||||
context_a, fip['region'], fip['id'], fixture)
|
context_a, fip['region'], fip['id'], fixture)
|
||||||
|
@ -1606,12 +1606,12 @@ class CentralServiceTest(CentralTestCase):
|
||||||
# Ensure that the record is still in DB (No invalidation)
|
# Ensure that the record is still in DB (No invalidation)
|
||||||
criterion = {
|
criterion = {
|
||||||
'managed_resource_id': fip['id'],
|
'managed_resource_id': fip['id'],
|
||||||
'managed_tenant_id': context_a.tenant_id}
|
'managed_tenant_id': context_a.tenant}
|
||||||
self.central_service.find_record(elevated_a, criterion)
|
self.central_service.find_record(elevated_a, criterion)
|
||||||
|
|
||||||
# Now give the fip id to tenant 'b' and see that it get's deleted
|
# Now give the fip id to tenant 'b' and see that it get's deleted
|
||||||
self.network_api.fake.allocate_floatingip(
|
self.network_api.fake.allocate_floatingip(
|
||||||
context_b.tenant_id, fip['id'])
|
context_b.tenant, fip['id'])
|
||||||
|
|
||||||
# There should be a fip returned with ptrdname of None
|
# There should be a fip returned with ptrdname of None
|
||||||
fips = self.central_service.list_floatingips(context_b)
|
fips = self.central_service.list_floatingips(context_b)
|
||||||
|
@ -1630,7 +1630,7 @@ class CentralServiceTest(CentralTestCase):
|
||||||
|
|
||||||
fixture = self.get_ptr_fixture()
|
fixture = self.get_ptr_fixture()
|
||||||
|
|
||||||
fip = self.network_api.fake.allocate_floatingip(context.tenant_id)
|
fip = self.network_api.fake.allocate_floatingip(context.tenant)
|
||||||
|
|
||||||
fip_ptr = self.central_service.update_floatingip(
|
fip_ptr = self.central_service.update_floatingip(
|
||||||
context, fip['region'], fip['id'], fixture)
|
context, fip['region'], fip['id'], fixture)
|
||||||
|
@ -1653,7 +1653,7 @@ class CentralServiceTest(CentralTestCase):
|
||||||
|
|
||||||
# Test that re-setting as tenant a an already set floatingip leaves
|
# Test that re-setting as tenant a an already set floatingip leaves
|
||||||
# only 1 record
|
# only 1 record
|
||||||
fip = self.network_api.fake.allocate_floatingip(context_a.tenant_id)
|
fip = self.network_api.fake.allocate_floatingip(context_a.tenant)
|
||||||
|
|
||||||
self.central_service.update_floatingip(
|
self.central_service.update_floatingip(
|
||||||
context_a, fip['region'], fip['id'], fixture)
|
context_a, fip['region'], fip['id'], fixture)
|
||||||
|
@ -1672,7 +1672,7 @@ class CentralServiceTest(CentralTestCase):
|
||||||
# Now test that tenant b allocating the same fip and setting a ptr
|
# Now test that tenant b allocating the same fip and setting a ptr
|
||||||
# deletes any records
|
# deletes any records
|
||||||
fip = self.network_api.fake.allocate_floatingip(
|
fip = self.network_api.fake.allocate_floatingip(
|
||||||
context_b.tenant_id, fip['id'])
|
context_b.tenant, fip['id'])
|
||||||
|
|
||||||
self.central_service.update_floatingip(
|
self.central_service.update_floatingip(
|
||||||
context_b, fip['region'], fip['id'], fixture)
|
context_b, fip['region'], fip['id'], fixture)
|
||||||
|
@ -1686,7 +1686,7 @@ class CentralServiceTest(CentralTestCase):
|
||||||
context = self.get_context(tenant='a')
|
context = self.get_context(tenant='a')
|
||||||
fixture = self.get_ptr_fixture()
|
fixture = self.get_ptr_fixture()
|
||||||
|
|
||||||
fip = self.network_api.fake.allocate_floatingip(context.tenant_id)
|
fip = self.network_api.fake.allocate_floatingip(context.tenant)
|
||||||
self.network_api.fake.deallocate_floatingip(fip['id'])
|
self.network_api.fake.deallocate_floatingip(fip['id'])
|
||||||
|
|
||||||
# If one attempts to assign a de-allocated FIP or not-owned it should
|
# If one attempts to assign a de-allocated FIP or not-owned it should
|
||||||
|
@ -1702,7 +1702,7 @@ class CentralServiceTest(CentralTestCase):
|
||||||
|
|
||||||
fixture = self.get_ptr_fixture()
|
fixture = self.get_ptr_fixture()
|
||||||
|
|
||||||
fip = self.network_api.fake.allocate_floatingip(context.tenant_id)
|
fip = self.network_api.fake.allocate_floatingip(context.tenant)
|
||||||
|
|
||||||
fip_ptr = self.central_service.update_floatingip(
|
fip_ptr = self.central_service.update_floatingip(
|
||||||
context, fip['region'], fip['id'], fixture)
|
context, fip['region'], fip['id'], fixture)
|
||||||
|
|
|
@ -30,7 +30,7 @@ class StorageTestCase(object):
|
||||||
fixture = self.get_quota_fixture(fixture, values)
|
fixture = self.get_quota_fixture(fixture, values)
|
||||||
|
|
||||||
if 'tenant_id' not in fixture:
|
if 'tenant_id' not in fixture:
|
||||||
fixture['tenant_id'] = context.tenant_id
|
fixture['tenant_id'] = context.tenant
|
||||||
|
|
||||||
return fixture, self.storage.create_quota(context, fixture)
|
return fixture, self.storage.create_quota(context, fixture)
|
||||||
|
|
||||||
|
@ -55,7 +55,7 @@ class StorageTestCase(object):
|
||||||
fixture = self.get_domain_fixture(fixture, values)
|
fixture = self.get_domain_fixture(fixture, values)
|
||||||
|
|
||||||
if 'tenant_id' not in fixture:
|
if 'tenant_id' not in fixture:
|
||||||
fixture['tenant_id'] = context.tenant_id
|
fixture['tenant_id'] = context.tenant
|
||||||
|
|
||||||
return fixture, self.storage.create_domain(context, fixture)
|
return fixture, self.storage.create_domain(context, fixture)
|
||||||
|
|
||||||
|
@ -126,7 +126,7 @@ class StorageTestCase(object):
|
||||||
# Quota Tests
|
# Quota Tests
|
||||||
def test_create_quota(self):
|
def test_create_quota(self):
|
||||||
values = self.get_quota_fixture()
|
values = self.get_quota_fixture()
|
||||||
values['tenant_id'] = self.admin_context.tenant_id
|
values['tenant_id'] = self.admin_context.tenant
|
||||||
|
|
||||||
result = self.storage.create_quota(self.admin_context, values=values)
|
result = self.storage.create_quota(self.admin_context, values=values)
|
||||||
|
|
||||||
|
@ -134,7 +134,7 @@ class StorageTestCase(object):
|
||||||
self.assertIsNotNone(result['created_at'])
|
self.assertIsNotNone(result['created_at'])
|
||||||
self.assertIsNone(result['updated_at'])
|
self.assertIsNone(result['updated_at'])
|
||||||
|
|
||||||
self.assertEqual(result['tenant_id'], self.admin_context.tenant_id)
|
self.assertEqual(result['tenant_id'], self.admin_context.tenant)
|
||||||
self.assertEqual(result['resource'], values['resource'])
|
self.assertEqual(result['resource'], values['resource'])
|
||||||
self.assertEqual(result['hard_limit'], values['hard_limit'])
|
self.assertEqual(result['hard_limit'], values['hard_limit'])
|
||||||
|
|
||||||
|
@ -598,7 +598,7 @@ class StorageTestCase(object):
|
||||||
# Domain Tests
|
# Domain Tests
|
||||||
def test_create_domain(self):
|
def test_create_domain(self):
|
||||||
values = {
|
values = {
|
||||||
'tenant_id': self.admin_context.tenant_id,
|
'tenant_id': self.admin_context.tenant,
|
||||||
'name': 'example.net.',
|
'name': 'example.net.',
|
||||||
'email': 'example@example.net'
|
'email': 'example@example.net'
|
||||||
}
|
}
|
||||||
|
@ -609,7 +609,7 @@ class StorageTestCase(object):
|
||||||
self.assertIsNotNone(result['created_at'])
|
self.assertIsNotNone(result['created_at'])
|
||||||
self.assertIsNone(result['updated_at'])
|
self.assertIsNone(result['updated_at'])
|
||||||
|
|
||||||
self.assertEqual(result['tenant_id'], self.admin_context.tenant_id)
|
self.assertEqual(result['tenant_id'], self.admin_context.tenant)
|
||||||
self.assertEqual(result['name'], values['name'])
|
self.assertEqual(result['name'], values['name'])
|
||||||
self.assertEqual(result['email'], values['email'])
|
self.assertEqual(result['email'], values['email'])
|
||||||
self.assertIn('status', result)
|
self.assertIn('status', result)
|
||||||
|
@ -1034,7 +1034,7 @@ class StorageTestCase(object):
|
||||||
self.assertIsNotNone(result['hash'])
|
self.assertIsNotNone(result['hash'])
|
||||||
self.assertIsNone(result['updated_at'])
|
self.assertIsNone(result['updated_at'])
|
||||||
|
|
||||||
self.assertEqual(result['tenant_id'], self.admin_context.tenant_id)
|
self.assertEqual(result['tenant_id'], self.admin_context.tenant)
|
||||||
self.assertEqual(result['data'], values['data'])
|
self.assertEqual(result['data'], values['data'])
|
||||||
self.assertIn('status', result)
|
self.assertIn('status', result)
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{
|
{
|
||||||
"admin": "role:admin or is_admin:True",
|
"admin": "role:admin or is_admin:True",
|
||||||
"owner": "tenant_id:%(tenant_id)s",
|
"owner": "tenant:%(tenant_id)s",
|
||||||
"admin_or_owner": "rule:admin or rule:owner",
|
"admin_or_owner": "rule:admin or rule:owner",
|
||||||
|
|
||||||
"default": "rule:admin_or_owner",
|
"default": "rule:admin_or_owner",
|
||||||
|
|
|
@ -8,6 +8,7 @@ kombu>=2.4.8
|
||||||
lockfile>=0.8
|
lockfile>=0.8
|
||||||
netaddr>=0.7.6
|
netaddr>=0.7.6
|
||||||
oslo.config>=1.2.0
|
oslo.config>=1.2.0
|
||||||
|
oslo.messaging>=1.3.0
|
||||||
oslo.rootwrap
|
oslo.rootwrap
|
||||||
Paste
|
Paste
|
||||||
PasteDeploy>=1.5.0
|
PasteDeploy>=1.5.0
|
||||||
|
|
Loading…
Reference in New Issue