This feature adds a new RPC backend for communication between the
sysinv-api, sysinv-conductor and sysinv-agent processes. The backend is
implemented using a patched zerorpc library [1], which is built on top
of ZeroMQ and MessagePack. The motivation for this change is to decouple
sysinv from RabbitMQ and use a brokerless RPC solution instead.

The key points are:
- All imports of rpcapi.py are replaced by rpcapiproxy.py, which decides
  which backend to use (RabbitMQ or ZeroMQ) according to configuration
  (see the usage sketch below).
- During an upgrade the RPC service listens on both RabbitMQ and ZeroMQ.
  For communication between hosts, the client backend API is chosen
  according to the host software version.
- In future versions RabbitMQ will no longer be necessary and its usage
  can be removed. I have marked these parts of the code with
  "TODO(RPCHybridMode)" so they are easy to track.

[1] https://review.opendev.org/c/starlingx/integ/+/864310

TEST PLAN:
PASS: Bootstrap and host-unlock on AIO-SX, AIO-Duplex, Standard
PASS: Bootstrap and host-unlock on DC system-controller and subcloud
PASS: Verify sysinv.log and confirm no errors occur in RPC communication
PASS: Perform system CLI commands that interact with sysinv RPCs:
      - system host-cpu-max-frequency-modify
      - system license-install
      - system storage-backend-add ceph-external
      - system host-swact
PASS: Backup & Restore on AIO-SX
PASS: Bootstrap replay (updating mgmt and cluster subnet) on AIO-SX
PASS: Platform upgrade on AIO-DX (22.06 -> 22.12)
PASS: Platform upgrade on AIO-DX+ (22.06 -> 22.12)
PASS: Platform upgrade on AIO-SX (22.06 -> 22.12)

Depends-On: https://review.opendev.org/c/starlingx/tools/+/859576
Depends-On: https://review.opendev.org/c/starlingx/stx-puppet/+/859575
Depends-On: https://review.opendev.org/c/starlingx/ansible-playbooks/+/862609

Story: 2010087
Task: 46444
Change-Id: I5cd61b541a6d8c62628a0f99db0e35af1eae5961
Signed-off-by: Alyson Deives Pereira <alyson.deivespereira@windriver.com>
Signed-off-by: Eduardo Juliano Alberti <eduardo.alberti@windriver.com>
parent aede2f1492
commit c6a41c20a9
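To illustrate the first key point: callers that previously imported
rpcapi.py now import the proxy module, and the proxy decides which
backend implementation to return. A minimal caller-side sketch, assuming
the conductor proxy is importable as sysinv.conductor.rpcapiproxy (file
paths are not shown in this diff); the sketch is illustrative only and
not part of the change itself:

    # Illustrative caller: the proxy, not the caller, picks the backend.
    from sysinv.conductor import rpcapiproxy as conductor_rpcapi

    # Returns the ZeroMQ-backed ConductorAPI when rpc_backend_zeromq is
    # set (or the hybrid-mode logic applies), otherwise the RabbitMQ one.
    rpcapi = conductor_rpcapi.ConductorAPI(topic=conductor_rpcapi.MANAGER_TOPIC)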
@@ -0,0 +1,20 @@
#!/bin/bash
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

NAME=$(basename $0)

function log {
    logger -p local1.info $1
}

log "$NAME: restarting sysinv services"

sm-restart service sysinv-conductor
sleep 2
pmon-restart sysinv-agent

exit 0
@@ -0,0 +1,26 @@
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0

from oslo_config import cfg
from oslo_log import log
import sysinv.agent.rpcapi as rpcapi
from sysinv.agent.rpcapizmq import AgentAPI as ZMQAgentAPI
from sysinv.agent.rpcapi import AgentAPI as AMQPAgentAPI
from sysinv.zmq_rpc.zmq_rpc import is_rpc_hybrid_mode_active

LOG = log.getLogger(__name__)
MANAGER_TOPIC = rpcapi.MANAGER_TOPIC


def AgentAPI(topic=None):
    rpc_backend = cfg.CONF.rpc_backend
    rpc_backend_zeromq = cfg.CONF.rpc_backend_zeromq
    rpc_backend_hybrid_mode = is_rpc_hybrid_mode_active()
    LOG.debug("Current agent rpc_backend: {} "
              "use_zeromq: {} hybrid_mode: {}".format(rpc_backend,
                                                      rpc_backend_zeromq,
                                                      rpc_backend_hybrid_mode))
    if rpc_backend_zeromq:
        return ZMQAgentAPI(topic)
    return AMQPAgentAPI(topic)
@@ -0,0 +1,62 @@
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0

"""
Client side of the agent RPC API using ZeroMQ backend.
"""

from oslo_config import cfg
from oslo_log import log
from sysinv.agent.rpcapi import AgentAPI as BaseAgentAPI
from sysinv.agent.rpcapi import MANAGER_TOPIC
from sysinv.zmq_rpc.zmq_rpc import ZmqRpcClient
from sysinv.zmq_rpc.zmq_rpc import is_rpc_hybrid_mode_active
from sysinv.zmq_rpc.zmq_rpc import is_zmq_backend_available

CONF = cfg.CONF
LOG = log.getLogger(__name__)


class AgentAPI(ZmqRpcClient, BaseAgentAPI):
    def __init__(self, topic=None):
        if topic is None:
            topic = MANAGER_TOPIC
        host = None
        port = CONF.rpc_zeromq_agent_bind_port
        super(AgentAPI, self).__init__(host, port, topic)

    def call(self, context, msg, topic=None, version=None, timeout=None):
        if is_rpc_hybrid_mode_active():
            host_uuid = msg['args']['host_uuid']
            if not is_zmq_backend_available(host_uuid):
                LOG.debug("RPC hybrid mode is active and agent zmq backend is "
                          "not yet available in host {}. Calling RPC call "
                          "method {} through rabbitmq".format(host_uuid,
                                                              msg['method']))
                rpcapi = BaseAgentAPI()
                return rpcapi.call(context, msg, topic, version, timeout)

        return super(AgentAPI, self).call(context, msg, timeout)

    def cast(self, context, msg, topic=None, version=None):
        if is_rpc_hybrid_mode_active():
            host_uuid = msg['args']['host_uuid']
            if not is_zmq_backend_available(host_uuid):
                LOG.debug("RPC hybrid mode is active and agent zmq backend is "
                          "not yet available in host {}. Calling RPC cast "
                          "method {} through rabbitmq".format(host_uuid,
                                                              msg['method']))
                rpcapi = BaseAgentAPI()
                return rpcapi.cast(context, msg, topic, version)

        return super(AgentAPI, self).cast(context, msg)

    def fanout_cast(self, context, msg, topic=None, version=None):
        if is_rpc_hybrid_mode_active():
            method = msg['method']
            LOG.debug("RPC hybrid mode is active. Calling RPC fanout_cast "
                      "method {} through rabbitmq and zmq".format(method))
            rpcapi = BaseAgentAPI()
            rpcapi.fanout_cast(context, msg, topic, version)
        return super(AgentAPI, self).fanout_cast(context, msg)
@@ -0,0 +1,47 @@
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0

import os
from oslo_config import cfg
from oslo_log import log

import sysinv.conductor.rpcapi as rpcapi
from sysinv.conductor.rpcapi import ConductorAPI as AMQPConductorAPI
from sysinv.conductor.rpcapizmq import ConductorAPI as ZMQConductorAPI
from sysinv.zmq_rpc.zmq_rpc import is_rpc_hybrid_mode_active
from sysinv.zmq_rpc.zmq_rpc import check_connection

LOG = log.getLogger(__name__)
MANAGER_TOPIC = rpcapi.MANAGER_TOPIC


def ConductorAPI(topic=None):
    rpc_backend_zeromq = cfg.CONF.rpc_backend_zeromq
    rpc_backend_hybrid_mode = is_rpc_hybrid_mode_active()
    rpc_backend = cfg.CONF.rpc_backend
    LOG.debug("Current conductor rpc_backend: {} "
              "use_zeromq: {} hybrid_mode: {}".format(rpc_backend,
                                                      rpc_backend_zeromq,
                                                      rpc_backend_hybrid_mode))
    # Hybrid mode is expected to be enabled on controller-1 only during an
    # upgrade; all other nodes should be running ZeroMQ exclusively.
    if rpc_backend_hybrid_mode:
        # On the controller-1 agent we need to know whether the conductor
        # is able to listen over zerorpc.
        # If the conductor is running on the same host, we know it is running
        # in hybrid mode and we assume ZeroMQ is preferred.
        # Otherwise, the conductor may still be running on controller-0 before
        # migrating to ZeroMQ, so before sending the RPC call we check whether
        # ZeroMQ is listening; if it is, use it, otherwise use RabbitMQ.
        if os.path.isfile("/var/run/sysinv-conductor.pid"):
            return ZMQConductorAPI(topic)
        else:
            if check_connection(cfg.CONF.rpc_zeromq_conductor_bind_ip,
                                cfg.CONF.rpc_zeromq_conductor_bind_port):
                return ZMQConductorAPI(topic)
            else:
                return AMQPConductorAPI(topic)
    if rpc_backend_zeromq:
        return ZMQConductorAPI(topic)
    return AMQPConductorAPI(topic)
@@ -0,0 +1,45 @@
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0

"""
Client side of the conductor RPC API using ZeroMQ backend.
"""

import os
from oslo_config import cfg
from oslo_log import log
from sysinv.common import constants
from sysinv.conductor.rpcapi import ConductorAPI as BaseConductorAPI
from sysinv.conductor.rpcapi import MANAGER_TOPIC
from sysinv.zmq_rpc.zmq_rpc import ZmqRpcClient

CONF = cfg.CONF
LOG = log.getLogger(__name__)


class ConductorAPI(ZmqRpcClient, BaseConductorAPI):
    def __init__(self, topic=None):
        if topic is None:
            topic = MANAGER_TOPIC
        host = CONF.rpc_zeromq_conductor_bind_ip

        # The bind IP is expected to have a value assigned; if it still has
        # the default value, puppet either was not executed or ran into an
        # issue.
        # We can still use the default if the conductor is running locally;
        # otherwise we try to communicate using the controller hostname.
        if host == "::" and not os.path.isfile("/var/run/sysinv-conductor.pid"):
            host = constants.CONTROLLER_HOSTNAME

        port = CONF.rpc_zeromq_conductor_bind_port
        super(ConductorAPI, self).__init__(host, port, topic)

    def call(self, context, msg, topic=None, version=None, timeout=None):
        return super(ConductorAPI, self).call(context, msg, timeout)

    def cast(self, context, msg, topic=None, version=None):
        return super(ConductorAPI, self).cast(context, msg)

    def fanout_cast(self, context, msg, topic=None, version=None):
        return super(ConductorAPI, self).fanout_cast(context, msg)
@@ -0,0 +1,3 @@
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
@@ -0,0 +1,46 @@
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0

import zerorpc
from oslo_config import cfg
from sysinv.zmq_rpc.serializer import decode
from sysinv.zmq_rpc.serializer import encode


CONF = cfg.CONF


class ClientProvider(object):
    def __init__(self):
        self.clients = {}

    def _create_client(self, endpoint):
        # pylint: disable=unexpected-keyword-arg
        return zerorpc.Client(
            connect_to=endpoint,
            encoder=encode,
            decoder=decode,
            # TODO: with the default of 5s we get heartbeat timeouts when
            # executing some RPCs that take longer than that to finish.
            # We need to understand why this is happening because this
            # scenario should be supported by zerorpc
            heartbeat=None,
            # TODO: we need to determine the correct timeout value here based
            # on the max time an RPC can take to execute
            timeout=CONF.rpc_response_timeout)

    def get_client_for_endpoint(self, endpoint):
        client = self.clients.get(endpoint, None)
        if client is None:
            client = self._create_client(endpoint)
            self.clients[endpoint] = client
        return client

    def cleanup(self):
        for endpoint, client in self.clients.items():
            try:
                client.close()
            except Exception:
                pass
        self.clients.clear()
@@ -0,0 +1,65 @@
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0

import datetime
import ipaddress
import netaddr
import uuid

from oslo_utils import timeutils
from sysinv.objects.base import SysinvObject
from sysinv.common.context import RequestContext
from sysinv.openstack.common.context import RequestContext as BaseRequestContext
from sysinv.openstack.common.rpc.amqp import RpcContext
from sysinv.openstack.common.rpc.common import CommonRpcContext


def encode(obj, chain=None):
    if isinstance(obj, (RequestContext, BaseRequestContext,
                        RpcContext, CommonRpcContext)):
        if isinstance(obj, RequestContext):
            context_type = b'request'
        elif isinstance(obj, BaseRequestContext):
            context_type = b'base_request'
        elif isinstance(obj, RpcContext):
            context_type = b'rpc'
        else:
            context_type = b'common_rpc'
        return {b'context': True,
                b'context_type': context_type,
                b'data': obj.to_dict()}
    if hasattr(obj, 'obj_to_primitive') and callable(obj.obj_to_primitive):
        return obj.obj_to_primitive()
    if isinstance(obj, datetime.datetime):
        return obj.strftime(timeutils.PERFECT_TIME_FORMAT)
    if isinstance(obj, uuid.UUID):
        return str(obj)
    if netaddr and isinstance(obj, (netaddr.IPAddress, netaddr.IPNetwork)):
        return str(obj)
    if ipaddress and isinstance(obj,
                                (ipaddress.IPv4Address,
                                 ipaddress.IPv6Address)):
        return str(obj)
    if isinstance(obj, Exception):
        return repr(obj)
    return obj if chain is None else chain(obj)


def decode(obj, chain=None):
    try:
        if b'context' in obj:
            context_dict = obj[b'data']
            context_type = obj[b'context_type']
            if context_type == b'request':
                return RequestContext.from_dict(context_dict)
            if context_type == b'base_request':
                return BaseRequestContext.from_dict(context_dict)
            if context_type == b'rpc':
                return RpcContext.from_dict(context_dict)
            return CommonRpcContext.from_dict(context_dict)
        if isinstance(obj, dict) and 'sysinv_object.name' in obj:
            return SysinvObject.obj_from_primitive(obj)
        return obj if chain is None else chain(obj)
    except KeyError:
        return obj if chain is None else chain(obj)
@@ -0,0 +1,237 @@
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0

import zerorpc
import eventlet
import os

from eventlet import greenthread
from oslo_log import log
from zerorpc import exceptions

from sysinv.db import api
from sysinv.objects.base import SysinvObject
from sysinv.zmq_rpc.client_provider import ClientProvider
from sysinv.zmq_rpc.serializer import decode
from sysinv.zmq_rpc.serializer import encode
import sysinv.openstack.common.rpc.common as rpc_common
import tsconfig.tsconfig as tsc

LOG = log.getLogger(__name__)

client_provider = ClientProvider()


class RpcWrapper(object):
    def __init__(self, target):
        self.target = target
        self.target_methods = [f for f in dir(self.target) if
                               not f.startswith('_')]

    def __getattr__(self, func):
        def method(context, **kwargs):
            if func in self.target_methods:
                # hydrate any sysinv object passed as argument with the context
                kwargs = self._inject_context(context, kwargs)
                LOG.debug("Calling RPC server method {} with context {} args {}"
                          .format(func, context, kwargs))
                retval = getattr(self.target, func)(context, **kwargs)
                LOG.debug("Finished RPC server method {} with context {} args {}"
                          .format(func, context, kwargs))
                return retval
            else:
                raise AttributeError

        return method

    def __dir__(self):
        return dir(self.target)

    def _process_iterable(self, context, action_fn, values):
        """Process an iterable, taking an action on each value.
        :param:context: Request context
        :param:action_fn: Action to take on each item in values
        :param:values: Iterable container of things to take action on
        :returns: A new container of the same type (except set) with
                  items from values having had action applied.
        """
        iterable = values.__class__
        if iterable == set:
            # NOTE(danms): A set can't have an unhashable value inside, such as
            # a dict. Convert sets to tuples, which is fine, since we can't
            # send them over RPC anyway.
            iterable = tuple
        return iterable([action_fn(context, value) for value in values])

    def _inject_context_to_arg(self, ctx, arg):
        if isinstance(arg, SysinvObject):
            arg._context = ctx
        elif isinstance(arg, (tuple, list, set)):
            arg = self._process_iterable(ctx, self._inject_context_to_arg, arg)
        return arg

    def _inject_context(self, context, kwargs):
        new_kwargs = dict()
        for argname, arg in kwargs.items():
            new_kwargs[argname] = self._inject_context_to_arg(context, arg)
        return new_kwargs


class ZmqRpcServer(object):
    def __init__(self, target, host, port):
        self.target = target
        self.endpoint = get_tcp_endpoint(host, port)
        self.server = None

    def run(self):
        def _run_in_thread():
            try:
                LOG.info("Starting zmq server at {}".format(self.endpoint))
                # pylint: disable=unexpected-keyword-arg
                # TODO with the default of 5s heartbeat we get LostRemote
                # exceptions when executing some RPCs that take longer than
                # that to finish. We need to understand why this happens
                # because this scenario should be supported by zerorpc
                self.server = zerorpc.Server(RpcWrapper(self.target),
                                             heartbeat=None,
                                             encoder=encode,
                                             decoder=decode)
                self.server.bind(self.endpoint)
                self.server.run()
            except eventlet.greenlet.GreenletExit:
                return
            except Exception as e:
                LOG.error("Error while running zmq rpc server at {}: "
                          "{}".format(self.endpoint, str(e)))
                return

        return greenthread.spawn(_run_in_thread)

    def stop(self):
        if self.server:
            self.server.close()
            client_provider.cleanup()


class ZmqRpcClient(object):
    def __init__(self, host, port, topic):
        try:
            self.host = host
            self.port = port
            self.topic = topic
            self.client = None
            if host is not None:
                endpoint = get_tcp_endpoint(host, port)
                self.client = client_provider.get_client_for_endpoint(endpoint)

            LOG.debug("Started zmq rpc client to [{}]:{}".format(
                self.host, self.port))
        except Exception as e:
            LOG.error("Error while running zmq client to {}:{}: {}".format(
                self.host, self.port, str(e)))

    def _exec(self, client, context, method, **kwargs):
        if not client:
            host_uuid = kwargs.get('host_uuid', None)
            if host_uuid is None:
                raise Exception("Missing host_uuid parameter for rpc endpoint")
            dbapi = api.get_instance()
            host = dbapi.ihost_get(host_uuid)
            endpoint = get_tcp_endpoint(host.mgmt_ip, self.port)
            client = client_provider.get_client_for_endpoint(endpoint)

        try:
            LOG.debug(
                "Calling RPC client method {} with context {} args {}".format(
                    method, context, kwargs))
            return getattr(client, method)(context, **kwargs)
        except exceptions.TimeoutExpired:
            raise rpc_common.Timeout(topic=self.topic,
                                     method=method)
        except exceptions.RemoteError as e:
            raise rpc_common.RemoteError(exc_type=e.name,
                                         value=e.msg,
                                         traceback=e.traceback)
        except exceptions.LostRemote as e:
            raise rpc_common.LostRemote(lost_remote_msg=str(e),
                                        topic=self.topic,
                                        method=method)

    def call(self, context, msg, timeout=None):
        method = msg['method']
        args = msg['args']
        if timeout is not None:
            args['timeout_'] = timeout
        return self._exec(self.client, context, method, **args)

    def cast(self, context, msg):
        method = msg['method']
        args = msg['args']
        args['async_'] = True
        return self._exec(self.client, context, method, **args)

    def fanout_cast(self, context, msg):
        method = msg['method']
        args = msg['args']
        args['async_'] = True
        endpoints = self.get_fanout_endpoints()
        for endpoint in endpoints:
            client = client_provider.get_client_for_endpoint(endpoint)
            LOG.debug("Calling fanout method {} to endpoint {}".format(
                method, endpoint))
            self._exec(client, context, method, **args)

    def get_fanout_endpoints(self):
        endpoints = []
        dbapi = api.get_instance()
        hosts = dbapi.ihost_get_list()
        for host in hosts:
            LOG.debug(
                "Evaluating host {} to add as endpoint ("
                "availability={}, operational={}, "
                "personality={}, subfunctions={})".format(
                    host.hostname, host.availability, host.operational,
                    host.personality, host.subfunctions))
            endpoint = get_tcp_endpoint(host.mgmt_ip, self.port)
            endpoints.append(endpoint)
            LOG.debug("Add host {} with endpoint {} to fanout request".format(
                host.hostname, endpoint))