Integrate osprofiler into Senlin
Add an option to enable osprofiler to measure the performance of Senlin.

Change-Id: Ib828c73b99a83d29440caeaaf26d639330e81ace
Implements: blueprint senlin-osprofiler
This commit is contained in:
parent
554306ac2d
commit
f7fd5bae3e
|
@ -1,7 +1,7 @@
|
|||
|
||||
# senlin-api pipeline
|
||||
[pipeline:senlin-api]
|
||||
pipeline = request_id faultwrap ssl versionnegotiation webhook authtoken context trust apiv1app
|
||||
pipeline = request_id faultwrap ssl versionnegotiation osprofiler webhook authtoken context trust apiv1app
|
||||
|
||||
[app:apiv1app]
|
||||
paste.app_factory = senlin.api.common.wsgi:app_factory
|
||||
|
@ -37,3 +37,6 @@ senlin.filter_factory = senlin.api.middleware:webhook_filter
|
|||
# Auth middleware that validates token against keystone
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
|
||||
|
||||
[filter:osprofiler]
|
||||
paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
|
||||
|
|
|
@ -0,0 +1,4 @@
|
|||
---
|
||||
features:
|
||||
- Integrated OSProfiler into Senlin, supporting the use of OSProfiler to
  measure the performance of Senlin.
|
|
@ -22,6 +22,7 @@ oslo.serialization>=1.10.0 # Apache-2.0
|
|||
oslo.service>=1.10.0 # Apache-2.0
|
||||
oslo.utils>=3.16.0 # Apache-2.0
|
||||
oslo.versionedobjects>=1.13.0 # Apache-2.0
|
||||
osprofiler>=1.4.0 # Apache-2.0
|
||||
PasteDeploy>=1.5.0 # MIT
|
||||
pytz>=2013.6 # MIT
|
||||
PyYAML>=3.10.0 # MIT
|
||||
|
|
|
@ -26,6 +26,7 @@ import six
|
|||
from senlin.api.common import wsgi
|
||||
from senlin.common.i18n import _LI
|
||||
from senlin.common import messaging
|
||||
from senlin.common import profiler
|
||||
from senlin import version
|
||||
|
||||
_lazy.enable_lazy()
|
||||
|
@ -47,6 +48,7 @@ def main():
|
|||
port = cfg.CONF.senlin_api.bind_port
|
||||
LOG.info(_LI('Starting Senlin API on %(host)s:%(port)s'),
|
||||
{'host': host, 'port': port})
|
||||
profiler.setup('senlin-api', host)
|
||||
server = wsgi.Server('senlin-api', cfg.CONF.senlin_api)
|
||||
server.start(app, default_port=port)
|
||||
systemd.notify_once()
|
||||
|
|
|
@ -22,6 +22,7 @@ from oslo_service import service
|
|||
|
||||
from senlin.common import consts
|
||||
from senlin.common import messaging
|
||||
from senlin.common import profiler
|
||||
from senlin import objects
|
||||
|
||||
_lazy.enable_lazy()
|
||||
|
@ -37,6 +38,7 @@ def main():
|
|||
|
||||
from senlin.engine import service as engine
|
||||
|
||||
profiler.setup('senlin-engine', cfg.CONF.host)
|
||||
srv = engine.EngineService(cfg.CONF.host, consts.ENGINE_TOPIC)
|
||||
launcher = service.launch(cfg.CONF, srv,
|
||||
workers=cfg.CONF.num_engine_workers)
|
||||
|
|
|
@ -17,6 +17,7 @@ import socket
|
|||
|
||||
from keystoneauth1 import loading as ks_loading
|
||||
from oslo_config import cfg
|
||||
from osprofiler import opts as profiler
|
||||
|
||||
from senlin.api.common import wsgi
|
||||
from senlin.common.i18n import _
|
||||
|
@ -183,6 +184,10 @@ cfg.CONF.register_group(zaqar_group)
|
|||
ks_loading.register_session_conf_options(cfg.CONF, 'zaqar')
|
||||
ks_loading.register_auth_conf_options(cfg.CONF, 'zaqar')
|
||||
|
||||
# OSProfiler
|
||||
group, opts = profiler.list_opts()[0]
|
||||
cfg.CONF.register_opts(opts, group=group)
|
||||
|
||||
|
||||
def list_opts():
|
||||
"""Return a list of oslo.config options available.
|
||||
|
@ -212,3 +217,4 @@ def list_opts():
|
|||
yield revision_group.name, revision_opts
|
||||
yield receiver_group.name, receiver_opts
|
||||
yield zaqar_group.name, zaqar_opts
|
||||
yield profiler.list_opts()[0]
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
import eventlet
|
||||
from oslo_config import cfg
|
||||
import oslo_messaging as messaging
|
||||
from osprofiler import profiler
|
||||
|
||||
from senlin.common import consts
|
||||
from senlin.common import context
|
||||
|
@ -40,10 +41,22 @@ class RequestContextSerializer(messaging.Serializer):
|
|||
|
||||
@staticmethod
def serialize_context(ctxt):
    """Convert a request context to a dict, embedding profiler state.

    If an osprofiler trace is active in this thread, its hmac key,
    base id and current span id are attached under 'trace_info' so
    the receiving side can resume the same trace.
    """
    serialized = ctxt.to_dict()
    prof = profiler.get()
    if prof:
        serialized["trace_info"] = {
            "hmac_key": prof.hmac_key,
            "base_id": prof.get_base_id(),
            "parent_id": prof.get_id()
        }
    return serialized
|
||||
|
||||
@staticmethod
def deserialize_context(ctxt):
    """Rebuild a RequestContext from a dict, restoring profiler state.

    When the incoming dict carries 'trace_info', the local osprofiler
    is re-initialized with it so spans recorded here join the caller's
    trace. The key is popped so it never reaches RequestContext.
    """
    trace = ctxt.pop("trace_info", None)
    if trace:
        profiler.init(**trace)
    return context.RequestContext.from_dict(ctxt)
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
import oslo_messaging
|
||||
import osprofiler.profiler
|
||||
import osprofiler.web
|
||||
|
||||
from senlin.common import context
|
||||
from senlin.common.i18n import _LW
|
||||
from senlin.common import messaging
|
||||
|
||||
cfg.CONF.import_opt('enabled', 'senlin.common.config', group='profiler')
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def setup(binary, host):
    """Configure OSProfiler for this process, if profiling is enabled.

    When ``[profiler]/enabled`` is True, create a "Messaging" notifier on
    the existing oslo.messaging transport (so trace points are emitted as
    notifications) and enable the osprofiler WSGI middleware with the
    configured hmac keys; otherwise disable the middleware.

    :param binary: service binary name, e.g. 'senlin-api' or 'senlin-engine'.
    :param host: name of the host this service runs on.
    """
    # Import the submodule explicitly: the code below uses
    # osprofiler.notifier, which would otherwise only be reachable if some
    # other osprofiler module happened to import it transitively. The
    # import is unconditional (before both branches) because it rebinds
    # the local name 'osprofiler' used by the else branch as well.
    import osprofiler.notifier

    if cfg.CONF.profiler.enabled:
        _notifier = osprofiler.notifier.create(
            "Messaging", oslo_messaging, context.get_admin_context().to_dict(),
            messaging.TRANSPORT, "senlin", binary, host)
        osprofiler.notifier.set(_notifier)
        osprofiler.web.enable(cfg.CONF.profiler.hmac_keys)
        LOG.warning(_LW("OSProfiler is enabled.\nIt means that any person who "
                        "knows any of hmac_keys that are specified in "
                        "/etc/senlin/senlin.conf can trace his requests. \n"
                        "In real life only an operator can read this file so "
                        "there is no security issue. Note that even if any "
                        "person can trigger the profiler, only an admin user "
                        "can retrieve trace.\n"
                        "To disable OSProfiler set in senlin.conf:\n"
                        "[profiler]\nenabled=false"))
    else:
        osprofiler.web.disable()
|
|
@ -18,12 +18,15 @@ import six
|
|||
import sys
|
||||
import threading
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_db import api as oslo_db_api
|
||||
from oslo_db import exception as db_exc
|
||||
from oslo_db.sqlalchemy import enginefacade
|
||||
from oslo_db.sqlalchemy import utils as sa_utils
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import timeutils
|
||||
import osprofiler.sqlalchemy
|
||||
import sqlalchemy
|
||||
from sqlalchemy.orm import joinedload_all
|
||||
|
||||
from senlin.common import consts
|
||||
|
@ -43,6 +46,11 @@ def _get_main_context_manager():
|
|||
global _main_context_manager
|
||||
if not _main_context_manager:
|
||||
_main_context_manager = enginefacade.transaction_context()
|
||||
cfg.CONF.import_group('profiler', 'senlin.common.config')
|
||||
if cfg.CONF.profiler.enabled:
|
||||
if cfg.CONF.profiler.trace_sqlalchemy:
|
||||
eng = _main_context_manager.get_legacy_facade().get_engine()
|
||||
osprofiler.sqlalchemy.add_tracing(sqlalchemy, eng, "db")
|
||||
return _main_context_manager
|
||||
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@ import eventlet
|
|||
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import timeutils
|
||||
from osprofiler import profiler
|
||||
|
||||
from senlin.common import consts
|
||||
from senlin.common import exception
|
||||
|
@ -180,6 +181,7 @@ class ClusterAction(base.Action):
|
|||
|
||||
return self.RES_OK, ''
|
||||
|
||||
@profiler.trace('ClusterAction.do_create', hide_args=False)
|
||||
def do_create(self):
|
||||
"""Handler for CLUSTER_CREATE action.
|
||||
|
||||
|
@ -202,6 +204,7 @@ class ClusterAction(base.Action):
|
|||
|
||||
return result, reason
|
||||
|
||||
@profiler.trace('ClusterAction.do_update', hide_args=False)
|
||||
def do_update(self):
|
||||
"""Handler for CLUSTER_UPDATE action.
|
||||
|
||||
|
@ -304,6 +307,7 @@ class ClusterAction(base.Action):
|
|||
|
||||
return self.RES_OK, ''
|
||||
|
||||
@profiler.trace('ClusterAction.do_delete', hide_args=False)
|
||||
def do_delete(self):
|
||||
"""Handler for the CLUSTER_DELETE action.
|
||||
|
||||
|
@ -332,6 +336,7 @@ class ClusterAction(base.Action):
|
|||
|
||||
return result, reason
|
||||
|
||||
@profiler.trace('ClusterAction.do_add_nodes', hide_args=False)
|
||||
def do_add_nodes(self):
|
||||
"""Handler for the CLUSTER_ADD_NODES action.
|
||||
|
||||
|
@ -401,6 +406,7 @@ class ClusterAction(base.Action):
|
|||
|
||||
return result, reason
|
||||
|
||||
@profiler.trace('ClusterAction.do_del_nodes', hide_args=False)
|
||||
def do_del_nodes(self):
|
||||
"""Handler for the CLUSTER_DEL_NODES action.
|
||||
|
||||
|
@ -463,6 +469,7 @@ class ClusterAction(base.Action):
|
|||
|
||||
return result, reason
|
||||
|
||||
@profiler.trace('ClusterAction.do_replace_nodes', hide_args=False)
|
||||
def do_replace_nodes(self):
|
||||
"""Handler for the CLUSTER_REPLACE_NODES action.
|
||||
|
||||
|
@ -553,6 +560,7 @@ class ClusterAction(base.Action):
|
|||
self.cluster.eval_status(self.context, self.CLUSTER_REPLACE_NODES)
|
||||
return result, reason
|
||||
|
||||
@profiler.trace('ClusterAction.do_check', hide_args=False)
|
||||
def do_check(self):
|
||||
"""Handler for CLUSTER_CHECK action.
|
||||
|
||||
|
@ -587,6 +595,7 @@ class ClusterAction(base.Action):
|
|||
self.cluster.eval_status(self.context, self.CLUSTER_CHECK)
|
||||
return res, reason
|
||||
|
||||
@profiler.trace('ClusterAction.do_recover', hide_args=False)
|
||||
def do_recover(self):
|
||||
"""Handler for the CLUSTER_RECOVER action.
|
||||
|
||||
|
@ -646,6 +655,7 @@ class ClusterAction(base.Action):
|
|||
self.cluster.set_status(self.context, self.cluster.RESIZING,
|
||||
_('Cluster resize started.'), **kwargs)
|
||||
|
||||
@profiler.trace('ClusterAction.do_resize', hide_args=False)
|
||||
def do_resize(self):
|
||||
"""Handler for the CLUSTER_RESIZE action.
|
||||
|
||||
|
@ -690,6 +700,7 @@ class ClusterAction(base.Action):
|
|||
self.cluster.eval_status(self.context, self.CLUSTER_RESIZE)
|
||||
return result, reason
|
||||
|
||||
@profiler.trace('ClusterAction.do_scale_out', hide_args=False)
|
||||
def do_scale_out(self):
|
||||
"""Handler for the CLUSTER_SCALE_OUT action.
|
||||
|
||||
|
@ -729,6 +740,7 @@ class ClusterAction(base.Action):
|
|||
|
||||
return result, reason
|
||||
|
||||
@profiler.trace('ClusterAction.do_scale_in', hide_args=False)
|
||||
def do_scale_in(self):
|
||||
"""Handler for the CLUSTER_SCALE_IN action.
|
||||
|
||||
|
@ -789,6 +801,7 @@ class ClusterAction(base.Action):
|
|||
|
||||
return result, reason
|
||||
|
||||
@profiler.trace('ClusterAction.do_attach_policy', hide_args=False)
|
||||
def do_attach_policy(self):
|
||||
"""Handler for the CLUSTER_ATTACH_POLICY action.
|
||||
|
||||
|
@ -809,6 +822,7 @@ class ClusterAction(base.Action):
|
|||
|
||||
return result, reason
|
||||
|
||||
@profiler.trace('ClusterAction.do_detach_policy', hide_args=False)
|
||||
def do_detach_policy(self):
|
||||
"""Handler for the CLUSTER_DETACH_POLICY action.
|
||||
|
||||
|
@ -827,6 +841,7 @@ class ClusterAction(base.Action):
|
|||
|
||||
return result, reason
|
||||
|
||||
@profiler.trace('ClusterAction.do_update_policy', hide_args=False)
|
||||
def do_update_policy(self):
|
||||
"""Handler for the CLUSTER_UPDATE_POLICY action.
|
||||
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
|
||||
import eventlet
|
||||
|
||||
from osprofiler import profiler
|
||||
|
||||
from senlin.common.i18n import _
|
||||
from senlin.common import scaleutils as su
|
||||
from senlin.engine.actions import base
|
||||
|
@ -53,6 +55,7 @@ class NodeAction(base.Action):
|
|||
except Exception:
|
||||
self.node = None
|
||||
|
||||
@profiler.trace('NodeAction.do_create', hide_args=False)
|
||||
def do_create(self):
|
||||
"""Handler for the NODE_CREATE action.
|
||||
|
||||
|
@ -83,6 +86,7 @@ class NodeAction(base.Action):
|
|||
else:
|
||||
return self.RES_ERROR, _('Node creation failed.')
|
||||
|
||||
@profiler.trace('NodeAction.do_delete', hide_args=False)
|
||||
def do_delete(self):
|
||||
"""Handler for the NODE_DELETE action.
|
||||
|
||||
|
@ -124,6 +128,7 @@ class NodeAction(base.Action):
|
|||
else:
|
||||
return self.RES_ERROR, _('Node deletion failed.')
|
||||
|
||||
@profiler.trace('NodeAction.do_update', hide_args=False)
|
||||
def do_update(self):
|
||||
"""Handler for the NODE_UPDATE action.
|
||||
|
||||
|
@ -136,6 +141,7 @@ class NodeAction(base.Action):
|
|||
else:
|
||||
return self.RES_ERROR, _('Node update failed.')
|
||||
|
||||
@profiler.trace('NodeAction.do_join', hide_args=False)
|
||||
def do_join(self):
|
||||
"""Handler for the NODE_JOIN action.
|
||||
|
||||
|
@ -160,6 +166,7 @@ class NodeAction(base.Action):
|
|||
else:
|
||||
return self.RES_ERROR, _('Node failed in joining cluster.')
|
||||
|
||||
@profiler.trace('NodeAction.do_leave', hide_args=False)
|
||||
def do_leave(self):
|
||||
"""Handler for the NODE_LEAVE action.
|
||||
|
||||
|
@ -183,6 +190,7 @@ class NodeAction(base.Action):
|
|||
else:
|
||||
return self.RES_ERROR, _('Node failed in leaving cluster.')
|
||||
|
||||
@profiler.trace('NodeAction.do_check', hide_args=False)
|
||||
def do_check(self):
|
||||
"""Handler for the NODE_check action.
|
||||
|
||||
|
@ -194,6 +202,7 @@ class NodeAction(base.Action):
|
|||
else:
|
||||
return self.RES_ERROR, _('Node status is not ACTIVE.')
|
||||
|
||||
@profiler.trace('NodeAction.do_recover', hide_args=False)
|
||||
def do_recover(self):
|
||||
"""Handler for the NODE_RECOVER action.
|
||||
|
||||
|
|
|
@ -14,8 +14,10 @@ import time
|
|||
|
||||
import eventlet
|
||||
from oslo_config import cfg
|
||||
from oslo_context import context as oslo_context
|
||||
from oslo_log import log as logging
|
||||
from oslo_service import threadgroup
|
||||
from osprofiler import profiler
|
||||
|
||||
from senlin.common import context
|
||||
from senlin.common.i18n import _
|
||||
|
@ -56,10 +58,31 @@ class ThreadGroupManager(object):
|
|||
# TODO(Yanyan): have this task call dbapi purge events
|
||||
pass
|
||||
|
||||
def _serialize_profile_info(self):
    """Capture the active osprofiler trace as a dict, or None.

    Returns the hmac key, base id and current span id of the thread's
    active profiler so a spawned thread can re-initialize the same
    trace; returns None when no profiler is active.
    """
    prof = profiler.get()
    if not prof:
        return None
    return {
        "hmac_key": prof.hmac_key,
        "base_id": prof.get_base_id(),
        "parent_id": prof.get_id()
    }
|
||||
|
||||
def _start_with_trace(self, cnxt, trace, func, *args, **kwargs):
|
||||
if trace:
|
||||
profiler.init(**trace)
|
||||
if cnxt is not None:
|
||||
cnxt.update_store()
|
||||
return func(*args, **kwargs)
|
||||
|
||||
def start(self, func, *args, **kwargs):
    """Run *func* in a thread, propagating context and profiler trace.

    The thread entry point is _start_with_trace, which re-establishes
    the current oslo request context and any active osprofiler trace
    inside the new thread before calling *func*.
    """
    req_cnxt = oslo_context.get_current()
    trace = self._serialize_profile_info()
    return self.group.add_thread(self._start_with_trace, req_cnxt,
                                 trace, func, *args, **kwargs)
|
||||
|
||||
def start_action(self, worker_id, action_id=None):
|
||||
'''Run action(s) in sub-thread(s).
|
||||
|
|
|
@ -19,6 +19,7 @@ import oslo_messaging
|
|||
from oslo_service import service
|
||||
from oslo_utils import timeutils
|
||||
from oslo_utils import uuidutils
|
||||
from osprofiler import profiler
|
||||
import six
|
||||
|
||||
from senlin.common import consts
|
||||
|
@ -86,6 +87,7 @@ def request_context2(func):
|
|||
return wrapped
|
||||
|
||||
|
||||
@profiler.trace_cls("rpc")
|
||||
class EngineService(service.Service):
|
||||
"""Lifecycle manager for a running service engine.
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@ import copy
|
|||
from oslo_context import context as oslo_context
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import timeutils
|
||||
from osprofiler import profiler
|
||||
import six
|
||||
|
||||
from senlin.common import consts
|
||||
|
@ -197,16 +198,19 @@ class Profile(object):
|
|||
return self.id
|
||||
|
||||
@classmethod
@profiler.trace('Profile.create_object', hide_args=False)
def create_object(cls, ctx, obj):
    """Load the profile referenced by *obj* and run its do_create."""
    return cls.load(ctx, profile_id=obj.profile_id).do_create(obj)
|
||||
|
||||
@classmethod
@profiler.trace('Profile.delete_object', hide_args=False)
def delete_object(cls, ctx, obj, **params):
    """Load the profile referenced by *obj* and run its do_delete."""
    return cls.load(ctx, profile_id=obj.profile_id).do_delete(obj, **params)
|
||||
|
||||
@classmethod
|
||||
@profiler.trace('Profile.update_object', hide_args=False)
|
||||
def update_object(cls, ctx, obj, new_profile_id=None, **params):
|
||||
profile = cls.load(ctx, profile_id=obj.profile_id)
|
||||
new_profile = None
|
||||
|
@ -215,26 +219,31 @@ class Profile(object):
|
|||
return profile.do_update(obj, new_profile, **params)
|
||||
|
||||
@classmethod
@profiler.trace('Profile.get_details', hide_args=False)
def get_details(cls, ctx, obj):
    """Load the profile referenced by *obj* and run its do_get_details."""
    return cls.load(ctx, profile_id=obj.profile_id).do_get_details(obj)
|
||||
|
||||
@classmethod
@profiler.trace('Profile.join_cluster', hide_args=False)
def join_cluster(cls, ctx, obj, cluster_id):
    """Load the profile referenced by *obj* and run do_join for *cluster_id*."""
    return cls.load(ctx, profile_id=obj.profile_id).do_join(obj, cluster_id)
|
||||
|
||||
@classmethod
@profiler.trace('Profile.leave_cluster', hide_args=False)
def leave_cluster(cls, ctx, obj):
    """Load the profile referenced by *obj* and run its do_leave."""
    return cls.load(ctx, profile_id=obj.profile_id).do_leave(obj)
|
||||
|
||||
@classmethod
@profiler.trace('Profile.check_object', hide_args=False)
def check_object(cls, ctx, obj):
    """Load the profile referenced by *obj* and run its do_check."""
    return cls.load(ctx, profile_id=obj.profile_id).do_check(obj)
|
||||
|
||||
@classmethod
@profiler.trace('Profile.recover_object', hide_args=False)
def recover_object(cls, ctx, obj, **options):
    """Load the profile referenced by *obj* and run its do_recover."""
    return cls.load(ctx, profile_id=obj.profile_id).do_recover(obj, **options)
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
import eventlet
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
from oslo_context import context as oslo_context
|
||||
from oslo_service import threadgroup
|
||||
|
||||
from senlin.db import api as db_api
|
||||
|
@ -42,9 +43,9 @@ class DummyThreadGroup(object):
|
|||
def stop_timers(self):
|
||||
pass
|
||||
|
||||
def add_thread(self, callback, cnxt, trace, func, *args, **kwargs):
    """Fake ThreadGroup.add_thread: record *func* instead of running it.

    *callback* is ThreadGroupManager._start_with_trace; *func* is the
    'real' callable the caller wanted scheduled. Only *func* is kept so
    tests can assert on what was submitted.
    """
    self.threads.append(func)
    return DummyThread()
|
||||
|
||||
def stop(self, graceful=False):
|
||||
|
@ -84,7 +85,10 @@ class SchedulerTest(base.SenlinTestCase):
|
|||
tgm = scheduler.ThreadGroupManager()
|
||||
tgm.start(f)
|
||||
|
||||
mock_group.add_thread.assert_called_once_with(f)
|
||||
mock_group.add_thread.assert_called_once_with(
|
||||
tgm._start_with_trace,
|
||||
oslo_context.get_current(),
|
||||
None, f)
|
||||
|
||||
@mock.patch.object(db_api, 'action_acquire_1st_ready')
|
||||
@mock.patch.object(db_api, 'action_acquire')
|
||||
|
@ -100,8 +104,11 @@ class SchedulerTest(base.SenlinTestCase):
|
|||
tgm = scheduler.ThreadGroupManager()
|
||||
tgm.start_action('4567', '0123')
|
||||
|
||||
mock_group.add_thread.assert_called_once_with(actionm.ActionProc,
|
||||
tgm.db_session, '0123')
|
||||
mock_group.add_thread.assert_called_once_with(
|
||||
tgm._start_with_trace,
|
||||
oslo_context.get_current(),
|
||||
None, actionm.ActionProc,
|
||||
tgm.db_session, '0123')
|
||||
mock_thread = mock_group.add_thread.return_value
|
||||
self.assertEqual(mock_thread, tgm.workers['0123'])
|
||||
mock_thread.link.assert_called_once_with(mock.ANY, '0123')
|
||||
|
@ -118,8 +125,11 @@ class SchedulerTest(base.SenlinTestCase):
|
|||
tgm = scheduler.ThreadGroupManager()
|
||||
tgm.start_action('4567')
|
||||
|
||||
mock_group.add_thread.assert_called_once_with(actionm.ActionProc,
|
||||
tgm.db_session, '0123')
|
||||
mock_group.add_thread.assert_called_once_with(
|
||||
tgm._start_with_trace,
|
||||
oslo_context.get_current(),
|
||||
None, actionm.ActionProc,
|
||||
tgm.db_session, '0123')
|
||||
mock_thread = mock_group.add_thread.return_value
|
||||
self.assertEqual(mock_thread, tgm.workers['0123'])
|
||||
mock_thread.link.assert_called_once_with(mock.ANY, '0123')
|
||||
|
|
Loading…
Reference in New Issue