Make task_manager logging more helpful

1. Log successful reservation attempts
2. Log reservation release
3. Include the purpose in logging (requires changes on the caller side)
4. Include lock timing (time to acquire and time the lock was held) in logging

Closes-Bug: #1468689
Change-Id: I8120ac0f66bc3d85fcd11bedee764f37032add64
Author: Dmitry Tantsur
Date:   2015-06-23 14:06:25 +02:00
Parent: e84be558a9
Commit: dc84f5dd97
5 changed files with 142 additions and 60 deletions
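
A minimal caller-side sketch of the new keyword argument (context and node_uuid are
placeholders for whatever the calling RPC method already has in scope, and the body of
the with block just stands for whatever work needs the lock):

    from ironic.conductor import task_manager

    # An exclusive lock tagged with a human-readable purpose. Locking behaviour
    # is unchanged; the purpose string only shows up in the new debug messages.
    with task_manager.acquire(context, node_uuid, shared=False,
                              purpose='node update') as task:
        task.driver.power.validate(task)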


@ -378,7 +378,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
driver_name = node_obj.driver if 'driver' in delta else None
with task_manager.acquire(context, node_id, shared=False,
driver_name=driver_name):
driver_name=driver_name,
purpose='node update'):
node_obj.save()
return node_obj
@ -407,7 +408,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
"The desired new state is %(state)s."
% {'node': node_id, 'state': new_state})
with task_manager.acquire(context, node_id, shared=False) as task:
with task_manager.acquire(context, node_id, shared=False,
purpose='changing node power state') as task:
task.driver.power.validate(task)
# Set the target_power_state and clear any last_error, since we're
# starting a new operation. This will expose to other processes
@ -462,7 +464,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
# require an exclusive lock, we need to do so to guarantee that the
# state doesn't unexpectedly change between doing a vendor.validate
# and vendor.vendor_passthru.
with task_manager.acquire(context, node_id, shared=False) as task:
with task_manager.acquire(context, node_id, shared=False,
purpose='calling vendor passthru') as task:
if not getattr(task.driver, 'vendor', None):
raise exception.UnsupportedDriverExtension(
driver=task.node.driver,
@ -605,7 +608,9 @@ class ConductorManager(periodic_task.PeriodicTasks):
"""
LOG.debug("RPC get_node_vendor_passthru_methods called for node %s"
% node_id)
with task_manager.acquire(context, node_id, shared=True) as task:
lock_purpose = 'listing vendor passthru methods'
with task_manager.acquire(context, node_id, shared=True,
purpose=lock_purpose) as task:
if not getattr(task.driver, 'vendor', None):
raise exception.UnsupportedDriverExtension(
driver=task.node.driver,
@ -670,7 +675,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
# to have locked this node, we'll fail to acquire the lock. The
# client should perhaps retry in this case unless we decide we
# want to add retries or extra synchronization here.
with task_manager.acquire(context, node_id, shared=False) as task:
with task_manager.acquire(context, node_id, shared=False,
purpose='node deployment') as task:
node = task.node
if node.maintenance:
raise exception.NodeInMaintenance(op=_('provisioning'),
@ -747,7 +753,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
"""
LOG.debug("RPC do_node_tear_down called for node %s." % node_id)
with task_manager.acquire(context, node_id, shared=False) as task:
with task_manager.acquire(context, node_id, shared=False,
purpose='node tear down') as task:
try:
# NOTE(ghe): Valid power driver values are needed to perform
# a tear-down. Deploy info is useful to purge the cache but not
@ -826,7 +833,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
"""
LOG.debug("RPC continue_node_clean called for node %s.", node_id)
with task_manager.acquire(context, node_id, shared=False) as task:
with task_manager.acquire(context, node_id, shared=False,
purpose='node cleaning') as task:
if task.node.provision_state != states.CLEANING:
raise exception.InvalidStateRequested(_(
'Cannot continue cleaning on %(node)s, node is in '
@ -991,7 +999,9 @@ class ConductorManager(periodic_task.PeriodicTasks):
:raises: NoFreeConductorWorker
"""
with task_manager.acquire(context, node_id, shared=False) as task:
with task_manager.acquire(context, node_id, shared=False,
purpose='provision action %s'
% action) as task:
if (action == states.VERBS['provide'] and
task.node.provision_state == states.MANAGEABLE):
task.process_event('provide',
@ -1052,7 +1062,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
node.maintenance or node.reservation is not None):
continue
with task_manager.acquire(context, node_uuid) as task:
with task_manager.acquire(context, node_uuid,
purpose='power state sync') as task:
if (task.node.provision_state == states.DEPLOYWAIT or
task.node.maintenance):
continue
@ -1141,7 +1152,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
# Node is mapped here, but not updated by this conductor last
try:
with task_manager.acquire(context, node_uuid) as task:
with task_manager.acquire(context, node_uuid,
purpose='node take over') as task:
# NOTE(deva): now that we have the lock, check again to
# avoid racing with deletes and other state changes
node = task.node
@ -1210,7 +1222,9 @@ class ConductorManager(periodic_task.PeriodicTasks):
LOG.debug('RPC validate_driver_interfaces called for node %s.',
node_id)
ret_dict = {}
with task_manager.acquire(context, node_id, shared=True) as task:
lock_purpose = 'driver interface validation'
with task_manager.acquire(context, node_id, shared=True,
purpose=lock_purpose) as task:
# NOTE(sirushtim): the is_whole_disk_image variable is needed by
# deploy drivers for doing their validate(). Since the deploy
# isn't being done yet and the driver information could change in
@ -1257,7 +1271,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
state to perform deletion.
"""
with task_manager.acquire(context, node_id) as task:
with task_manager.acquire(context, node_id,
purpose='node deletion') as task:
node = task.node
if node.instance_uuid is not None:
raise exception.NodeAssociated(node=node.uuid,
@ -1309,7 +1324,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
"""
LOG.debug('RPC destroy_port called for port %(port)s',
{'port': port.uuid})
with task_manager.acquire(context, port.node_id) as task:
with task_manager.acquire(context, port.node_id,
purpose='port deletion') as task:
port.destroy()
LOG.info(_LI('Successfully deleted port %(port)s. '
'The node associated with the port was '
@ -1334,7 +1350,9 @@ class ConductorManager(periodic_task.PeriodicTasks):
"""
LOG.debug('RPC get_console_information called for node %s' % node_id)
with task_manager.acquire(context, node_id, shared=True) as task:
lock_purpose = 'getting console information'
with task_manager.acquire(context, node_id, shared=True,
purpose=lock_purpose) as task:
node = task.node
if not getattr(task.driver, 'console', None):
@ -1372,7 +1390,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
'enabled %(enabled)s' % {'node': node_id,
'enabled': enabled})
with task_manager.acquire(context, node_id, shared=False) as task:
with task_manager.acquire(context, node_id, shared=False,
purpose='setting console mode') as task:
node = task.node
if not getattr(task.driver, 'console', None):
raise exception.UnsupportedDriverExtension(driver=node.driver,
@ -1433,7 +1452,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
port_uuid = port_obj.uuid
LOG.debug("RPC update_port called for port %s.", port_uuid)
with task_manager.acquire(context, port_obj.node_id) as task:
with task_manager.acquire(context, port_obj.node_id,
purpose='port update') as task:
node = task.node
if 'address' in port_obj.obj_what_changed():
vif = port_obj.extra.get('vif_port_id')
@ -1491,9 +1511,11 @@ class ConductorManager(periodic_task.PeriodicTasks):
'event_type': 'hardware.ipmi.metrics.update'}
try:
lock_purpose = 'getting sensors data'
with task_manager.acquire(context,
node_uuid,
shared=True) as task:
shared=True,
purpose=lock_purpose) as task:
task.driver.management.validate(task)
sensors_data = task.driver.management.get_sensors_data(
task)
@ -1573,7 +1595,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
"""
LOG.debug('RPC set_boot_device called for node %(node)s with '
'device %(device)s', {'node': node_id, 'device': device})
with task_manager.acquire(context, node_id) as task:
with task_manager.acquire(context, node_id,
purpose='setting boot device') as task:
node = task.node
if not getattr(task.driver, 'management', None):
raise exception.UnsupportedDriverExtension(
@ -1608,7 +1631,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
"""
LOG.debug('RPC get_boot_device called for node %s', node_id)
with task_manager.acquire(context, node_id) as task:
with task_manager.acquire(context, node_id,
purpose='getting boot device') as task:
if not getattr(task.driver, 'management', None):
raise exception.UnsupportedDriverExtension(
driver=task.node.driver, extension='management')
@ -1637,7 +1661,9 @@ class ConductorManager(periodic_task.PeriodicTasks):
"""
LOG.debug('RPC get_supported_boot_devices called for node %s', node_id)
with task_manager.acquire(context, node_id, shared=True) as task:
lock_purpose = 'getting supported boot devices'
with task_manager.acquire(context, node_id, shared=True,
purpose=lock_purpose) as task:
if not getattr(task.driver, 'management', None):
raise exception.UnsupportedDriverExtension(
driver=task.node.driver, extension='management')
@ -1669,7 +1695,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
"""
LOG.debug('RPC inspect_hardware called for node %s', node_id)
with task_manager.acquire(context, node_id, shared=False) as task:
with task_manager.acquire(context, node_id, shared=False,
purpose='hardware inspection') as task:
if not getattr(task.driver, 'inspect', None):
raise exception.UnsupportedDriverExtension(
driver=task.node.driver, extension='inspect')
@ -1751,7 +1778,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
workers_count = 0
for node_uuid, driver in node_iter:
try:
with task_manager.acquire(context, node_uuid) as task:
with task_manager.acquire(context, node_uuid,
purpose='node state check') as task:
if (task.node.maintenance or
task.node.provision_state != provision_state):
continue


@ -58,7 +58,7 @@ Example usage:
::
with task_manager.acquire(context, node_id) as task:
with task_manager.acquire(context, node_id, purpose='power on') as task:
task.driver.power.power_on(task.node)
If you need to execute task-requiring code in a background thread, the
@ -68,7 +68,7 @@ an exception occurs). Common use of this is within the Manager like so:
::
with task_manager.acquire(context, node_id) as task:
with task_manager.acquire(context, node_id, purpose='some work') as task:
<do some work>
task.spawn_after(self._spawn_worker,
utils.node_power_action, task, new_state)
@ -86,7 +86,7 @@ raised in the background thread.):
if isinstance(e, Exception):
...
with task_manager.acquire(context, node_id) as task:
with task_manager.acquire(context, node_id, purpose='some work') as task:
<do some work>
task.set_spawn_error_hook(on_error)
task.spawn_after(self._spawn_worker,
@ -95,6 +95,7 @@ raised in the background thread.):
"""
import functools
import time
from oslo_config import cfg
from oslo_log import log as logging
@ -129,7 +130,8 @@ def require_exclusive_lock(f):
return wrapper
def acquire(context, node_id, shared=False, driver_name=None):
def acquire(context, node_id, shared=False, driver_name=None,
purpose='unspecified action'):
"""Shortcut for acquiring a lock on a Node.
:param context: Request context.
@ -137,11 +139,12 @@ def acquire(context, node_id, shared=False, driver_name=None):
:param shared: Boolean indicating whether to take a shared or exclusive
lock. Default: False.
:param driver_name: Name of Driver. Default: None.
:param purpose: human-readable purpose to put to debug logs.
:returns: An instance of :class:`TaskManager`.
"""
return TaskManager(context, node_id, shared=shared,
driver_name=driver_name)
driver_name=driver_name, purpose=purpose)
class TaskManager(object):
@ -152,7 +155,8 @@ class TaskManager(object):
"""
def __init__(self, context, node_id, shared=False, driver_name=None):
def __init__(self, context, node_id, shared=False, driver_name=None,
purpose='unspecified action'):
"""Create a new TaskManager.
Acquire a lock on a node. The lock can be either shared or
@ -166,6 +170,7 @@ class TaskManager(object):
lock. Default: False.
:param driver_name: The name of the driver to load, if different
from the Node's current driver.
:param purpose: human-readable purpose to put to debug logs.
:raises: DriverNotFound
:raises: NodeNotFound
:raises: NodeLocked
@ -180,6 +185,8 @@ class TaskManager(object):
self.shared = shared
self.fsm = states.machine.copy()
self._purpose = purpose
self._debug_timer = time.time()
# NodeLocked exceptions can be annoying. Let's try to alleviate
# some of that pain by retrying our lock attempts. The retrying
@ -189,11 +196,18 @@ class TaskManager(object):
stop_max_attempt_number=CONF.conductor.node_locked_retry_attempts,
wait_fixed=CONF.conductor.node_locked_retry_interval * 1000)
def reserve_node():
LOG.debug("Attempting to reserve node %(node)s",
{'node': node_id})
self.node = objects.Node.reserve(context, CONF.host, node_id)
LOG.debug("Node %(node)s successfully reserved for %(purpose)s "
"(took %(time).2f seconds)",
{'node': node_id, 'purpose': purpose,
'time': time.time() - self._debug_timer})
self._debug_timer = time.time()
try:
LOG.debug("Attempting to get %(type)s lock on node %(node)s (for "
"%(purpose)s)",
{'type': 'shared' if shared else 'exclusive',
'node': node_id, 'purpose': purpose})
if not self.shared:
reserve_node()
else:
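
The reservation above is wrapped in the retrying decorator so NodeLocked failures are
retried with a fixed wait before giving up. A standalone sketch of that retry-with-timing
pattern; the NodeLocked class, attempt counter and hard-coded limits below are stand-ins
for ironic.common.exception.NodeLocked and the conductor config options:

    import time

    import retrying


    class NodeLocked(Exception):
        """Stand-in for ironic.common.exception.NodeLocked."""


    _attempts = {'count': 0}
    _start = time.time()


    @retrying.retry(
        retry_on_exception=lambda e: isinstance(e, NodeLocked),
        stop_max_attempt_number=3,  # cf. CONF.conductor.node_locked_retry_attempts
        wait_fixed=1000)            # cf. node_locked_retry_interval * 1000 (milliseconds)
    def reserve_node():
        _attempts['count'] += 1
        if _attempts['count'] < 3:
            raise NodeLocked()      # first attempts fail; the decorator retries them
        return 'reservation'


    reserve_node()
    print('reserved after %.2f seconds' % (time.time() - _start))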
@ -261,6 +275,12 @@ class TaskManager(object):
# squelch the exception if the node was deleted
# within the task's context.
pass
if self.node:
LOG.debug("Successfully released %(type)s lock for %(purpose)s "
"on node %(node)s (lock was held %(time).2f sec)",
{'type': 'shared' if self.shared else 'exclusive',
'purpose': self._purpose, 'node': self.node.uuid,
'time': time.time() - self._debug_timer})
self.node = None
self.driver = None
self.ports = None
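
Taken together, the changes bracket the lock with two timestamps: how long the
reservation took to acquire and how long the lock was held before release. A toy,
Ironic-independent illustration of that timing structure (the actual reservation is
elided; only the logging mirrors the code above):

    import logging
    import time
    from contextlib import contextmanager

    LOG = logging.getLogger(__name__)


    @contextmanager
    def timed_lock(node_id, shared=False, purpose='unspecified action'):
        lock_type = 'shared' if shared else 'exclusive'
        timer = time.time()
        LOG.debug("Attempting to get %(type)s lock on node %(node)s (for "
                  "%(purpose)s)",
                  {'type': lock_type, 'node': node_id, 'purpose': purpose})
        # ... the real code reserves the node here (see reserve_node above) ...
        LOG.debug("Node %(node)s successfully reserved for %(purpose)s "
                  "(took %(time).2f seconds)",
                  {'node': node_id, 'purpose': purpose,
                   'time': time.time() - timer})
        timer = time.time()  # restart: now measure how long the lock is held
        try:
            yield
        finally:
            LOG.debug("Successfully released %(type)s lock for %(purpose)s "
                      "on node %(node)s (lock was held %(time).2f sec)",
                      {'type': lock_type, 'purpose': purpose, 'node': node_id,
                       'time': time.time() - timer})

Used as "with timed_lock('node-1', purpose='power on'): ...", this emits the same three
debug lines per lock cycle that TaskManager now produces.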


@ -132,7 +132,9 @@ class Inspector(base.InspectInterface):
try:
# TODO(dtantsur): we need an exclusive lock only once
# inspection is finished.
with task_manager.acquire(context, node_uuid) as task:
lock_purpose = 'checking hardware inspection status'
with task_manager.acquire(context, node_uuid,
purpose=lock_purpose) as task:
_check_status(task)
except (exception.NodeLocked, exception.NodeNotFound):
continue
@ -157,7 +159,9 @@ def _start_inspection(node_uuid, context):
{'node': node_uuid, 'err': exc})
# NOTE(dtantsur): if acquire fails our last option is to rely on
# timeout
with task_manager.acquire(context, node_uuid) as task:
lock_purpose = 'recording hardware inspection error'
with task_manager.acquire(context, node_uuid,
purpose=lock_purpose) as task:
task.node.last_error = _('Failed to start inspection: %s') % exc
task.process_event('fail')
else:


@ -2860,7 +2860,8 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
get_node_mock.assert_called_once_with(self.context, self.node.id)
acquire_mock.assert_called_once_with(self.context, self.node.uuid)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
self.assertFalse(sync_mock.called)
def test_node_in_deploywait_on_acquire(self, get_nodeinfo_mock,
@ -2882,7 +2883,8 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
get_node_mock.assert_called_once_with(self.context, self.node.id)
acquire_mock.assert_called_once_with(self.context, self.node.uuid)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
self.assertFalse(sync_mock.called)
def test_node_in_maintenance_on_acquire(self, get_nodeinfo_mock,
@ -2902,7 +2904,8 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
get_node_mock.assert_called_once_with(self.context, self.node.id)
acquire_mock.assert_called_once_with(self.context, self.node.uuid)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
self.assertFalse(sync_mock.called)
def test_node_disappears_on_acquire(self, get_nodeinfo_mock,
@ -2921,7 +2924,8 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
get_node_mock.assert_called_once_with(self.context, self.node.id)
acquire_mock.assert_called_once_with(self.context, self.node.uuid)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
self.assertFalse(sync_mock.called)
def test_single_node(self, get_nodeinfo_mock, get_node_mock,
@ -2939,7 +2943,8 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
get_node_mock.assert_called_once_with(self.context, self.node.id)
acquire_mock.assert_called_once_with(self.context, self.node.uuid)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
sync_mock.assert_called_once_with(task, mock.ANY)
def test__sync_power_state_multiple_nodes(self, get_nodeinfo_mock,
@ -3013,7 +3018,8 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
for x in nodes[:1] + nodes[2:]]
self.assertEqual(get_node_calls,
get_node_mock.call_args_list)
acquire_calls = [mock.call(self.context, x.uuid)
acquire_calls = [mock.call(self.context, x.uuid,
purpose=mock.ANY)
for x in nodes[:1] + nodes[6:]]
self.assertEqual(acquire_calls, acquire_mock.call_args_list)
sync_calls = [mock.call(tasks[0], mock.ANY),
@ -3079,7 +3085,8 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
acquire_mock.assert_called_once_with(self.context, self.node.uuid)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
self.task.process_event.assert_called_with(
'fail',
callback=self.service._spawn_worker,
@ -3099,7 +3106,8 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
mapped_mock.assert_called_once_with(
self.node.uuid, self.node.driver)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid)
self.node.uuid,
purpose=mock.ANY)
self.assertFalse(self.task.spawn_after.called)
def test_acquire_node_locked(self, get_nodeinfo_mock, mapped_mock,
@ -3116,7 +3124,8 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
mapped_mock.assert_called_once_with(
self.node.uuid, self.node.driver)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid)
self.node.uuid,
purpose=mock.ANY)
self.assertFalse(self.task.spawn_after.called)
def test_no_deploywait_after_lock(self, get_nodeinfo_mock, mapped_mock,
@ -3134,7 +3143,8 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
mapped_mock.assert_called_once_with(
self.node.uuid, self.node.driver)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid)
self.node.uuid,
purpose=mock.ANY)
self.assertFalse(task.spawn_after.called)
def test_maintenance_after_lock(self, get_nodeinfo_mock, mapped_mock,
@ -3156,8 +3166,10 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
self.assertEqual([mock.call(self.node.uuid, task.node.driver),
mock.call(self.node2.uuid, self.node2.driver)],
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid),
mock.call(self.context, self.node2.uuid)],
self.assertEqual([mock.call(self.context, self.node.uuid,
purpose=mock.ANY),
mock.call(self.context, self.node2.uuid,
purpose=mock.ANY)],
acquire_mock.call_args_list)
# First node skipped
self.assertFalse(task.spawn_after.called)
@ -3185,7 +3197,8 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
mapped_mock.assert_called_once_with(
self.node.uuid, self.node.driver)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid)
self.node.uuid,
purpose=mock.ANY)
self.task.process_event.assert_called_with(
'fail',
callback=self.service._spawn_worker,
@ -3210,7 +3223,8 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
# have exited the loop early due to unknown exception
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid)
self.node.uuid,
purpose=mock.ANY)
self.task.process_event.assert_called_with(
'fail',
callback=self.service._spawn_worker,
@ -3234,7 +3248,8 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
# Should only have ran 2.
self.assertEqual([mock.call(self.node.uuid, self.node.driver)] * 2,
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid)] * 2,
self.assertEqual([mock.call(self.context, self.node.uuid,
purpose=mock.ANY)] * 2,
acquire_mock.call_args_list)
process_event_call = mock.call(
'fail',
@ -3440,7 +3455,8 @@ class ManagerSyncLocalStateTestCase(_CommonMixIn, tests_db_base.DbTestCase):
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
acquire_mock.assert_called_once_with(self.context, self.node.uuid)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
# assert spawn_after has been called
self.task.spawn_after.assert_called_once_with(
self.service._spawn_worker,
@ -3472,7 +3488,8 @@ class ManagerSyncLocalStateTestCase(_CommonMixIn, tests_db_base.DbTestCase):
# assert acquire() gets called 2 times only instead of 3. When
# NoFreeConductorWorker is raised the loop should be broken
expected = [mock.call(self.context, self.node.uuid)] * 2
expected = [mock.call(self.context, self.node.uuid,
purpose=mock.ANY)] * 2
self.assertEqual(expected, acquire_mock.call_args_list)
# assert spawn_after has been called twice
@ -3499,7 +3516,8 @@ class ManagerSyncLocalStateTestCase(_CommonMixIn, tests_db_base.DbTestCase):
self.assertEqual(expected, mapped_mock.call_args_list)
# assert acquire() gets called 3 times
expected = [mock.call(self.context, self.node.uuid)] * 3
expected = [mock.call(self.context, self.node.uuid,
purpose=mock.ANY)] * 3
self.assertEqual(expected, acquire_mock.call_args_list)
# assert spawn_after has been called only 2 times
@ -3528,7 +3546,8 @@ class ManagerSyncLocalStateTestCase(_CommonMixIn, tests_db_base.DbTestCase):
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
# assert acquire() gets called only once because of the worker limit
acquire_mock.assert_called_once_with(self.context, self.node.uuid)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
# assert spawn_after has been called
self.task.spawn_after.assert_called_once_with(
@ -3769,7 +3788,8 @@ class ManagerCheckInspectTimeoutsTestCase(_CommonMixIn,
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
acquire_mock.assert_called_once_with(self.context, self.node.uuid)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
self.task.process_event.assert_called_with('fail')
def test__check_inspect_timeouts_acquire_node_disappears(self,
@ -3787,7 +3807,8 @@ class ManagerCheckInspectTimeoutsTestCase(_CommonMixIn,
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid)
self.node.uuid,
purpose=mock.ANY)
self.assertFalse(self.task.process_event.called)
def test__check_inspect_timeouts_acquire_node_locked(self,
@ -3806,7 +3827,8 @@ class ManagerCheckInspectTimeoutsTestCase(_CommonMixIn,
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid)
self.node.uuid,
purpose=mock.ANY)
self.assertFalse(self.task.process_event.called)
def test__check_inspect_timeouts_no_acquire_after_lock(self,
@ -3826,7 +3848,8 @@ class ManagerCheckInspectTimeoutsTestCase(_CommonMixIn,
mapped_mock.assert_called_once_with(
self.node.uuid, self.node.driver)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid)
self.node.uuid,
purpose=mock.ANY)
self.assertFalse(task.process_event.called)
def test__check_inspect_timeouts_to_maintenance_after_lock(
@ -3848,8 +3871,10 @@ class ManagerCheckInspectTimeoutsTestCase(_CommonMixIn,
self.assertEqual([mock.call(self.node.uuid, task.node.driver),
mock.call(self.node2.uuid, self.node2.driver)],
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid),
mock.call(self.context, self.node2.uuid)],
self.assertEqual([mock.call(self.context, self.node.uuid,
purpose=mock.ANY),
mock.call(self.context, self.node2.uuid,
purpose=mock.ANY)],
acquire_mock.call_args_list)
# First node skipped
self.assertFalse(task.process_event.called)
@ -3873,7 +3898,8 @@ class ManagerCheckInspectTimeoutsTestCase(_CommonMixIn,
mapped_mock.assert_called_once_with(
self.node.uuid, self.node.driver)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid)
self.node.uuid,
purpose=mock.ANY)
self.task.process_event.assert_called_with('fail')
def test__check_inspect_timeouts_exit_with_other_exception(
@ -3895,7 +3921,8 @@ class ManagerCheckInspectTimeoutsTestCase(_CommonMixIn,
mapped_mock.assert_called_once_with(
self.node.uuid, self.node.driver)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid)
self.node.uuid,
purpose=mock.ANY)
self.task.process_event.assert_called_with('fail')
def test__check_inspect_timeouts_worker_limit(self, get_nodeinfo_mock,
@ -3916,7 +3943,8 @@ class ManagerCheckInspectTimeoutsTestCase(_CommonMixIn,
# Should only have ran 2.
self.assertEqual([mock.call(self.node.uuid, self.node.driver)] * 2,
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid)] * 2,
self.assertEqual([mock.call(self.context, self.node.uuid,
purpose=mock.ANY)] * 2,
acquire_mock.call_args_list)
process_event_call = mock.call('fail')
self.assertEqual([process_event_call] * 2,


@ -450,6 +450,8 @@ class TaskManagerStateModelTestCases(tests_base.TestCase):
t.driver = mock.Mock()
t.ports = mock.Mock()
t.shared = True
t._purpose = 'purpose'
t._debug_timer = 3.14
t.release_resources(t)
self.assertIsNone(t.node)
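
The test updates above mostly swap exact-argument assertions for purpose=mock.ANY, so
the checks keep verifying context, node and shared flag without pinning the exact
purpose wording. A minimal sketch of that assertion style, with a plain Mock standing in
for task_manager.acquire:

    from unittest import mock

    acquire = mock.Mock()

    # Code under test calls acquire the way the conductor now does:
    acquire('ctx', 'node-uuid', shared=False, purpose='node update')

    # mock.ANY compares equal to anything, so the assertion still checks the
    # other arguments but does not break when a purpose message is reworded.
    acquire.assert_called_once_with('ctx', 'node-uuid', shared=False,
                                    purpose=mock.ANY)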