Merge "Split BaseConductorManager from ConductorManager"
This commit is contained in:
commit
e1417ff476
318
ironic/conductor/base_manager.py
Normal file
318
ironic/conductor/base_manager.py
Normal file
ironic/conductor/base_manager.py (new file, 318 lines)

@@ -0,0 +1,318 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Base conductor manager functionality."""

import inspect
import threading

from eventlet import greenpool
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_context import context as ironic_context
from oslo_db import exception as db_exception
from oslo_log import log
from oslo_service import periodic_task
from oslo_utils import excutils

from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import hash_ring as hash
from ironic.common.i18n import _
from ironic.common.i18n import _LC
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import rpc
from ironic.common import states
from ironic.conductor import task_manager
from ironic.db import api as dbapi


conductor_opts = [
    cfg.IntOpt('workers_pool_size',
               default=100,
               help=_('The size of the workers greenthread pool.')),
    cfg.IntOpt('heartbeat_interval',
               default=10,
               help=_('Seconds between conductor heart beats.')),
]


CONF = cfg.CONF
CONF.register_opts(conductor_opts, 'conductor')
LOG = log.getLogger(__name__)
WORKER_SPAWN_lOCK = "conductor_worker_spawn"


class BaseConductorManager(periodic_task.PeriodicTasks):

    def __init__(self, host, topic):
        super(BaseConductorManager, self).__init__(CONF)
        if not host:
            host = CONF.host
        self.host = host
        self.topic = topic
        self.notifier = rpc.get_notifier()

    def _get_driver(self, driver_name):
        """Get the driver.

        :param driver_name: name of the driver.
        :returns: the driver; an instance of a class which implements
                  :class:`ironic.drivers.base.BaseDriver`.
        :raises: DriverNotFound if the driver is not loaded.

        """
        try:
            return self._driver_factory[driver_name].obj
        except KeyError:
            raise exception.DriverNotFound(driver_name=driver_name)

    def init_host(self):
        self.dbapi = dbapi.get_instance()

        self._keepalive_evt = threading.Event()
        """Event for the keepalive thread."""

        self._worker_pool = greenpool.GreenPool(
            size=CONF.conductor.workers_pool_size)
        """GreenPool of background workers for performing tasks async."""

        self.ring_manager = hash.HashRingManager()
        """Consistent hash ring which maps drivers to conductors."""

        # NOTE(deva): instantiating DriverFactory may raise DriverLoadError
        #             or DriverNotFound
        self._driver_factory = driver_factory.DriverFactory()
        """Driver factory loads all enabled drivers."""

        self.drivers = self._driver_factory.names
        """List of driver names which this conductor supports."""

        if not self.drivers:
            msg = _LE("Conductor %s cannot be started because no drivers "
                      "were loaded. This could be because no drivers were "
                      "specified in 'enabled_drivers' config option.")
            LOG.error(msg, self.host)
            raise exception.NoDriversLoaded(conductor=self.host)

        # Collect driver-specific periodic tasks
        for driver_obj in driver_factory.drivers().values():
            self._collect_periodic_tasks(driver_obj)
            for iface_name in (driver_obj.core_interfaces +
                               driver_obj.standard_interfaces +
                               ['vendor']):
                iface = getattr(driver_obj, iface_name, None)
                if iface:
                    self._collect_periodic_tasks(iface)

        # clear all locks held by this conductor before registering
        self.dbapi.clear_node_reservations_for_conductor(self.host)
        try:
            # Register this conductor with the cluster
            cdr = self.dbapi.register_conductor({'hostname': self.host,
                                                 'drivers': self.drivers})
        except exception.ConductorAlreadyRegistered:
            # This conductor was already registered and did not shut down
            # properly, so log a warning and update the record.
            LOG.warning(
                _LW("A conductor with hostname %(hostname)s "
                    "was previously registered. Updating registration"),
                {'hostname': self.host})
            cdr = self.dbapi.register_conductor({'hostname': self.host,
                                                 'drivers': self.drivers},
                                                update_existing=True)
        self.conductor = cdr

        # NOTE(lucasagomes): If the conductor server dies abruptly
        # mid deployment (OMM Killer, power outage, etc...) we
        # can not resume the deployment even if the conductor
        # comes back online. Cleaning the reservation of the nodes
        # (dbapi.clear_node_reservations_for_conductor) is not enough to
        # unstick it, so let's gracefully fail the deployment so the node
        # can go through the steps (deleting & cleaning) to make itself
        # available again.
        filters = {'reserved': False,
                   'provision_state': states.DEPLOYING}
        last_error = (_("The deployment can't be resumed by conductor "
                        "%s. Moving to fail state.") % self.host)
        self._fail_if_in_state(ironic_context.get_admin_context(), filters,
                               states.DEPLOYING, 'provision_updated_at',
                               last_error=last_error)

        # Spawn a dedicated greenthread for the keepalive
        try:
            self._spawn_worker(self._conductor_service_record_keepalive)
            LOG.info(_LI('Successfully started conductor with hostname '
                         '%(hostname)s.'),
                     {'hostname': self.host})
        except exception.NoFreeConductorWorker:
            with excutils.save_and_reraise_exception():
                LOG.critical(_LC('Failed to start keepalive'))
                self.del_host()

    def _collect_periodic_tasks(self, obj):
        for n, method in inspect.getmembers(obj, inspect.ismethod):
            if getattr(method, '_periodic_enabled', False):
                self.add_periodic_task(method)

    def del_host(self, deregister=True):
        # Conductor deregistration fails if called on non-initialized
        # conductor (e.g. when rpc server is unreachable).
        if not hasattr(self, 'conductor'):
            return
        self._keepalive_evt.set()
        if deregister:
            try:
                # Inform the cluster that this conductor is shutting down.
                # Note that rebalancing will not occur immediately, but when
                # the periodic sync takes place.
                self.dbapi.unregister_conductor(self.host)
                LOG.info(_LI('Successfully stopped conductor with hostname '
                             '%(hostname)s.'),
                         {'hostname': self.host})
            except exception.ConductorNotFound:
                pass
        else:
            LOG.info(_LI('Not deregistering conductor with hostname '
                         '%(hostname)s.'),
                     {'hostname': self.host})
        # Waiting here to give workers the chance to finish. This has the
        # benefit of releasing locks workers placed on nodes, as well as
        # having work complete normally.
        self._worker_pool.waitall()

    def periodic_tasks(self, context, raise_on_error=False):
        """Periodic tasks are run at pre-specified interval."""
        return self.run_periodic_tasks(context, raise_on_error=raise_on_error)

    def iter_nodes(self, fields=None, **kwargs):
        """Iterate over nodes mapped to this conductor.

        Requests node set from and filters out nodes that are not
        mapped to this conductor.

        Yields tuples (node_uuid, driver, ...) where ... is derived from
        fields argument, e.g.: fields=None means yielding ('uuid', 'driver'),
        fields=['foo'] means yielding ('uuid', 'driver', 'foo').

        :param fields: list of fields to fetch in addition to uuid and driver
        :param kwargs: additional arguments to pass to dbapi when looking for
                       nodes
        :return: generator yielding tuples of requested fields
        """
        columns = ['uuid', 'driver'] + list(fields or ())
        node_list = self.dbapi.get_nodeinfo_list(columns=columns, **kwargs)
        for result in node_list:
            if self._mapped_to_this_conductor(*result[:2]):
                yield result

    @lockutils.synchronized(WORKER_SPAWN_lOCK, 'ironic-')
    def _spawn_worker(self, func, *args, **kwargs):

        """Create a greenthread to run func(*args, **kwargs).

        Spawns a greenthread if there are free slots in pool, otherwise raises
        exception. Execution control returns immediately to the caller.

        :returns: GreenThread object.
        :raises: NoFreeConductorWorker if worker pool is currently full.

        """
        if self._worker_pool.free():
            return self._worker_pool.spawn(func, *args, **kwargs)
        else:
            raise exception.NoFreeConductorWorker()

    def _conductor_service_record_keepalive(self):
        while not self._keepalive_evt.is_set():
            try:
                self.dbapi.touch_conductor(self.host)
            except db_exception.DBConnectionError:
                LOG.warning(_LW('Conductor could not connect to database '
                                'while heartbeating.'))
            self._keepalive_evt.wait(CONF.conductor.heartbeat_interval)

    def _mapped_to_this_conductor(self, node_uuid, driver):
        """Check that node is mapped to this conductor.

        Note that because mappings are eventually consistent, it is possible
        for two conductors to simultaneously believe that a node is mapped to
        them. Any operation that depends on exclusive control of a node should
        take out a lock.
        """
        try:
            ring = self.ring_manager[driver]
        except exception.DriverNotFound:
            return False

        return self.host in ring.get_hosts(node_uuid)

    def _fail_if_in_state(self, context, filters, provision_state,
                          sort_key, callback_method=None,
                          err_handler=None, last_error=None):
        """Fail nodes that are in specified state.

        Retrieves nodes that satisfy the criteria in 'filters'.
        If any of these nodes is in 'provision_state', it has failed
        in whatever provisioning activity it was currently doing.
        That failure is processed here.

        :param: context: request context
        :param: filters: criteria (as a dictionary) to get the desired
                         list of nodes that satisfy the filter constraints.
                         For example, if filters['provisioned_before'] = 60,
                         this would process nodes whose provision_updated_at
                         field value was 60 or more seconds before 'now'.
        :param: provision_state: provision_state that the node is in,
                                 for the provisioning activity to have failed.
        :param: sort_key: the nodes are sorted based on this key.
        :param: callback_method: the callback method to be invoked in a
                                 spawned thread, for a failed node. This
                                 method must take a :class:`TaskManager` as
                                 the first (and only required) parameter.
        :param: err_handler: for a failed node, the error handler to invoke
                             if an error occurs trying to spawn an thread
                             to do the callback_method.
        :param: last_error: the error message to be updated in node.last_error

        """
        node_iter = self.iter_nodes(filters=filters,
                                    sort_key=sort_key,
                                    sort_dir='asc')

        workers_count = 0
        for node_uuid, driver in node_iter:
            try:
                with task_manager.acquire(context, node_uuid,
                                          purpose='node state check') as task:
                    if (task.node.maintenance or
                            task.node.provision_state != provision_state):
                        continue

                    # timeout has been reached - process the event 'fail'
                    if callback_method:
                        task.process_event('fail',
                                           callback=self._spawn_worker,
                                           call_args=(callback_method, task),
                                           err_handler=err_handler)
                    else:
                        task.node.last_error = last_error
                        task.process_event('fail')
            except exception.NoFreeConductorWorker:
                break
            except (exception.NodeLocked, exception.NodeNotFound):
                continue
            workers_count += 1
            if workers_count >= CONF.conductor.periodic_max_workers:
                break
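The new class is designed to be subclassed rather than used directly: __init__()
records the host and topic, init_host() loads drivers, registers the conductor
and spawns the keepalive worker, and del_host() unregisters it and drains the
worker pool. A minimal sketch of that lifecycle, assuming a hypothetical
ExampleManager subclass and standalone wiring that are not part of this commit:

    # Hypothetical subclass; illustrates the intended lifecycle only.
    from ironic.conductor import base_manager

    class ExampleManager(base_manager.BaseConductorManager):
        """A do-nothing manager that reuses the base lifecycle unchanged."""

    service = ExampleManager('cond-1', 'example-topic')
    service.init_host()     # loads drivers, registers conductor, starts keepalive
    try:
        pass                # an RPC server would normally run here
    finally:
        service.del_host()  # unregisters and waits for busy workers to finish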
ironic/conductor/manager.py

@@ -43,16 +43,10 @@ a change, etc.
 import collections
 import datetime
-import inspect
 import tempfile
-import threading
 
 import eventlet
-from eventlet import greenpool
-from oslo_concurrency import lockutils
 from oslo_config import cfg
-from oslo_context import context as ironic_context
-from oslo_db import exception as db_exception
 from oslo_log import log
 import oslo_messaging as messaging
 from oslo_service import periodic_task
@@ -60,27 +54,22 @@ from oslo_utils import excutils
 from oslo_utils import uuidutils
 
 from ironic.common import dhcp_factory
-from ironic.common import driver_factory
 from ironic.common import exception
 from ironic.common.glance_service import service_utils as glance_utils
-from ironic.common import hash_ring as hash
 from ironic.common.i18n import _
-from ironic.common.i18n import _LC
 from ironic.common.i18n import _LE
 from ironic.common.i18n import _LI
 from ironic.common.i18n import _LW
 from ironic.common import images
-from ironic.common import rpc
 from ironic.common import states
 from ironic.common import swift
+from ironic.conductor import base_manager
 from ironic.conductor import task_manager
 from ironic.conductor import utils
-from ironic.db import api as dbapi
 from ironic import objects
 from ironic.objects import base as objects_base
 
 MANAGER_TOPIC = 'ironic.conductor_manager'
-WORKER_SPAWN_lOCK = "conductor_worker_spawn"
 
 LOG = log.getLogger(__name__)
 
@@ -89,9 +78,6 @@ conductor_opts = [
                help=_('URL of Ironic API service. If not set ironic can '
                       'get the current value from the keystone service '
                       'catalog.')),
-    cfg.IntOpt('heartbeat_interval',
-               default=10,
-               help=_('Seconds between conductor heart beats.')),
     cfg.IntOpt('heartbeat_timeout',
                default=60,
                help=_('Maximum time (in seconds) since the last check-in '
@@ -126,9 +112,6 @@ conductor_opts = [
                help=_('Maximum number of worker threads that can be started '
                       'simultaneously by a periodic task. Should be less '
                       'than RPC thread pool size.')),
-    cfg.IntOpt('workers_pool_size',
-               default=100,
-               help=_('The size of the workers greenthread pool.')),
     cfg.IntOpt('node_locked_retry_attempts',
                default=3,
                help=_('Number of attempts to grab a node lock.')),
@@ -197,7 +180,7 @@ CONF.register_opts(conductor_opts, 'conductor')
 SYNC_EXCLUDED_STATES = (states.DEPLOYWAIT, states.CLEANWAIT, states.ENROLL)
 
 
-class ConductorManager(periodic_task.PeriodicTasks):
+class ConductorManager(base_manager.BaseConductorManager):
     """Ironic Conductor manager main class."""
 
     # NOTE(rloo): This must be in sync with rpcapi.ConductorAPI's.
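The hunk above is the heart of the change: ConductorManager now inherits the
lifecycle code from BaseConductorManager instead of carrying its own copy. A
quick interpreter check of the resulting class hierarchy, assuming both modules
import cleanly in a development checkout (illustrative, not part of the commit):

    # Illustrative check: the moved methods now resolve on the base class.
    from ironic.conductor import base_manager
    from ironic.conductor import manager

    assert issubclass(manager.ConductorManager,
                      base_manager.BaseConductorManager)
    for name in ('init_host', 'del_host', 'iter_nodes', '_spawn_worker'):
        # Each is defined once on the base class, no longer in manager.py.
        assert name in vars(base_manager.BaseConductorManager), name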
@@ -206,171 +189,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
     target = messaging.Target(version=RPC_API_VERSION)
 
     def __init__(self, host, topic):
-        super(ConductorManager, self).__init__(CONF)
-        if not host:
-            host = CONF.host
-        self.host = host
-        self.topic = topic
+        super(ConductorManager, self).__init__(host, topic)
         self.power_state_sync_count = collections.defaultdict(int)
-        self.notifier = rpc.get_notifier()
-
-    def _get_driver(self, driver_name):
-        """Get the driver.
-
-        :param driver_name: name of the driver.
-        :returns: the driver; an instance of a class which implements
-                  :class:`ironic.drivers.base.BaseDriver`.
-        :raises: DriverNotFound if the driver is not loaded.
-
-        """
-        try:
-            return self._driver_factory[driver_name].obj
-        except KeyError:
-            raise exception.DriverNotFound(driver_name=driver_name)
-
-    def init_host(self):
-        self.dbapi = dbapi.get_instance()
-
-        self._keepalive_evt = threading.Event()
-        """Event for the keepalive thread."""
-
-        self._worker_pool = greenpool.GreenPool(
-            size=CONF.conductor.workers_pool_size)
-        """GreenPool of background workers for performing tasks async."""
-
-        self.ring_manager = hash.HashRingManager()
-        """Consistent hash ring which maps drivers to conductors."""
-
-        # NOTE(deva): instantiating DriverFactory may raise DriverLoadError
-        #             or DriverNotFound
-        self._driver_factory = driver_factory.DriverFactory()
-        """Driver factory loads all enabled drivers."""
-
-        self.drivers = self._driver_factory.names
-        """List of driver names which this conductor supports."""
-
-        if not self.drivers:
-            msg = _LE("Conductor %s cannot be started because no drivers "
-                      "were loaded. This could be because no drivers were "
-                      "specified in 'enabled_drivers' config option.")
-            LOG.error(msg, self.host)
-            raise exception.NoDriversLoaded(conductor=self.host)
-
-        # Collect driver-specific periodic tasks
-        for driver_obj in driver_factory.drivers().values():
-            self._collect_periodic_tasks(driver_obj)
-            for iface_name in (driver_obj.core_interfaces +
-                               driver_obj.standard_interfaces +
-                               ['vendor']):
-                iface = getattr(driver_obj, iface_name, None)
-                if iface:
-                    self._collect_periodic_tasks(iface)
-
-        # clear all locks held by this conductor before registering
-        self.dbapi.clear_node_reservations_for_conductor(self.host)
-        try:
-            # Register this conductor with the cluster
-            cdr = self.dbapi.register_conductor({'hostname': self.host,
-                                                 'drivers': self.drivers})
-        except exception.ConductorAlreadyRegistered:
-            # This conductor was already registered and did not shut down
-            # properly, so log a warning and update the record.
-            LOG.warning(
-                _LW("A conductor with hostname %(hostname)s "
-                    "was previously registered. Updating registration"),
-                {'hostname': self.host})
-            cdr = self.dbapi.register_conductor({'hostname': self.host,
-                                                 'drivers': self.drivers},
-                                                update_existing=True)
-        self.conductor = cdr
-
-        # NOTE(lucasagomes): If the conductor server dies abruptly
-        # mid deployment (OMM Killer, power outage, etc...) we
-        # can not resume the deployment even if the conductor
-        # comes back online. Cleaning the reservation of the nodes
-        # (dbapi.clear_node_reservations_for_conductor) is not enough to
-        # unstick it, so let's gracefully fail the deployment so the node
-        # can go through the steps (deleting & cleaning) to make itself
-        # available again.
-        filters = {'reserved': False,
-                   'provision_state': states.DEPLOYING}
-        last_error = (_("The deployment can't be resumed by conductor "
-                        "%s. Moving to fail state.") % self.host)
-        self._fail_if_in_state(ironic_context.get_admin_context(), filters,
-                               states.DEPLOYING, 'provision_updated_at',
-                               last_error=last_error)
-
-        # Spawn a dedicated greenthread for the keepalive
-        try:
-            self._spawn_worker(self._conductor_service_record_keepalive)
-            LOG.info(_LI('Successfully started conductor with hostname '
-                         '%(hostname)s.'),
-                     {'hostname': self.host})
-        except exception.NoFreeConductorWorker:
-            with excutils.save_and_reraise_exception():
-                LOG.critical(_LC('Failed to start keepalive'))
-                self.del_host()
-
-    def _collect_periodic_tasks(self, obj):
-        for n, method in inspect.getmembers(obj, inspect.ismethod):
-            if getattr(method, '_periodic_enabled', False):
-                self.add_periodic_task(method)
-
-    def del_host(self, deregister=True):
-        # Conductor deregistration fails if called on non-initialized
-        # conductor (e.g. when rpc server is unreachable).
-        if not hasattr(self, 'conductor'):
-            return
-        self._keepalive_evt.set()
-        if deregister:
-            try:
-                # Inform the cluster that this conductor is shutting down.
-                # Note that rebalancing will not occur immediately, but when
-                # the periodic sync takes place.
-                self.dbapi.unregister_conductor(self.host)
-                LOG.info(_LI('Successfully stopped conductor with hostname '
-                             '%(hostname)s.'),
-                         {'hostname': self.host})
-            except exception.ConductorNotFound:
-                pass
-        else:
-            LOG.info(_LI('Not deregistering conductor with hostname '
-                         '%(hostname)s.'),
-                     {'hostname': self.host})
-        # Waiting here to give workers the chance to finish. This has the
-        # benefit of releasing locks workers placed on nodes, as well as
-        # having work complete normally.
-        self._worker_pool.waitall()
-
-    def periodic_tasks(self, context, raise_on_error=False):
-        """Periodic tasks are run at pre-specified interval."""
-        return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
-
-    @lockutils.synchronized(WORKER_SPAWN_lOCK, 'ironic-')
-    def _spawn_worker(self, func, *args, **kwargs):
-
-        """Create a greenthread to run func(*args, **kwargs).
-
-        Spawns a greenthread if there are free slots in pool, otherwise raises
-        exception. Execution control returns immediately to the caller.
-
-        :returns: GreenThread object.
-        :raises: NoFreeConductorWorker if worker pool is currently full.
-
-        """
-        if self._worker_pool.free():
-            return self._worker_pool.spawn(func, *args, **kwargs)
-        else:
-            raise exception.NoFreeConductorWorker()
-
-    def _conductor_service_record_keepalive(self):
-        while not self._keepalive_evt.is_set():
-            try:
-                self.dbapi.touch_conductor(self.host)
-            except db_exception.DBConnectionError:
-                LOG.warning(_LW('Conductor could not connect to database '
-                                'while heartbeating.'))
-            self._keepalive_evt.wait(CONF.conductor.heartbeat_interval)
-
     @messaging.expected_exceptions(exception.InvalidParameterValue,
                                    exception.MissingParameterValue,
@@ -1452,42 +1272,6 @@ class ConductorManager(periodic_task.PeriodicTasks):
             if workers_count == CONF.conductor.periodic_max_workers:
                 break
 
-    def _mapped_to_this_conductor(self, node_uuid, driver):
-        """Check that node is mapped to this conductor.
-
-        Note that because mappings are eventually consistent, it is possible
-        for two conductors to simultaneously believe that a node is mapped to
-        them. Any operation that depends on exclusive control of a node should
-        take out a lock.
-        """
-        try:
-            ring = self.ring_manager[driver]
-        except exception.DriverNotFound:
-            return False
-
-        return self.host in ring.get_hosts(node_uuid)
-
-    def iter_nodes(self, fields=None, **kwargs):
-        """Iterate over nodes mapped to this conductor.
-
-        Requests node set from and filters out nodes that are not
-        mapped to this conductor.
-
-        Yields tuples (node_uuid, driver, ...) where ... is derived from
-        fields argument, e.g.: fields=None means yielding ('uuid', 'driver'),
-        fields=['foo'] means yielding ('uuid', 'driver', 'foo').
-
-        :param fields: list of fields to fetch in addition to uuid and driver
-        :param kwargs: additional arguments to pass to dbapi when looking for
-                       nodes
-        :return: generator yielding tuples of requested fields
-        """
-        columns = ['uuid', 'driver'] + list(fields or ())
-        node_list = self.dbapi.get_nodeinfo_list(columns=columns, **kwargs)
-        for result in node_list:
-            if self._mapped_to_this_conductor(*result[:2]):
-                yield result
-
     @messaging.expected_exceptions(exception.NodeLocked)
     def validate_driver_interfaces(self, context, node_id):
         """Validate the `core` and `standardized` interfaces for drivers.
@@ -2024,65 +1808,6 @@ class ConductorManager(periodic_task.PeriodicTasks):
         self._fail_if_in_state(context, filters, states.INSPECTING,
                                sort_key, last_error=last_error)
 
-    def _fail_if_in_state(self, context, filters, provision_state,
-                          sort_key, callback_method=None,
-                          err_handler=None, last_error=None):
-        """Fail nodes that are in specified state.
-
-        Retrieves nodes that satisfy the criteria in 'filters'.
-        If any of these nodes is in 'provision_state', it has failed
-        in whatever provisioning activity it was currently doing.
-        That failure is processed here.
-
-        :param: context: request context
-        :param: filters: criteria (as a dictionary) to get the desired
-                         list of nodes that satisfy the filter constraints.
-                         For example, if filters['provisioned_before'] = 60,
-                         this would process nodes whose provision_updated_at
-                         field value was 60 or more seconds before 'now'.
-        :param: provision_state: provision_state that the node is in,
-                                 for the provisioning activity to have failed.
-        :param: sort_key: the nodes are sorted based on this key.
-        :param: callback_method: the callback method to be invoked in a
-                                 spawned thread, for a failed node. This
-                                 method must take a :class:`TaskManager` as
-                                 the first (and only required) parameter.
-        :param: err_handler: for a failed node, the error handler to invoke
-                             if an error occurs trying to spawn an thread
-                             to do the callback_method.
-        :param: last_error: the error message to be updated in node.last_error
-
-        """
-        node_iter = self.iter_nodes(filters=filters,
-                                    sort_key=sort_key,
-                                    sort_dir='asc')
-
-        workers_count = 0
-        for node_uuid, driver in node_iter:
-            try:
-                with task_manager.acquire(context, node_uuid,
-                                          purpose='node state check') as task:
-                    if (task.node.maintenance or
-                            task.node.provision_state != provision_state):
-                        continue
-
-                    # timeout has been reached - process the event 'fail'
-                    if callback_method:
-                        task.process_event('fail',
-                                           callback=self._spawn_worker,
-                                           call_args=(callback_method, task),
-                                           err_handler=err_handler)
-                    else:
-                        task.node.last_error = last_error
-                        task.process_event('fail')
-            except exception.NoFreeConductorWorker:
-                break
-            except (exception.NodeLocked, exception.NodeNotFound):
-                continue
-            workers_count += 1
-            if workers_count >= CONF.conductor.periodic_max_workers:
-                break
-
     @messaging.expected_exceptions(exception.NodeLocked,
                                    exception.UnsupportedDriverExtension,
                                    exception.InvalidParameterValue,
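The _mapped_to_this_conductor() and iter_nodes() methods deleted above now live
in base_manager.py. Their role is to let each conductor act only on the nodes
that the consistent hash ring assigns to it, so the cluster divides work
without central coordination. A toy stand-in for that mapping, using rendezvous
hashing rather than the real hash_ring.HashRingManager (a simplified
illustration, not the algorithm Ironic ships):

    # Toy stand-in: a deterministic node-to-conductor mapping.
    import hashlib

    def get_hosts(node_uuid, hosts, replicas=1):
        """Rank hosts by hash(node, host) and keep the first replicas."""
        ranked = sorted(
            hosts,
            key=lambda h: hashlib.md5(
                ('%s:%s' % (node_uuid, h)).encode()).hexdigest())
        return ranked[:replicas]

    conductors = ['cond-1', 'cond-2', 'cond-3']
    # Every conductor computes the same ranking, so exactly one of them
    # considers a given node mapped to itself (until membership changes).
    assert get_hosts('node-uuid-1234', conductors)[0] in conductors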
ironic/tests/unit/conductor/mgr_utils.py

@@ -17,10 +17,17 @@
 """Test utils for Ironic Managers."""
 
+import mock
+from oslo_utils import strutils
+from oslo_utils import uuidutils
 import pkg_resources
 from stevedore import dispatch
 
 from ironic.common import driver_factory
+from ironic.common import exception
+from ironic.common import states
+from ironic.conductor import manager
+from ironic import objects
 
 
 def mock_the_extension_manager(driver="fake", namespace="ironic.drivers"):
@@ -55,3 +62,126 @@ def mock_the_extension_manager(driver="fake", namespace="ironic.drivers"):
                                        for e in [mock_ext])
 
     return (mock_ext_mgr, mock_ext)
+
+
+class CommonMixIn(object):
+    @staticmethod
+    def _create_node(**kwargs):
+        attrs = {'id': 1,
+                 'uuid': uuidutils.generate_uuid(),
+                 'power_state': states.POWER_OFF,
+                 'target_power_state': None,
+                 'maintenance': False,
+                 'reservation': None}
+        attrs.update(kwargs)
+        node = mock.Mock(spec_set=objects.Node)
+        for attr in attrs:
+            setattr(node, attr, attrs[attr])
+        return node
+
+    def _create_task(self, node=None, node_attrs=None):
+        if node_attrs is None:
+            node_attrs = {}
+        if node is None:
+            node = self._create_node(**node_attrs)
+        task = mock.Mock(spec_set=['node', 'release_resources',
+                                   'spawn_after', 'process_event'])
+        task.node = node
+        return task
+
+    def _get_nodeinfo_list_response(self, nodes=None):
+        if nodes is None:
+            nodes = [self.node]
+        elif not isinstance(nodes, (list, tuple)):
+            nodes = [nodes]
+        return [tuple(getattr(n, c) for c in self.columns) for n in nodes]
+
+    def _get_acquire_side_effect(self, task_infos):
+        """Helper method to generate a task_manager.acquire() side effect.
+
+        This accepts a list of information about task mocks to return.
+        task_infos can be a single entity or a list.
+
+        Each task_info can be a single entity, the task to return, or it
+        can be a tuple of (task, exception_to_raise_on_exit). 'task' can
+        be an exception to raise on __enter__.
+
+        Examples: _get_acquire_side_effect(self, task): Yield task
+                  _get_acquire_side_effect(self, [task, enter_exception(),
+                                                  (task2, exit_exception())])
+                       Yield task on first call to acquire()
+                       raise enter_exception() in __enter__ on 2nd call to
+                           acquire()
+                       Yield task2 on 3rd call to acquire(), but raise
+                           exit_exception() on __exit__()
+        """
+        tasks = []
+        exit_exceptions = []
+        if not isinstance(task_infos, list):
+            task_infos = [task_infos]
+        for task_info in task_infos:
+            if isinstance(task_info, tuple):
+                task, exc = task_info
+            else:
+                task = task_info
+                exc = None
+            tasks.append(task)
+            exit_exceptions.append(exc)
+
+        class FakeAcquire(object):
+            def __init__(fa_self, context, node_id, *args, **kwargs):
+                # We actually verify these arguments via
+                # acquire_mock.call_args_list(). However, this stores the
+                # node_id so we can assert we're returning the correct node
+                # in __enter__().
+                fa_self.node_id = node_id
+
+            def __enter__(fa_self):
+                task = tasks.pop(0)
+                if isinstance(task, Exception):
+                    raise task
+                # NOTE(comstud): Not ideal to throw this into
+                # a helper, however it's the cleanest way
+                # to verify we're dealing with the correct task/node.
+                if strutils.is_int_like(fa_self.node_id):
+                    self.assertEqual(fa_self.node_id, task.node.id)
+                else:
+                    self.assertEqual(fa_self.node_id, task.node.uuid)
+                return task
+
+            def __exit__(fa_self, exc_typ, exc_val, exc_tb):
+                exc = exit_exceptions.pop(0)
+                if exc_typ is None and exc is not None:
+                    raise exc
+
+        return FakeAcquire
+
+
+class ServiceSetUpMixin(object):
+    def setUp(self):
+        super(ServiceSetUpMixin, self).setUp()
+        self.hostname = 'test-host'
+        self.config(enabled_drivers=['fake'])
+        self.config(node_locked_retry_attempts=1, group='conductor')
+        self.config(node_locked_retry_interval=0, group='conductor')
+        self.service = manager.ConductorManager(self.hostname, 'test-topic')
+        mock_the_extension_manager()
+        self.driver = driver_factory.get_driver("fake")
+
+    def _stop_service(self):
+        try:
+            objects.Conductor.get_by_hostname(self.context, self.hostname)
+        except exception.ConductorNotFound:
+            return
+        self.service.del_host()
+
+    def _start_service(self):
+        self.service.init_host()
+        self.addCleanup(self._stop_service)
+
+
+def mock_record_keepalive(func_or_class):
+    return mock.patch.object(
+        manager.ConductorManager,
+        '_conductor_service_record_keepalive',
+        lambda _: None)(func_or_class)
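These mixins are now shared between test_manager.py and the new
test_base_manager.py. A sketch of the consumption pattern in a test module
(the ExampleConductorTestCase name is illustrative):

    # Illustrative only: how a test module consumes the shared mixins.
    from ironic.tests.unit.conductor import mgr_utils
    from ironic.tests.unit.db import base as tests_db_base

    @mgr_utils.mock_record_keepalive        # stubs out the keepalive thread
    class ExampleConductorTestCase(mgr_utils.ServiceSetUpMixin,
                                   tests_db_base.DbTestCase):
        def test_service_starts(self):
            self._start_service()           # init_host() plus cleanup hook
            self.assertEqual('test-host', self.service.host)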
ironic/tests/unit/conductor/test_base_manager.py (new file, 187 lines)

@@ -0,0 +1,187 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Test class for Ironic BaseConductorManager."""

import eventlet
import mock
from oslo_config import cfg
from oslo_db import exception as db_exception

from ironic.common import driver_factory
from ironic.common import exception
from ironic.conductor import base_manager
from ironic.conductor import manager
from ironic.drivers import base as drivers_base
from ironic import objects
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as tests_db_base
from ironic.tests.unit.objects import utils as obj_utils


CONF = cfg.CONF


@mgr_utils.mock_record_keepalive
class StartStopTestCase(mgr_utils.ServiceSetUpMixin, tests_db_base.DbTestCase):
    def test_start_registers_conductor(self):
        self.assertRaises(exception.ConductorNotFound,
                          objects.Conductor.get_by_hostname,
                          self.context, self.hostname)
        self._start_service()
        res = objects.Conductor.get_by_hostname(self.context, self.hostname)
        self.assertEqual(self.hostname, res['hostname'])

    def test_start_clears_conductor_locks(self):
        node = obj_utils.create_test_node(self.context,
                                          reservation=self.hostname)
        node.save()
        self._start_service()
        node.refresh()
        self.assertIsNone(node.reservation)

    def test_stop_unregisters_conductor(self):
        self._start_service()
        res = objects.Conductor.get_by_hostname(self.context, self.hostname)
        self.assertEqual(self.hostname, res['hostname'])
        self.service.del_host()
        self.assertRaises(exception.ConductorNotFound,
                          objects.Conductor.get_by_hostname,
                          self.context, self.hostname)

    def test_stop_doesnt_unregister_conductor(self):
        self._start_service()
        res = objects.Conductor.get_by_hostname(self.context, self.hostname)
        self.assertEqual(self.hostname, res['hostname'])
        self.service.del_host(deregister=False)
        res = objects.Conductor.get_by_hostname(self.context, self.hostname)
        self.assertEqual(self.hostname, res['hostname'])

    @mock.patch.object(manager.ConductorManager, 'init_host')
    def test_stop_uninitialized_conductor(self, mock_init):
        self._start_service()
        self.service.del_host()

    @mock.patch.object(driver_factory.DriverFactory, '__getitem__',
                       lambda *args: mock.MagicMock())
    def test_start_registers_driver_names(self):
        init_names = ['fake1', 'fake2']
        restart_names = ['fake3', 'fake4']

        df = driver_factory.DriverFactory()
        with mock.patch.object(df._extension_manager, 'names') as mock_names:
            # verify driver names are registered
            self.config(enabled_drivers=init_names)
            mock_names.return_value = init_names
            self._start_service()
            res = objects.Conductor.get_by_hostname(self.context,
                                                    self.hostname)
            self.assertEqual(init_names, res['drivers'])

            # verify that restart registers new driver names
            self.config(enabled_drivers=restart_names)
            mock_names.return_value = restart_names
            self._start_service()
            res = objects.Conductor.get_by_hostname(self.context,
                                                    self.hostname)
            self.assertEqual(restart_names, res['drivers'])

    @mock.patch.object(driver_factory.DriverFactory, '__getitem__')
    def test_start_registers_driver_specific_tasks(self, get_mock):
        init_names = ['fake1']
        expected_name = 'ironic.tests.unit.conductor.test_base_manager.task'
        expected_name2 = 'ironic.tests.unit.conductor.test_base_manager.iface'
        self.config(enabled_drivers=init_names)

        class TestInterface(object):
            @drivers_base.driver_periodic_task(spacing=100500)
            def iface(self):
                pass

        class Driver(object):
            core_interfaces = []
            standard_interfaces = ['iface']

            iface = TestInterface()

            @drivers_base.driver_periodic_task(spacing=42)
            def task(self, context):
                pass

        obj = Driver()
        self.assertTrue(obj.task._periodic_enabled)
        get_mock.return_value = mock.Mock(obj=obj)

        with mock.patch.object(
                driver_factory.DriverFactory()._extension_manager,
                'names') as mock_names:
            mock_names.return_value = init_names
            self._start_service()
        tasks = dict(self.service._periodic_tasks)
        self.assertEqual(obj.task, tasks[expected_name])
        self.assertEqual(obj.iface.iface, tasks[expected_name2])
        self.assertEqual(42,
                         self.service._periodic_spacing[expected_name])
        self.assertEqual(100500,
                         self.service._periodic_spacing[expected_name2])
        self.assertIn(expected_name, self.service._periodic_last_run)
        self.assertIn(expected_name2, self.service._periodic_last_run)

    @mock.patch.object(driver_factory.DriverFactory, '__init__')
    def test_start_fails_on_missing_driver(self, mock_df):
        mock_df.side_effect = exception.DriverNotFound('test')
        with mock.patch.object(self.dbapi, 'register_conductor') as mock_reg:
            self.assertRaises(exception.DriverNotFound,
                              self.service.init_host)
            self.assertTrue(mock_df.called)
            self.assertFalse(mock_reg.called)

    @mock.patch.object(base_manager, 'LOG')
    @mock.patch.object(driver_factory, 'DriverFactory')
    def test_start_fails_on_no_driver(self, df_mock, log_mock):
        driver_factory_mock = mock.MagicMock(names=[])
        df_mock.return_value = driver_factory_mock
        self.assertRaises(exception.NoDriversLoaded,
                          self.service.init_host)
        self.assertTrue(log_mock.error.called)

    @mock.patch.object(eventlet.greenpool.GreenPool, 'waitall')
    def test_del_host_waits_on_workerpool(self, wait_mock):
        self._start_service()
        self.service.del_host()
        self.assertTrue(wait_mock.called)


class KeepAliveTestCase(mgr_utils.ServiceSetUpMixin, tests_db_base.DbTestCase):
    def test__conductor_service_record_keepalive(self):
        self._start_service()
        # avoid wasting time at the event.wait()
        CONF.set_override('heartbeat_interval', 0, 'conductor')
        with mock.patch.object(self.dbapi, 'touch_conductor') as mock_touch:
            with mock.patch.object(self.service._keepalive_evt,
                                   'is_set') as mock_is_set:
                mock_is_set.side_effect = [False, True]
                self.service._conductor_service_record_keepalive()
            mock_touch.assert_called_once_with(self.hostname)

    def test__conductor_service_record_keepalive_failed_db_conn(self):
        self._start_service()
        # avoid wasting time at the event.wait()
        CONF.set_override('heartbeat_interval', 0, 'conductor')
        with mock.patch.object(self.dbapi, 'touch_conductor') as mock_touch:
            mock_touch.side_effect = [None, db_exception.DBConnectionError(),
                                      None]
            with mock.patch.object(self.service._keepalive_evt,
                                   'is_set') as mock_is_set:
                mock_is_set.side_effect = [False, False, False, True]
                self.service._conductor_service_record_keepalive()
            self.assertEqual(3, mock_touch.call_count)
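The two KeepAliveTestCase tests pin down the loop's contract: one
touch_conductor() call per iteration, and a DBConnectionError is logged but
does not break the loop. Reduced to a standalone skeleton under simplified
names (this restates the behavior under test, it is not the real module):

    # Standalone restatement of the keepalive loop's contract.
    import threading

    def keepalive_loop(touch, evt, interval):
        """Call touch() until evt is set; survive transient DB errors."""
        while not evt.is_set():
            try:
                touch()
            except Exception:   # the real code catches DBConnectionError
                pass            # ... and logs a warning instead of dying
            evt.wait(interval)

    evt = threading.Event()
    calls = []
    def touch():
        calls.append(1)
        if len(calls) == 3:
            evt.set()           # stop the loop after three touches
    keepalive_loop(touch, evt, interval=0)
    assert len(calls) == 3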
@ -23,9 +23,7 @@ import datetime
|
|||||||
import eventlet
|
import eventlet
|
||||||
import mock
|
import mock
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_db import exception as db_exception
|
|
||||||
import oslo_messaging as messaging
|
import oslo_messaging as messaging
|
||||||
from oslo_utils import strutils
|
|
||||||
from oslo_utils import uuidutils
|
from oslo_utils import uuidutils
|
||||||
from oslo_versionedobjects import base as ovo_base
|
from oslo_versionedobjects import base as ovo_base
|
||||||
from oslo_versionedobjects import fields
|
from oslo_versionedobjects import fields
|
||||||
@ -53,287 +51,8 @@ from ironic.tests.unit.objects import utils as obj_utils
|
|||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
||||||
|
|
||||||
class _CommonMixIn(object):
|
@mgr_utils.mock_record_keepalive
|
||||||
@staticmethod
|
class ChangeNodePowerStateTestCase(mgr_utils.ServiceSetUpMixin,
|
||||||
def _create_node(**kwargs):
|
|
||||||
attrs = {'id': 1,
|
|
||||||
'uuid': uuidutils.generate_uuid(),
|
|
||||||
'power_state': states.POWER_OFF,
|
|
||||||
'target_power_state': None,
|
|
||||||
'maintenance': False,
|
|
||||||
'reservation': None}
|
|
||||||
attrs.update(kwargs)
|
|
||||||
node = mock.Mock(spec_set=objects.Node)
|
|
||||||
for attr in attrs:
|
|
||||||
setattr(node, attr, attrs[attr])
|
|
||||||
return node
|
|
||||||
|
|
||||||
def _create_task(self, node=None, node_attrs=None):
|
|
||||||
if node_attrs is None:
|
|
||||||
node_attrs = {}
|
|
||||||
if node is None:
|
|
||||||
node = self._create_node(**node_attrs)
|
|
||||||
task = mock.Mock(spec_set=['node', 'release_resources',
|
|
||||||
'spawn_after', 'process_event'])
|
|
||||||
task.node = node
|
|
||||||
return task
|
|
||||||
|
|
||||||
def _get_nodeinfo_list_response(self, nodes=None):
|
|
||||||
if nodes is None:
|
|
||||||
nodes = [self.node]
|
|
||||||
elif not isinstance(nodes, (list, tuple)):
|
|
||||||
nodes = [nodes]
|
|
||||||
return [tuple(getattr(n, c) for c in self.columns) for n in nodes]
|
|
||||||
|
|
||||||
def _get_acquire_side_effect(self, task_infos):
|
|
||||||
"""Helper method to generate a task_manager.acquire() side effect.
|
|
||||||
|
|
||||||
This accepts a list of information about task mocks to return.
|
|
||||||
task_infos can be a single entity or a list.
|
|
||||||
|
|
||||||
Each task_info can be a single entity, the task to return, or it
|
|
||||||
can be a tuple of (task, exception_to_raise_on_exit). 'task' can
|
|
||||||
be an exception to raise on __enter__.
|
|
||||||
|
|
||||||
Examples: _get_acquire_side_effect(self, task): Yield task
|
|
||||||
_get_acquire_side_effect(self, [task, enter_exception(),
|
|
||||||
(task2, exit_exception())])
|
|
||||||
Yield task on first call to acquire()
|
|
||||||
raise enter_exception() in __enter__ on 2nd call to
|
|
||||||
acquire()
|
|
||||||
Yield task2 on 3rd call to acquire(), but raise
|
|
||||||
exit_exception() on __exit__()
|
|
||||||
"""
|
|
||||||
tasks = []
|
|
||||||
exit_exceptions = []
|
|
||||||
if not isinstance(task_infos, list):
|
|
||||||
task_infos = [task_infos]
|
|
||||||
for task_info in task_infos:
|
|
||||||
if isinstance(task_info, tuple):
|
|
||||||
task, exc = task_info
|
|
||||||
else:
|
|
||||||
task = task_info
|
|
||||||
exc = None
|
|
||||||
tasks.append(task)
|
|
||||||
exit_exceptions.append(exc)
|
|
||||||
|
|
||||||
class FakeAcquire(object):
|
|
||||||
def __init__(fa_self, context, node_id, *args, **kwargs):
|
|
||||||
# We actually verify these arguments via
|
|
||||||
# acquire_mock.call_args_list(). However, this stores the
|
|
||||||
# node_id so we can assert we're returning the correct node
|
|
||||||
# in __enter__().
|
|
||||||
fa_self.node_id = node_id
|
|
||||||
|
|
||||||
def __enter__(fa_self):
|
|
||||||
task = tasks.pop(0)
|
|
||||||
if isinstance(task, Exception):
|
|
||||||
raise task
|
|
||||||
# NOTE(comstud): Not ideal to throw this into
|
|
||||||
# a helper, however it's the cleanest way
|
|
||||||
# to verify we're dealing with the correct task/node.
|
|
||||||
if strutils.is_int_like(fa_self.node_id):
|
|
||||||
self.assertEqual(fa_self.node_id, task.node.id)
|
|
||||||
else:
|
|
||||||
self.assertEqual(fa_self.node_id, task.node.uuid)
|
|
||||||
return task
|
|
||||||
|
|
||||||
def __exit__(fa_self, exc_typ, exc_val, exc_tb):
|
|
||||||
exc = exit_exceptions.pop(0)
|
|
||||||
if exc_typ is None and exc is not None:
|
|
||||||
raise exc
|
|
||||||
|
|
||||||
return FakeAcquire
|
|
||||||
|
|
||||||
|
|
||||||
class _ServiceSetUpMixin(object):
|
|
||||||
def setUp(self):
|
|
||||||
super(_ServiceSetUpMixin, self).setUp()
|
|
||||||
self.hostname = 'test-host'
|
|
||||||
self.config(enabled_drivers=['fake'])
|
|
||||||
self.config(node_locked_retry_attempts=1, group='conductor')
|
|
||||||
self.config(node_locked_retry_interval=0, group='conductor')
|
|
||||||
self.service = manager.ConductorManager(self.hostname, 'test-topic')
|
|
||||||
mgr_utils.mock_the_extension_manager()
|
|
||||||
self.driver = driver_factory.get_driver("fake")
|
|
||||||
|
|
||||||
def _stop_service(self):
|
|
||||||
try:
|
|
||||||
objects.Conductor.get_by_hostname(self.context, self.hostname)
|
|
||||||
except exception.ConductorNotFound:
|
|
||||||
return
|
|
||||||
self.service.del_host()
|
|
||||||
|
|
||||||
def _start_service(self):
|
|
||||||
self.service.init_host()
|
|
||||||
self.addCleanup(self._stop_service)
|
|
||||||
|
|
||||||
|
|
||||||
def _mock_record_keepalive(func_or_class):
|
|
||||||
return mock.patch.object(
|
|
||||||
manager.ConductorManager,
|
|
||||||
'_conductor_service_record_keepalive',
|
|
||||||
lambda _: None)(func_or_class)
|
|
||||||
|
|
||||||
|
|
||||||
@_mock_record_keepalive
|
|
||||||
class StartStopTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
|
|
||||||
def test_start_registers_conductor(self):
|
|
||||||
self.assertRaises(exception.ConductorNotFound,
|
|
||||||
objects.Conductor.get_by_hostname,
|
|
||||||
self.context, self.hostname)
|
|
||||||
self._start_service()
|
|
||||||
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
|
|
||||||
self.assertEqual(self.hostname, res['hostname'])
|
|
||||||
|
|
||||||
def test_start_clears_conductor_locks(self):
|
|
||||||
node = obj_utils.create_test_node(self.context,
|
|
||||||
reservation=self.hostname)
|
|
||||||
node.save()
|
|
||||||
self._start_service()
|
|
||||||
node.refresh()
|
|
||||||
self.assertIsNone(node.reservation)
|
|
||||||
|
|
||||||
def test_stop_unregisters_conductor(self):
|
|
||||||
self._start_service()
|
|
||||||
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
|
|
||||||
self.assertEqual(self.hostname, res['hostname'])
|
|
||||||
self.service.del_host()
|
|
||||||
self.assertRaises(exception.ConductorNotFound,
|
|
||||||
objects.Conductor.get_by_hostname,
|
|
||||||
self.context, self.hostname)
|
|
||||||
|
|
||||||
def test_stop_doesnt_unregister_conductor(self):
|
|
||||||
self._start_service()
|
|
||||||
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
|
|
||||||
self.assertEqual(self.hostname, res['hostname'])
|
|
||||||
self.service.del_host(deregister=False)
|
|
||||||
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
|
|
||||||
self.assertEqual(self.hostname, res['hostname'])
|
|
||||||
|
|
||||||
@mock.patch.object(manager.ConductorManager, 'init_host')
|
|
||||||
def test_stop_uninitialized_conductor(self, mock_init):
|
|
||||||
self._start_service()
|
|
||||||
self.service.del_host()
|
|
||||||
|
|
||||||
@mock.patch.object(driver_factory.DriverFactory, '__getitem__',
|
|
||||||
lambda *args: mock.MagicMock())
|
|
||||||
def test_start_registers_driver_names(self):
|
|
||||||
init_names = ['fake1', 'fake2']
|
|
||||||
restart_names = ['fake3', 'fake4']
|
|
||||||
|
|
||||||
df = driver_factory.DriverFactory()
|
|
||||||
with mock.patch.object(df._extension_manager, 'names') as mock_names:
|
|
||||||
# verify driver names are registered
|
|
||||||
self.config(enabled_drivers=init_names)
|
|
||||||
mock_names.return_value = init_names
|
|
||||||
self._start_service()
|
|
||||||
res = objects.Conductor.get_by_hostname(self.context,
|
|
||||||
self.hostname)
|
|
||||||
self.assertEqual(init_names, res['drivers'])
|
|
||||||
|
|
||||||
# verify that restart registers new driver names
|
|
||||||
self.config(enabled_drivers=restart_names)
|
|
||||||
mock_names.return_value = restart_names
|
|
||||||
self._start_service()
|
|
||||||
res = objects.Conductor.get_by_hostname(self.context,
|
|
||||||
self.hostname)
|
|
||||||
self.assertEqual(restart_names, res['drivers'])
|
|
||||||
|
|
||||||
    @mock.patch.object(driver_factory.DriverFactory, '__getitem__')
    def test_start_registers_driver_specific_tasks(self, get_mock):
        init_names = ['fake1']
        expected_task_name = 'ironic.tests.unit.conductor.test_manager.task'
        expected_task_name2 = 'ironic.tests.unit.conductor.test_manager.iface'
        self.config(enabled_drivers=init_names)

        class TestInterface(object):
            @drivers_base.driver_periodic_task(spacing=100500)
            def iface(self):
                pass

        class Driver(object):
            core_interfaces = []
            standard_interfaces = ['iface']

            iface = TestInterface()

            @drivers_base.driver_periodic_task(spacing=42)
            def task(self, context):
                pass

        obj = Driver()
        self.assertTrue(obj.task._periodic_enabled)
        get_mock.return_value = mock.Mock(obj=obj)

        with mock.patch.object(
                driver_factory.DriverFactory()._extension_manager,
                'names') as mock_names:
            mock_names.return_value = init_names
            self._start_service()
            tasks = dict(self.service._periodic_tasks)
            self.assertEqual(obj.task, tasks[expected_task_name])
            self.assertEqual(obj.iface.iface, tasks[expected_task_name2])
            self.assertEqual(42,
                             self.service._periodic_spacing[expected_task_name])
            self.assertEqual(100500,
                             self.service._periodic_spacing[expected_task_name2])
            self.assertIn(expected_task_name, self.service._periodic_last_run)
            self.assertIn(expected_task_name2, self.service._periodic_last_run)

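For the assertions above to hold, the decorator only needs to tag the function with metadata that the conductor later collects into _periodic_tasks, _periodic_spacing, and _periodic_last_run. A simplified sketch; the real decorator lives in ironic.drivers.base and may differ:

    def driver_periodic_task(spacing=0):
        """Tag a driver method as a periodic task (simplified sketch)."""
        def decorator(func):
            # Checked directly via obj.task._periodic_enabled in the test.
            func._periodic_enabled = True
            # Seconds between runs; the 42 and 100500 asserted above.
            func._periodic_spacing = spacing
            return func
        return decorator
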
    @mock.patch.object(driver_factory.DriverFactory, '__init__')
    def test_start_fails_on_missing_driver(self, mock_df):
        mock_df.side_effect = exception.DriverNotFound('test')
        with mock.patch.object(self.dbapi, 'register_conductor') as mock_reg:
            self.assertRaises(exception.DriverNotFound,
                              self.service.init_host)
            self.assertTrue(mock_df.called)
            self.assertFalse(mock_reg.called)

    @mock.patch.object(manager, 'LOG')
    @mock.patch.object(driver_factory, 'DriverFactory')
    def test_start_fails_on_no_driver(self, df_mock, log_mock):
        driver_factory_mock = mock.MagicMock(names=[])
        df_mock.return_value = driver_factory_mock
        self.assertRaises(exception.NoDriversLoaded,
                          self.service.init_host)
        self.assertTrue(log_mock.error.called)

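Taken together, the two failure tests fix the ordering inside init_host: drivers load before any database write. A sketch of that ordering, with the log message text and the NoDriversLoaded signature assumed for illustration:

    def init_host(self):
        # 1. Load drivers; DriverNotFound propagates before any DB work,
        #    which is why register_conductor must never be reached.
        self._driver_factory = driver_factory.DriverFactory()
        # 2. Refuse to start with an empty driver list.
        if not self._driver_factory.names:
            LOG.error(_LE('Conductor %s cannot be started because no '
                          'drivers were loaded.'), self.host)
            raise exception.NoDriversLoaded(conductor=self.host)
        # 3. Only now touch the database.
        self.conductor = self.dbapi.register_conductor(
            {'hostname': self.host, 'drivers': self._driver_factory.names})
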
    @mock.patch.object(eventlet.greenpool.GreenPool, 'waitall')
    def test_del_host_waits_on_workerpool(self, wait_mock):
        self._start_service()
        self.service.del_host()
        self.assertTrue(wait_mock.called)

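The mocked waitall is plain eventlet API; a self-contained example of the drain-the-pool pattern that del_host relies on:

    from eventlet import greenpool

    pool = greenpool.GreenPool(100)  # sized like CONF.conductor.workers_pool_size

    def work(n):
        return n * n

    for i in range(3):
        pool.spawn(work, i)

    pool.waitall()  # blocks until every spawned greenthread has finished
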
class KeepAliveTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
    def test__conductor_service_record_keepalive(self):
        self._start_service()
        # avoid wasting time at the event.wait()
        CONF.set_override('heartbeat_interval', 0, 'conductor')
        with mock.patch.object(self.dbapi, 'touch_conductor') as mock_touch:
            with mock.patch.object(self.service._keepalive_evt,
                                   'is_set') as mock_is_set:
                mock_is_set.side_effect = [False, True]
                self.service._conductor_service_record_keepalive()
            mock_touch.assert_called_once_with(self.hostname)

    def test__conductor_service_record_keepalive_failed_db_conn(self):
        self._start_service()
        # avoid wasting time at the event.wait()
        CONF.set_override('heartbeat_interval', 0, 'conductor')
        with mock.patch.object(self.dbapi, 'touch_conductor') as mock_touch:
            mock_touch.side_effect = [None, db_exception.DBConnectionError(),
                                      None]
            with mock.patch.object(self.service._keepalive_evt,
                                   'is_set') as mock_is_set:
                mock_is_set.side_effect = [False, False, False, True]
                self.service._conductor_service_record_keepalive()
            self.assertEqual(3, mock_touch.call_count)

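Pieced together from what these two tests mock, the heartbeat loop looks roughly like the sketch below: touch the conductor row once per iteration, swallow transient DB connection errors, and pace the loop with the keepalive event so setting it stops the loop. The warning text is an assumption:

    def _conductor_service_record_keepalive(self):
        while not self._keepalive_evt.is_set():
            try:
                # Marks the conductor row as alive; called once per
                # iteration, hence the call counts asserted above.
                self.dbapi.touch_conductor(self.host)
            except db_exception.DBConnectionError:
                # A lost DB connection is retried on the next beat.
                LOG.warning(_LW('Conductor could not connect to database '
                                'while heartbeating.'))
            # heartbeat_interval=0 in the tests makes this return at once.
            self._keepalive_evt.wait(CONF.conductor.heartbeat_interval)
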
-@_mock_record_keepalive
-class ChangeNodePowerStateTestCase(_ServiceSetUpMixin,
-                                   tests_db_base.DbTestCase):
+@mgr_utils.mock_record_keepalive
+class ChangeNodePowerStateTestCase(mgr_utils.ServiceSetUpMixin,
+                                   tests_db_base.DbTestCase):
     def test_change_node_power_state_power_on(self):
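Every remaining hunk in this file applies the same mechanical rename: the module-private test helpers become shared utilities imported as mgr_utils. A sketch of the new-style test module header, assuming the helpers moved to ironic/tests/unit/conductor/mgr_utils.py:

    from ironic.tests.unit.conductor import mgr_utils  # assumed new location
    from ironic.tests.unit.db import base as tests_db_base


    @mgr_utils.mock_record_keepalive
    class ExampleTestCase(mgr_utils.ServiceSetUpMixin,
                          tests_db_base.DbTestCase):
        def test_service_starts(self):
            self._start_service()  # provided by ServiceSetUpMixin
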
@@ -483,8 +202,9 @@ class ChangeNodePowerStateTestCase(_ServiceSetUpMixin,
         self.assertIsNone(node.last_error)


-@_mock_record_keepalive
-class UpdateNodeTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
+@mgr_utils.mock_record_keepalive
+class UpdateNodeTestCase(mgr_utils.ServiceSetUpMixin,
+                         tests_db_base.DbTestCase):
     def test_update_node(self):
         node = obj_utils.create_test_node(self.context, driver='fake',
                                           extra={'test': 'one'})
@@ -564,8 +284,9 @@ class UpdateNodeTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
         self.assertEqual(existing_driver, node.driver)


-@_mock_record_keepalive
-class VendorPassthruTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
+@mgr_utils.mock_record_keepalive
+class VendorPassthruTestCase(mgr_utils.ServiceSetUpMixin,
+                             tests_db_base.DbTestCase):

     @mock.patch.object(task_manager.TaskManager, 'spawn_after')
     def test_vendor_passthru_async(self, mock_spawn):
@@ -892,9 +613,9 @@ class VendorPassthruTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
         self.assertFalse(test_method.called)


-@_mock_record_keepalive
+@mgr_utils.mock_record_keepalive
 @mock.patch.object(images, 'is_whole_disk_image')
-class ServiceDoNodeDeployTestCase(_ServiceSetUpMixin,
+class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
                                   tests_db_base.DbTestCase):
     def test_do_node_deploy_invalid_state(self, mock_iwdi):
         mock_iwdi.return_value = False
@@ -1178,8 +899,8 @@ class ServiceDoNodeDeployTestCase(_ServiceSetUpMixin,
         self.assertFalse(node.driver_internal_info['is_whole_disk_image'])


-@_mock_record_keepalive
-class DoNodeDeployTearDownTestCase(_ServiceSetUpMixin,
+@mgr_utils.mock_record_keepalive
+class DoNodeDeployTearDownTestCase(mgr_utils.ServiceSetUpMixin,
                                    tests_db_base.DbTestCase):
     @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
     @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
@@ -1620,8 +1341,9 @@ class DoNodeDeployTearDownTestCase(_ServiceSetUpMixin,
         self.assertTrue(task.node.maintenance)


-@_mock_record_keepalive
-class DoNodeCleanTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
+@mgr_utils.mock_record_keepalive
+class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin,
+                          tests_db_base.DbTestCase):
     def setUp(self):
         super(DoNodeCleanTestCase, self).setUp()
         self.config(clean_nodes=True, group='conductor')
@@ -2095,8 +1817,9 @@ class DoNodeCleanTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
                           self.service._get_node_next_clean_steps, task)


-@_mock_record_keepalive
-class DoNodeVerifyTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
+@mgr_utils.mock_record_keepalive
+class DoNodeVerifyTestCase(mgr_utils.ServiceSetUpMixin,
+                           tests_db_base.DbTestCase):
     @mock.patch('ironic.drivers.modules.fake.FakePower.get_power_state')
     @mock.patch('ironic.drivers.modules.fake.FakePower.validate')
     def test__do_node_verify(self, mock_validate, mock_get_power_state):
@@ -2180,8 +1903,9 @@ class DoNodeVerifyTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
         self.assertTrue(node.last_error)


-@_mock_record_keepalive
-class MiscTestCase(_ServiceSetUpMixin, _CommonMixIn, tests_db_base.DbTestCase):
+@mgr_utils.mock_record_keepalive
+class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
+                   tests_db_base.DbTestCase):
     def test_get_driver_known(self):
         self._start_service()
         driver = self.service._get_driver('fake')
@@ -2260,8 +1984,8 @@ class MiscTestCase(_ServiceSetUpMixin, _CommonMixIn, tests_db_base.DbTestCase):
                               last_error=mock.ANY)


-@_mock_record_keepalive
-class ConsoleTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
+@mgr_utils.mock_record_keepalive
+class ConsoleTestCase(mgr_utils.ServiceSetUpMixin, tests_db_base.DbTestCase):
     def test_set_console_mode_worker_pool_full(self):
         node = obj_utils.create_test_node(self.context, driver='fake')
         self._start_service()
@@ -2411,8 +2135,9 @@ class ConsoleTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
         self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])


-@_mock_record_keepalive
-class DestroyNodeTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
+@mgr_utils.mock_record_keepalive
+class DestroyNodeTestCase(mgr_utils.ServiceSetUpMixin,
+                          tests_db_base.DbTestCase):

     def test_destroy_node(self):
         self._start_service()
@@ -2498,8 +2223,9 @@ class DestroyNodeTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
                           node.uuid)


-@_mock_record_keepalive
-class UpdatePortTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
+@mgr_utils.mock_record_keepalive
+class UpdatePortTestCase(mgr_utils.ServiceSetUpMixin,
+                         tests_db_base.DbTestCase):
     def test_update_port(self):
         node = obj_utils.create_test_node(self.context, driver='fake')

@@ -2765,8 +2491,8 @@ class UpdatePortTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
                          exc.exc_info[0])


-@_mock_record_keepalive
-class RaidTestCases(_ServiceSetUpMixin, tests_db_base.DbTestCase):
+@mgr_utils.mock_record_keepalive
+class RaidTestCases(mgr_utils.ServiceSetUpMixin, tests_db_base.DbTestCase):

     def setUp(self):
         super(RaidTestCases, self).setUp()
@@ -3089,7 +2815,8 @@ class ManagerDoSyncPowerStateTestCase(tests_db_base.DbTestCase):
 @mock.patch.object(task_manager, 'acquire')
 @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
 @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list')
-class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
+class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
+                                     tests_db_base.DbTestCase):
     def setUp(self):
         super(ManagerSyncPowerStatesTestCase, self).setUp()
         self.service = manager.ConductorManager('hostname', 'test-topic')
@@ -3317,7 +3044,7 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
 @mock.patch.object(task_manager, 'acquire')
 @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
 @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list')
-class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
+class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
                                          tests_db_base.DbTestCase):
     def setUp(self):
         super(ManagerCheckDeployTimeoutsTestCase, self).setUp()
@@ -3562,7 +3289,7 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn,
         self.assertFalse(mac_update_mock.called)


-@_mock_record_keepalive
+@mgr_utils.mock_record_keepalive
 class ManagerTestProperties(tests_db_base.DbTestCase):

     def setUp(self):
@@ -3685,7 +3412,8 @@ class ManagerTestProperties(tests_db_base.DbTestCase):
 @mock.patch.object(task_manager, 'acquire')
 @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
 @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list')
-class ManagerSyncLocalStateTestCase(_CommonMixIn, tests_db_base.DbTestCase):
+class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn,
+                                    tests_db_base.DbTestCase):

     def setUp(self):
         super(ManagerSyncLocalStateTestCase, self).setUp()
@@ -3883,8 +3611,8 @@ class StoreConfigDriveTestCase(tests_base.TestCase):
         self.assertEqual(expected_instance_info, self.node.instance_info)


-@_mock_record_keepalive
-class NodeInspectHardware(_ServiceSetUpMixin,
+@mgr_utils.mock_record_keepalive
+class NodeInspectHardware(mgr_utils.ServiceSetUpMixin,
                           tests_db_base.DbTestCase):

     @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware')
@@ -4018,7 +3746,7 @@ class NodeInspectHardware(_ServiceSetUpMixin,
 @mock.patch.object(task_manager, 'acquire')
 @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
 @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list')
-class ManagerCheckInspectTimeoutsTestCase(_CommonMixIn,
+class ManagerCheckInspectTimeoutsTestCase(mgr_utils.CommonMixIn,
                                           tests_db_base.DbTestCase):
     def setUp(self):
         super(ManagerCheckInspectTimeoutsTestCase, self).setUp()
@@ -4239,8 +3967,9 @@ class ManagerCheckInspectTimeoutsTestCase(_CommonMixIn,
                          self.task.process_event.call_args_list)


-@_mock_record_keepalive
-class DestroyPortTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
+@mgr_utils.mock_record_keepalive
+class DestroyPortTestCase(mgr_utils.ServiceSetUpMixin,
+                          tests_db_base.DbTestCase):
     def test_destroy_port(self):
         node = obj_utils.create_test_node(self.context, driver='fake')

@@ -4261,11 +3990,11 @@ class DestroyPortTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
         self.assertEqual(exception.NodeLocked, exc.exc_info[0])


-@_mock_record_keepalive
+@mgr_utils.mock_record_keepalive
 @mock.patch.object(manager.ConductorManager, '_fail_if_in_state')
 @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
 @mock.patch.object(dbapi.IMPL, 'get_offline_conductors')
-class ManagerCheckDeployingStatusTestCase(_ServiceSetUpMixin,
+class ManagerCheckDeployingStatusTestCase(mgr_utils.ServiceSetUpMixin,
                                           tests_db_base.DbTestCase):
     def setUp(self):
         super(ManagerCheckDeployingStatusTestCase, self).setUp()
@@ -4463,8 +4192,8 @@ class TestIndirectionApiConductor(tests_db_base.DbTestCase):
             target_version='1.0', version_manifest=fake_version_manifest)


-@_mock_record_keepalive
-class DoNodeTakeOverTestCase(_ServiceSetUpMixin,
+@mgr_utils.mock_record_keepalive
+class DoNodeTakeOverTestCase(mgr_utils.ServiceSetUpMixin,
                              tests_db_base.DbTestCase):

     @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console')