Use conductor group for hash ring calculations

This changes the calculation for keys in the hash ring manager to be of
the form "<conductor_group>:<driver>", instead of just "<driver>". The
group-aware form is used when the RPC version pin is 1.47 or greater
(1.47 was created to handle this).

When finding an RPC topic, we use the conductor group marked on the node
as part of this calculation. However, this becomes a problem when we
don't have a node that we're looking up a topic for. In this case we
look for a conductor in any group which has the driver loaded, and use a
temporary hash ring that does not use conductor groups to find a
conductor.

This also begins the API work, as the API must be aware of the new hash
ring calculation. However, exposing the conductor_group field and adding
a microversion is left for a future patch.

Story: 2001795
Task: 22641
Change-Id: Iaf71348666b683518fc6ce4769112459d98938f2
This commit is contained in:
Jim Rollenhagen 2018-07-02 20:44:32 +00:00
parent 3b7e7fb3fc
commit 26fd55f7da
22 changed files with 365 additions and 140 deletions

View File

@ -119,13 +119,7 @@ def get_nodes_controller_reserved_names():
return _NODES_CONTROLLER_RESERVED_WORDS
def hide_fields_in_newer_versions(obj):
"""This method hides fields that were added in newer API versions.
Certain node fields were introduced at certain API versions.
These fields are only made available when the request's API version
matches or exceeds the versions when these fields were introduced.
"""
def _hide_fields_in_newer_versions_part_one(obj):
if pecan.request.version.minor < versions.MINOR_3_DRIVER_INTERNAL_INFO:
obj.driver_internal_info = wsme.Unset
@ -153,6 +147,8 @@ def hide_fields_in_newer_versions(obj):
if pecan.request.version.minor < versions.MINOR_44_NODE_DEPLOY_STEP:
obj.deploy_step = wsme.Unset
def _hide_fields_in_newer_versions_part_two(obj):
if not api_utils.allow_resource_class():
obj.resource_class = wsme.Unset
@ -172,8 +168,21 @@ def hide_fields_in_newer_versions(obj):
if not api_utils.allow_bios_interface():
obj.bios_interface = wsme.Unset
# TODO(jroll) add a microversion here
obj.conductor_group = wsme.Unset
if not api_utils.allow_conductor_group():
obj.conductor_group = wsme.Unset
def hide_fields_in_newer_versions(obj):
"""This method hides fields that were added in newer API versions.
Certain node fields were introduced at certain API versions.
These fields are only made available when the request's API version
matches or exceeds the versions when these fields were introduced.
This is broken into two methods for cyclomatic complexity's sake.
"""
_hide_fields_in_newer_versions_part_one(obj)
_hide_fields_in_newer_versions_part_two(obj)
def update_state_in_older_versions(obj):
@ -1096,6 +1105,11 @@ class Node(base.APIBase):
# TODO(jroll) is there a less hacky way to do this?
if k == 'traits' and kwargs.get('traits') is not None:
value = [t['trait'] for t in kwargs['traits']['objects']]
# NOTE(jroll) this is special-cased to "" and not Unset,
# because it is used in hash ring calculations
elif k == 'conductor_group' and (k not in kwargs or
kwargs[k] is wtypes.Unset):
value = ''
else:
value = kwargs.get(k, wtypes.Unset)
setattr(self, k, value)
@ -1901,6 +1915,12 @@ class NodesController(rest.RestController):
and node.rescue_interface is not wtypes.Unset):
raise exception.NotAcceptable()
# NOTE(jroll) this is special-cased to "" and not Unset,
# because it is used in hash ring calculations
if (not api_utils.allow_conductor_group()
and node.conductor_group != ""):
raise exception.NotAcceptable()
# NOTE(deva): get_topic_for checks if node.driver is in the hash ring
# and raises NoValidHost if it is not.
# We need to ensure that node has a UUID before it can
@ -1981,6 +2001,10 @@ class NodesController(rest.RestController):
"changing the node's driver.")
raise exception.Invalid(msg)
conductor_group = api_utils.get_patch_values(patch, '/conductor_group')
if conductor_group and not api_utils.allow_conductor_group():
raise exception.NotAcceptable()
@METRICS.timer('NodesController.patch')
@wsme.validate(types.uuid, types.boolean, [NodePatchType])
@expose.expose(Node, types.uuid_or_name, types.boolean,

View File

@ -378,6 +378,8 @@ def check_allowed_fields(fields):
raise exception.NotAcceptable()
if 'rescue_interface' in fields and not allow_rescue_interface():
raise exception.NotAcceptable()
if 'conductor_group' in fields and not allow_conductor_group():
raise exception.NotAcceptable()
def check_allowed_portgroup_fields(fields):
@ -868,6 +870,14 @@ def allow_reset_interfaces():
versions.MINOR_45_RESET_INTERFACES)
def allow_conductor_group():
    """Check whether the conductor_group node field may be used.

    No API microversion supports conductor_group yet, so this is
    unconditionally False. Exposing the field behind a microversion is
    left for a future patch, which will replace this with a version
    check against the request's API version.
    """
    return False
def get_request_return_fields(fields, detail, default_fields):
"""Calculate fields to return from an API request

View File

@ -28,14 +28,20 @@ class HashRingManager(object):
_hash_rings = None
_lock = threading.Lock()
def __init__(self):
def __init__(self, use_groups=True, cache=True):
    """Create a hash ring manager.

    :param use_groups: whether ring keys include the conductor group
        (i.e. "<conductor_group>:<driver>") rather than just the
        driver name.
    :param cache: whether to use the class-level cached hash rings;
        when False, rings are rebuilt from the database on each access
        and the shared cache is left untouched.
    """
    self.dbapi = dbapi.get_instance()
    # Timestamp of the last ring load; compared against
    # hash_ring_reset_interval to decide when to rebuild cached rings.
    self.updated_at = time.time()
    self.use_groups = use_groups
    self.cache = cache
@property
def ring(self):
interval = CONF.hash_ring_reset_interval
limit = time.time() - interval
if not self.cache:
return self._load_hash_rings()
# Hot path, no lock
if self.__class__._hash_rings is not None and self.updated_at >= limit:
return self.__class__._hash_rings
@ -49,11 +55,13 @@ class HashRingManager(object):
def _load_hash_rings(self):
rings = {}
d2c = self.dbapi.get_active_hardware_type_dict()
d2c = self.dbapi.get_active_hardware_type_dict(
use_groups=self.use_groups)
for driver_name, hosts in d2c.items():
rings[driver_name] = hashring.HashRing(
hosts, partitions=2 ** CONF.hash_partition_exponent)
return rings
@classmethod
@ -61,8 +69,10 @@ class HashRingManager(object):
with cls._lock:
cls._hash_rings = None
def __getitem__(self, driver_name):
def get_ring(self, driver_name, conductor_group):
try:
if self.use_groups:
return self.ring['%s:%s' % (conductor_group, driver_name)]
return self.ring[driver_name]
except KeyError:
raise exception.DriverNotFound(

View File

@ -116,7 +116,7 @@ RELEASE_MAPPING = {
},
'master': {
'api': '1.45',
'rpc': '1.46',
'rpc': '1.47',
'objects': {
'Node': ['1.26', '1.27'],
'Conductor': ['1.3'],

View File

@ -22,6 +22,7 @@ from futurist import rejection
from oslo_db import exception as db_exception
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import versionutils
import six
from ironic.common import context as ironic_context
@ -29,6 +30,7 @@ from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import hash_ring
from ironic.common.i18n import _
from ironic.common import release_mappings as versions
from ironic.common import rpc
from ironic.common import states
from ironic.conductor import notification_utils as notify_utils
@ -107,7 +109,10 @@ class BaseConductorManager(object):
check_and_reject=rejection_func)
"""Executor for performing tasks async."""
self.ring_manager = hash_ring.HashRingManager()
# TODO(jroll) delete the use_groups argument and use the default
# in Stein.
self.ring_manager = hash_ring.HashRingManager(
use_groups=self._use_groups())
"""Consistent hash ring which maps drivers to conductors."""
# TODO(dtantsur): remove in Stein
@ -217,6 +222,14 @@ class BaseConductorManager(object):
self._started = True
def _use_groups(self):
    """Return True if hash ring keys should include conductor groups.

    Group-aware hash rings were introduced with RPC API 1.47, so they
    are only enabled when the effective RPC version — the pinned
    release's RPC version if ``pin_release_version`` is set, otherwise
    this service's native RPC version — is at least 1.47.
    """
    release_ver = versions.RELEASE_MAPPING.get(CONF.pin_release_version)
    # NOTE(jroll) self.RPC_API_VERSION is actually defined in a subclass,
    # but we only use this class from there.
    version_cap = (release_ver['rpc'] if release_ver
                   else self.RPC_API_VERSION)
    return versionutils.is_compatible('1.47', version_cap)
def _fail_transient_state(self, state, last_error):
"""Apply "fail" transition to nodes in a transient state.
@ -362,21 +375,23 @@ class BaseConductorManager(object):
Requests node set from and filters out nodes that are not
mapped to this conductor.
Yields tuples (node_uuid, driver, ...) where ... is derived from
fields argument, e.g.: fields=None means yielding ('uuid', 'driver'),
fields=['foo'] means yielding ('uuid', 'driver', 'foo').
Yields tuples (node_uuid, driver, conductor_group, ...) where ... is
derived from fields argument, e.g.: fields=None means yielding ('uuid',
'driver', 'conductor_group'), fields=['foo'] means yielding ('uuid',
'driver', 'conductor_group', 'foo').
:param fields: list of fields to fetch in addition to uuid and driver
:param fields: list of fields to fetch in addition to uuid, driver,
and conductor_group
:param kwargs: additional arguments to pass to dbapi when looking for
nodes
:return: generator yielding tuples of requested fields
"""
columns = ['uuid', 'driver'] + list(fields or ())
columns = ['uuid', 'driver', 'conductor_group'] + list(fields or ())
node_list = self.dbapi.get_nodeinfo_list(columns=columns, **kwargs)
for result in node_list:
if self._shutdown:
break
if self._mapped_to_this_conductor(*result[:2]):
if self._mapped_to_this_conductor(*result[:3]):
yield result
def _spawn_worker(self, func, *args, **kwargs):
@ -407,7 +422,7 @@ class BaseConductorManager(object):
{'err': e})
self._keepalive_evt.wait(CONF.conductor.heartbeat_interval)
def _mapped_to_this_conductor(self, node_uuid, driver):
def _mapped_to_this_conductor(self, node_uuid, driver, conductor_group):
"""Check that node is mapped to this conductor.
Note that because mappings are eventually consistent, it is possible
@ -416,7 +431,7 @@ class BaseConductorManager(object):
take out a lock.
"""
try:
ring = self.ring_manager[driver]
ring = self.ring_manager.get_ring(driver, conductor_group)
except exception.DriverNotFound:
return False
@ -468,7 +483,7 @@ class BaseConductorManager(object):
sort_dir='asc')
workers_count = 0
for node_uuid, driver in node_iter:
for node_uuid, driver, conductor_group in node_iter:
try:
with task_manager.acquire(context, node_uuid,
purpose='node state check') as task:
@ -507,7 +522,7 @@ class BaseConductorManager(object):
node_iter = self.iter_nodes(filters=filters)
for node_uuid, driver in node_iter:
for node_uuid, driver, conductor_group in node_iter:
try:
with task_manager.acquire(context, node_uuid, shared=False,
purpose='start console') as task:

View File

@ -100,7 +100,7 @@ class ConductorManager(base_manager.BaseConductorManager):
# NOTE(rloo): This must be in sync with rpcapi.ConductorAPI's.
# NOTE(pas-ha): This also must be in sync with
# ironic.common.release_mappings.RELEASE_MAPPING['master']
RPC_API_VERSION = '1.46'
RPC_API_VERSION = '1.47'
target = messaging.Target(version=RPC_API_VERSION)
@ -1596,7 +1596,7 @@ class ConductorManager(base_manager.BaseConductorManager):
filters = {'maintenance': False}
node_iter = self.iter_nodes(fields=['id'], filters=filters)
for (node_uuid, driver, node_id) in node_iter:
for (node_uuid, driver, conductor_group, node_id) in node_iter:
try:
# NOTE(dtantsur): start with a shared lock, upgrade if needed
with task_manager.acquire(context, node_uuid,
@ -1685,7 +1685,7 @@ class ConductorManager(base_manager.BaseConductorManager):
filters = {'maintenance': True,
'fault': faults.POWER_FAILURE}
node_iter = self.iter_nodes(fields=['id'], filters=filters)
for (node_uuid, driver, node_id) in node_iter:
for (node_uuid, driver, conductor_group, node_id) in node_iter:
try:
with task_manager.acquire(context, node_uuid,
purpose='power failure recovery',
@ -1777,7 +1777,7 @@ class ConductorManager(base_manager.BaseConductorManager):
state_cleanup_required = []
for (node_uuid, driver, node_id, conductor_hostname,
for (node_uuid, driver, conductor_group, node_id, conductor_hostname,
maintenance, provision_state, target_power_state) in node_iter:
# NOTE(lucasagomes): Although very rare, this may lead to a
# race condition. By the time we release the lock the conductor
@ -1994,7 +1994,8 @@ class ConductorManager(base_manager.BaseConductorManager):
filters=filters)
workers_count = 0
for node_uuid, driver, node_id, conductor_affinity in node_iter:
for (node_uuid, driver, conductor_group, node_id,
conductor_affinity) in node_iter:
if conductor_affinity == self.conductor.id:
continue
@ -2661,7 +2662,8 @@ class ConductorManager(base_manager.BaseConductorManager):
"""Sends sensors data for nodes from synchronized queue."""
while not self._shutdown:
try:
node_uuid, driver, instance_uuid = nodes.get_nowait()
(node_uuid, driver, conductor_group,
instance_uuid) = nodes.get_nowait()
except queue.Empty:
break
# populate the message which will be sent to ceilometer

View File

@ -95,13 +95,14 @@ class ConductorAPI(object):
| 1.44 - Added add_node_traits and remove_node_traits.
| 1.45 - Added continue_node_deploy
| 1.46 - Added reset_interfaces to update_node
| 1.47 - Added support for conductor groups
"""
# NOTE(rloo): This must be in sync with manager.ConductorManager's.
# NOTE(pas-ha): This also must be in sync with
# ironic.common.release_mappings.RELEASE_MAPPING['master']
RPC_API_VERSION = '1.46'
RPC_API_VERSION = '1.47'
def __init__(self, topic=None):
super(ConductorAPI, self).__init__()
@ -117,8 +118,10 @@ class ConductorAPI(object):
else self.RPC_API_VERSION)
self.client = rpc.get_client(target, version_cap=version_cap,
serializer=serializer)
use_groups = self.client.can_send_version('1.47')
# NOTE(deva): this is going to be buggy
self.ring_manager = hash_ring.HashRingManager()
self.ring_manager = hash_ring.HashRingManager(use_groups=use_groups)
def get_topic_for(self, node):
"""Get the RPC topic for the conductor service the node is mapped to.
@ -135,13 +138,15 @@ class ConductorAPI(object):
raise exception.TemporaryFailure()
try:
ring = self.ring_manager[node.driver]
ring = self.ring_manager.get_ring(node.driver,
node.conductor_group)
dest = ring.get_nodes(node.uuid.encode('utf-8'),
replicas=CONF.hash_distribution_replicas)
return '%s.%s' % (self.topic, dest.pop())
except exception.DriverNotFound:
reason = (_('No conductor service registered which supports '
'driver %s.') % node.driver)
'driver %(driver)s for conductor group "%(group)s".') %
{'driver': node.driver, 'group': node.conductor_group})
raise exception.NoValidHost(reason=reason)
def get_topic_for_driver(self, driver_name):
@ -156,9 +161,12 @@ class ConductorAPI(object):
:raises: DriverNotFound
"""
self.ring_manager.reset()
ring = self.ring_manager[driver_name]
# NOTE(jroll) we want to be able to route this to any conductor,
# regardless of groupings. We use a fresh, uncached hash ring that
# does not take groups into account.
local_ring_manager = hash_ring.HashRingManager(use_groups=False,
cache=False)
ring = local_ring_manager.get_ring(driver_name, '')
host = random.choice(list(ring.nodes))
return self.topic + "." + host

View File

@ -518,9 +518,10 @@ class Connection(object):
"""
@abc.abstractmethod
def get_active_hardware_type_dict(self):
def get_active_hardware_type_dict(self, use_groups=False):
"""Retrieve hardware types for the registered and active conductors.
:param use_groups: Whether to factor conductor_group into the keys.
:returns: A dict which maps hardware type names to the set of hosts
which support them. For example:

View File

@ -855,7 +855,7 @@ class Connection(api.Connection):
'powering process, their power state can be incorrect: '
'%(nodes)s', {'nodes': nodes})
def get_active_hardware_type_dict(self):
def get_active_hardware_type_dict(self, use_groups=False):
query = (model_query(models.ConductorHardwareInterfaces,
models.Conductor)
.join(models.Conductor))
@ -863,7 +863,12 @@ class Connection(api.Connection):
d2c = collections.defaultdict(set)
for iface_row, cdr_row in result:
d2c[iface_row['hardware_type']].add(cdr_row['hostname'])
hw_type = iface_row['hardware_type']
if use_groups:
key = '%s:%s' % (cdr_row['conductor_group'], hw_type)
else:
key = hw_type
d2c[key].add(cdr_row['hostname'])
return d2c
def get_offline_conductors(self):

View File

@ -805,7 +805,8 @@ class DracRAID(base.RAIDInterface):
fields = ['driver_internal_info']
node_list = manager.iter_nodes(fields=fields, filters=filters)
for (node_uuid, driver, driver_internal_info) in node_list:
for (node_uuid, driver, conductor_group,
driver_internal_info) in node_list:
try:
lock_purpose = 'checking async raid configuration jobs'
with task_manager.acquire(context, node_uuid,

View File

@ -131,7 +131,7 @@ class Inspector(base.InspectInterface):
filters = {'provision_state': states.INSPECTWAIT}
node_iter = manager.iter_nodes(filters=filters)
for node_uuid, driver in node_iter:
for node_uuid, driver, conductor_group in node_iter:
try:
lock_purpose = 'checking hardware inspection status'
with task_manager.acquire(context, node_uuid,

View File

@ -432,7 +432,7 @@ class IRMCRAID(base.RAIDInterface):
'maintenance': False}
fields = ['raid_config']
node_list = manager.iter_nodes(fields=fields, filters=filters)
for (node_uuid, driver, raid_config) in node_list:
for (node_uuid, driver, conductor_group, raid_config) in node_list:
try:
lock_purpose = 'checking async RAID configuration tasks'
with task_manager.acquire(context, node_uuid,

View File

@ -58,7 +58,7 @@ class OneViewPeriodicTasks(object):
}
node_iter = manager.iter_nodes(filters=filters)
for node_uuid, driver in node_iter:
for node_uuid, driver, conductor_group in node_iter:
node = objects.Node.get(context, node_uuid)
@ -116,7 +116,8 @@ class OneViewPeriodicTasks(object):
}
node_iter = manager.iter_nodes(fields=['maintenance_reason'],
filters=filters)
for node_uuid, driver, maintenance_reason in node_iter:
for (node_uuid, driver, conductor_group,
maintenance_reason) in node_iter:
if maintenance_reason == common.NODE_IN_USE_BY_ONEVIEW:
@ -183,7 +184,8 @@ class OneViewPeriodicTasks(object):
node_iter = manager.iter_nodes(fields=['driver_internal_info'],
filters=filters)
for node_uuid, driver, driver_internal_info in node_iter:
for (node_uuid, driver, conductor_group,
driver_internal_info) in node_iter:
node_oneview_error = driver_internal_info.get('oneview_error')
if node_oneview_error == common.SERVER_HARDWARE_ALLOCATION_ERROR:

View File

@ -76,7 +76,7 @@ class OneViewInspect(inspector.Inspector):
'driver': 'oneview'}
node_iter = manager.iter_nodes(filters=filters)
for node_uuid, driver in node_iter:
for node_uuid, driver, conductor_group in node_iter:
try:
lock_purpose = 'checking hardware inspection status'
with task_manager.acquire(context, node_uuid,

View File

@ -256,6 +256,13 @@ class TestListNodes(test_api_base.BaseApiTest):
self._test_node_field_hidden_in_lower_version('deploy_step',
'1.43', '1.44')
def test_node_conductor_group_hidden_in_lower_version(self):
    # No microversion exposes conductor_group yet, so the field must be
    # hidden from GET /nodes/<uuid> responses at any current version.
    node = obj_utils.create_test_node(self.context)
    data = self.get_json(
        '/nodes/%s' % node.uuid,
        headers={api_base.Version.string: '1.44'})
    self.assertNotIn('conductor_group', data)
def test_get_one_custom_fields(self):
node = obj_utils.create_test_node(self.context,
chassis_id=self.chassis.id)
@ -393,6 +400,16 @@ class TestListNodes(test_api_base.BaseApiTest):
headers={api_base.Version.string: str(api_v1.max_version())})
self.assertIn('traits', response)
def test_get_conductor_group_fields_invalid_api_version(self):
    # Requesting conductor_group via ?fields= must be rejected with
    # 406 Not Acceptable while no microversion allows the field.
    node = obj_utils.create_test_node(self.context,
                                      chassis_id=self.chassis.id)
    fields = 'conductor_group'
    response = self.get_json(
        '/nodes/%s?fields=%s' % (node.uuid, fields),
        headers={api_base.Version.string: '1.43'},
        expect_errors=True)
    self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
def test_detail(self):
node = obj_utils.create_test_node(self.context,
chassis_id=self.chassis.id)
@ -2474,6 +2491,20 @@ class TestPatch(test_api_base.BaseApiTest):
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertTrue(response.json['error_message'])
def test_update_conductor_group_old_api(self):
    # PATCHing /conductor_group must fail with 406 Not Acceptable
    # while allow_conductor_group() is False for every API version.
    node = obj_utils.create_test_node(self.context,
                                      uuid=uuidutils.generate_uuid())
    self.mock_update_node.return_value = node
    headers = {api_base.Version.string: '1.44'}
    response = self.patch_json('/nodes/%s' % node.uuid,
                               [{'path': '/conductor_group',
                                 'value': 'foogroup',
                                 'op': 'add'}],
                               headers=headers,
                               expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code)
def _create_node_locally(node):
driver_factory.check_and_update_node_interfaces(node)
@ -2697,6 +2728,13 @@ class TestPost(test_api_base.BaseApiTest):
# Check that 'id' is not in first arg of positional args
self.assertNotIn('id', cn_mock.call_args[0][0])
def test_create_node_specify_conductor_group_bad_version(self):
    # POSTing a node with a non-default conductor_group must be
    # rejected with 406 while no microversion allows the field.
    headers = {api_base.Version.string: '1.43'}
    ndict = test_api_utils.post_get_test_node(conductor_group='foo')
    response = self.post_json('/nodes', ndict, headers=headers,
                              expect_errors=True)
    self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
def _test_jsontype_attributes(self, attr_name):
kwargs = {attr_name: {'str': 'foo', 'int': 123, 'float': 0.1,
'bool': True, 'list': [1, 2], 'none': None,

View File

@ -521,6 +521,9 @@ class TestApiUtils(base.TestCase):
mock_request.version.minor = 40
self.assertFalse(utils.allow_inspect_abort())
def test_allow_conductor_group(self):
    # No microversion exposes conductor_group yet, so this is
    # unconditionally False.
    self.assertFalse(utils.allow_conductor_group())
class TestNodeIdent(base.TestCase):

View File

@ -28,7 +28,8 @@ class HashRingManagerTestCase(db_base.DbTestCase):
def setUp(self):
super(HashRingManagerTestCase, self).setUp()
self.ring_manager = hash_ring.HashRingManager()
self.use_groups = False
self.ring_manager = hash_ring.HashRingManager(use_groups=False)
def register_conductors(self):
c1 = self.dbapi.register_conductor({
@ -39,40 +40,88 @@ class HashRingManagerTestCase(db_base.DbTestCase):
'hostname': 'host2',
'drivers': ['driver1'],
})
for c in (c1, c2):
c3 = self.dbapi.register_conductor({
'hostname': 'host3',
'drivers': ['driver1, driver2'],
'conductor_group': 'foogroup',
})
c4 = self.dbapi.register_conductor({
'hostname': 'host4',
'drivers': ['driver1'],
'conductor_group': 'foogroup',
})
c5 = self.dbapi.register_conductor({
'hostname': 'host5',
'drivers': ['driver1'],
'conductor_group': 'bargroup',
})
for c in (c1, c2, c3, c4, c5):
self.dbapi.register_conductor_hardware_interfaces(
c.id, 'hardware-type', 'deploy', ['iscsi', 'direct'], 'iscsi')
def test_hash_ring_manager_hardware_type_success(self):
self.register_conductors()
ring = self.ring_manager['hardware-type']
self.assertEqual(sorted(['host1', 'host2']), sorted(ring.nodes))
ring = self.ring_manager.get_ring('hardware-type', '')
self.assertEqual(sorted(['host1', 'host2', 'host3', 'host4', 'host5']),
sorted(ring.nodes))
def test_hash_ring_manager_hardware_type_success_groups(self):
    # This manager was built with use_groups=False, so the requested
    # conductor group is ignored and conductors from every group land
    # in the same ring.
    self.register_conductors()
    ring = self.ring_manager.get_ring('hardware-type', 'foogroup')
    self.assertEqual(sorted(['host1', 'host2', 'host3', 'host4', 'host5']),
                     sorted(ring.nodes))
def test_hash_ring_manager_driver_not_found(self):
self.register_conductors()
self.assertRaises(exception.DriverNotFound,
self.ring_manager.__getitem__,
'driver3')
self.ring_manager.get_ring,
'driver3', '')
def test_hash_ring_manager_no_refresh(self):
# If a new conductor is registered after the ring manager is
# initialized, it won't be seen. Long term this is probably
# undesirable, but today is the intended behavior.
self.assertRaises(exception.DriverNotFound,
self.ring_manager.__getitem__,
'hardware-type')
self.ring_manager.get_ring,
'hardware-type', '')
self.register_conductors()
self.assertRaises(exception.DriverNotFound,
self.ring_manager.__getitem__,
'hardware-type')
self.ring_manager.get_ring,
'hardware-type', '')
def test_hash_ring_manager_refresh(self):
CONF.set_override('hash_ring_reset_interval', 30)
# Initialize the ring manager to make _hash_rings not None, then
# hash ring will refresh only when time interval exceeded.
self.assertRaises(exception.DriverNotFound,
self.ring_manager.__getitem__,
'hardware-type')
self.ring_manager.get_ring,
'hardware-type', '')
self.register_conductors()
self.ring_manager.updated_at = time.time() - 31
self.ring_manager.__getitem__('hardware-type')
self.ring_manager.get_ring('hardware-type', '')
def test_hash_ring_manager_uncached(self):
    # cache=False builds rings on demand and must not populate the
    # class-level _hash_rings cache shared by cached managers.
    ring_mgr = hash_ring.HashRingManager(cache=False,
                                         use_groups=self.use_groups)
    ring = ring_mgr.ring
    self.assertIsNotNone(ring)
    self.assertIsNone(hash_ring.HashRingManager._hash_rings)
class HashRingManagerWithGroupsTestCase(HashRingManagerTestCase):
    """Re-runs the hash ring manager tests with use_groups=True.

    Inherits every test from HashRingManagerTestCase; only the tests
    whose expectations change with group-aware ring keys are
    overridden here.
    """

    def setUp(self):
        super(HashRingManagerWithGroupsTestCase, self).setUp()
        self.ring_manager = hash_ring.HashRingManager(use_groups=True)
        self.use_groups = True

    def test_hash_ring_manager_hardware_type_success(self):
        # With groups enabled, the default ('') group ring contains
        # only the conductors registered without a conductor_group.
        self.register_conductors()
        ring = self.ring_manager.get_ring('hardware-type', '')
        self.assertEqual(sorted(['host1', 'host2']), sorted(ring.nodes))

    def test_hash_ring_manager_hardware_type_success_groups(self):
        # The 'foogroup' ring contains only that group's conductors.
        self.register_conductors()
        ring = self.ring_manager.get_ring('hardware-type', 'foogroup')
        self.assertEqual(sorted(['host3', 'host4']), sorted(ring.nodes))

View File

@ -4198,9 +4198,12 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
self._start_service()
n = db_utils.get_test_node()
self.assertTrue(self.service._mapped_to_this_conductor(
n['uuid'], 'fake-hardware'))
n['uuid'], 'fake-hardware', ''))
self.assertFalse(self.service._mapped_to_this_conductor(
n['uuid'], 'fake-hardware', 'foogroup'))
self.assertFalse(self.service._mapped_to_this_conductor(n['uuid'],
'otherdriver'))
'otherdriver',
''))
@mock.patch.object(images, 'is_whole_disk_image')
def test_validate_dynamic_driver_interfaces(self, mock_iwdi):
@ -4281,8 +4284,9 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
def test_iter_nodes(self, mock_nodeinfo_list, mock_mapped,
mock_fail_if_state):
self._start_service()
self.columns = ['uuid', 'driver', 'id']
nodes = [self._create_node(id=i, driver='fake-hardware')
self.columns = ['uuid', 'driver', 'conductor_group', 'id']
nodes = [self._create_node(id=i, driver='fake-hardware',
conductor_group='')
for i in range(2)]
mock_nodeinfo_list.return_value = self._get_nodeinfo_list_response(
nodes)
@ -4290,7 +4294,7 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
result = list(self.service.iter_nodes(fields=['id'],
filters=mock.sentinel.filters))
self.assertEqual([(nodes[0].uuid, 'fake-hardware', 0)], result)
self.assertEqual([(nodes[0].uuid, 'fake-hardware', '', 0)], result)
mock_nodeinfo_list.assert_called_once_with(
columns=self.columns, filters=mock.sentinel.filters)
expected_calls = [mock.call(mock.ANY, mock.ANY,
@ -4310,7 +4314,7 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list')
def test_iter_nodes_shutdown(self, mock_nodeinfo_list):
self._start_service()
self.columns = ['uuid', 'driver', 'id']
self.columns = ['uuid', 'driver', 'conductor_group', 'id']
nodes = [self._create_node(driver='fake-hardware')]
mock_nodeinfo_list.return_value = self._get_nodeinfo_list_response(
nodes)
@ -4954,7 +4958,7 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test_send_sensor_task(self, acquire_mock):
nodes = queue.Queue()
for i in range(5):
nodes.put_nowait(('fake_uuid-%d' % i, 'fake-hardware', None))
nodes.put_nowait(('fake_uuid-%d' % i, 'fake-hardware', '', None))
self._start_service()
CONF.set_override('send_sensor_data', True, group='conductor')
@ -4971,7 +4975,7 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch.object(task_manager, 'acquire')
def test_send_sensor_task_shutdown(self, acquire_mock):
nodes = queue.Queue()
nodes.put_nowait(('fake_uuid', 'fake-hardware', None))
nodes.put_nowait(('fake_uuid', 'fake-hardware', '', None))
self._start_service()
self.service._shutdown = True
CONF.set_override('send_sensor_data', True, group='conductor')
@ -4981,7 +4985,7 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch.object(task_manager, 'acquire', autospec=True)
def test_send_sensor_task_no_management(self, acquire_mock):
nodes = queue.Queue()
nodes.put_nowait(('fake_uuid', 'fake-hardware', None))
nodes.put_nowait(('fake_uuid', 'fake-hardware', '', None))
CONF.set_override('send_sensor_data', True, group='conductor')
@ -4999,7 +5003,7 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch.object(task_manager, 'acquire', autospec=True)
def test_send_sensor_task_maintenance(self, acquire_mock, debug_log):
nodes = queue.Queue()
nodes.put_nowait(('fake_uuid', 'fake-hardware', None))
nodes.put_nowait(('fake_uuid', 'fake-hardware', '', None))
self._start_service()
CONF.set_override('send_sensor_data', True, group='conductor')
@ -5883,7 +5887,7 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
self.service.dbapi = self.dbapi
self.node = self._create_node()
self.filters = {'maintenance': False}
self.columns = ['uuid', 'driver', 'id']
self.columns = ['uuid', 'driver', 'conductor_group', 'id']
def test_node_not_mapped(self, get_nodeinfo_mock,
mapped_mock, acquire_mock, sync_mock):
@ -5895,7 +5899,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
self.assertFalse(sync_mock.called)
@ -5912,7 +5917,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
@ -5934,7 +5940,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
@ -5955,7 +5962,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
@ -5976,7 +5984,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
@ -5996,7 +6005,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
@ -6014,7 +6024,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
@ -6032,7 +6043,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
@ -6077,7 +6089,7 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.return_value = (
self._get_nodeinfo_list_response(nodes))
mapped_mock.side_effect = lambda x, y: mapped_map[x]
mapped_mock.side_effect = lambda x, y, z: mapped_map[x]
acquire_mock.side_effect = self._get_acquire_side_effect(tasks)
sync_mock.side_effect = sync_results
@ -6089,7 +6101,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_calls = [mock.call(x.uuid, x.driver) for x in nodes]
mapped_calls = [mock.call(x.uuid, x.driver,
x.conductor_group) for x in nodes]
self.assertEqual(mapped_calls, mapped_mock.call_args_list)
acquire_calls = [mock.call(self.context, x.uuid,
purpose=mock.ANY,
@ -6122,7 +6135,7 @@ class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
self.task.driver = self.driver
self.filters = {'maintenance': True,
'fault': 'power failure'}
self.columns = ['uuid', 'driver', 'id']
self.columns = ['uuid', 'driver', 'conductor_group', 'id']
def test_node_not_mapped(self, get_nodeinfo_mock,
mapped_mock, acquire_mock):
@ -6134,7 +6147,8 @@ class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
self.assertFalse(self.power.validate.called)
@ -6151,7 +6165,8 @@ class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
@ -6196,7 +6211,8 @@ class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
@ -6217,7 +6233,8 @@ class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
@ -6242,7 +6259,8 @@ class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY,
shared=True)
@ -6276,7 +6294,7 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self.filters = {'reserved': False, 'maintenance': False,
'provisioned_before': 300,
'provision_state': states.DEPLOYWAIT}
self.columns = ['uuid', 'driver']
self.columns = ['uuid', 'driver', 'conductor_group']
def _assert_get_nodeinfo_args(self, get_nodeinfo_mock):
get_nodeinfo_mock.assert_called_once_with(
@ -6300,7 +6318,8 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
def test_timeout(self, get_nodeinfo_mock, mapped_mock, acquire_mock):
@ -6311,7 +6330,8 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
self.task.process_event.assert_called_with(
@ -6332,7 +6352,7 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(
self.node.uuid, self.node.driver)
self.node.uuid, self.node.driver, self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@ -6350,7 +6370,7 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(
self.node.uuid, self.node.driver)
self.node.uuid, self.node.driver, self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@ -6369,7 +6389,7 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(
self.node.uuid, self.node.driver)
self.node.uuid, self.node.driver, self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@ -6391,8 +6411,10 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
self.assertEqual([mock.call(self.node.uuid, task.node.driver),
mock.call(self.node2.uuid, self.node2.driver)],
self.assertEqual([mock.call(self.node.uuid, task.node.driver,
task.node.conductor_group),
mock.call(self.node2.uuid, self.node2.driver,
self.node2.conductor_group)],
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid,
purpose=mock.ANY),
@ -6424,7 +6446,7 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
# mapped should be only called for the first node as we should
# have exited the loop early due to NoFreeConductorWorker
mapped_mock.assert_called_once_with(
self.node.uuid, self.node.driver)
self.node.uuid, self.node.driver, self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@ -6451,7 +6473,8 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
# mapped should be only called for the first node as we should
# have exited the loop early due to unknown exception
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@ -6477,7 +6500,8 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_deploy_timeouts(self.context)
# Should only have ran 2.
self.assertEqual([mock.call(self.node.uuid, self.node.driver)] * 2,
self.assertEqual([mock.call(self.node.uuid, self.node.driver,
self.node.conductor_group)] * 2,
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid,
purpose=mock.ANY)] * 2,
@ -6602,7 +6626,8 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
self.filters = {'reserved': False,
'maintenance': False,
'provision_state': states.ACTIVE}
self.columns = ['uuid', 'driver', 'id', 'conductor_affinity']
self.columns = ['uuid', 'driver', 'conductor_group', 'id',
'conductor_affinity']
def _assert_get_nodeinfo_args(self, get_nodeinfo_mock):
get_nodeinfo_mock.assert_called_once_with(
@ -6615,7 +6640,8 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
self.service._sync_local_state(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
def test_already_mapped(self, get_nodeinfo_mock, mapped_mock,
@ -6630,7 +6656,8 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
self.service._sync_local_state(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
def test_good(self, get_nodeinfo_mock, mapped_mock, acquire_mock):
@ -6641,7 +6668,8 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
self.service._sync_local_state(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
# assert spawn_after has been called
@ -6670,7 +6698,8 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
# assert _mapped_to_this_conductor() gets called 2 times only
# instead of 3. When NoFreeConductorWorker is raised the loop
# should be broken
expected = [mock.call(self.node.uuid, self.node.driver)] * 2
expected = [mock.call(self.node.uuid, self.node.driver,
self.node.conductor_group)] * 2
self.assertEqual(expected, mapped_mock.call_args_list)
# assert acquire() gets called 2 times only instead of 3. When
@ -6699,7 +6728,8 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
# assert _mapped_to_this_conductor() gets called 3 times
expected = [mock.call(self.node.uuid, self.node.driver)] * 3
expected = [mock.call(self.node.uuid, self.node.driver,
self.node.conductor_group)] * 3
self.assertEqual(expected, mapped_mock.call_args_list)
# assert acquire() gets called 3 times
@ -6730,7 +6760,8 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
# assert _mapped_to_this_conductor() gets called only once
# because of the worker limit
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
self.node.conductor_group)
# assert acquire() gets called only once because of the worker limit
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
@ -6985,7 +7016,7 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self.filters = {'reserved': False,
'inspection_started_before': 300,
'provision_state': states.INSPECTWAIT}
self.columns = ['uuid', 'driver']
self.columns = ['uuid', 'driver', 'conductor_group']
def _assert_get_nodeinfo_args(self, get_nodeinfo_mock):
get_nodeinfo_mock.assert_called_once_with(
@ -7010,7 +7041,8 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
def test__check_inspect_timeout(self, get_nodeinfo_mock,
@ -7022,7 +7054,8 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
self.task.process_event.assert_called_with('fail', target_state=None)
@ -7040,7 +7073,8 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@ -7060,7 +7094,8 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@ -7081,7 +7116,7 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(
self.node.uuid, self.node.driver)
self.node.uuid, self.node.driver, self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@ -7103,8 +7138,10 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
self.assertEqual([mock.call(self.node.uuid, task.node.driver),
mock.call(self.node2.uuid, self.node2.driver)],
self.assertEqual([mock.call(self.node.uuid, task.node.driver,
task.node.conductor_group),
mock.call(self.node2.uuid, self.node2.driver,
self.node2.conductor_group)],
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid,
purpose=mock.ANY),
@ -7131,7 +7168,7 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
# mapped should be only called for the first node as we should
# have exited the loop early due to NoFreeConductorWorker
mapped_mock.assert_called_once_with(
self.node.uuid, self.node.driver)
self.node.uuid, self.node.driver, self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@ -7154,7 +7191,7 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
# mapped should be only called for the first node as we should
# have exited the loop early due to unknown exception
mapped_mock.assert_called_once_with(
self.node.uuid, self.node.driver)
self.node.uuid, self.node.driver, self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@ -7176,7 +7213,8 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_inspect_wait_timeouts(self.context)
# Should only have ran 2.
self.assertEqual([mock.call(self.node.uuid, self.node.driver)] * 2,
self.assertEqual([mock.call(self.node.uuid, self.node.driver,
self.node.conductor_group)] * 2,
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid,
purpose=mock.ANY)] * 2,
@ -7262,7 +7300,8 @@ class ManagerCheckOrphanNodesTestCase(mgr_utils.ServiceSetUpMixin,
self.node.refresh()
mock_off_cond.assert_called_once_with()
mock_mapped.assert_called_once_with(self.node.uuid, 'fake-hardware')
mock_mapped.assert_called_once_with(self.node.uuid, 'fake-hardware',
'')
mock_fail_if.assert_called_once_with(
mock.ANY, {'uuid': self.node.uuid},
{states.DEPLOYING, states.CLEANING},
@ -7284,7 +7323,8 @@ class ManagerCheckOrphanNodesTestCase(mgr_utils.ServiceSetUpMixin,
self.node.refresh()
mock_off_cond.assert_called_once_with()
mock_mapped.assert_called_once_with(self.node.uuid, 'fake-hardware')
mock_mapped.assert_called_once_with(self.node.uuid, 'fake-hardware',
'')
mock_fail_if.assert_called_once_with(
mock.ANY, {'uuid': self.node.uuid},
{states.DEPLOYING, states.CLEANING},
@ -7327,8 +7367,8 @@ class ManagerCheckOrphanNodesTestCase(mgr_utils.ServiceSetUpMixin,
self.node.refresh()
mock_off_cond.assert_called_once_with()
expected_calls = [mock.call(self.node.uuid, 'fake-hardware'),
mock.call(node2.uuid, 'fake-hardware')]
expected_calls = [mock.call(self.node.uuid, 'fake-hardware', ''),
mock.call(node2.uuid, 'fake-hardware', '')]
mock_mapped.assert_has_calls(expected_calls)
# Assert we skipped and didn't try to call _fail_if_in_state
self.assertFalse(mock_fail_if.called)
@ -7355,7 +7395,8 @@ class ManagerCheckOrphanNodesTestCase(mgr_utils.ServiceSetUpMixin,
self.node.id)
mock_off_cond.assert_called_once_with()
mock_mapped.assert_called_once_with(self.node.uuid, 'fake-hardware')
mock_mapped.assert_called_once_with(self.node.uuid, 'fake-hardware',
'')
mock_fail_if.assert_called_once_with(
mock.ANY, {'uuid': self.node.uuid},
{states.DEPLOYING, states.CLEANING},
@ -7373,7 +7414,8 @@ class ManagerCheckOrphanNodesTestCase(mgr_utils.ServiceSetUpMixin,
self.node.refresh()
mock_off_cond.assert_called_once_with()
mock_mapped.assert_called_once_with(self.node.uuid, 'fake-hardware')
mock_mapped.assert_called_once_with(self.node.uuid, 'fake-hardware',
'')
# assert node was released
self.assertIsNone(self.node.reservation)
# not changing states in maintenance

View File

@ -214,6 +214,21 @@ class DbConductorTestCase(base.DbTestCase):
result = self.dbapi.get_active_hardware_type_dict()
self.assertEqual(expected, result)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_hardware_type_dict_one_host_one_ht_groups(
self, mock_utcnow):
h = 'fake-host'
ht = 'hardware-type'
group = 'foogroup'
key = '%s:%s' % (group, ht)
expected = {key: {h}}
mock_utcnow.return_value = datetime.datetime.utcnow()
self._create_test_cdr(hostname=h, drivers=[], hardware_types=[ht],
conductor_group=group)
result = self.dbapi.get_active_hardware_type_dict(use_groups=True)
self.assertEqual(expected, result)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_hardware_type_dict_one_host_many_ht(self, mock_utcnow):
h = 'fake-host'

View File

@ -66,7 +66,7 @@ class DracPeriodicTaskTestCase(db_base.DbTestCase):
self.node.save()
# mock manager
mock_manager = mock.Mock()
node_list = [(self.node.uuid, 'idrac',
node_list = [(self.node.uuid, 'idrac', '',
{'raid_config_job_ids': ['42']})]
mock_manager.iter_nodes.return_value = node_list
# mock task_manager.acquire
@ -85,7 +85,7 @@ class DracPeriodicTaskTestCase(db_base.DbTestCase):
def test__query_raid_config_job_status_no_config_jobs(self, mock_acquire):
# mock manager
mock_manager = mock.Mock()
node_list = [(self.node.uuid, 'idrac', {})]
node_list = [(self.node.uuid, 'idrac', '', {})]
mock_manager.iter_nodes.return_value = node_list
# mock task_manager.acquire
task = mock.Mock(node=self.node, driver=mock.Mock(raid=self.raid))

View File

@ -68,7 +68,7 @@ class iRMCPeriodicTaskTestCase(test_common.BaseIRMCTest):
task = mock.Mock(node=self.node, driver=self.driver)
mock_acquire.return_value = mock.MagicMock(
__enter__=mock.MagicMock(return_value=task))
node_list = [(self.node.uuid, 'irmc', raid_config)]
node_list = [(self.node.uuid, 'irmc', '', raid_config)]
mock_manager.iter_nodes.return_value = node_list
task.driver.raid = noop.NoRAID()
raid_object = irmc_raid.IRMCRAID()
@ -85,7 +85,7 @@ class iRMCPeriodicTaskTestCase(test_common.BaseIRMCTest):
task = mock.Mock(node=self.node, driver=self.driver)
mock_acquire.return_value = mock.MagicMock(
__enter__=mock.MagicMock(return_value=task))
node_list = [(self.node.uuid, 'irmc', raid_config)]
node_list = [(self.node.uuid, 'irmc', '', raid_config)]
mock_manager.iter_nodes.return_value = node_list
# Set none target_raid_config input
task.node.target_raid_config = None
@ -103,7 +103,7 @@ class iRMCPeriodicTaskTestCase(test_common.BaseIRMCTest):
task = mock.Mock(node=self.node, driver=self.driver)
mock_acquire.return_value = mock.MagicMock(
__enter__=mock.MagicMock(return_value=task))
node_list = [(self.node.uuid, 'irmc', raid_config)]
node_list = [(self.node.uuid, 'irmc', '', raid_config)]
mock_manager.iter_nodes.return_value = node_list
task.driver.raid._query_raid_config_fgi_status(mock_manager,
self.context)
@ -125,7 +125,7 @@ class iRMCPeriodicTaskTestCase(test_common.BaseIRMCTest):
task = mock.Mock(node=self.node, driver=self.driver)
mock_acquire.return_value = mock.MagicMock(
__enter__=mock.MagicMock(return_value=task))
node_list = [(self.node.uuid, 'irmc', raid_config)]
node_list = [(self.node.uuid, 'irmc', '', raid_config)]
mock_manager.iter_nodes.return_value = node_list
task.driver.raid._query_raid_config_fgi_status(mock_manager,
self.context)
@ -140,7 +140,7 @@ class iRMCPeriodicTaskTestCase(test_common.BaseIRMCTest):
task = mock.Mock(node=self.node, driver=self.driver)
mock_acquire.return_value = mock.MagicMock(
__enter__=mock.MagicMock(return_value=task))
node_list = [(self.node.uuid, 'irmc', raid_config)]
node_list = [(self.node.uuid, 'irmc', '', raid_config)]
mock_manager.iter_nodes.return_value = node_list
# Set provision state value
task.node.provision_state = 'cleaning'
@ -157,7 +157,7 @@ class iRMCPeriodicTaskTestCase(test_common.BaseIRMCTest):
self, mock_acquire, report_mock, fgi_mock, clean_fail_mock):
mock_manager = mock.Mock()
fgi_mock.return_value = 'completing'
node_list = [(self.node.uuid, 'irmc', self.raid_config)]
node_list = [(self.node.uuid, 'irmc', '', self.raid_config)]
mock_manager.iter_nodes.return_value = node_list
task = mock.Mock(node=self.node, driver=self.driver)
mock_acquire.return_value = mock.MagicMock(
@ -188,7 +188,7 @@ class iRMCPeriodicTaskTestCase(test_common.BaseIRMCTest):
task = mock.Mock(node=self.node, driver=self.driver)
mock_acquire.return_value = mock.MagicMock(
__enter__=mock.MagicMock(return_value=task))
node_list = [(self.node.uuid, 'irmc', raid_config)]
node_list = [(self.node.uuid, 'irmc', '', raid_config)]
mock_manager.iter_nodes.return_value = node_list
# Set provision state value
task.node.provision_state = 'clean wait'
@ -216,7 +216,7 @@ class iRMCPeriodicTaskTestCase(test_common.BaseIRMCTest):
task = mock.Mock(node=self.node, driver=self.driver)
mock_acquire.return_value = mock.MagicMock(
__enter__=mock.MagicMock(return_value=task))
node_list = [(self.node.uuid, 'irmc', raid_config)]
node_list = [(self.node.uuid, 'irmc', '', raid_config)]
mock_manager.iter_nodes.return_value = node_list
# Set provision state value
task.node.provision_state = 'clean wait'
@ -245,8 +245,8 @@ class iRMCPeriodicTaskTestCase(test_common.BaseIRMCTest):
task = mock.Mock(node=self.node, driver=self.driver)
mock_acquire.return_value = mock.MagicMock(
__enter__=mock.MagicMock(return_value=task))
node_list = [(self.node_2.uuid, 'irmc', raid_config_2),
(self.node.uuid, 'irmc', raid_config)]
node_list = [(self.node_2.uuid, 'irmc', '', raid_config_2),
(self.node.uuid, 'irmc', '', raid_config)]
mock_manager.iter_nodes.return_value = node_list
# Set provision state value
task.node.provision_state = 'clean wait'
@ -273,8 +273,8 @@ class iRMCPeriodicTaskTestCase(test_common.BaseIRMCTest):
raid_config_2 = self.raid_config.copy()
fgi_status_dict = {}
fgi_mock.side_effect = [{}, {'0': 'Idle', '1': 'Idle'}]
node_list = [(self.node_2.uuid, 'fake-hardware', raid_config_2),
(self.node.uuid, 'irmc', raid_config)]
node_list = [(self.node_2.uuid, 'fake-hardware', '', raid_config_2),
(self.node.uuid, 'irmc', '', raid_config)]
mock_manager.iter_nodes.return_value = node_list
task = mock.Mock(node=self.node_2, driver=self.driver)
mock_acquire.return_value = mock.MagicMock(

View File

@ -33,10 +33,10 @@ oneview_error = common.SERVER_HARDWARE_ALLOCATION_ERROR
maintenance_reason = common.NODE_IN_USE_BY_ONEVIEW
driver_internal_info = {'oneview_error': oneview_error}
nodes_taken_by_oneview = [(1, 'oneview')]
nodes_freed_by_oneview = [(1, 'oneview', maintenance_reason)]
nodes_taken_on_cleanfail = [(1, 'oneview', driver_internal_info)]
nodes_taken_on_cleanfail_no_info = [(1, 'oneview', {})]
nodes_taken_by_oneview = [(1, 'oneview', '')]
nodes_freed_by_oneview = [(1, 'oneview', '', maintenance_reason)]
nodes_taken_on_cleanfail = [(1, 'oneview', '', driver_internal_info)]
nodes_taken_on_cleanfail_no_info = [(1, 'oneview', '', {})]
GET_POWER_STATE_RETRIES = 5