ironic: partition compute services by conductor group

This uses ironic's conductor group feature to limit the subset of nodes
which a nova-compute service will manage. This allows nova-compute
services to be partitioned by physical location (building, aisle, rack,
etc.), and gives operators a way to manage the failure domain of a
given nova-compute service.

This adds two config options to the [ironic] config group:
* partition_key: the key to match with the conductor_group property on
  ironic nodes to limit the subset of nodes which can be managed.
* peer_list: a list of other compute service hostnames which manage the
  same partition_key, used when building the hash ring.
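
As a minimal illustration (the hostnames and group name below are made up,
not part of this change), a nova-compute service pinned to a "rack1"
conductor group could carry something like this in nova.conf; note that
peer_list must also include this service's own CONF.host value, since the
_check_peer_list() helper added below raises InvalidPeerList when the
local host is missing from the list:

    [ironic]
    partition_key = rack1
    peer_list = compute-rack1-0,compute-rack1-1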

Change-Id: I1b184ff37948dc403fe38874613cd4d870c644fd
Implements: blueprint ironic-conductor-groups
Author: Jim Rollenhagen
Date: 2019-02-05 13:38:20 -05:00
Parent: 5283b464b5
Commit: 2728dec51a
7 changed files with 240 additions and 14 deletions


@@ -79,6 +79,28 @@ Related options:
min=0,
help='Timeout (seconds) to wait for node serial console state '
'changed. Set to 0 to disable timeout.'),
cfg.StrOpt(
'partition_key',
default=None,
mutable=True,
max_length=255,
regex=r'^[a-zA-Z0-9_.-]*$',
help='Case-insensitive key to limit the set of nodes that may be '
'managed by this service to the set of nodes in Ironic which '
'have a matching conductor_group property. If unset, all '
'available nodes will be eligible to be managed by this '
'service. Note that setting this to the empty string (``""``) '
'will match the default conductor group, and is different than '
'leaving the option unset.'),
cfg.ListOpt(
'peer_list',
default=[],
mutable=True,
help='List of hostnames for other nova-compute services with the same '
'partition_key config value. Nodes matching the partition_key '
'value will be distributed between this service and the other '
'services specified here. If partition_key is unset, this option '
'is ignored.'),
]
deprecated_opts = {


@@ -2314,6 +2314,11 @@ class VirtDriverNotReady(NovaException):
msg_fmt = _("Virt driver is not ready.")
class InvalidPeerList(NovaException):
msg_fmt = _("Configured nova-compute peer list for the ironic virt "
"driver is invalid on host %(host)s")
class InstanceDiskMappingFailed(NovaException):
msg_fmt = _("Failed to map boot disk of instance %(instance_name)s to "
"the management partition from any Virtual I/O Server.")


@@ -81,12 +81,12 @@ class IronicClientWrapperTestCase(test.NoDBTestCase):
# nova.utils.get_ksa_adapter().get_endpoint()
self.get_ksa_adapter.assert_called_once_with(
'baremetal', ksa_auth=self.get_auth_plugin.return_value,
ksa_session='session', min_version=(1, 38),
ksa_session='session', min_version=(1, 46),
max_version=(1, ksa_disc.LATEST))
expected = {'session': 'session',
'max_retries': CONF.ironic.api_max_retries,
'retry_interval': CONF.ironic.api_retry_interval,
'os_ironic_api_version': ['1.38', '1.37'],
'os_ironic_api_version': ['1.46', '1.37'],
'endpoint':
self.get_ksa_adapter.return_value.get_endpoint.return_value}
mock_ir_cli.assert_called_once_with(1, **expected)
@@ -106,13 +106,13 @@ class IronicClientWrapperTestCase(test.NoDBTestCase):
# nova.utils.get_endpoint_data
self.get_ksa_adapter.assert_called_once_with(
'baremetal', ksa_auth=self.get_auth_plugin.return_value,
ksa_session='session', min_version=(1, 38),
ksa_session='session', min_version=(1, 46),
max_version=(1, ksa_disc.LATEST))
# When get_endpoint_data raises any ServiceNotFound, None is returned.
expected = {'session': 'session',
'max_retries': CONF.ironic.api_max_retries,
'retry_interval': CONF.ironic.api_retry_interval,
'os_ironic_api_version': ['1.38', '1.37'],
'os_ironic_api_version': ['1.46', '1.37'],
'endpoint': None}
mock_ir_cli.assert_called_once_with(1, **expected)
@@ -130,7 +130,7 @@ class IronicClientWrapperTestCase(test.NoDBTestCase):
expected = {'session': 'session',
'max_retries': CONF.ironic.api_max_retries,
'retry_interval': CONF.ironic.api_retry_interval,
'os_ironic_api_version': ['1.38', '1.37'],
'os_ironic_api_version': ['1.46', '1.37'],
'endpoint': endpoint}
mock_ir_cli.assert_called_once_with(1, **expected)


@@ -3254,9 +3254,11 @@ class HashRingTestCase(test.NoDBTestCase):
@mock.patch.object(hash_ring, 'HashRing')
@mock.patch.object(objects.ServiceList, 'get_all_computes_by_hv_type')
def _test__refresh_hash_ring(self, services, expected_hosts, mock_services,
mock_hash_ring):
mock_hash_ring, uncalled=None):
uncalled = uncalled or []
services = [_make_compute_service(host) for host in services]
is_up_calls = [mock.call(svc) for svc in services]
is_up_calls = [mock.call(svc) for svc in services
if svc.host not in uncalled]
self.flags(host='host1')
mock_services.return_value = services
mock_hash_ring.return_value = SENTINEL
@@ -3299,6 +3301,66 @@
self.mock_is_up.side_effect = [True, True, False, True]
self._test__refresh_hash_ring(services, expected_hosts)
@mock.patch.object(ironic_driver.IronicDriver, '_can_send_version')
def test__refresh_hash_ring_peer_list(self, mock_can_send):
services = ['host1', 'host2', 'host3']
expected_hosts = {'host1', 'host2'}
self.mock_is_up.return_value = True
self.flags(partition_key='not-none', group='ironic')
self.flags(peer_list=['host1', 'host2'], group='ironic')
self._test__refresh_hash_ring(services, expected_hosts,
uncalled=['host3'])
mock_can_send.assert_called_once_with(min_version='1.46')
@mock.patch.object(ironic_driver.IronicDriver, '_can_send_version')
def test__refresh_hash_ring_peer_list_old_api(self, mock_can_send):
mock_can_send.side_effect = (
exception.IronicAPIVersionNotAvailable(version='1.46'))
services = ['host1', 'host2', 'host3']
expected_hosts = {'host1', 'host2', 'host3'}
self.mock_is_up.return_value = True
self.flags(partition_key='not-none', group='ironic')
self.flags(peer_list=['host1', 'host2'], group='ironic')
self._test__refresh_hash_ring(services, expected_hosts,
uncalled=['host3'])
mock_can_send.assert_called_once_with(min_version='1.46')
@mock.patch.object(ironic_driver, 'LOG', autospec=True)
def test__check_peer_list(self, mock_log):
self.flags(host='host1')
self.flags(partition_key='not-none', group='ironic')
self.flags(peer_list=['host1', 'host2'], group='ironic')
ironic_driver._check_peer_list()
# happy path, nothing happens
self.assertFalse(mock_log.error.called)
self.assertFalse(mock_log.warning.called)
@mock.patch.object(ironic_driver, 'LOG', autospec=True)
def test__check_peer_list_empty(self, mock_log):
self.flags(host='host1')
self.flags(partition_key='not-none', group='ironic')
self.flags(peer_list=[], group='ironic')
self.assertRaises(exception.InvalidPeerList,
ironic_driver._check_peer_list)
self.assertTrue(mock_log.error.called)
@mock.patch.object(ironic_driver, 'LOG', autospec=True)
def test__check_peer_list_missing_self(self, mock_log):
self.flags(host='host1')
self.flags(partition_key='not-none', group='ironic')
self.flags(peer_list=['host2'], group='ironic')
self.assertRaises(exception.InvalidPeerList,
ironic_driver._check_peer_list)
self.assertTrue(mock_log.error.called)
@mock.patch.object(ironic_driver, 'LOG', autospec=True)
def test__check_peer_list_only_self(self, mock_log):
self.flags(host='host1')
self.flags(partition_key='not-none', group='ironic')
self.flags(peer_list=['host1'], group='ironic')
ironic_driver._check_peer_list()
self.assertTrue(mock_log.warning.called)
class NodeCacheTestCase(test.NoDBTestCase):
@@ -3314,24 +3376,34 @@
self.host = 'host1'
self.flags(host=self.host)
@mock.patch.object(ironic_driver.IronicDriver, '_can_send_version')
@mock.patch.object(ironic_driver.IronicDriver, '_refresh_hash_ring')
@mock.patch.object(hash_ring.HashRing, 'get_nodes')
@mock.patch.object(ironic_driver.IronicDriver, '_get_node_list')
@mock.patch.object(objects.InstanceList, 'get_uuids_by_host')
def _test__refresh_cache(self, instances, nodes, hosts, mock_instances,
mock_nodes, mock_hosts, mock_hash_ring):
mock_nodes, mock_hosts, mock_hash_ring,
mock_can_send, partition_key=None,
can_send_146=True):
mock_instances.return_value = instances
mock_nodes.return_value = nodes
mock_hosts.side_effect = hosts
if not can_send_146:
mock_can_send.side_effect = (
exception.IronicAPIVersionNotAvailable(version='1.46'))
self.driver.node_cache = {}
self.driver.node_cache_time = None
kwargs = {}
if partition_key is not None and can_send_146:
kwargs['conductor_group'] = partition_key
self.driver._refresh_cache()
mock_hash_ring.assert_called_once_with(mock.ANY)
mock_instances.assert_called_once_with(mock.ANY, self.host)
mock_nodes.assert_called_once_with(fields=ironic_driver._NODE_FIELDS,
limit=0)
limit=0, **kwargs)
self.assertIsNotNone(self.driver.node_cache_time)
def test__refresh_cache(self):
@@ -3352,6 +3424,49 @@
expected_cache = {n.uuid: n for n in nodes}
self.assertEqual(expected_cache, self.driver.node_cache)
def test__refresh_cache_partition_key(self):
# normal operation, one compute service
instances = []
nodes = [
_get_cached_node(
uuid=uuidutils.generate_uuid(), instance_uuid=None),
_get_cached_node(
uuid=uuidutils.generate_uuid(), instance_uuid=None),
_get_cached_node(
uuid=uuidutils.generate_uuid(), instance_uuid=None),
]
hosts = [self.host, self.host, self.host]
partition_key = 'some-group'
self.flags(partition_key=partition_key, group='ironic')
self._test__refresh_cache(instances, nodes, hosts,
partition_key=partition_key)
expected_cache = {n.uuid: n for n in nodes}
self.assertEqual(expected_cache, self.driver.node_cache)
def test__refresh_cache_partition_key_old_api(self):
# normal operation, one compute service
instances = []
nodes = [
_get_cached_node(
uuid=uuidutils.generate_uuid(), instance_uuid=None),
_get_cached_node(
uuid=uuidutils.generate_uuid(), instance_uuid=None),
_get_cached_node(
uuid=uuidutils.generate_uuid(), instance_uuid=None),
]
hosts = [self.host, self.host, self.host]
partition_key = 'some-group'
self.flags(partition_key=partition_key, group='ironic')
self._test__refresh_cache(instances, nodes, hosts,
partition_key=partition_key,
can_send_146=False)
expected_cache = {n.uuid: n for n in nodes}
self.assertEqual(expected_cache, self.driver.node_cache)
def test__refresh_cache_multiple_services(self):
# normal operation, many compute services
instances = []
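
The tests above only assert which hosts end up in the hash ring; as a rough
standalone sketch of the underlying idea (this toy consistent hash is for
illustration only and is not the hash-ring implementation nova uses, and the
node ids are made up), each node key maps to whichever in-ring host owns the
nearest position, so with peer_list limited to host1 and host2 every node
lands on one of those two:

    import bisect
    import hashlib


    def _position(key):
        # Place a key on a 0..2^32 ring using a stable hash.
        return int(hashlib.md5(key.encode()).hexdigest(), 16) % (2 ** 32)


    def build_ring(hosts, replicas=32):
        # Give each host several positions so load spreads more evenly.
        return sorted((_position('%s-%d' % (host, i)), host)
                      for host in hosts for i in range(replicas))


    def host_for_node(ring, node_uuid):
        positions = [pos for pos, _ in ring]
        idx = bisect.bisect(positions, _position(node_uuid)) % len(ring)
        return ring[idx][1]


    # Mirrors test__refresh_hash_ring_peer_list: host3 never joins the ring.
    ring = build_ring(['host1', 'host2'])
    for node in ['node-a', 'node-b', 'node-c']:  # made-up node ids
        print(node, '->', host_for_node(ring, node))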


@@ -32,7 +32,7 @@ ironic = None
IRONIC_GROUP = nova.conf.ironic.ironic_group
# The API version required by the Ironic driver
IRONIC_API_VERSION = (1, 38)
IRONIC_API_VERSION = (1, 46)
# NOTE(TheJulia): This version should ALWAYS be the _last_ release
# supported version of the API version used by nova. If a feature
# needs 1.38 to be negotiated to operate properly, then the version
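
For context (not visible in the truncated comment above): Ironic API
microversion 1.46 is the version that added the conductor_group field to
nodes and the ability to filter node listings by it, which is why the
driver's required minimum moves from 1.38 to 1.46 in this change.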


@@ -131,6 +131,27 @@ def _log_ironic_polling(what, node, instance):
instance=instance)
def _check_peer_list():
# these configs are mutable; need to check at runtime and init
if CONF.ironic.partition_key is not None:
peer_list = set(CONF.ironic.peer_list)
if not peer_list:
LOG.error('FATAL: Peer list is not configured in the '
'[ironic]/peer_list option; cannot map '
'ironic nodes to compute services.')
raise exception.InvalidPeerList(host=CONF.host)
if CONF.host not in peer_list:
LOG.error('FATAL: Peer list does not contain this '
'compute service hostname (%s); add it to '
'the [ironic]/peer_list option.', CONF.host)
raise exception.InvalidPeerList(host=CONF.host)
if set([CONF.host]) == peer_list:
LOG.warning('This compute service (%s) is the only service '
'present in the [ironic]/peer_list option. '
'Are you sure this should not include more '
'hosts?', CONF.host)
class IronicDriver(virt_driver.ComputeDriver):
"""Hypervisor driver for Ironic - bare metal provisioning."""
@@ -667,10 +688,37 @@ class IronicDriver(virt_driver.ComputeDriver):
return False
def _refresh_hash_ring(self, ctxt):
peer_list = None
# NOTE(jroll) if this is set, we need to limit the set of other
# compute services in the hash ring to hosts that are currently up
# and specified in the peer_list config option, as there's no way
# to check which partition_key other compute services are using.
if CONF.ironic.partition_key is not None:
try:
# NOTE(jroll) first we need to make sure the Ironic API can
# filter by conductor_group. If it cannot, limiting to
# peer_list could end up with a node being managed by multiple
# compute services.
self._can_send_version(min_version='1.46')
peer_list = set(CONF.ironic.peer_list)
# these configs are mutable; need to check at runtime and init.
# luckily, we run this method from init_host.
_check_peer_list()
LOG.debug('Limiting peer list to %s', peer_list)
except exception.IronicAPIVersionNotAvailable:
pass
# TODO(jroll) optimize this to limit to the peer_list
service_list = objects.ServiceList.get_all_computes_by_hv_type(
ctxt, self._get_hypervisor_type())
services = set()
for svc in service_list:
# NOTE(jroll) if peer_list is None, we aren't partitioning by
# conductor group, so we check all compute services for liveness.
# if we have a peer_list, don't check liveness for compute
# services that aren't in the list.
if peer_list is None or svc.host in peer_list:
is_up = self.servicegroup_api.service_is_up(svc)
if is_up:
services.add(svc.host)
@@ -690,7 +738,31 @@ class IronicDriver(virt_driver.ComputeDriver):
instances = objects.InstanceList.get_uuids_by_host(ctxt, CONF.host)
node_cache = {}
for node in self._get_node_list(fields=_NODE_FIELDS, limit=0):
def _get_node_list(**kwargs):
return self._get_node_list(fields=_NODE_FIELDS, limit=0, **kwargs)
# NOTE(jroll) if partition_key is set, we need to limit nodes that
# can be managed to nodes that have a matching conductor_group
# attribute. If the API isn't new enough to support conductor groups,
# we fall back to managing all nodes. If it is new enough, we can
# filter it in the API.
partition_key = CONF.ironic.partition_key
if partition_key is not None:
try:
self._can_send_version(min_version='1.46')
nodes = _get_node_list(conductor_group=partition_key)
LOG.debug('Limiting manageable ironic nodes to conductor '
'group %s', partition_key)
except exception.IronicAPIVersionNotAvailable:
LOG.error('Required Ironic API version 1.46 is not '
'available to filter nodes by conductor group. '
'All nodes will be eligible to be managed by '
'this compute service.')
nodes = _get_node_list()
else:
nodes = _get_node_list()
for node in nodes:
# NOTE(jroll): we always manage the nodes for instances we manage
if node.instance_uuid in instances:
node_cache[node.uuid] = node
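
Pulling the branches above together, the effective host-selection rule of
_refresh_hash_ring can be distilled into a small standalone function (a
sketch for illustration only; plain hostnames and an is_up callable stand
in for the nova service objects and servicegroup API used in the real
code):

    def hosts_for_ring(partition_key, peer_list, api_supports_146,
                       service_hosts, is_up):
        """Return the set of compute hostnames that join the hash ring."""
        peers = None
        if partition_key is not None and api_supports_146:
            # Only trust the configured peers when the Ironic API can
            # filter nodes by conductor_group; otherwise fall back to
            # considering every compute service.
            peers = set(peer_list)
        hosts = set()
        for host in service_hosts:
            if peers is None or host in peers:
                if is_up(host):
                    hosts.add(host)
        return hosts


    # Mirrors test__refresh_hash_ring_peer_list: host3 is skipped entirely.
    print(hosts_for_ring('rack1', ['host1', 'host2'], True,
                         ['host1', 'host2', 'host3'], lambda host: True))
    # {'host1', 'host2'}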


@@ -0,0 +1,12 @@
---
features:
- |
In deployments with Ironic, compute services can now manage a subset of
Ironic nodes. If the ``[ironic]/partition_key`` configuration option is
set, the compute service will only consider nodes with a matching
``conductor_group`` attribute for management. Setting the
``[ironic]/peer_list`` configuration option allows this subset of nodes
to be distributed among the specified compute services, further reducing
the failure domain. This feature is useful for co-locating nova-compute
services with the ironic-conductor services managing the same nodes, or
for better controlling the failure domain of a given compute service.
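
For completeness (not part of this change): the ``conductor_group``
attribute being matched here is set on the Ironic side, for example with a
sufficiently new baremetal client via something along the lines of
``openstack baremetal node set --conductor-group rack1 <node-uuid>``, and
``[ironic]/partition_key`` must be set to the same value for those nodes
to be considered by this compute service.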