Browse Source

Merge "Bump compute rpcapi version and reduce Cyborg calls."

tags/21.0.0.0rc1
Zuul 3 months ago
committed by Gerrit Code Review
parent
commit
66965e48e1
7 changed files with 191 additions and 87 deletions
  1. +22
    -14
      nova/compute/manager.py
  2. +8
    -2
      nova/compute/rpcapi.py
  3. +9
    -5
      nova/conductor/manager.py
  4. +4
    -1
      nova/objects/service.py
  5. +99
    -57
      nova/tests/unit/compute/test_compute_mgr.py
  6. +36
    -1
      nova/tests/unit/compute/test_rpcapi.py
  7. +13
    -7
      nova/tests/unit/conductor/test_conductor.py

+ 22
- 14
nova/compute/manager.py View File

@@ -559,7 +559,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""

-    target = messaging.Target(version='5.10')
+    target = messaging.Target(version='5.11')

def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -2075,7 +2075,7 @@ class ComputeManager(manager.Manager):
filter_properties, admin_password=None,
injected_files=None, requested_networks=None,
security_groups=None, block_device_mapping=None,
-            node=None, limits=None, host_list=None):
+            node=None, limits=None, host_list=None, accel_uuids=None):

@utils.synchronized(instance.uuid)
def _locked_do_build_and_run_instance(*args, **kwargs):
@@ -2127,7 +2127,8 @@ class ComputeManager(manager.Manager):
context, instance, image, request_spec,
filter_properties, admin_password, injected_files,
requested_networks, security_groups,
-                block_device_mapping, node, limits, host_list)
+                block_device_mapping, node, limits, host_list,
+                accel_uuids)

def _check_device_tagging(self, requested_networks, block_device_mapping):
tagging_requested = False
@@ -2164,7 +2165,7 @@ class ComputeManager(manager.Manager):
def _do_build_and_run_instance(self, context, instance, image,
request_spec, filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping,
-            node=None, limits=None, host_list=None):
+            node=None, limits=None, host_list=None, accel_uuids=None):

try:
LOG.debug('Starting instance...', instance=instance)
@@ -2194,7 +2195,7 @@ class ComputeManager(manager.Manager):
self._build_and_run_instance(context, instance, image,
decoded_files, admin_password, requested_networks,
security_groups, block_device_mapping, node, limits,
-                    filter_properties, request_spec)
+                    filter_properties, request_spec, accel_uuids)
LOG.info('Took %0.2f seconds to build instance.',
timer.elapsed(), instance=instance)
return build_results.ACTIVE
@@ -2304,7 +2305,7 @@ class ComputeManager(manager.Manager):
def _build_and_run_instance(self, context, instance, image, injected_files,
admin_password, requested_networks, security_groups,
block_device_mapping, node, limits, filter_properties,
-            request_spec=None):
+            request_spec=None, accel_uuids=None):

image_name = image.get('name')
self._notify_about_instance_usage(context, instance, 'create.start',
@@ -2353,7 +2354,8 @@ class ComputeManager(manager.Manager):

with self._build_resources(context, instance,
requested_networks, security_groups, image_meta,
-                    block_device_mapping, provider_mapping) as resources:
+                    block_device_mapping, provider_mapping,
+                    accel_uuids) as resources:
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
# NOTE(JoshNang) This also saves the changes to the
@@ -2526,7 +2528,7 @@ class ComputeManager(manager.Manager):
@contextlib.contextmanager
def _build_resources(self, context, instance, requested_networks,
security_groups, image_meta, block_device_mapping,
-            resource_provider_mapping):
+            resource_provider_mapping, accel_uuids):
resources = {}
network_info = None
try:
@@ -2596,7 +2598,7 @@ class ComputeManager(manager.Manager):
try:
if dp_name:
arqs = self._get_bound_arq_resources(
-                        context, dp_name, instance)
+                        context, dp_name, instance, accel_uuids)
except (Exception, eventlet.timeout.Timeout) as exc:
LOG.exception(exc.format_message())
self._build_resources_cleanup(instance, network_info)
@@ -2639,7 +2641,7 @@ class ComputeManager(manager.Manager):
# Call Cyborg to delete accelerator requests
compute_utils.delete_arqs_if_needed(context, instance)

-    def _get_bound_arq_resources(self, context, dp_name, instance):
+    def _get_bound_arq_resources(self, context, dp_name, instance, arq_uuids):
"""Get bound accelerator requests.

The ARQ binding was kicked off in the conductor as an async
@@ -2653,13 +2655,18 @@ class ComputeManager(manager.Manager):

:param dp_name: Device profile name. Caller ensures this is valid.
:param instance: instance object
+        :param arq_uuids: List of accelerator request (ARQ) UUIDs.
:returns: List of ARQs for which bindings have completed,
successfully or otherwise
"""

cyclient = cyborg.get_client(context)
-        arqs = cyclient.get_arqs_for_instance(instance.uuid)
-        events = [('accelerator-request-bound', arq['uuid']) for arq in arqs]
+        if arq_uuids is None:
+            arqs = cyclient.get_arqs_for_instance(instance.uuid)
+            arq_uuids = [arq['uuid'] for arq in arqs]
+        events = [('accelerator-request-bound', arq_uuid)
+                  for arq_uuid in arq_uuids]

timeout = CONF.arq_binding_timeout
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout):
@@ -2674,11 +2681,12 @@ class ComputeManager(manager.Manager):

# Since a timeout in wait_for_instance_event will raise, we get
# here only if all binding events have been received.
-        if sorted(resolved_arqs) != sorted(arqs):
+        resolved_uuids = [arq['uuid'] for arq in resolved_arqs]
+        if sorted(resolved_uuids) != sorted(arq_uuids):
# Query Cyborg to get all.
arqs = cyclient.get_arqs_for_instance(instance.uuid)
else:
-            arqs = resolved_arqs  # latter has the right arq.state
+            arqs = resolved_arqs
return arqs

def _cleanup_allocated_networks(self, context, instance,


+ 8
- 2
nova/compute/rpcapi.py View File

@@ -376,6 +376,8 @@ class ComputeAPI(object):
* 5.8 - Add confirm_snapshot_based_resize_at_source()
* 5.9 - Add revert_snapshot_based_resize_at_dest()
* 5.10 - Add finish_revert_snapshot_based_resize_at_source()
+        * 5.11 - Add accel_uuids (accelerator requests) parameter to
+                 build_and_run_instance()
'''

VERSION_ALIASES = {
@@ -1418,7 +1420,7 @@ class ComputeAPI(object):
filter_properties, admin_password=None, injected_files=None,
requested_networks=None, security_groups=None,
block_device_mapping=None, node=None, limits=None,
-            host_list=None):
+            host_list=None, accel_uuids=None):
# NOTE(edleafe): compute nodes can only use the dict form of limits.
if isinstance(limits, objects.SchedulerLimits):
limits = limits.to_dict()
@@ -1434,9 +1436,13 @@ class ComputeAPI(object):
"node": node,
"limits": limits,
"host_list": host_list,
+                  "accel_uuids": accel_uuids,
}
client = self.router.client(ctxt)
-        version = '5.0'
+        version = '5.11'
+        if not client.can_send_version(version):
+            kwargs.pop('accel_uuids')
+            version = '5.0'
cctxt = client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'build_and_run_instance', **kwargs)



+ 9
- 5
nova/conductor/manager.py View File

@@ -840,7 +840,7 @@ class ComputeTaskManager(base.Base):
try:
resource_provider_mapping = (
local_reqspec.get_request_group_mapping())
-            self._create_and_bind_arqs(
+            accel_uuids = self._create_and_bind_arqs(
context, instance.uuid, instance.flavor.extra_specs,
host.nodename, resource_provider_mapping)
except Exception as exc:
@@ -858,7 +858,8 @@ class ComputeTaskManager(base.Base):
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=bdms, node=host.nodename,
-                limits=host.limits, host_list=host_list)
+                limits=host.limits, host_list=host_list,
+                accel_uuids=accel_uuids)

def _schedule_instances(self, context, request_spec,
instance_uuids=None, return_alternates=False):
@@ -1618,12 +1619,13 @@ class ComputeTaskManager(base.Base):
# this one.
continue

+            accel_uuids = []
try:
resource_provider_mapping = (
request_spec.get_request_group_mapping())
# Using nodename instead of hostname. See:
# http://lists.openstack.org/pipermail/openstack-discuss/2019-November/011044.html # noqa
-                self._create_and_bind_arqs(
+                accel_uuids = self._create_and_bind_arqs(
context, instance.uuid, instance.flavor.extra_specs,
host.nodename, resource_provider_mapping)
except Exception as exc:
@@ -1649,7 +1651,8 @@ class ComputeTaskManager(base.Base):
security_groups=legacy_secgroups,
block_device_mapping=instance_bdms,
host=host.service_host, node=host.nodename,
-                    limits=host.limits, host_list=host_list)
+                    limits=host.limits, host_list=host_list,
+                    accel_uuids=accel_uuids)

def _create_and_bind_arqs(self, context, instance_uuid, extra_specs,
hostname, resource_provider_mapping):
@@ -1660,7 +1663,7 @@ class ComputeTaskManager(base.Base):
"""
dp_name = extra_specs.get('accel:device_profile')
if not dp_name:
-            return
+            return []

LOG.debug('Calling Cyborg to get ARQs. dp_name=%s instance=%s',
dp_name, instance_uuid)
@@ -1677,6 +1680,7 @@ class ComputeTaskManager(base.Base):
for arq in arqs}
# Initiate Cyborg binding asynchronously
cyclient.bind_arqs(bindings=bindings)
+        return [arq['uuid'] for arq in arqs]

@staticmethod
def _map_instance_to_cell(context, instance, cell):


+ 4
- 1
nova/objects/service.py View File

@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)


# NOTE(danms): This is the global service version counter
-SERVICE_VERSION = 49
+SERVICE_VERSION = 50


# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@@ -180,6 +180,9 @@ SERVICE_VERSION_HISTORY = (
{'compute_rpc': '5.10'},
# Version 49: Compute now support server move operations with qos ports
{'compute_rpc': '5.10'},
+    # Version 50: Compute RPC v5.11:
+    # Add accel_uuids (accelerator requests) param to build_and_run_instance
+    {'compute_rpc': '5.11'},
)




+ 99
- 57
nova/tests/unit/compute/test_compute_mgr.py View File

@@ -5958,6 +5958,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.requested_networks = []
self.security_groups = []
self.block_device_mapping = []
self.accel_uuids = None
self.filter_properties = {'retry': {'num_attempts': 1,
'hosts': [[self.compute.host,
'fake-node']]}}
@@ -6035,12 +6036,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
'_build_networks_for_instance')
@mock.patch.object(virt_driver.ComputeDriver,
'prepare_networks_before_block_device_mapping')
def _test_accel_build_resources(self, mock_prep_net, mock_build_net,
mock_prep_spawn, mock_prep_bd, mock_bdnames, mock_save):
def _test_accel_build_resources(self, accel_uuids,
mock_prep_net, mock_build_net, mock_prep_spawn,
mock_prep_bd, mock_bdnames, mock_save):

args = (self.context, self.instance, self.requested_networks,
self.security_groups, self.image, self.block_device_mapping,
self.resource_provider_mapping)
self.resource_provider_mapping, accel_uuids)

resources = []
with self.compute._build_resources(*args) as resources:
@@ -6053,7 +6055,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
def test_accel_build_resources_no_device_profile(self, mock_get_arqs):
# If dp_name is None, accel path is a no-op.
self.instance.flavor.extra_specs = {}
self._test_accel_build_resources()
self._test_accel_build_resources(None)
mock_get_arqs.assert_not_called()

@mock.patch.object(nova.compute.manager.ComputeManager,
@@ -6064,11 +6066,12 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.instance.flavor.extra_specs = {"accel:device_profile": dp_name}
arq_list = fixtures.CyborgFixture.bound_arq_list
mock_get_arqs.return_value = arq_list
arq_uuids = [arq['uuid'] for arq in arq_list]

resources = self._test_accel_build_resources()
resources = self._test_accel_build_resources(arq_uuids)

mock_get_arqs.assert_called_once_with(self.context,
dp_name, self.instance)
dp_name, self.instance, arq_uuids)
self.assertEqual(sorted(resources['accel_info']), sorted(arq_list))

@mock.patch.object(virt_driver.ComputeDriver,
@@ -6083,7 +6086,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
exception.AcceleratorRequestOpFailed(op='get', msg=''))

self.assertRaises(exception.NovaException,
self._test_accel_build_resources)
self._test_accel_build_resources, None)
mock_clean_net.assert_called_once()

@mock.patch.object(nova.compute.manager.ComputeVirtAPI,
@@ -6100,13 +6103,42 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.instance.flavor.extra_specs = {"accel:device_profile": dp_name}
arq_events = [('accelerator-request-bound', arq['uuid'])
for arq in arq_list]
arq_uuids = [arq['uuid'] for arq in arq_list]

# get_arqs_for_instance() is called twice, once to get all ARQs
# for the instance, once to get only the resolved ARQs
mock_get_arqs.return_value = arq_list

ret_arqs = self.compute._get_bound_arq_resources(
self.context, dp_name, self.instance)
self.context, dp_name, self.instance, arq_uuids)

mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
mock_exit_wait_early.assert_called_once_with(arq_events)

mock_get_arqs.assert_has_calls([
mock.call(self.instance.uuid, only_resolved=True)])

self.assertEqual(sorted(ret_arqs), sorted(arq_list))

@mock.patch.object(nova.compute.manager.ComputeVirtAPI,
'exit_wait_early')
@mock.patch.object(nova.compute.manager.ComputeVirtAPI,
'wait_for_instance_event')
@mock.patch('nova.accelerator.cyborg._CyborgClient.'
'get_arqs_for_instance')
def test_arq_bind_wait_exit_early_no_arq_uuids(self, mock_get_arqs,
mock_wait_inst_ev, mock_exit_wait_early):
# If no ARQ UUIDs are passed in, call Cyborg to get the ARQs.
# Then, if bound ARQs available on first query, quit early.
dp_name = fixtures.CyborgFixture.dp_name
arq_list = fixtures.CyborgFixture.bound_arq_list
self.instance.flavor.extra_specs = {"accel:device_profile": dp_name}
arq_events = [('accelerator-request-bound', arq['uuid'])
for arq in arq_list]

mock_get_arqs.side_effect = [arq_list, arq_list]

ret_arqs = self.compute._get_bound_arq_resources(
self.context, dp_name, self.instance, arq_uuids=None)

mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
@@ -6132,19 +6164,19 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.instance.flavor.extra_specs = {"accel:device_profile": dp_name}
arq_events = [('accelerator-request-bound', arq['uuid'])
for arq in arq_list]
# get_arqs_for_instance gets called 3 times, returning the full
# ARQ list first, [] as resolved ARQs next, and the full list finally
mock_get_arqs.side_effect = [arq_list, [], arq_list]
arq_uuids = [arq['uuid'] for arq in arq_list]
# get_arqs_for_instance gets called 2 times, returning the
# resolved ARQs first, and the full list finally
mock_get_arqs.side_effect = [[], arq_list]

ret_arqs = self.compute._get_bound_arq_resources(
self.context, dp_name, self.instance)
self.context, dp_name, self.instance, arq_uuids)

mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
mock_exit_wait_early.assert_not_called()
self.assertEqual(sorted(ret_arqs), sorted(arq_list))
mock_get_arqs.assert_has_calls([
mock.call(self.instance.uuid),
mock.call(self.instance.uuid, only_resolved=True),
mock.call(self.instance.uuid)])

@@ -6162,18 +6194,19 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.instance.flavor.extra_specs = {"accel:device_profile": dp_name}
arq_events = [('accelerator-request-bound', arq['uuid'])
for arq in arq_list]
arq_uuids = [arq['uuid'] for arq in arq_list]

mock_get_arqs.return_value = arq_list
mock_wait_inst_ev.side_effect = eventlet_timeout.Timeout

self.assertRaises(eventlet_timeout.Timeout,
self.compute._get_bound_arq_resources,
self.context, dp_name, self.instance)
self.context, dp_name, self.instance, arq_uuids)

mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
mock_exit_wait_early.assert_not_called()
mock_get_arqs.assert_called_once_with(self.instance.uuid)
mock_get_arqs.assert_not_called()

@mock.patch.object(nova.compute.manager.ComputeVirtAPI,
'exit_wait_early')
@@ -6190,21 +6223,20 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.instance.flavor.extra_specs = {"accel:device_profile": dp_name}
arq_events = [('accelerator-request-bound', arq['uuid'])
for arq in arq_list]
arq_uuids = [arq['uuid'] for arq in arq_list]

mock_get_arqs.side_effect = [
arq_list,
exception.AcceleratorRequestOpFailed(op='', msg='')]
mock_get_arqs.side_effect = (
exception.AcceleratorRequestOpFailed(op='', msg=''))

self.assertRaises(exception.AcceleratorRequestOpFailed,
self.compute._get_bound_arq_resources,
self.context, dp_name, self.instance)
self.context, dp_name, self.instance, arq_uuids)

mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
mock_exit_wait_early.assert_not_called()
mock_get_arqs.assert_has_calls([
mock.call(self.instance.uuid),
mock.call(self.instance.uuid, only_resolved=True)])
mock_get_arqs.assert_called_once_with(
self.instance.uuid, only_resolved=True)

@mock.patch.object(fake_driver.FakeDriver, 'spawn')
@mock.patch('nova.objects.Instance.save')
@@ -6266,7 +6298,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_bdnames, mock_build_net, mock_save):
args = (self.context, self.instance, self.requested_networks,
self.security_groups, self.image, self.block_device_mapping,
self.resource_provider_mapping)
self.resource_provider_mapping, self.accel_uuids)
mock_get_arqs.side_effect = (
exception.AcceleratorRequestOpFailed(op='get', msg=''))

@@ -6331,7 +6363,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties, {})
self.filter_properties, {}, self.accel_uuids)

# This test when sending an icehouse compatible rpc call to juno compute
# node, NetworkRequest object can load from three items tuple.
@@ -6397,7 +6429,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties, {})
self.filter_properties, {}, self.accel_uuids)
mock_clean_net.assert_called_once_with(self.context, self.instance,
self.requested_networks)
mock_clean_vol.assert_called_once_with(self.context,
@@ -6448,7 +6480,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties, {})
self.filter_properties, {}, self.accel_uuids)
mock_nil.assert_called_once_with(self.instance)
mock_build.assert_called_once_with(self.context,
[self.instance], self.image, self.filter_properties,
@@ -6474,7 +6506,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node,
self.limits, self.filter_properties)
self.limits, self.filter_properties,
self.accel_uuids)
mock_save.assert_has_calls([
mock.call(),
mock.call(),
@@ -6532,7 +6565,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties, {})
self.filter_properties, {}, self.accel_uuids)
mock_build_ins.assert_called_once_with(self.context,
[instance], self.image, self.filter_properties,
self.admin_pass, self.injected_files, self.requested_networks,
@@ -6576,7 +6609,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties, {})
self.filter_properties, {}, self.accel_uuids)
mock_cleanup_network.assert_called_once_with(
self.context, instance, self.requested_networks)
mock_build_ins.assert_called_once_with(self.context,
@@ -6630,7 +6663,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties, {})
self.filter_properties, {}, self.accel_uuids)
mock_cleanup_network.assert_called_once_with(
self.context, instance, self.requested_networks)
mock_build_ins.assert_called_once_with(self.context,
@@ -6675,7 +6708,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_build_run.assert_called_once_with(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits, {}, {})
self.block_device_mapping, self.node, self.limits, {}, {},
self.accel_uuids)
mock_clean_net.assert_called_once_with(self.context, self.instance,
self.requested_networks)
mock_clean_vol.assert_called_once_with(self.context,
@@ -6724,7 +6758,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties, {})
self.filter_properties, {}, self.accel_uuids)
mock_nil.assert_called_once_with(self.instance)
mock_build.assert_called_once_with(self.context,
[self.instance], self.image, self.filter_properties,
@@ -6767,7 +6801,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties, {})
self.filter_properties, {}, self.accel_uuids)
mock_clean.assert_called_once_with(self.context, self.instance,
self.requested_networks)
mock_nil.assert_called_once_with(self.instance)
@@ -6827,7 +6861,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties, {})
self.filter_properties, {}, self.accel_uuids)
mock_clean_net.assert_called_once_with(self.context, self.instance,
self.requested_networks)

@@ -6963,7 +6997,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node,
self.limits, self.filter_properties)
self.limits, self.filter_properties,
self.accel_uuids)

mock_save.assert_has_calls([
mock.call(),
@@ -7088,7 +7123,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.instance, self.image, self.injected_files,
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping, self.node,
self.limits, self.filter_properties)
self.limits, self.filter_properties, self.accel_uuids)

_validate_instance_group_policy.assert_called_once_with(
self.context, self.instance, {})
@@ -7187,7 +7222,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.instance, self.image, self.injected_files,
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping,
self.node, self.limits, self.filter_properties)
self.node, self.limits, self.filter_properties,
self.accel_uuids)

mock_save.assert_called_once_with()
mock_notify.assert_has_calls([
@@ -7198,7 +7234,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_build.assert_called_once_with(self.context, self.instance,
self.requested_networks, self.security_groups,
test.MatchType(objects.ImageMeta), self.block_device_mapping,
self.resource_provider_mapping)
self.resource_provider_mapping, self.accel_uuids)

@mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
@mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
@@ -7220,7 +7256,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping,
self.resource_provider_mapping):
self.resource_provider_mapping, self.accel_uuids):
pass
except Exception as e:
self.assertIsInstance(e, exception.BuildAbortException)
@@ -7339,7 +7375,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping,
self.resource_provider_mapping):
self.resource_provider_mapping, self.accel_uuids):
pass
except Exception as e:
self.assertIsInstance(e,
@@ -7367,7 +7403,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups, self.image,
self.block_device_mapping, self.resource_provider_mapping):
self.block_device_mapping, self.resource_provider_mapping,
self.accel_uuids):
pass
except Exception as e:
self.assertIsInstance(e, exception.BuildAbortException)
@@ -7398,7 +7435,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping,
self.resource_provider_mapping):
self.resource_provider_mapping, self.accel_uuids):
pass
except Exception as e:
self.assertIsInstance(e, exc)
@@ -7429,7 +7466,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping,
self.resource_provider_mapping):
self.resource_provider_mapping, self.accel_uuids):
fake_spawn()
except Exception as e:
self.assertEqual(test_exception, e)
@@ -7463,7 +7500,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping,
self.resource_provider_mapping):
self.resource_provider_mapping, self.accel_uuids):
raise test.TestingException()
except Exception as e:
self.assertEqual(expected_exc, e)
@@ -7494,7 +7531,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping,
self.resource_provider_mapping):
self.resource_provider_mapping, self.accel_uuids):
raise test.TestingException()
except exception.BuildAbortException:
pass
@@ -7522,7 +7559,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping,
self.resource_provider_mapping):
self.resource_provider_mapping, self.accel_uuids):
raise test.TestingException()
except exception.BuildAbortException:
pass
@@ -7554,7 +7591,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping,
self.resource_provider_mapping):
self.resource_provider_mapping, self.accel_uuids):
fake_spawn()

self.assertTrue(mock_log.warning.called)
@@ -7766,7 +7803,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties)
self.filter_properties, self.accel_uuids)
expected_call = mock.call(self.context, self.instance,
'create.end', extra_usage_info={'message': u'Success'},
network_info=[])
@@ -7798,7 +7835,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties)
self.filter_properties, self.accel_uuids)

updates = {'vm_state': u'active', 'access_ip_v6':
netaddr.IPAddress('2001:db8:0:1:dcad:beff:feef:1'),
@@ -7838,7 +7875,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.instance, self.image, self.injected_files,
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping, self.node,
self.limits, self.filter_properties)
self.limits, self.filter_properties, self.accel_uuids)
expected_call = mock.call(self.context, self.instance,
'create.error', fault=exc)
create_error_call = mock_notify.call_args_list[
@@ -7862,7 +7899,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.instance, self.image, self.injected_files,
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping, self.node,
self.limits, self.filter_properties, request_spec)
self.limits, self.filter_properties, request_spec,
self.accel_uuids)

mock_networks.assert_called_once_with(
self.context, self.instance, self.requested_networks,
@@ -7908,7 +7946,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.instance, self.image, self.injected_files,
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping, self.node,
self.limits, self.filter_properties, request_spec)
self.limits, self.filter_properties, request_spec,
self.accel_uuids)

mock_networks.assert_called_once_with(
self.context, self.instance, self.requested_networks,
@@ -7946,7 +7985,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.instance, self.image, self.injected_files,
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping, self.node,
self.limits, self.filter_properties, request_spec)
self.limits, self.filter_properties, request_spec,
self.accel_uuids)

def test_build_with_resource_request_sriov_rp_wrongly_formatted_name(self):
request_spec = objects.RequestSpec(
@@ -7970,7 +8010,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.instance, self.image, self.injected_files,
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping, self.node,
self.limits, self.filter_properties, request_spec)
self.limits, self.filter_properties, request_spec,
self.accel_uuids)

def test_build_with_resource_request_more_than_one_providers(self):
request_spec = objects.RequestSpec(
@@ -7989,7 +8030,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.instance, self.image, self.injected_files,
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping, self.node,
self.limits, self.filter_properties, request_spec)
self.limits, self.filter_properties, request_spec,
self.accel_uuids)


class ComputeManagerErrorsOutMigrationTestCase(test.NoDBTestCase):


+ 36
- 1
nova/tests/unit/compute/test_rpcapi.py View File

@@ -933,13 +933,48 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
version='5.0')

def test_build_and_run_instance(self):
# With rpcapi 5.11, when a list of accel_uuids is passed as a param,
# that list must be passed to the client. That is tested in
# _test_compute_api with rpc_mock.assert, where expected_kwargs
# must have the accel_uuids.
accel_uuids = ['938af7f9-f136-4e5a-bdbe-3b6feab54311']
self._test_compute_api('build_and_run_instance', 'cast',
instance=self.fake_instance_obj, host='host', image='image',
request_spec={'request': 'spec'}, filter_properties=[],
admin_password='passwd', injected_files=None,
requested_networks=['network1'], security_groups=None,
block_device_mapping=None, node='node', limits=[],
-                host_list=None, version='5.0')
+                host_list=None, accel_uuids=accel_uuids, version='5.11')

def test_build_and_run_instance_old_rpcapi(self):
# With rpcapi < 5.11, accel_uuids must be dropped in the client call.
ctxt = context.RequestContext('fake_user', 'fake_project')
compute_api = compute_rpcapi.ComputeAPI()
compute_api.router.client = mock.Mock()
mock_client = mock.MagicMock()
compute_api.router.client.return_value = mock_client
# Force can_send_version to False, so that 5.0 version is used.
mock_client.can_send_version.return_value = False
mock_cctx = mock.MagicMock()
mock_client.prepare.return_value = mock_cctx
compute_api.build_and_run_instance(
ctxt, instance=self.fake_instance_obj,
host='host', image='image',
request_spec=self.fake_request_spec_obj,
filter_properties={},
accel_uuids=['938af7f9-f136-4e5a-bdbe-3b6feab54311'])

mock_client.can_send_version.assert_called_once_with('5.11')
mock_client.prepare.assert_called_with(
server='host', version='5.0')
mock_cctx.cast.assert_called_with( # No accel_uuids
ctxt, 'build_and_run_instance',
instance=self.fake_instance_obj,
image='image', request_spec=self.fake_request_spec_obj,
filter_properties={}, admin_password=None,
injected_files=None, requested_networks=None,
security_groups=None, block_device_mapping=None,
node=None, limits=None, host_list=None)

def test_quiesce_instance(self):
self._test_compute_api('quiesce_instance', 'call',


+ 13
- 7
nova/tests/unit/conductor/test_conductor.py View File

@@ -487,6 +487,7 @@ class _BaseTaskTestCase(object):
self.useFixture(cast_as_call.CastAsCall(self))

mock_getaz.return_value = 'myaz'
mock_create_bind_arqs.return_value = mock.sentinel

self.conductor.build_instances(self.context,
instances=instances,
@@ -522,7 +523,8 @@ class _BaseTaskTestCase(object):
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mock.ANY,
node='node1', limits=None, host_list=sched_return[0]),
node='node1', limits=None, host_list=sched_return[0],
accel_uuids=mock.sentinel),
mock.call(self.context, instance=mock.ANY, host='host2',
image={'fake_data': 'should_pass_silently'},
request_spec=fake_spec,
@@ -532,7 +534,8 @@ class _BaseTaskTestCase(object):
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mock.ANY,
node='node2', limits=None, host_list=sched_return[1])])
node='node2', limits=None, host_list=sched_return[1],
accel_uuids=mock.sentinel)])
mock_create_bind_arqs.assert_has_calls([
mock.call(self.context, instances[0].uuid,
instances[0].flavor.extra_specs, 'node1', mock.ANY),
@@ -1055,7 +1058,8 @@ class _BaseTaskTestCase(object):
block_device_mapping=test.MatchType(
objects.BlockDeviceMappingList),
node='node1', limits=None,
host_list=expected_build_run_host_list)
host_list=expected_build_run_host_list,
accel_uuids=[])
mock_pop_inst_map.assert_not_called()
mock_destroy_build_req.assert_not_called()

@@ -1125,7 +1129,8 @@ class _BaseTaskTestCase(object):
objects.BlockDeviceMappingList),
node='node1',
limits=None,
host_list=expected_build_run_host_list)
host_list=expected_build_run_host_list,
accel_uuids=[])

mock_rp_mapping.assert_called_once_with(
test.MatchType(objects.RequestSpec),
@@ -1207,7 +1212,8 @@ class _BaseTaskTestCase(object):
objects.BlockDeviceMappingList),
node='node2',
limits=None,
host_list=expected_build_run_host_list)
host_list=expected_build_run_host_list,
accel_uuids=[])

# called only once when the claim succeeded
mock_rp_mapping.assert_called_once_with(
@@ -3386,7 +3392,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
security_groups='security_groups',
block_device_mapping=test.MatchType(
objects.BlockDeviceMappingList),
node='node2', limits=None, host_list=[])
node='node2', limits=None, host_list=[], accel_uuids=[])

@mock.patch.object(scheduler_utils, 'setup_instance_group')
@mock.patch.object(scheduler_utils, 'build_request_spec')
@@ -3446,7 +3452,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mock.ANY,
node='node2', limits=None, host_list=[])
node='node2', limits=None, host_list=[], accel_uuids=[])

@mock.patch('nova.compute.utils.notify_about_compute_task_error')
@mock.patch('nova.objects.Instance.save')


Loading…
Cancel
Save