Allow evacuating server with port resource request

This patch adds support for evacuating a server with qos ports. To do
that, this patch:
* collects the port resource requests from neutron before the scheduler
  is called to select the target of the evacuation
* calculates the request group - resource provider mapping after the
  scheduler has selected the target host
* updates the InstancePCIRequest to drive the pci_claim to allocate VFs
  from the same PF that the bandwidth is allocated from by the scheduler
* updates the binding profile of the qos ports so that the allocation
  key of the binding profile points to the RPs the ports are allocated
  from

Note that evacuate does not have a reschedule loop, so no extra logic is
needed for that. The overall flow of the steps above is sketched below.
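
A rough sketch of that flow, for illustration only: it uses plain dicts
instead of Nova objects, and every name in it (evacuate_with_qos_ports,
schedule_and_map, allocated_rp) is hypothetical rather than something the
patch adds.

def evacuate_with_qos_ports(ports, pci_requests, schedule_and_map):
    """Model the evacuation flow for a server with qos ports.

    :param ports: list of dicts like
        {'id': <port uuid>, 'resource_request': {...}}
    :param pci_requests: dict of InstancePCIRequest-like dicts keyed by
        port uuid
    :param schedule_and_map: callable that takes the requested resources,
        selects the target host and returns a port uuid -> resource
        provider uuid mapping derived from the new allocation
    """
    # 1) collect the port resource requests before the scheduler runs
    requested_resources = {
        port['id']: port['resource_request']
        for port in ports if port.get('resource_request')
    }

    # 2) the scheduler selects the target host; afterwards compute the
    #    request group - resource provider mapping from its allocation
    provider_mapping = schedule_and_map(requested_resources)

    # 3) record the chosen provider in the PCI request so a pci_claim
    #    could pick a VF from the same PF the bandwidth came from
    for port_id, rp_uuid in provider_mapping.items():
        if port_id in pci_requests:
            pci_requests[port_id]['allocated_rp'] = rp_uuid

    # 4) point the allocation key of the qos port binding profile at the
    #    resource provider the port is now allocated from
    for port in ports:
        if port['id'] in provider_mapping:
            profile = port.setdefault('binding:profile', {})
            profile['allocation'] = provider_mapping[port['id']]

    return ports, pci_requests

In the patch itself, steps 1-2 happen in the conductor
(get_requested_resource_for_instance and fill_provider_mapping in
rebuild_instance) and steps 3-4 in the compute manager and the neutron API
module, as the hunks below show.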

The rebuild_instance RPC has passed the request spec to the compute
service since Queens, so no RPC or service version change was needed and
no upgrade-related checks were introduced.

Change-Id: Id9ed7a82d42be8ffe760f03e6610b9e6f5a4287b
blueprint: support-move-ops-with-qos-ports-ussuri
Author: Balazs Gibizer, 2019-10-14 13:08:24 +02:00 (committed by Matt Riedemann)
parent 05c60fe70e
commit ab509a0043
9 changed files with 431 additions and 24 deletions


@@ -3396,6 +3396,16 @@ class ComputeManager(manager.Manager):
allocations, rebuild_claim, scheduled_node, limits):
"""Helper to avoid deep nesting in the top-level method."""
request_group_resource_providers_mapping = None
if evacuate:
request_group_resource_providers_mapping = \
self._get_request_group_mapping(request_spec)
if request_group_resource_providers_mapping:
self._update_pci_request_spec_with_allocated_interface_name(
context, instance,
request_group_resource_providers_mapping)
claim_context = rebuild_claim(
context, instance, scheduled_node, allocations,
limits=limits, image_meta=image_meta, migration=migration)
@@ -3404,7 +3414,8 @@ class ComputeManager(manager.Manager):
self._do_rebuild_instance(
context, instance, orig_image_ref, image_meta, injected_files,
new_pass, orig_sys_metadata, bdms, evacuate, on_shared_storage,
preserve_ephemeral, migration, request_spec, allocations)
preserve_ephemeral, migration, request_spec, allocations,
request_group_resource_providers_mapping)
@staticmethod
def _get_image_name(image_meta):
@@ -3417,7 +3428,8 @@ class ComputeManager(manager.Manager):
image_meta, injected_files, new_pass,
orig_sys_metadata, bdms, evacuate,
on_shared_storage, preserve_ephemeral,
migration, request_spec, allocations):
migration, request_spec, allocations,
request_group_resource_providers_mapping):
orig_vm_state = instance.vm_state
if evacuate:
@@ -3504,7 +3516,8 @@ class ComputeManager(manager.Manager):
# TODO(cfriesen): this network_api call and the one above
# are so similar, we should really try to unify them.
self.network_api.setup_instance_network_on_host(
context, instance, self.host, migration)
context, instance, self.host, migration,
provider_mappings=request_group_resource_providers_mapping)
# TODO(mriedem): Consider decorating setup_instance_network_on_host
# with @base_api.refresh_cache and then we wouldn't need this
# explicit call to get_instance_nw_info.


@@ -1077,9 +1077,16 @@ class ComputeTaskManager(base.Base):
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
# TODO(gibi): We need to make sure that the
# requested_resources field is re calculated based on
# neutron ports.
port_res_req = (
self.network_api.get_requested_resource_for_instance(
context, instance.uuid))
# NOTE(gibi): When cyborg or other module wants to handle
# similar non-nova resources then here we have to collect
# all the external resource requests in a single list and
# add them to the RequestSpec.
request_spec.requested_resources = port_res_req
try:
# if this is a rebuild of instance on the same host with
# new image.
@@ -1091,6 +1098,7 @@ class ComputeTaskManager(base.Base):
request_spec.ensure_network_metadata(instance)
compute_utils.heal_reqspec_is_bfv(
context, request_spec, instance)
host_lists = self._schedule_instances(context,
request_spec, [instance.uuid],
return_alternates=False)
@@ -1098,9 +1106,19 @@ class ComputeTaskManager(base.Base):
selection = host_list[0]
host, node, limits = (selection.service_host,
selection.nodename, selection.limits)
if recreate:
scheduler_utils.fill_provider_mapping(
context, self.report_client, request_spec,
selection)
except (exception.NoValidHost,
exception.UnsupportedPolicyException,
exception.AllocationUpdateFailed) as ex:
exception.AllocationUpdateFailed,
# the next two can come from fill_provider_mapping and
# signal a software error.
NotImplementedError,
ValueError) as ex:
if migration:
migration.status = 'error'
migration.save()


@@ -522,8 +522,9 @@ class API(base_api.NetworkAPI):
self.network_rpcapi.migrate_instance_finish(context, **args)
def setup_instance_network_on_host(self, context, instance, host,
migration=None):
def setup_instance_network_on_host(
self, context, instance, host, migration=None,
provider_mappings=None):
"""Setup network for specified instance on host."""
self.migrate_instance_finish(
context, instance, {'source_compute': None, 'dest_compute': host},


@@ -355,8 +355,9 @@ class NetworkAPI(base.Base):
"""
raise NotImplementedError()
def setup_instance_network_on_host(self, context, instance, host,
migration=None):
def setup_instance_network_on_host(
self, context, instance, host, migration=None,
provider_mappings=None):
"""Setup network for specified instance on host.
:param context: The request context.
@@ -364,6 +365,8 @@ class NetworkAPI(base.Base):
:param host: The host which network should be setup for instance.
:param migration: The migration object if the instance is being
tracked with a migration.
:param provider_mappings: a dict of lists of resource provider uuids
keyed by port uuid
"""
raise NotImplementedError()
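
For illustration, the provider_mappings structure described in the
docstring above would look roughly like this; the uuids are placeholders
and the snippet is not part of the patch.

# Placeholder example of the provider_mappings shape: a list of resource
# provider uuids per port uuid (a list, presumably because a request
# group can map to more than one provider).
provider_mappings = {
    '737ba4ab-2d5f-4bbb-b7ec-64c1d8c54ae2': [
        'f2c7b6a1-9a0e-4c83-8b22-6d13a0a1c9de',
    ],
}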


@@ -3272,11 +3272,12 @@ class API(base_api.NetworkAPI):
"""Create a private DNS domain with optional nova project."""
raise NotImplementedError()
def setup_instance_network_on_host(self, context, instance, host,
migration=None):
def setup_instance_network_on_host(
self, context, instance, host, migration=None,
provider_mappings=None):
"""Setup network for specified instance on host."""
self._update_port_binding_for_instance(context, instance, host,
migration)
self._update_port_binding_for_instance(
context, instance, host, migration, provider_mappings)
def cleanup_instance_network_on_host(self, context, instance, host):
"""Cleanup network for specified instance on host."""


@@ -5592,6 +5592,8 @@ class PortResourceRequestBasedSchedulingTestBase(
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
self.compute1 = self._start_compute('host1')
self.compute1_rp_uuid = self._get_provider_uuid_by_host('host1')
self.compute1_service_id = self.admin_api.get_services(
host='host1', binary='nova-compute')[0]['id']
self.ovs_bridge_rp_per_host = {}
self.sriov_dev_rp_per_host = {}
self.flavor = self.api.get_flavors()[0]
@@ -6992,6 +6994,308 @@ class ServerMoveWithPortResourceRequestTest(
# the port that doesn't have resource request
self.assertNotIn('binding:profile', updated_non_qos_port)
def _turn_off_api_check(self):
# The API actively rejects the move operations with resource
# requests so we have to turn off that check.
# TODO(gibi): Remove this when the move operations are supported and
# the API check is removed.
patcher = mock.patch(
'nova.api.openstack.common.'
'supports_port_resource_request_during_move',
return_value=True)
self.addCleanup(patcher.stop)
patcher.start()
def _check_allocation_during_evacuate(
self, server, flavor, source_compute_rp_uuid, dest_compute_rp_uuid,
non_qos_port, qos_port, qos_sriov_port):
# evacuate is the only case when the same consumer has allocation from
# two different RP trees so we need special checks
updated_non_qos_port = self.neutron.show_port(
non_qos_port['id'])['port']
updated_qos_port = self.neutron.show_port(qos_port['id'])['port']
updated_qos_sriov_port = self.neutron.show_port(
qos_sriov_port['id'])['port']
allocations = self.placement_api.get(
'/allocations/%s' % server['id']).body['allocations']
# We expect two sets of allocations. One set for the source compute
# and one set for the dest compute. In each set we expect 3 allocations:
# one for the compute RP according to the flavor, one for the ovs port
# and one for the SRIOV port.
self.assertEqual(6, len(allocations), allocations)
# 1. source compute allocation
compute_allocations = allocations[source_compute_rp_uuid]['resources']
self.assertEqual(
self._resources_from_flavor(flavor),
compute_allocations)
# 2. source ovs allocation
ovs_allocations = allocations[
self.ovs_bridge_rp_per_host[source_compute_rp_uuid]]['resources']
self.assertPortMatchesAllocation(qos_port, ovs_allocations)
# 3. source sriov allocation
sriov_allocations = allocations[
self.sriov_dev_rp_per_host[
source_compute_rp_uuid][self.PF2]]['resources']
self.assertPortMatchesAllocation(qos_sriov_port, sriov_allocations)
# 4. dest compute allocation
compute_allocations = allocations[dest_compute_rp_uuid]['resources']
self.assertEqual(
self._resources_from_flavor(flavor),
compute_allocations)
# 5. dest ovs allocation
ovs_allocations = allocations[
self.ovs_bridge_rp_per_host[dest_compute_rp_uuid]]['resources']
self.assertPortMatchesAllocation(qos_port, ovs_allocations)
# 6. dest SRIOV allocation
sriov_allocations = allocations[
self.sriov_dev_rp_per_host[
dest_compute_rp_uuid][self.PF2]]['resources']
self.assertPortMatchesAllocation(qos_sriov_port, sriov_allocations)
# the qos ports should have their binding pointing to the RPs in the
# dest compute RP tree
qos_binding_profile = updated_qos_port['binding:profile']
self.assertEqual(
self.ovs_bridge_rp_per_host[dest_compute_rp_uuid],
qos_binding_profile['allocation'])
qos_sriov_binding_profile = updated_qos_sriov_port['binding:profile']
self.assertEqual(
self.sriov_dev_rp_per_host[dest_compute_rp_uuid][self.PF2],
qos_sriov_binding_profile['allocation'])
# And we expect not to have any allocation set in the port binding for
# the port that doesn't have resource request
self.assertNotIn('binding:profile', updated_non_qos_port)
def _check_allocation_after_evacuation_source_recovered(
self, server, flavor, dest_compute_rp_uuid, non_qos_port,
qos_port, qos_sriov_port):
# check that source allocation is cleaned up and the dest allocation
# and the port bindings are not touched.
updated_non_qos_port = self.neutron.show_port(
non_qos_port['id'])['port']
updated_qos_port = self.neutron.show_port(qos_port['id'])['port']
updated_qos_sriov_port = self.neutron.show_port(
qos_sriov_port['id'])['port']
allocations = self.placement_api.get(
'/allocations/%s' % server['id']).body['allocations']
self.assertEqual(3, len(allocations), allocations)
# 1. dest compute allocation
compute_allocations = allocations[dest_compute_rp_uuid]['resources']
self.assertEqual(
self._resources_from_flavor(flavor),
compute_allocations)
# 2. dest ovs allocation
ovs_allocations = allocations[
self.ovs_bridge_rp_per_host[dest_compute_rp_uuid]]['resources']
self.assertPortMatchesAllocation(qos_port, ovs_allocations)
# 3. dest SRIOV allocation
sriov_allocations = allocations[
self.sriov_dev_rp_per_host[
dest_compute_rp_uuid][self.PF2]]['resources']
self.assertPortMatchesAllocation(qos_sriov_port, sriov_allocations)
# the qos ports should have their binding pointing to the RPs in the
# dest compute RP tree
qos_binding_profile = updated_qos_port['binding:profile']
self.assertEqual(
self.ovs_bridge_rp_per_host[dest_compute_rp_uuid],
qos_binding_profile['allocation'])
qos_sriov_binding_profile = updated_qos_sriov_port['binding:profile']
self.assertEqual(
self.sriov_dev_rp_per_host[dest_compute_rp_uuid][self.PF2],
qos_sriov_binding_profile['allocation'])
# And we expect not to have any allocation set in the port binding for
# the port that doesn't have resource request
self.assertNotIn('binding:profile', updated_non_qos_port)
def test_evacuate_with_qos_port(self, host=None):
# TODO(gibi): remove this when evacuate is fully supported and
# therefore the check is removed from the api
self._turn_off_api_check()
non_qos_normal_port = self.neutron.port_1
qos_normal_port = self.neutron.port_with_resource_request
qos_sriov_port = self.neutron.port_with_sriov_resource_request
server = self._create_server_with_ports(
non_qos_normal_port, qos_normal_port, qos_sriov_port)
# check that the server allocates from the current host properly
self._check_allocation(
server, self.compute1_rp_uuid, non_qos_normal_port,
qos_normal_port, qos_sriov_port, self.flavor_with_group_policy)
# force source compute down
self.compute1.stop()
self.admin_api.put_service(
self.compute1_service_id, {'forced_down': 'true'})
req = {'evacuate': {}}
if host:
req['evacuate']['host'] = host
self.api.post_server_action(server['id'], req)
self._wait_for_server_parameter(
self.api, server,
{'OS-EXT-SRV-ATTR:host': 'host2',
'status': 'ACTIVE'})
self._check_allocation_during_evacuate(
server, self.flavor_with_group_policy, self.compute1_rp_uuid,
self.compute2_rp_uuid, non_qos_normal_port, qos_normal_port,
qos_sriov_port)
# recover source compute
self.admin_api.put_service(
self.compute1_service_id, {'forced_down': 'false'})
self.compute1 = self.restart_compute_service(self.compute1)
# check that source allocation is cleaned up and the dest allocation
# and the port bindings are not touched.
self._check_allocation_after_evacuation_source_recovered(
server, self.flavor_with_group_policy, self.compute2_rp_uuid,
non_qos_normal_port, qos_normal_port, qos_sriov_port)
self._delete_server_and_check_allocations(
qos_normal_port, qos_sriov_port, server)
def test_evacuate_with_target_host_with_qos_port(self):
self.test_evacuate_with_qos_port(host='host2')
def test_evacuate_with_qos_port_fails_recover_source_compute(self):
# TODO(gibi): remove this when evacuate is fully supported and
# therefore the check is removed from the api
self._turn_off_api_check()
non_qos_normal_port = self.neutron.port_1
qos_normal_port = self.neutron.port_with_resource_request
qos_sriov_port = self.neutron.port_with_sriov_resource_request
server = self._create_server_with_ports(
non_qos_normal_port, qos_normal_port, qos_sriov_port)
# check that the server allocates from the current host properly
self._check_allocation(
server, self.compute1_rp_uuid, non_qos_normal_port,
qos_normal_port, qos_sriov_port, self.flavor_with_group_policy)
# force source compute down
self.compute1.stop()
self.admin_api.put_service(
self.compute1_service_id, {'forced_down': 'true'})
with mock.patch(
'nova.compute.resource_tracker.ResourceTracker.rebuild_claim',
side_effect=exception.ComputeResourcesUnavailable(
reason='test evacuate failure')):
req = {'evacuate': {}}
self.api.post_server_action(server['id'], req)
# Evacuate does not have a reschedule loop so the evacuation is expected
# to simply fail and the server remains on the source host
server = self._wait_for_server_parameter(
self.api, server,
{'OS-EXT-SRV-ATTR:host': 'host1',
'status': 'ERROR'})
self._wait_for_migration_status(server, ['failed'])
# As evacuation failed the resource allocation should be untouched
self._check_allocation(
server, self.compute1_rp_uuid, non_qos_normal_port,
qos_normal_port, qos_sriov_port, self.flavor_with_group_policy)
# recover source compute
self.admin_api.put_service(
self.compute1_service_id, {'forced_down': 'false'})
self.compute1 = self.restart_compute_service(self.compute1)
# check again that even after source host recovery the source
# allocation is intact
self._check_allocation(
server, self.compute1_rp_uuid, non_qos_normal_port,
qos_normal_port, qos_sriov_port, self.flavor_with_group_policy)
self._delete_server_and_check_allocations(
qos_normal_port, qos_sriov_port, server)
def test_evacuate_with_qos_port_pci_update_fail(self):
# TODO(gibi): remove this when evacuate is fully supported and
# therefore the check is removed from the api
self._turn_off_api_check()
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
# _update_pci_request_spec_with_allocated_interface_name() to raise
# when the instance is evacuated to host2.
rsp = self.placement_api.put(
'/resource_providers/%s'
% self.sriov_dev_rp_per_host[self.compute2_rp_uuid][self.PF2],
{"name": "invalid-device-rp-name"})
self.assertEqual(200, rsp.status)
non_qos_port = self.neutron.port_1
qos_port = self.neutron.port_with_resource_request
qos_sriov_port = self.neutron.port_with_sriov_resource_request
server = self._create_server_with_ports(
non_qos_port, qos_port, qos_sriov_port)
# check that the server allocates from the current host properly
self._check_allocation(
server, self.compute1_rp_uuid, non_qos_port, qos_port,
qos_sriov_port, self.flavor_with_group_policy)
# force source compute down
self.compute1.stop()
self.admin_api.put_service(
self.compute1_service_id, {'forced_down': 'true'})
# The compute manager on host2 will raise from
# _update_pci_request_spec_with_allocated_interface_name
self.api.post_server_action(server['id'], {'evacuate': {}})
server = self._wait_for_server_parameter(
self.api, server,
{'OS-EXT-SRV-ATTR:host': 'host1',
'OS-EXT-STS:task_state': None,
'status': 'ERROR'})
self._wait_for_migration_status(server, ['failed'])
self.assertIn(
'Build of instance %s aborted' % server['id'],
server['fault']['message'])
self._wait_for_action_fail_completion(
server, instance_actions.EVACUATE, 'compute_rebuild_instance',
self.admin_api)
fake_notifier.wait_for_versioned_notifications(
'instance.rebuild.error')
fake_notifier.wait_for_versioned_notifications(
'compute.exception')
# and the instance allocates from the source host
self._check_allocation(
server, self.compute1_rp_uuid, non_qos_port, qos_port,
qos_sriov_port, self.flavor_with_group_policy)
class PortResourceRequestReSchedulingTest(
PortResourceRequestBasedSchedulingTestBase):


@@ -12790,6 +12790,12 @@ class EvacuateHostTestCase(BaseTestCase):
node = NODENAME
limits = {}
@mock.patch(
'nova.compute.manager.ComputeManager._get_request_group_mapping',
return_value=mock.sentinel.mapping)
@mock.patch(
'nova.compute.manager.ComputeManager'
'._update_pci_request_spec_with_allocated_interface_name')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch('nova.compute.utils.notify_about_instance_rebuild')
@mock.patch.object(network_api, 'setup_networks_on_host')
@@ -12797,7 +12803,8 @@ class EvacuateHostTestCase(BaseTestCase):
@mock.patch('nova.context.RequestContext.elevated', return_value=ctxt)
def _test_rebuild(mock_context, mock_setup_instance_network_on_host,
mock_setup_networks_on_host, mock_notify_rebuild,
mock_notify_action, vm_is_stopped=False):
mock_notify_action, mock_update_pci_req,
mock_get_mapping, vm_is_stopped=False):
orig_image_ref = None
image_ref = None
injected_files = None
@@ -12830,9 +12837,13 @@ class EvacuateHostTestCase(BaseTestCase):
mock_setup_networks_on_host.assert_called_once_with(
ctxt, self.inst, self.inst.host)
mock_setup_instance_network_on_host.assert_called_once_with(
ctxt, self.inst, self.inst.host, migration)
ctxt, self.inst, self.inst.host, migration,
provider_mappings=mock.sentinel.mapping)
self.mock_get_allocs.assert_called_once_with(ctxt, self.inst.uuid)
mock_update_pci_req.assert_called_once_with(
ctxt, self.inst, mock.sentinel.mapping)
_test_rebuild(vm_is_stopped=vm_states_is_stopped)
def test_rebuild_on_host_updated_target(self):


@@ -5207,7 +5207,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
orig_sys_metadata, bdms,
recreate, on_shared_storage,
preserve_ephemeral, {}, {},
self.allocations)
self.allocations,
mock.sentinel.mapping)
mock_notify_usage.assert_has_calls(
[mock.call(self.context, instance, "rebuild.start",
@@ -5220,8 +5221,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.assertTrue(mock_notify_exists.called)
mock_setup.assert_called_once_with(self.context, instance,
mock.ANY)
mock_setup_inst.assert_called_once_with(self.context, instance,
mock.ANY, mock.ANY)
mock_setup_inst.assert_called_once_with(
self.context, instance, mock.ANY, mock.ANY,
provider_mappings=mock.sentinel.mapping)
mock_get_nw_info.assert_called_once_with(self.context, instance)
def test_rebuild_default_impl(self):
@@ -5304,7 +5306,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
evacuate=False, on_shared_storage=None,
preserve_ephemeral=False, migration=objects.Migration(),
request_spec=objects.RequestSpec(),
allocations=self.allocations)
allocations=self.allocations,
request_group_resource_providers_mapping=mock.sentinel.mapping)
self.assertIn('Trusted image certificates provided on host',
six.text_type(ex))


@@ -1501,8 +1501,14 @@ class _BaseTaskTestCase(object):
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(self.conductor_manager.query_client,
'select_destinations')
) as (rebuild_mock, select_dest_mock):
'select_destinations'),
mock.patch('nova.scheduler.utils.fill_provider_mapping',
new_callable=mock.NonCallableMock),
mock.patch('nova.network.neutronv2.api.API.'
'get_requested_resource_for_instance',
new_callable=mock.NonCallableMock)
) as (rebuild_mock, select_dest_mock, fill_provider_mock,
get_resources_mock):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
@@ -1632,6 +1638,41 @@ class _BaseTaskTestCase(object):
migration = objects.Migration.get_by_id(self.context, migration.id)
self.assertEqual('error', migration.status)
def test_rebuild_instance_fill_provider_mapping_raises(self):
inst_obj = self._create_fake_instance_obj()
rebuild_args, _ = self._prepare_rebuild_args(
{'host': None, 'recreate': True})
fake_spec = objects.RequestSpec()
rebuild_args['request_spec'] = fake_spec
with test.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(scheduler_utils, 'setup_instance_group',
return_value=False),
mock.patch.object(self.conductor_manager.query_client,
'select_destinations'),
mock.patch.object(scheduler_utils, 'set_vm_state_and_notify'),
mock.patch.object(scheduler_utils, 'fill_provider_mapping',
side_effect=ValueError(
'No valid group - RP mapping is found'))
) as (rebuild_mock, sig_mock,
select_dest_mock, set_vm_state_and_notify_mock,
fill_mapping_mock):
self.assertRaises(ValueError,
self.conductor_manager.rebuild_instance,
context=self.context, instance=inst_obj,
**rebuild_args)
select_dest_mock.assert_called_once_with(self.context, fake_spec,
[inst_obj.uuid], return_objects=True,
return_alternates=False)
set_vm_state_and_notify_mock.assert_called_once_with(
self.context, inst_obj.uuid, 'compute_task', 'rebuild_server',
{'vm_state': vm_states.ERROR, 'task_state': None},
test.MatchType(ValueError), fake_spec)
self.assertFalse(rebuild_mock.called)
def test_rebuild_instance_evacuate_migration_record(self):
inst_obj = self._create_fake_instance_obj()
migration = objects.Migration(context=self.context,
@@ -1681,7 +1722,13 @@ class _BaseTaskTestCase(object):
'select_destinations',
return_value=[[fake_selection]]),
mock.patch.object(fake_spec, 'reset_forced_destinations'),
) as (rebuild_mock, sig_mock, select_dest_mock, reset_fd):
mock.patch('nova.scheduler.utils.fill_provider_mapping'),
mock.patch(
'nova.network.neutronv2.api.API.'
'get_requested_resource_for_instance',
return_value=[])
) as (rebuild_mock, sig_mock, select_dest_mock, reset_fd,
fill_rp_mapping_mock, get_req_res_mock):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
@@ -1696,6 +1743,12 @@ class _BaseTaskTestCase(object):
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
**compute_args)
get_req_res_mock.assert_called_once_with(
self.context, inst_obj.uuid)
fill_rp_mapping_mock.assert_called_once_with(
self.context, self.conductor_manager.report_client,
fake_spec, fake_selection)
self.assertEqual('compute.instance.rebuild.scheduled',
fake_notifier.NOTIFICATIONS[0].event_type)
mock_notify.assert_called_once_with(