From 1a89c1d73b8230e437ed0e15656a2606a94926f8 Mon Sep 17 00:00:00 2001
From: Balazs Gibizer
Date: Fri, 22 Feb 2019 11:34:31 +0100
Subject: [PATCH] Use Selection object to fill request group mapping

To fill the request group - resource provider mapping, nova needs to look
at the allocation made in placement as well as the traits of the providers
used. So far these were explicit placement calls. However, it turns out
that the Selection object already contains the allocation data, so one of
the extra placement calls can be removed.

This patch replaces the GET /allocations/ placement call in the conductor
build instance codepath with use of the Selection.allocation_request JSON
blob.

Closes-Bug: #1819430
Change-Id: Iecbee518444bd282ce5f6fd019db41a322f76a83
---
 nova/conductor/manager.py                   | 42 +++++++------
 nova/tests/unit/conductor/test_conductor.py | 69 +++++++--------------
 2 files changed, 47 insertions(+), 64 deletions(-)

diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 4079c3f16367..854e147ab5ce 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -677,7 +677,7 @@ class ComputeTaskManager(base.Base):
                     # the allocation of the instance to another host
                     # TODO(gibi): handle if the below call raises
                     self._fill_provider_mapping(
-                        context, instance.uuid, request_spec)
+                        context, instance.uuid, request_spec, host)
                 else:
                     # Some deployments use different schedulers that do not
                     # use Placement, so they will not have an
@@ -1264,7 +1264,8 @@ class ComputeTaskManager(base.Base):
             with obj_target_cell(inst, cell0):
                 inst.destroy()
 
-    def _fill_provider_mapping(self, context, instance_uuid, request_spec):
+    def _fill_provider_mapping(
+            self, context, instance_uuid, request_spec, host_selection):
         """Fills out the request group - resource provider mapping in the
         request spec.
 
@@ -1276,26 +1277,26 @@ class ComputeTaskManager(base.Base):
         replaced with a simpler code that copies the group - RP
         mapping out from the Selection object returned by the scheduler's
         select_destinations call.
+
+        :param context: The security context
+        :param instance_uuid: The UUID of the instance for which the provider
+            mapping is filled
+        :param request_spec: The RequestSpec object associated with the
+            operation
+        :param host_selection: The Selection object returned by the scheduler
+            for this operation
         """
         # Exit early if this request spec does not require mappings.
         if not request_spec.maps_requested_resources:
             return
-        # TODO(mriedem): Could we use the Selection.allocation_request here
-        # to get the resource providers rather than making an API call to
-        # placement per instance being scheduled? Granted that is a
-        # PUT /allocations/{consumer_id} *request* payload rather than a
-        # *response* but at least currently they are in the same format and
-        # could make this faster.
-        allocs = self.report_client.get_allocs_for_consumer(
-            context, instance_uuid)['allocations']
-        if not allocs:
-            # Technically out-of-tree scheduler drivers can still not create
-            # allocations in placement so move on if there are no allocations
-            # for the instance.
-            LOG.debug('No allocations found for instance after scheduling. '
-                      'Assuming the scheduler driver is not using Placement.',
-                      instance_uuid=instance_uuid)
-            return
+
+        # Technically out-of-tree scheduler drivers may still not create
+        # allocations in placement, but if
+        # request_spec.maps_requested_resources is not empty and the
+        # scheduling succeeded then placement has to be involved.
+        ar = jsonutils.loads(host_selection.allocation_request)
+        allocs = ar['allocations']
+
         # TODO(mriedem): Short-term we can optimize this by passing a cache by
         # reference of the RP->traits mapping because if we are processing
         # a multi-create request we could have the same RPs being used for
@@ -1308,6 +1309,9 @@ class ComputeTaskManager(base.Base):
             rp_uuid: self.report_client.get_provider_traits(
                 context, rp_uuid).traits
             for rp_uuid in allocs}
+        # NOTE(gibi): The allocs dict is in the format of the PUT /allocations
+        # and that format can change. The current format can be detected from
+        # host_selection.allocation_request_version.
         request_spec.map_requested_resources_to_providers(
             allocs, provider_traits)
 
@@ -1431,7 +1435,7 @@ class ComputeTaskManager(base.Base):
             # map allocations to resource providers in the request spec.
             try:
                 self._fill_provider_mapping(
-                    context, instance.uuid, request_spec)
+                    context, instance.uuid, request_spec, host)
             except Exception as exc:
                 # If anything failed here we need to cleanup and bail out.
                 with excutils.save_and_reraise_exception():
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index 98629d47bc81..b3d786ec7ac2 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -66,28 +66,28 @@ from nova.volume import cinder
 
 CONF = conf.CONF
 
-fake_alloc1 = {"allocations": [
-    {"resource_provider": {"uuid": uuids.host1},
+fake_alloc1 = {"allocations": {
+    uuids.host1: {
         "resources": {"VCPU": 1,
                       "MEMORY_MB": 1024,
                       "DISK_GB": 100}
-    }]}
+    }}}
-fake_alloc2 = {"allocations": [
-    {"resource_provider": {"uuid": uuids.host2},
+fake_alloc2 = {"allocations": {
+    uuids.host2: {
         "resources": {"VCPU": 1,
                       "MEMORY_MB": 1024,
                       "DISK_GB": 100}
-    }]}
+    }}}
-fake_alloc3 = {"allocations": [
-    {"resource_provider": {"uuid": uuids.host3},
+fake_alloc3 = {"allocations": {
+    uuids.host3: {
         "resources": {"VCPU": 1,
                       "MEMORY_MB": 1024,
                       "DISK_GB": 100}
-    }]}
+    }}}
 fake_alloc_json1 = jsonutils.dumps(fake_alloc1)
 fake_alloc_json2 = jsonutils.dumps(fake_alloc2)
 fake_alloc_json3 = jsonutils.dumps(fake_alloc3)
-fake_alloc_version = "1.23"
+fake_alloc_version = "1.28"
 fake_selection1 = objects.Selection(service_host="host1", nodename="node1",
         cell_uuid=uuids.cell, limits=None, allocation_request=fake_alloc_json1,
         allocation_request_version=fake_alloc_version)
@@ -992,7 +992,7 @@ class _BaseTaskTestCase(object):
             security_groups='security_groups',
             block_device_mapping='block_device_mapping',
             legacy_bdm=False,
-            host_lists=fake_host_lists1,
+            host_lists=copy.deepcopy(fake_host_lists1),
             request_spec=request_spec)
 
         expected_build_run_host_list = copy.copy(fake_host_lists1[0])
@@ -1018,7 +1018,8 @@ class _BaseTaskTestCase(object):
             host_list=expected_build_run_host_list)
 
         mock_rp_mapping.assert_called_once_with(
-            self.context, instance.uuid, mock.ANY)
+            self.context, instance.uuid, mock.ANY,
+            test.MatchType(objects.Selection))
         actual_request_spec = mock_rp_mapping.mock_calls[0][1][2]
         self.assertEqual(
             rg1.resources,
@@ -1655,11 +1656,12 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
     @mock.patch('nova.availability_zones.get_host_availability_zone')
     @mock.patch('nova.compute.rpcapi.ComputeAPI.build_and_run_instance')
     @mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations')
-    def _do_schedule_and_build_instances_test(self, params,
-                                              select_destinations,
-                                              build_and_run_instance,
-                                              get_az):
-        select_destinations.return_value = [[fake_selection1]]
+    def _do_schedule_and_build_instances_test(
+            self, params, select_destinations, build_and_run_instance,
+            get_az, host_list=None):
+        if not host_list:
+            host_list = copy.deepcopy(fake_host_lists1)
+        select_destinations.return_value = host_list
         get_az.return_value = 'myaz'
 
         details = {}
@@ -2132,27 +2134,21 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
 
     @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                 'get_provider_traits')
-    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
-                'get_allocs_for_consumer')
     @mock.patch('nova.objects.request_spec.RequestSpec.'
                 'map_requested_resources_to_providers')
     def test_schedule_and_build_instances_fill_request_spec(
-            self, mock_map, mock_get_allocs, mock_traits):
+            self, mock_map, mock_traits):
         # makes sure there is some request group in the spec to be mapped
         self.params['request_specs'][0].requested_resources = [
             objects.RequestGroup()]
-        mock_get_allocs.return_value = {
-            'allocations': {uuids.rp1: mock.sentinel.rp1_allocs}}
         mock_traits.return_value.traits = ['TRAIT1']
 
-        instance_uuid = self._do_schedule_and_build_instances_test(
-            self.params)
+        self._do_schedule_and_build_instances_test(self.params)
 
-        mock_map.assert_called_once_with({uuids.rp1: mock.sentinel.rp1_allocs},
-                                         {uuids.rp1: ['TRAIT1']})
-        mock_get_allocs.assert_called_once_with(mock.ANY, instance_uuid)
-        mock_traits.assert_called_once_with(mock.ANY, uuids.rp1)
+        mock_map.assert_called_once_with(fake_alloc1['allocations'],
+                                         {uuids.host1: ['TRAIT1']})
+        mock_traits.assert_called_once_with(mock.ANY, uuids.host1)
 
     @mock.patch('nova.conductor.manager.ComputeTaskManager.'
                 '_cleanup_build_artifacts')
@@ -2183,23 +2179,6 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
         self.params['request_specs'][0].requested_resources = []
         self._do_schedule_and_build_instances_test(self.params)
 
-    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
-                'get_provider_traits', new_callable=mock.NonCallableMock)
-    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
-                'get_allocs_for_consumer', return_value={'allocations': {}})
-    @mock.patch('nova.objects.request_spec.RequestSpec.'
-                'map_requested_resources_to_providers',
-                new_callable=mock.NonCallableMock)
-    def test_schedule_and_build_instances_fill_request_spec_no_allocs(
-            self, mock_map, mock_get_allocs, mock_traits):
-        """Tests to make sure _fill_provider_mapping handles a scheduler
-        driver which does not use placement (so there are no allocations).
-        """
-        self.params['request_specs'][0].requested_resources = [
-            objects.RequestGroup()]
-        self._do_schedule_and_build_instances_test(self.params)
-        mock_get_allocs.assert_called_once()
-
     @mock.patch('nova.objects.CellMapping.get_by_uuid')
     def test_bury_in_cell0_no_cell0(self, mock_cm_get):
         mock_cm_get.side_effect = exc.CellMappingNotFound(uuid='0')
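
As an illustration of the approach (kept separate from the patch itself): the conductor now reads the per-provider allocations straight out of the Selection object instead of issuing a GET /allocations call per instance. Below is a minimal, self-contained Python sketch of that idea. FakeSelection and extract_allocations are hypothetical names invented for this example; the only assumption carried over from the patch is that Selection.allocation_request holds a JSON blob in the PUT /allocations payload format (allocations keyed by resource provider UUID), with the format version exposed as allocation_request_version.

    import json


    class FakeSelection(object):
        """Stand-in for nova.objects.Selection, for illustration only."""

        def __init__(self, allocation_request, allocation_request_version):
            # JSON blob in the PUT /allocations payload format.
            self.allocation_request = allocation_request
            # Placement microversion of that format, e.g. "1.28".
            self.allocation_request_version = allocation_request_version


    def extract_allocations(selection):
        """Return {rp_uuid: resources} parsed from the selection's blob."""
        allocs = json.loads(selection.allocation_request)['allocations']
        return {rp_uuid: alloc['resources']
                for rp_uuid, alloc in allocs.items()}


    if __name__ == '__main__':
        fake_alloc = {"allocations": {
            "rp-uuid-1": {"resources": {"VCPU": 1, "MEMORY_MB": 1024}}}}
        selection = FakeSelection(json.dumps(fake_alloc), "1.28")
        # Prints: {'rp-uuid-1': {'VCPU': 1, 'MEMORY_MB': 1024}}
        print(extract_allocations(selection))

In the patch, this same parsing is what the new _fill_provider_mapping body does via jsonutils.loads(host_selection.allocation_request), before looking up the traits of each provider UUID found in the allocations.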