From 1ced5905716300b9dbc308c8d3b68f38386fb2c0 Mon Sep 17 00:00:00 2001
From: Balazs Gibizer
Date: Tue, 14 Aug 2018 18:41:03 +0200
Subject: [PATCH] Consumer gen: remove_provider_from_instance_allocation

This patch changes the scheduler report client's
remove_provider_from_instance_allocation function to use placement API
microversion 1.28, which includes consumer generation handling.

If placement returns a 409 Conflict,
remove_provider_from_instance_allocation returns False, the same way it
does for other non-204 results.

There are several places in nova that call
remove_provider_from_instance_allocation:
* dropping the allocation on the destination of a live migration if the
  pre-check fails
* deleting, at compute host startup, the allocation of an instance that
  was successfully evacuated from the host while the host was down
* deleting the allocation on the destination of an evacuation after the
  rebuild fails there
* deleting the allocation on the destination after pre-resize fails

This patch does not change the external behavior of
remove_provider_from_instance_allocation, as it still returns False in
case of failure. Therefore this patch does not introduce new functional
tests for the above cases but relies on the existing coverage. However,
based on the other patches in this series, the error handling in these
cases is suspected to be imperfect.

Blueprint: use-nested-allocation-candidates
Change-Id: I328a18a723d0f593eea491f788a6e256d6e0c127
---
 nova/scheduler/client/report.py              | 25 +++--
 .../unit/scheduler/client/test_report.py     | 95 ++++++++++++-------
 2 files changed, 72 insertions(+), 48 deletions(-)

diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py
index 8de590950659..859f46325a5e 100644
--- a/nova/scheduler/client/report.py
+++ b/nova/scheduler/client/report.py
@@ -1854,7 +1854,8 @@ class SchedulerReportClient(object):
         url = '/allocations/%s' % consumer_uuid
 
         # Grab the "doubled-up" allocation that we will manipulate
-        r = self.get(url, global_request_id=context.global_id)
+        r = self.get(url, global_request_id=context.global_id,
+                     version=CONSUMER_GENERATION_VERSION)
         if r.status_code != 200:
             LOG.warning("Failed to retrieve allocations for %s. Got HTTP %s",
                         consumer_uuid, r.status_code)
@@ -1865,6 +1866,8 @@ class SchedulerReportClient(object):
             LOG.error("Expected to find current allocations for %s, but "
                       "found none.", consumer_uuid)
             return False
+        else:
+            current_consumer_generation = r.json()['consumer_generation']
 
         # If the host isn't in the current allocation for the instance, don't
         # do anything
@@ -1880,27 +1883,20 @@ class SchedulerReportClient(object):
                 instance_uuid=consumer_uuid)
         LOG.debug('Instance %s has resources on %i compute nodes',
                   consumer_uuid, len(compute_providers))
-
-        new_allocs = [
-            {
-                'resource_provider': {
-                    'uuid': alloc_rp_uuid,
-                },
+        new_allocs = {
+            alloc_rp_uuid: {
                 'resources': alloc['resources'],
             }
             for alloc_rp_uuid, alloc in current_allocs.items()
             if alloc_rp_uuid != rp_uuid
-        ]
+        }
 
         if len(compute_providers) == 1:
             # NOTE(danms): We are in a resize to same host scenario. Since we
             # are the only provider then we need to merge back in the doubled
             # allocation with our part subtracted
             peer_alloc = {
-                'resource_provider': {
-                    'uuid': rp_uuid,
-                },
-                'resources': current_allocs[rp_uuid]['resources']
+                'resources': current_allocs[rp_uuid]['resources'],
             }
             LOG.debug('Original resources from same-host '
                       'allocation: %s', peer_alloc['resources'])
@@ -1908,15 +1904,16 @@ class SchedulerReportClient(object):
                                             resources, -1)
             LOG.debug('Subtracting old resources from same-host '
                       'allocation: %s', peer_alloc['resources'])
-            new_allocs.append(peer_alloc)
+            new_allocs[rp_uuid] = peer_alloc
 
         payload = {'allocations': new_allocs}
         payload['project_id'] = project_id
         payload['user_id'] = user_id
+        payload['consumer_generation'] = current_consumer_generation
         LOG.debug("Sending updated allocation %s for instance %s after "
                   "removing resources for %s.",
                   new_allocs, consumer_uuid, rp_uuid)
-        r = self.put(url, payload, version='1.10',
+        r = self.put(url, payload, version=CONSUMER_GENERATION_VERSION,
                      global_request_id=context.global_id)
         if r.status_code != 204:
             LOG.warning("Failed to save allocation for %s. Got HTTP %s: %s",
diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py
index db84a121d1a9..afa2365b9d6b 100644
--- a/nova/tests/unit/scheduler/client/test_report.py
+++ b/nova/tests/unit/scheduler/client/test_report.py
@@ -902,6 +902,9 @@ class TestPutAllocations(SchedulerReportClientTestCase):
                     },
                 },
             },
+            'consumer_generation': 1,
+            'project_id': uuids.project_id,
+            'user_id': uuids.user_id,
         }
         self.ks_adap_mock.get.return_value = get_resp_mock
         resp_mock = mock.Mock(status_code=204)
@@ -916,31 +919,24 @@ class TestPutAllocations(SchedulerReportClientTestCase):
         expected_url = "/allocations/%s" % consumer_uuid
         # New allocations should only include the destination...
         expected_payload = {
-            'allocations': [
-                {
-                    'resource_provider': {
-                        'uuid': uuids.destination,
-                    },
+            'allocations': {
+                uuids.destination: {
                     'resources': {
                         'VCPU': 1,
                         'MEMORY_MB': 1024,
                     },
                 },
-            ],
+            },
+            'consumer_generation': 1,
+            'project_id': project_id,
+            'user_id': user_id
         }
-        expected_payload['project_id'] = project_id
-        expected_payload['user_id'] = user_id
         # We have to pull the json body from the mock call_args to validate
         # it separately otherwise hash seed issues get in the way.
         actual_payload = self.ks_adap_mock.put.call_args[1]['json']
-        sort_by_uuid = lambda x: x['resource_provider']['uuid']
-        expected_allocations = sorted(expected_payload['allocations'],
-                                      key=sort_by_uuid)
-        actual_allocations = sorted(actual_payload['allocations'],
-                                    key=sort_by_uuid)
-        self.assertEqual(expected_allocations, actual_allocations)
+        self.assertEqual(expected_payload, actual_payload)
         self.ks_adap_mock.put.assert_called_once_with(
-            expected_url, microversion='1.10', json=mock.ANY,
+            expected_url, microversion='1.28', json=mock.ANY,
             headers={'X-Openstack-Request-Id': self.context.global_id})
 
         self.assertTrue(res)
@@ -976,6 +972,9 @@ class TestPutAllocations(SchedulerReportClientTestCase):
                     },
                 },
             },
+            'consumer_generation': 1,
+            'project_id': uuids.project_id,
+            'user_id': uuids.user_id,
         }
         self.ks_adap_mock.get.return_value = get_resp_mock
         resp_mock = mock.Mock(status_code=204)
@@ -990,39 +989,29 @@ class TestPutAllocations(SchedulerReportClientTestCase):
         expected_url = "/allocations/%s" % consumer_uuid
         # New allocations should only include the destination...
         expected_payload = {
-            'allocations': [
-                {
-                    'resource_provider': {
-                        'uuid': uuids.shared_storage,
-                    },
+            'allocations': {
+                uuids.shared_storage: {
                     'resources': {
                         'DISK_GB': 100,
                     },
                 },
-                {
-                    'resource_provider': {
-                        'uuid': uuids.destination,
-                    },
+                uuids.destination: {
                     'resources': {
                         'VCPU': 1,
                         'MEMORY_MB': 1024,
                     },
                 },
-            ],
+            },
+            'consumer_generation': 1,
+            'project_id': project_id,
+            'user_id': user_id
         }
-        expected_payload['project_id'] = project_id
-        expected_payload['user_id'] = user_id
         # We have to pull the json body from the mock call_args to validate
         # it separately otherwise hash seed issues get in the way.
         actual_payload = self.ks_adap_mock.put.call_args[1]['json']
-        sort_by_uuid = lambda x: x['resource_provider']['uuid']
-        expected_allocations = sorted(expected_payload['allocations'],
-                                      key=sort_by_uuid)
-        actual_allocations = sorted(actual_payload['allocations'],
-                                    key=sort_by_uuid)
-        self.assertEqual(expected_allocations, actual_allocations)
+        self.assertEqual(expected_payload, actual_payload)
         self.ks_adap_mock.put.assert_called_once_with(
-            expected_url, microversion='1.10', json=mock.ANY,
+            expected_url, microversion='1.28', json=mock.ANY,
             headers={'X-Openstack-Request-Id': self.context.global_id})
 
         self.assertTrue(res)
@@ -1051,6 +1040,9 @@ class TestPutAllocations(SchedulerReportClientTestCase):
                     },
                 },
             },
+            'consumer_generation': 1,
+            'project_id': uuids.project_id,
+            'user_id': uuids.user_id,
         }
         self.ks_adap_mock.get.return_value = get_resp_mock
         consumer_uuid = uuids.consumer_uuid
@@ -1084,6 +1076,41 @@ class TestPutAllocations(SchedulerReportClientTestCase):
 
         self.assertFalse(res)
 
+    def test_remove_provider_from_inst_alloc_consumer_gen_conflict(self):
+        get_resp_mock = mock.Mock(status_code=200)
+        get_resp_mock.json.return_value = {
+            'allocations': {
+                uuids.source: {
+                    'resource_provider_generation': 42,
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                    },
+                },
+                uuids.destination: {
+                    'resource_provider_generation': 42,
+                    'resources': {
+                        'VCPU': 1,
+                        'MEMORY_MB': 1024,
+                    },
+                },
+            },
+            'consumer_generation': 1,
+            'project_id': uuids.project_id,
+            'user_id': uuids.user_id,
+        }
+        self.ks_adap_mock.get.return_value = get_resp_mock
+        resp_mock = mock.Mock(status_code=409)
+        self.ks_adap_mock.put.return_value = resp_mock
+        consumer_uuid = uuids.consumer_uuid
+        project_id = uuids.project_id
+        user_id = uuids.user_id
+        res = self.client.remove_provider_from_instance_allocation(
+            self.context, consumer_uuid, uuids.source, user_id, project_id,
+            mock.Mock())
+
+        self.assertFalse(res)
+
 
 class TestSetAndClearAllocations(SchedulerReportClientTestCase):
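
Reviewer note: for anyone who wants to exercise the API directly, below is a
minimal standalone sketch of the flow the patched
remove_provider_from_instance_allocation performs at microversion 1.28: GET
the allocations together with the consumer generation, filter one provider
out of the dict-keyed allocations, and PUT the result back, treating a 409
as failure just like any other non-204 status. It is not part of the patch;
it talks to placement with the plain requests library instead of nova's
SchedulerReportClient, PLACEMENT_URL, the token value, and the
remove_provider helper are hypothetical placeholders, and the same-host
resize merge branch is intentionally omitted.

# Illustrative sketch only -- NOT part of the patch. Assumes a reachable
# placement endpoint and a valid keystone token (both placeholders here).
import requests

PLACEMENT_URL = 'http://placement.example.com'  # hypothetical endpoint
HEADERS = {
    'OpenStack-API-Version': 'placement 1.28',  # consumer generation support
    'X-Auth-Token': 'TOKEN',                    # placeholder credential
}


def remove_provider(consumer_uuid, rp_uuid):
    """Drop rp_uuid from consumer_uuid's allocations, generation-safely."""
    url = '%s/allocations/%s' % (PLACEMENT_URL, consumer_uuid)
    r = requests.get(url, headers=HEADERS)
    if r.status_code != 200:
        return False
    body = r.json()
    if rp_uuid not in body['allocations']:
        return True  # nothing to remove

    # At 1.28 allocations are a dict keyed by provider UUID, so dropping a
    # provider is a simple key filter; at 1.10 this was a list of objects,
    # each carrying a nested {'resource_provider': {'uuid': ...}}.
    new_allocs = {
        uuid: {'resources': alloc['resources']}
        for uuid, alloc in body['allocations'].items()
        if uuid != rp_uuid
    }
    payload = {
        'allocations': new_allocs,
        'project_id': body['project_id'],
        'user_id': body['user_id'],
        # Echo back the generation we read. If another writer updated this
        # consumer between our GET and PUT, placement answers 409 Conflict
        # instead of silently overwriting that change.
        'consumer_generation': body['consumer_generation'],
    }
    r = requests.put(url, json=payload, headers=HEADERS)
    # Like the patched nova code, report failure on 409 rather than retry;
    # the caller decides whether re-reading and retrying makes sense.
    return r.status_code == 204

The consumer generation is optimistic concurrency control: placement applies
the read-modify-write above only if nobody else changed the consumer's
allocations between the GET and the PUT, which is exactly the race the
1.10-based code could lose silently.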