From a45daafd3111a6804eb5adafe4860db122cb09b8 Mon Sep 17 00:00:00 2001 From: songwenping Date: Sat, 11 Apr 2020 18:28:01 +0800 Subject: [PATCH] Delete resource provider in tree by top-down traversable order When deleting a nova service with "nova service-delete <id>", this will delete the compute node's resource provider in Placement. But if there are child resource providers, Placement will throw an exception (CannotDeleteParentResourceProvider) and not delete the resource provider. When we add the host service again, Placement cannot insert a new compute node resource provider, and we cannot use the compute node any more. So we should delete the sub resource providers in the tree when deleting the resource provider. Modify unit test. Change-Id: Ide8732be6c047ad1b141b89df676783b2fa2f25a Closes-Bug: #1872385 --- nova/scheduler/client/report.py | 22 +++- .../api_sample_tests/test_services.py | 19 ++++ .../unit/scheduler/client/test_report.py | 99 +++++++++++++++++-- 3 files changed, 124 insertions(+), 16 deletions(-) diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py index 350825af146e..8e301fffbb89 100644 --- a/nova/scheduler/client/report.py +++ b/nova/scheduler/client/report.py @@ -2153,13 +2153,21 @@ class SchedulerReportClient(object): context, host, nodename) for instance_uuid in instance_uuids: self.delete_allocation_for_instance(context, instance_uuid) - try: - self._delete_provider(rp_uuid, global_request_id=context.global_id) - except (exception.ResourceProviderInUse, - exception.ResourceProviderDeletionFailed): - # TODO(efried): Raise these. Right now this is being left a no-op - # for backward compatibility. - pass + # Ensure to delete resource provider in tree by top-down + # traversable order. 
+ rps_to_refresh = self.get_providers_in_tree(context, rp_uuid) + self._provider_tree.populate_from_iterable(rps_to_refresh) + provider_uuids = self._provider_tree.get_provider_uuids_in_tree( + rp_uuid) + for provider_uuid in provider_uuids[::-1]: + try: + self._delete_provider(provider_uuid, + global_request_id=context.global_id) + except (exception.ResourceProviderInUse, + exception.ResourceProviderDeletionFailed): + # TODO(efried): Raise these. Right now this is being + # left a no-op for backward compatibility. + pass def get_provider_by_name(self, context, name): """Queries the placement API for resource provider information matching diff --git a/nova/tests/functional/api_sample_tests/test_services.py b/nova/tests/functional/api_sample_tests/test_services.py index 7f6377d8bb57..7bb0b1da079e 100644 --- a/nova/tests/functional/api_sample_tests/test_services.py +++ b/nova/tests/functional/api_sample_tests/test_services.py @@ -143,6 +143,17 @@ class ServicesV253JsonTest(ServicesV211JsonTest): def fake_hm_destroy(context, host): return 1 + def fake_get_providers_in_tree(self, context, compute_node_id): + return [{ + 'uuid': compute_node_id, + 'name': 'test', + 'generation': 1, + 'parent_provider_uuid': None + }] + + def fake_get_provider_uuids_in_tree(context, compute_node_id): + return [compute_node_id] + self.stub_out('nova.db.api.service_get_by_uuid', db_service_get_by_uuid) self.stub_out('nova.db.api.compute_node_get_all_by_host', @@ -152,6 +163,14 @@ class ServicesV253JsonTest(ServicesV211JsonTest): fake_hm_get_by_host) self.stub_out('nova.objects.host_mapping.HostMapping._destroy_in_db', fake_hm_destroy) + self.stub_out( + 'nova.scheduler.client.report.SchedulerReportClient.' + 'get_providers_in_tree', + fake_get_providers_in_tree) + self.stub_out( + 'nova.compute.provider_tree.ProviderTree.' 
+ 'get_provider_uuids_in_tree', + fake_get_provider_uuids_in_tree) def test_service_enable(self): """Enable an existing service.""" diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py index cf916fda4938..1cb9f74d9b4d 100644 --- a/nova/tests/unit/scheduler/client/test_report.py +++ b/nova/tests/unit/scheduler/client/test_report.py @@ -3146,17 +3146,24 @@ class TestAssociations(SchedulerReportClientTestCase): class TestAllocations(SchedulerReportClientTestCase): + @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' + 'get_providers_in_tree') @mock.patch("nova.scheduler.client.report.SchedulerReportClient." "delete") @mock.patch("nova.scheduler.client.report.SchedulerReportClient." "delete_allocation_for_instance") @mock.patch("nova.objects.InstanceList.get_uuids_by_host_and_node") def test_delete_resource_provider_cascade(self, mock_by_host, - mock_del_alloc, mock_delete): - self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1) + mock_del_alloc, mock_delete, mock_get_rpt): cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host", hypervisor_hostname="fake_hostname", ) mock_by_host.return_value = [uuids.inst1, uuids.inst2] + mock_get_rpt.return_value = [{ + 'uuid': cn.uuid, + 'name': mock.sentinel.name, + 'generation': 1, + 'parent_provider_uuid': None + }] resp_mock = mock.Mock(status_code=204) mock_delete.return_value = resp_mock self.client.delete_resource_provider(self.context, cn, cascade=True) @@ -3168,17 +3175,24 @@ class TestAllocations(SchedulerReportClientTestCase): exp_url, global_request_id=self.context.global_id) self.assertFalse(self.client._provider_tree.exists(uuids.cn)) + @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' + 'get_providers_in_tree') @mock.patch("nova.scheduler.client.report.SchedulerReportClient." "delete") @mock.patch("nova.scheduler.client.report.SchedulerReportClient." 
"delete_allocation_for_instance") @mock.patch("nova.objects.InstanceList.get_uuids_by_host_and_node") def test_delete_resource_provider_no_cascade(self, mock_by_host, - mock_del_alloc, mock_delete): - self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1) + mock_del_alloc, mock_delete, mock_get_rpt): self.client._association_refresh_time[uuids.cn] = mock.Mock() cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host", hypervisor_hostname="fake_hostname", ) + mock_get_rpt.return_value = [{ + 'uuid': cn.uuid, + 'name': mock.sentinel.name, + 'generation': 1, + 'parent_provider_uuid': None + }] mock_by_host.return_value = [uuids.inst1, uuids.inst2] resp_mock = mock.Mock(status_code=204) mock_delete.return_value = resp_mock @@ -3189,14 +3203,21 @@ class TestAllocations(SchedulerReportClientTestCase): exp_url, global_request_id=self.context.global_id) self.assertNotIn(uuids.cn, self.client._association_refresh_time) + @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' + 'get_providers_in_tree') @mock.patch("nova.scheduler.client.report.SchedulerReportClient." 
"delete") @mock.patch('nova.scheduler.client.report.LOG') - def test_delete_resource_provider_log_calls(self, mock_log, mock_delete): - # First, check a successful call - self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1) + def test_delete_resource_provider_log_calls(self, mock_log, mock_delete, + get_rpt_mock): cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host", hypervisor_hostname="fake_hostname", ) + get_rpt_mock.return_value = [{ + 'uuid': cn.uuid, + 'name': mock.sentinel.name, + 'generation': 1, + 'parent_provider_uuid': None + }] resp_mock = fake_requests.FakeResponse(204) mock_delete.return_value = resp_mock self.client.delete_resource_provider(self.context, cn) @@ -3220,15 +3241,75 @@ class TestAllocations(SchedulerReportClientTestCase): self.assertEqual(0, mock_log.info.call_count) self.assertEqual(1, mock_log.error.call_count) + @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' + 'get_providers_in_tree') + @mock.patch("nova.scheduler.client.report.SchedulerReportClient." + "delete") + @mock.patch('nova.scheduler.client.report.LOG') + def test_delete_resource_providers_by_order(self, mock_log, mock_delete, + mock_get_rpt): + """Ensure that more than on RP is in the tree and that all of them + is gets deleted in the proper order. 
+ """ + cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host", + hypervisor_hostname="fake_hostname", ) + mock_get_rpt.return_value = [ + { + 'uuid': uuids.child1, + 'name': mock.sentinel.name, + 'generation': 1, + 'parent_provider_uuid': cn.uuid + }, + { + 'uuid': uuids.gc1_1, + 'name': mock.sentinel.name, + 'generation': 1, + 'parent_provider_uuid': uuids.child1 + }, + { + 'uuid': cn.uuid, + 'name': mock.sentinel.name, + 'generation': 1, + 'parent_provider_uuid': None + } + ] + mock_delete.return_value = True + self.client.delete_resource_provider(self.context, cn) + self.assertEqual(3, mock_delete.call_count) + # Delete RP in correct order + mock_delete.assert_has_calls([ + mock.call('/resource_providers/%s' % uuids.gc1_1, + global_request_id=mock.ANY), + mock.call('/resource_providers/%s' % uuids.child1, + global_request_id=mock.ANY), + mock.call('/resource_providers/%s' % cn.uuid, + global_request_id=mock.ANY), + ]) + exp_url = "Deleted resource provider %s" + # Logging info in correct order: uuids.gc1_1, uuids.child1, cn.uuid + mock_log.assert_has_calls([ + mock.call.info(exp_url, uuids.gc1_1), + mock.call.info(exp_url, uuids.child1), + mock.call.info(exp_url, cn.uuid), + ]) + + @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' + 'get_providers_in_tree') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.delete', new=mock.Mock(side_effect=ks_exc.EndpointNotFound())) - def test_delete_resource_provider_placement_exception(self): + def test_delete_resource_provider_placement_exception(self, + mock_get_rpt): """Ensure that a ksa exception in delete_resource_provider raises through. 
""" - self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1) cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host", hypervisor_hostname="fake_hostname", ) + mock_get_rpt.return_value = [{ + 'uuid': cn.uuid, + 'name': mock.sentinel.name, + 'generation': 1, + 'parent_provider_uuid': None + }] self.assertRaises( ks_exc.ClientException, self.client.delete_resource_provider, self.context, cn)