From 0b92c7e741b5abb1df70cc1d3bb8f0d1989cf2f5 Mon Sep 17 00:00:00 2001
From: Matt Riedemann
Date: Fri, 24 May 2019 17:08:20 -0400
Subject: [PATCH] Fix hard-delete of instance with soft-deleted referential
 constraints

When hard-deleting an instance we first delete related records, but the
code was not taking soft-deleted related records into account. This
results in the instance delete failing with a referential constraint
error, because soft-deleted records are not actually removed as far as
table constraints are concerned.

This fixes the issue by simply passing read_deleted='yes' to the
related record model query in the hard delete path.

The related unit test is updated to cover this scenario, and a typo in
that same test is also fixed here.

Change-Id: I9fc5a9cc40ceffc450c1fde1df7fb36581e9cc94
Closes-Bug: #1830438
---
 nova/db/sqlalchemy/api.py         |  6 +++++-
 nova/tests/unit/db/test_db_api.py | 18 ++++++++++++++++--
 2 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index ea4b7d6884c2..2c119a3f292b 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1792,7 +1792,11 @@ def instance_destroy(context, instance_uuid, constraint=None,
         key = 'instance_uuid' if model not in filtered_by_uuid else 'uuid'
         filter_ = {key: instance_uuid}
         if hard_delete:
-            model_query(context, model).filter_by(**filter_).delete()
+            # We need to read any soft-deleted related records to make sure
+            # we clean those up as well, otherwise we can fail with
+            # ForeignKey constraint errors when hard deleting the instance.
+            model_query(context, model, read_deleted='yes').filter_by(
+                **filter_).delete()
         else:
             model_query(context, model).filter_by(**filter_).soft_delete()
 
diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py
index 4da143e49ade..d1c7f845250f 100644
--- a/nova/tests/unit/db/test_db_api.py
+++ b/nova/tests/unit/db/test_db_api.py
@@ -3070,13 +3070,27 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
 
         bdm_values = {
             'instance_uuid': uuid,
-            'device_name': 'fake_device',
+            'device_name': '/dev/vda',
             'source_type': 'volume',
             'destination_type': 'volume',
         }
         block_dev = block_device.BlockDeviceDict(bdm_values)
         db.block_device_mapping_create(self.ctxt, block_dev, legacy=False)
 
+        # Create a second BDM that is soft-deleted to simulate that the
+        # volume was detached and the BDM was deleted before the instance
+        # was hard destroyed.
+        bdm2_values = {
+            'instance_uuid': uuid,
+            'device_name': '/dev/vdb',
+            'source_type': 'volume',
+            'destination_type': 'volume',
+        }
+        block_dev2 = block_device.BlockDeviceDict(bdm2_values)
+        bdm2 = db.block_device_mapping_create(
+            self.ctxt, block_dev2, legacy=False)
+        db.block_device_mapping_destroy(self.ctxt, bdm2.id)
+
         migration_values = {
             "status": "finished",
             "instance_uuid": uuid,
@@ -3122,7 +3136,7 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
         self.assertEqual(0, len(instance_faults[uuid]))
         inst_bdms = db.block_device_mapping_get_all_by_instance(ctxt, uuid)
         self.assertEqual(0, len(inst_bdms))
-        filters = {"isntance_uuid": uuid}
+        filters = {"instance_uuid": uuid}
         inst_migrations = db.migration_get_all_by_filters(ctxt, filters)
         self.assertEqual(0, len(inst_migrations))
         vifs = db.virtual_interface_get_by_instance(ctxt, uuid)
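
Note (illustration, not part of the patch): Nova's model_query() helper
hides soft-deleted rows by default, so before this change the hard-delete
cleanup loop missed related records that were only soft-deleted but still
held a foreign key to the instance row; passing read_deleted='yes' is what
lets the loop remove them as well. The sketch below shows that idea with a
toy in-memory "table". ToyQuery and the sample rows are hypothetical
stand-ins for this illustration only, not Nova's actual helper or schema.

# Hypothetical stand-in for a model_query-style helper, reduced to the one
# behaviour that matters here: soft-deleted rows are hidden unless
# read_deleted='yes' is requested.
class ToyQuery(object):

    def __init__(self, rows, read_deleted='no'):
        self.rows = rows
        self.read_deleted = read_deleted
        self.matched = []

    def filter_by(self, **filters):
        def visible(row):
            # Soft-deleted rows are filtered out unless explicitly requested.
            if self.read_deleted == 'no' and row['deleted']:
                return False
            return all(row.get(k) == v for k, v in filters.items())
        self.matched = [r for r in self.rows if visible(r)]
        return self

    def delete(self):
        # Hard delete: remove the matched rows outright.
        for row in self.matched:
            self.rows.remove(row)
        return len(self.matched)


# One live BDM and one soft-deleted BDM, both still referencing the
# instance via instance_uuid (the foreign key in the real schema).
bdms = [
    {'instance_uuid': 'abc', 'deleted': 0},
    {'instance_uuid': 'abc', 'deleted': 1},
]

# The default read_deleted='no' (the pre-fix behaviour) would match only
# the live row, leaving the soft-deleted row behind to trip the foreign
# key constraint when the instance row itself is hard-deleted.
# With read_deleted='yes', both rows are cleaned up:
removed = ToyQuery(bdms, read_deleted='yes').filter_by(
    instance_uuid='abc').delete()
assert removed == 2 and not bdms

Mapped back to the patch, read_deleted='yes' on the related-record query
plays the role of the visible() check above; nothing else needs to change
because the hard-delete path already removes matching rows outright rather
than soft-deleting them again.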