From 3aa00b087884a278e747e839db0707cf39561382 Mon Sep 17 00:00:00 2001 From: Gorka Eguileor Date: Tue, 20 Jul 2021 17:15:54 +0200 Subject: [PATCH] Delete attachment on remove_export failure When deleting an attachment, if the driver's remove_export or the detach_volume method call fails in the cinder driver, then the attachment status is changed to error_detaching but the REST API call doesn't fail. The end result is: - Volume status is "available" - Volume attach_status is "detached" - There is a volume_attachment record for the volume - The volume may still be exported in the backend The volume still being exported in the storage array is not a problem, since the next attach-detach cycle will give it another opportunity for it to succeed, and we also remove the export on volume deletion. So in the end leaving the attachment in error_detaching status doesn't have any use and creates confusion. This patch removes the attachment record on an attachment delete request if the error happens on the remove_export or detach_volume calls. This doesn't change how the REST API attachment delete operation behaves; the change is that there will not be a leftover attachment record with the volume in available and detached status. 
Closes-Bug: #1935057 Change-Id: I442a42b0c098775935a799876ad8efbe141829ad --- .../attachments/test_attachments_manager.py | 25 +++++++++++++++++++ cinder/volume/manager.py | 14 +++-------- ...re_leaves_attachment-24e0c648269b0177.yaml | 6 +++++ 3 files changed, 35 insertions(+), 10 deletions(-) create mode 100644 releasenotes/notes/remove_export_failure_leaves_attachment-24e0c648269b0177.yaml diff --git a/cinder/tests/unit/attachments/test_attachments_manager.py b/cinder/tests/unit/attachments/test_attachments_manager.py index bc3429b6a25..0aa87661f16 100644 --- a/cinder/tests/unit/attachments/test_attachments_manager.py +++ b/cinder/tests/unit/attachments/test_attachments_manager.py @@ -145,6 +145,31 @@ class AttachmentManagerTestCase(test.TestCase): self.context, attachment_ref.id) + def test_attachment_delete_remove_export_fail(self): + """attachment_delete removes attachment on remove_export failure.""" + self.mock_object(self.manager.driver, 'remove_export', + side_effect=Exception) + # Report that the connection is not shared + self.mock_object(self.manager, '_connection_terminate', + return_value=False) + + vref = tests_utils.create_volume(self.context, status='in-use', + attach_status='attached') + values = {'volume_id': vref.id, 'volume_host': vref.host, + 'attach_status': 'reserved', 'instance_uuid': fake.UUID1} + attach = db.volume_attach(self.context, values) + # Confirm the volume OVO has the attachment before the deletion + vref.refresh() + self.assertEqual(1, len(vref.volume_attachment)) + + self.manager.attachment_delete(self.context, attach.id, vref) + + # Attachment has been removed from the DB + self.assertRaises(exception.VolumeAttachmentNotFound, + db.volume_attachment_get, self.context, attach.id) + # Attachment has been removed from the volume OVO attachment list + self.assertEqual(0, len(vref.volume_attachment)) + def test_attachment_delete_multiple_attachments(self): volume_params = {'status': 'available'} vref = 
tests_utils.create_volume(self.context, **volume_params) diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index 401fbd639e0..56c202dd432 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -4949,16 +4949,10 @@ class VolumeManager(manager.CleanableManager, if has_shared_connection is not None and not has_shared_connection: self.driver.remove_export(context.elevated(), vref) except Exception: - # FIXME(jdg): Obviously our volume object is going to need some - # changes to deal with multi-attach and figuring out how to - # represent a single failed attach out of multiple attachments - - # TODO(jdg): object method here - attachment.attach_status = \ - fields.VolumeAttachStatus.ERROR_DETACHING - attachment.save() - else: - vref.finish_detach(attachment_id) + # Failures on detach_volume and remove_export are not considered + # failures in terms of detaching the volume. + pass + vref.finish_detach(attachment_id) self._notify_about_volume_usage(context, vref, "detach.end") # Replication group API (Tiramisu) diff --git a/releasenotes/notes/remove_export_failure_leaves_attachment-24e0c648269b0177.yaml b/releasenotes/notes/remove_export_failure_leaves_attachment-24e0c648269b0177.yaml new file mode 100644 index 00000000000..cde05ed17a6 --- /dev/null +++ b/releasenotes/notes/remove_export_failure_leaves_attachment-24e0c648269b0177.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + `Bug #1935057 <https://bugs.launchpad.net/cinder/+bug/1935057>`_: Fixed + an issue where, after a failed detach, a volume could end up in available + and detached status yet still have an attachment in error_detaching.