From caf1d292dae225445119729d3a462267d860181a Mon Sep 17 00:00:00 2001
From: Ameed Ashour
Date: Wed, 24 Jan 2018 09:32:24 -0500
Subject: [PATCH] Detach volumes when VM creation fails

If the boot-volume creation fails, the data volume is left in state
"in-use", attached to the server, which is now in "error" state. The
user can't detach the volume because of the server's error state. They
can delete the server, but that leaves the volume apparently attached
to a server that no longer exists; that stale attachment is being fixed
separately: https://review.openstack.org/#/c/340614/

The only way out is to ask an administrator to reset the state of the
data volume (an option not available to regular users under the default
policy).

This change fixes the problem in the compute service: when the creation
fails, the compute manager detaches the created volumes before putting
the VM into the error state. The instance can then be deleted without
having to worry about attached volumes.

Change-Id: I8b1c05317734e14ea73dc868941351bb31210bf0
Closes-bug: #1633249
(cherry picked from commit 61f6751a1807d3c3ee76d0351d17a82c6e1a915a)
---
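Notes (kept below the "---" marker, so git-am ignores them): a minimal,
runnable sketch of the reworked _cleanup_volumes() control flow, for
reviewers who want the behaviour at a glance. FakeBDM, cleanup_volumes
and the stub detach/delete callables are illustrative stand-ins, not
Nova code; only the branching mirrors this patch.

import sys


class FakeBDM(object):
    """Illustrative stand-in for a BlockDeviceMapping object."""
    def __init__(self, volume_id, delete_on_termination):
        self.volume_id = volume_id
        self.delete_on_termination = delete_on_termination


def cleanup_volumes(bdms, detach_volume, delete_volume,
                    raise_exc=True, detach=True):
    exc_info = None
    for bdm in bdms:
        # New in this patch: detach each attached volume first, so a
        # later "delete server" cannot strand it in state "in-use".
        if detach and bdm.volume_id:
            try:
                detach_volume(bdm, destroy_bdm=bdm.delete_on_termination)
            except Exception:
                exc_info = sys.exc_info()
        # Pre-existing behaviour: delete volumes that were created with
        # delete_on_termination=True.
        if bdm.volume_id and bdm.delete_on_termination:
            try:
                delete_volume(bdm.volume_id)
            except Exception:
                exc_info = sys.exc_info()
    if exc_info is not None and raise_exc:
        # The real code re-raises with the saved traceback (six.reraise).
        raise exc_info[1]


bdms = [FakeBDM('vol-keep', delete_on_termination=False),
        FakeBDM('vol-drop', delete_on_termination=True)]
# Build-failure path: detach defaults to True.
cleanup_volumes(bdms,
                detach_volume=lambda bdm, destroy_bdm: None,
                delete_volume=lambda volume_id: None,
                raise_exc=False)
# Instance-delete path: volumes were already detached by
# _shutdown_instance(), so the caller passes detach=False.
cleanup_volumes(bdms,
                detach_volume=lambda bdm, destroy_bdm: None,
                delete_volume=lambda volume_id: None,
                raise_exc=False, detach=False)

Build-failure paths keep the default detach=True; the normal delete
path passes detach=False, as explained by the NOTE in the diff below.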
 nova/compute/manager.py                     | 39 ++++++++++----
 nova/tests/unit/compute/test_compute.py     |  2 +-
 nova/tests/unit/compute/test_compute_mgr.py | 57 +++++++++++++++++----
 3 files changed, 77 insertions(+), 21 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 75b3e66e49c5..93364b097206 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1849,7 +1849,7 @@ class ComputeManager(manager.Manager):
                           instance=instance)
             self._cleanup_allocated_networks(context, instance,
                     requested_networks)
-            self._cleanup_volumes(context, instance.uuid,
+            self._cleanup_volumes(context, instance,
                     block_device_mapping, raise_exc=False)
             compute_utils.add_instance_fault_from_exc(context, instance,
                     e, sys.exc_info(),
@@ -1909,7 +1909,7 @@ class ComputeManager(manager.Manager):
             LOG.exception(e.format_message(), instance=instance)
             self._cleanup_allocated_networks(context, instance,
                     requested_networks)
-            self._cleanup_volumes(context, instance.uuid,
+            self._cleanup_volumes(context, instance,
                     block_device_mapping, raise_exc=False)
             compute_utils.add_instance_fault_from_exc(context, instance,
                     e, sys.exc_info())
@@ -1923,7 +1923,7 @@ class ComputeManager(manager.Manager):
                           instance=instance)
             self._cleanup_allocated_networks(context, instance,
                     requested_networks)
-            self._cleanup_volumes(context, instance.uuid,
+            self._cleanup_volumes(context, instance,
                     block_device_mapping, raise_exc=False)
             compute_utils.add_instance_fault_from_exc(context, instance,
                     e, sys.exc_info())
@@ -2424,14 +2424,27 @@ class ComputeManager(manager.Manager):
             self.host, action=fields.NotificationAction.SHUTDOWN,
             phase=fields.NotificationPhase.END, bdms=bdms)
 
-    def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True):
+    def _cleanup_volumes(self, context, instance, bdms, raise_exc=True,
+                         detach=True):
         exc_info = None
-
         for bdm in bdms:
-            LOG.debug("terminating bdm %s", bdm,
-                      instance_uuid=instance_uuid)
+            if detach and bdm.volume_id:
+                try:
+                    LOG.debug("Detaching volume: %s", bdm.volume_id,
+                              instance_uuid=instance.uuid)
+                    destroy = bdm.delete_on_termination
+                    self._detach_volume(context, bdm, instance,
+                                        destroy_bdm=destroy)
+                except Exception as exc:
+                    exc_info = sys.exc_info()
+                    LOG.warning('Failed to detach volume: %(volume_id)s '
+                                'due to %(exc)s',
+                                {'volume_id': bdm.volume_id, 'exc': exc})
+
             if bdm.volume_id and bdm.delete_on_termination:
                 try:
+                    LOG.debug("Deleting volume: %s", bdm.volume_id,
+                              instance_uuid=instance.uuid)
                     self.volume_api.delete(context, bdm.volume_id)
                 except Exception as exc:
                     exc_info = sys.exc_info()
@@ -2483,8 +2496,14 @@ class ComputeManager(manager.Manager):
                 # future to set an instance fault the first time
                 # and to only ignore the failure if the instance
                 # is already in ERROR.
-                self._cleanup_volumes(context, instance.uuid, bdms,
-                        raise_exc=False)
+
+                # NOTE(ameeda): The volumes have already been detached
+                #               during the above _shutdown_instance() call,
+                #               so detach is not requested from
+                #               _cleanup_volumes() in this case.
+
+                self._cleanup_volumes(context, instance, bdms,
+                        raise_exc=False, detach=False)
             # if a delete task succeeded, always update vm state and task
             # state without expecting task state to be DELETING
             instance.vm_state = vm_states.DELETED
@@ -7384,7 +7403,7 @@ class ComputeManager(manager.Manager):
                 try:
                     self._shutdown_instance(context, instance, bdms,
                                             notify=False)
-                    self._cleanup_volumes(context, instance.uuid, bdms)
+                    self._cleanup_volumes(context, instance, bdms)
                 except Exception as e:
                     LOG.warning("Periodic cleanup failed to delete "
                                 "instance: %s",
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index e968bcc94afa..d838df7ede32 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -6954,7 +6954,7 @@ class ComputeTestCase(BaseTestCase,
         mock_shutdown.assert_has_calls([
             mock.call(ctxt, inst1, bdms, notify=False),
             mock.call(ctxt, inst2, bdms, notify=False)])
-        mock_cleanup.assert_called_once_with(ctxt, inst2['uuid'], bdms)
+        mock_cleanup.assert_called_once_with(ctxt, inst2, bdms)
         mock_get_uuid.assert_has_calls([
             mock.call(ctxt, inst1.uuid, use_slave=True),
             mock.call(ctxt, inst2.uuid, use_slave=True)])
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index f620fd97f41a..6002c3f503b2 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -277,7 +277,10 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         self.assertFalse(mock_log.error.called)
 
     @mock.patch('nova.compute.utils.notify_about_instance_action')
-    def test_delete_instance_without_info_cache(self, mock_notify):
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_detach_volume')
+    def test_delete_instance_without_info_cache(self, mock_detach,
+                                                mock_notify):
         instance = fake_instance.fake_instance_obj(
             self.context,
             uuid=uuids.instance,
@@ -3759,7 +3762,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         self.assertRaises(test.TestingException, do_test)
         set_error.assert_called_once_with(self.context, instance)
 
-    def test_cleanup_volumes(self):
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_detach_volume')
+    def test_cleanup_volumes(self, mock_detach):
         instance = fake_instance.fake_instance_obj(self.context)
         bdm_do_not_delete_dict = fake_block_device.FakeDbBlockDeviceDict(
             {'volume_id': 'fake-id1', 'source_type': 'image',
@@ -3772,11 +3777,17 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
 
         with mock.patch.object(self.compute.volume_api,
                 'delete') as volume_delete:
-            self.compute._cleanup_volumes(self.context, instance.uuid, bdms)
+            self.compute._cleanup_volumes(self.context, instance, bdms)
+            calls = [mock.call(self.context, bdm, instance,
+                               destroy_bdm=bdm.delete_on_termination)
+                     for bdm in bdms]
+            self.assertEqual(calls, mock_detach.call_args_list)
             volume_delete.assert_called_once_with(self.context,
                                                   bdms[1].volume_id)
 
-    def test_cleanup_volumes_exception_do_not_raise(self):
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_detach_volume')
+    def test_cleanup_volumes_exception_do_not_raise(self, mock_detach):
         instance = fake_instance.fake_instance_obj(self.context)
         bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
             {'volume_id': 'fake-id1', 'source_type': 'image',
@@ -3790,12 +3801,17 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         with mock.patch.object(self.compute.volume_api,
                 'delete',
                 side_effect=[test.TestingException(), None]) as volume_delete:
-            self.compute._cleanup_volumes(self.context, instance.uuid, bdms,
+            self.compute._cleanup_volumes(self.context, instance, bdms,
                     raise_exc=False)
             calls = [mock.call(self.context, bdm.volume_id)
                      for bdm in bdms]
             self.assertEqual(calls, volume_delete.call_args_list)
+            calls = [mock.call(self.context, bdm, instance,
+                               destroy_bdm=True) for bdm in bdms]
+            self.assertEqual(calls, mock_detach.call_args_list)
 
-    def test_cleanup_volumes_exception_raise(self):
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_detach_volume')
+    def test_cleanup_volumes_exception_raise(self, mock_detach):
         instance = fake_instance.fake_instance_obj(self.context)
         bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
             {'volume_id': 'fake-id1', 'source_type': 'image',
@@ -3810,10 +3826,31 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                 'delete',
                 side_effect=[test.TestingException(), None]) as volume_delete:
             self.assertRaises(test.TestingException,
-                self.compute._cleanup_volumes, self.context, instance.uuid,
+                self.compute._cleanup_volumes, self.context, instance,
                 bdms)
             calls = [mock.call(self.context, bdm.volume_id)
                      for bdm in bdms]
             self.assertEqual(calls, volume_delete.call_args_list)
+            calls = [mock.call(self.context, bdm, instance,
+                               destroy_bdm=bdm.delete_on_termination)
+                     for bdm in bdms]
+            self.assertEqual(calls, mock_detach.call_args_list)
+
+    @mock.patch('nova.compute.manager.ComputeManager._detach_volume',
+                side_effect=exception.CinderConnectionFailed(reason='idk'))
+    def test_cleanup_volumes_detach_fails_raise_exc(self, mock_detach):
+        instance = fake_instance.fake_instance_obj(self.context)
+        bdms = block_device_obj.block_device_make_list(
+            self.context,
+            [fake_block_device.FakeDbBlockDeviceDict(
+                {'volume_id': uuids.volume_id,
+                 'source_type': 'volume',
+                 'destination_type': 'volume',
+                 'delete_on_termination': False})])
+        self.assertRaises(exception.CinderConnectionFailed,
+                          self.compute._cleanup_volumes, self.context,
+                          instance, bdms)
+        mock_detach.assert_called_once_with(
+            self.context, bdms[0], instance, destroy_bdm=False)
 
     def test_stop_instance_task_state_none_power_state_shutdown(self):
         # Tests that stop_instance doesn't puke when the instance power_state
@@ -4664,7 +4701,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
         mock_clean_net.assert_called_once_with(self.context, self.instance,
                 self.requested_networks)
         mock_clean_vol.assert_called_once_with(self.context,
-                self.instance.uuid, self.block_device_mapping, raise_exc=False)
+                self.instance, self.block_device_mapping, raise_exc=False)
         mock_add.assert_called_once_with(self.context, self.instance,
                 mock.ANY, mock.ANY)
         mock_nil.assert_called_once_with(self.instance)
@@ -4901,7 +4938,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
         mock_clean_net.assert_called_once_with(self.context, self.instance,
                 self.requested_networks)
         mock_clean_vol.assert_called_once_with(self.context,
-                self.instance.uuid, self.block_device_mapping,
+                self.instance, self.block_device_mapping,
                 raise_exc=False)
         mock_add.assert_called_once_with(self.context, self.instance,
                 mock.ANY, mock.ANY, fault_message=mock.ANY)
@@ -5049,7 +5086,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
         self._assert_build_instance_update(mock_save)
         if cleanup_volumes:
             mock_clean_vol.assert_called_once_with(self.context,
-                    self.instance.uuid, self.block_device_mapping,
+                    self.instance, self.block_device_mapping,
                     raise_exc=False)
         if nil_out_host_and_node:
             mock_nil.assert_called_once_with(self.instance)