diff --git a/cinder/tests/unit/test_volume.py b/cinder/tests/unit/test_volume.py
index 87e3c6c74..17278fd8d 100644
--- a/cinder/tests/unit/test_volume.py
+++ b/cinder/tests/unit/test_volume.py
@@ -531,6 +531,7 @@ class VolumeTestCase(base.BaseVolumeTestCase):
 
     def test_create_driver_not_initialized_rescheduling(self):
         self.volume.driver._initialized = False
+        mock_delete = self.mock_object(self.volume.driver, 'delete_volume')
 
         volume = tests_utils.create_volume(
             self.context,
@@ -548,6 +549,10 @@ class VolumeTestCase(base.BaseVolumeTestCase):
         # allocated_capacity tracking.
         self.assertEqual({}, self.volume.stats['pools'])
 
+        # NOTE(dulek): As we've rescheduled, make sure delete_volume was
+        # called.
+        self.assertTrue(mock_delete.called)
+
         db.volume_destroy(context.get_admin_context(), volume_id)
 
     def test_create_non_cinder_exception_rescheduling(self):
@@ -3708,12 +3713,16 @@ class VolumeTestCase(base.BaseVolumeTestCase):
         self.mock_object(self.volume.driver, 'copy_image_to_volume',
                          fake_copy_image_to_volume)
+        mock_delete = self.mock_object(self.volume.driver, 'delete_volume')
 
         self.assertRaises(exception.ImageCopyFailure,
                           self._create_volume_from_image)
         # NOTE(dulek): Rescheduling should not occur, so lets assert that
         # allocated_capacity is incremented.
         self.assertDictEqual(self.volume.stats['pools'],
                              {'_pool0': {'allocated_capacity_gb': 1}})
+        # NOTE(dulek): As we haven't rescheduled, make sure delete_volume was
+        # not called.
+        self.assertFalse(mock_delete.called)
 
     @mock.patch('cinder.utils.brick_get_connector_properties')
     @mock.patch('cinder.utils.brick_get_connector')
diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py
index 592bd9d41..d9de3fa48 100644
--- a/cinder/volume/flows/manager/create_volume.py
+++ b/cinder/volume/flows/manager/create_volume.py
@@ -58,7 +58,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
     this volume elsewhere.
     """
 
-    def __init__(self, reschedule_context, db, scheduler_rpcapi,
+    def __init__(self, reschedule_context, db, driver, scheduler_rpcapi,
                  do_reschedule):
         requires = ['filter_properties', 'request_spec', 'volume',
                     'context']
@@ -67,6 +67,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
         self.do_reschedule = do_reschedule
         self.scheduler_rpcapi = scheduler_rpcapi
         self.db = db
+        self.driver = driver
         self.reschedule_context = reschedule_context
         # These exception types will trigger the volume to be set into error
         # status rather than being rescheduled.
@@ -151,6 +152,16 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
 
         LOG.debug("Volume %s: re-scheduled", volume.id)
 
+        # NOTE(dulek): Here we should be sure that rescheduling occurred and
+        # the host field will be erased. Just in case the volume was already
+        # created at the backend, we attempt to delete it.
+        try:
+            self.driver.delete_volume(volume)
+        except Exception:
+            # Most likely the volume wasn't created at the backend. We can
+            # safely ignore this.
+            pass
+
     def revert(self, context, result, flow_failures, volume, **kwargs):
         # NOTE(dulek): Revert is occurring and manager need to know if
         # rescheduling happened. We're returning boolean flag that will
@@ -943,9 +954,8 @@ def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume,
     # status when reverting the flow. Meanwhile, no need to revert process of
     # ExtractVolumeRefTask.
     do_reschedule = allow_reschedule and request_spec and retry
-    volume_flow.add(OnFailureRescheduleTask(reschedule_context, db,
-                                            scheduler_rpcapi,
-                                            do_reschedule))
+    volume_flow.add(OnFailureRescheduleTask(reschedule_context, db, driver,
+                                            scheduler_rpcapi, do_reschedule))
 
     LOG.debug("Volume reschedule parameters: %(allow)s "
               "retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry})
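
Note on the change above: once the revert path has asked the scheduler to retry the volume elsewhere, the task can no longer tell whether the failed backend ever created the volume, so it attempts the delete and swallows any error. A minimal standalone sketch of that best-effort cleanup pattern, outside the TaskFlow machinery (the cleanup_after_reschedule helper is illustrative only, not part of Cinder):

    import logging

    LOG = logging.getLogger(__name__)


    def cleanup_after_reschedule(driver, volume):
        """Best-effort backend cleanup once a volume has been rescheduled.

        The driver may or may not have created the volume before the
        failure, so a delete error here is expected and safe to ignore.
        """
        try:
            driver.delete_volume(volume)
        except Exception:
            # Most likely the volume never reached the backend; the
            # rescheduled attempt will create it from scratch on another
            # host.
            LOG.debug("Ignoring delete_volume failure for volume %s during "
                      "reschedule cleanup.", volume.id)

The tests hook into the same call: self.mock_object(self.volume.driver, 'delete_volume') (presumably a thin wrapper around mock.patch.object that returns the created mock) replaces the driver method, so mock_delete.called is True only if this cleanup ran -- expected on the rescheduling path, and not on the plain ImageCopyFailure path.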