Fix snapshot stuck in 'deleting' after cinder-volume reboot

If cinder-volume is rebooted while a volume and its snapshots are
being cascade deleted, the snapshot is left stuck in 'deleting'.
Worse, the user cannot delete that snapshot even after resetting
its status, because the deleting volume no longer exists once
init_host has run.

Fix this bug by cascade deleting the volume when resuming deletes
in init_host: any volume in 'deleting' status has already passed
the API checks and should be deleted together with all of its
snapshots.
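
Roughly, the resumed delete path in init_host now behaves like the
sketch below. This is a simplification: the loop over volumes in
'deleting' status is an assumption about the surrounding code, and
only the delete_volume call with cascade=True appears verbatim in
the diff below.

    # Sketch: on service start, re-drive deletes that were
    # interrupted by the reboot. Enumerating 'deleting' volumes is
    # assumed; cascade=True is the actual change.
    for volume in volumes:
        if volume['status'] == 'deleting':
            self.delete_volume(ctxt, volume['id'], volume=volume,
                               cascade=True)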

Change-Id: Ic47290b8b7004b9e37d096f52af025931ab13c6c
Closes-Bug: #1586604
commit bc9d737c87
parent d669449cd2
Author: wanghao
Date:   2016-05-28 17:19:26 +08:00

2 changed files with 14 additions and 2 deletions


@@ -4567,6 +4567,16 @@ class VolumeTestCase(BaseVolumeTestCase):
                           self.context,
                           snap)
 
+    def test_init_host_clears_deleting_snapshots(self):
+        """Test that init_host will delete a snapshot stuck in deleting."""
+        volume = tests_utils.create_volume(self.context, status='deleting',
+                                           size=1, host=CONF.host)
+        snapshot = tests_utils.create_snapshot(self.context,
+                                               volume.id, status='deleting')
+        self.volume.init_host()
+        self.assertRaises(exception.VolumeNotFound, volume.refresh)
+        self.assertRaises(exception.SnapshotNotFound, snapshot.refresh)
+
 
 @ddt.ddt
 class VolumeMigrationTestCase(BaseVolumeTestCase):


@@ -442,10 +442,12 @@ class VolumeManager(manager.SchedulerDependentManager):
                 # threadpool to prevent the main volume service thread
                 # from being blocked.
                 self._add_to_threadpool(self.delete_volume, ctxt,
-                                        volume['id'], volume=volume)
+                                        volume['id'], volume=volume,
+                                        cascade=True)
             else:
                 # By default, delete volumes sequentially
-                self.delete_volume(ctxt, volume['id'], volume=volume)
+                self.delete_volume(ctxt, volume['id'], volume=volume,
+                                   cascade=True)
             LOG.info(_LI("Resume volume delete completed successfully."),
                      resource=volume)
 
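
For reference, the reason cascade=True resolves the stuck snapshot
is that delete_volume in cascade mode removes the volume's
snapshots before the volume itself. A minimal sketch of that
semantic follows; the helper name snapshots_for_volume is
illustrative, not the actual Cinder API.

    # Sketch of cascade-delete semantics; snapshots_for_volume is
    # a hypothetical helper, not the real Cinder call.
    def delete_volume(self, ctxt, volume_id, volume=None, cascade=False):
        if cascade:
            # Delete dependent snapshots first, so none are orphaned
            # in 'deleting' when the volume goes away.
            for snapshot in snapshots_for_volume(ctxt, volume_id):
                self.delete_snapshot(ctxt, snapshot)
        # ... then delete the volume itself on the backend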