Garbage Remains when Attached Volume is Migrated with NFS Driver
A garbage volume was left behind when an attached NFS volume was migrated to another NFS backend, and an error was reported because the attached volume was not detached before the migration completed. The root cause is that status_update['status'] was set only for retype, not for a migration from one NFS backend to another, so the detach/re-attach handling never ran. This patch fixes migrate_volume_completion so that migrating an attached volume from one NFS backend to another NFS backend works correctly.

Change-Id: I657a46d98b1e2a52cec128308cbbf39166c3625b
Closes-bug: 1391172
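As an aside on the root cause, the following minimal sketch contrasts the old and new status handling. It assumes _get_original_status() effectively reports 'in-use' when the volume has an instance attached and 'available' otherwise; the stand-in helper and the simplified volume dict below are illustrative only, not Cinder code.

    # Minimal sketch of the status handling before and after the fix.
    # Assumption: _get_original_status() reports 'in-use' for a volume with an
    # instance attached and 'available' otherwise; the lambda below is a
    # stand-in, not the real Cinder implementation.

    def should_detach_old(volume, get_original_status):
        # Before the fix: the original status was only captured on retype, so
        # a plain migration of an attached volume left status_update empty.
        status_update = {}
        if volume['status'] == 'retyping':
            status_update = {'status': get_original_status(volume)}
        return status_update.get('status') == 'in-use'

    def should_detach_new(volume, get_original_status):
        # After the fix: the original status is always captured, so attached
        # volumes are detached (and later re-attached) for migrations too.
        orig_volume_status = get_original_status(volume)
        return orig_volume_status == 'in-use'

    get_original_status = lambda vol: ('in-use' if vol.get('instance_uuid')
                                       else 'available')
    migrating_attached = {'status': 'migrating', 'instance_uuid': 'fake-uuid'}

    print(should_detach_old(migrating_attached, get_original_status))  # False: detach skipped
    print(should_detach_new(migrating_attached, get_original_status))  # True: detach happens

With the old logic a backend-to-backend migration of an attached volume never populated status_update, so the detach and re-attach branches were skipped and the source volume was left behind as garbage.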
@@ -2828,14 +2828,15 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.stubs.Set(self.volume.driver, 'attach_volume',
                        lambda *args, **kwargs: None)
 
-        self.volume.migrate_volume_completion(self.context,
-                                              old_volume['id'],
-                                              new_volume['id'])
+        with mock.patch.object(self.volume.driver, 'detach_volume') as detach:
+            self.volume.migrate_volume_completion(self.context, old_volume[
+                'id'], new_volume['id'])
 
         volume = db.volume_get(elevated, old_volume['id'])
         self.assertEqual(volume['status'], status)
         self.assertEqual(volume['attached_host'], attached_host)
         self.assertEqual(volume['instance_uuid'], instance_uuid)
+        self.assertEqual(status == 'in-use', detach.called)
 
     def test_migrate_volume_completion_retype_available(self):
         self._test_migrate_volume_completion('available', retyping=True)
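The added assertion relies on the standard mock.patch.object spy pattern: patch detach_volume on the driver, run migrate_volume_completion, then check detach.called. A self-contained sketch of that pattern follows; FakeDriver is a stand-in for the real volume driver, not Cinder code.

    # Self-contained illustration of the mock.patch.object pattern used in
    # the test: patch a method on an object, exercise the code, check .called.
    try:
        from unittest import mock  # Python 3
    except ImportError:
        import mock  # the external mock library used on Python 2


    class FakeDriver(object):
        """Stand-in for the volume driver (an assumption for this sketch)."""

        def detach_volume(self, context, volume):
            pass


    driver = FakeDriver()
    with mock.patch.object(driver, 'detach_volume') as detach:
        # The code under test would only reach this call for an 'in-use' volume.
        driver.detach_volume(None, {'id': 'fake-vol'})

    # Mirrors self.assertEqual(status == 'in-use', detach.called) in the test.
    assert detach.called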
@@ -1093,9 +1093,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         new_volume = self.db.volume_get(ctxt, new_volume_id)
         rpcapi = volume_rpcapi.VolumeAPI()
 
-        status_update = {}
-        if volume['status'] == 'retyping':
-            status_update = {'status': self._get_original_status(volume)}
+        orig_volume_status = self._get_original_status(volume)
 
         if error:
             msg = _("migrate_volume_completion is cleaning up an error "
@@ -1104,9 +1102,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                             'vol2': new_volume['id']})
             new_volume['migration_status'] = None
             rpcapi.delete_volume(ctxt, new_volume)
-            updates = {'migration_status': None}
-            if status_update:
-                updates.update(status_update)
+            updates = {'migration_status': None, 'status': orig_volume_status}
             self.db.volume_update(ctxt, volume_id, updates)
             return volume_id
 
@@ -1115,7 +1111,7 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         # Delete the source volume (if it fails, don't fail the migration)
         try:
-            if status_update.get('status') == 'in-use':
+            if orig_volume_status == 'in-use':
                 self.detach_volume(ctxt, volume_id)
             self.delete_volume(ctxt, volume_id)
         except Exception as ex:
@@ -1130,16 +1126,14 @@ class VolumeManager(manager.SchedulerDependentManager):
                               new_volume)
         self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
         self.db.volume_destroy(ctxt, new_volume_id)
-        if status_update.get('status') == 'in-use':
-            updates = {'migration_status': 'completing'}
-            updates.update(status_update)
+        if orig_volume_status == 'in-use':
+            updates = {'migration_status': 'completing',
+                       'status': orig_volume_status}
         else:
             updates = {'migration_status': None}
         self.db.volume_update(ctxt, volume_id, updates)
 
-        if 'in-use' in (status_update.get('status'), volume['status']):
-            # NOTE(jdg): if we're passing the ref here, why are we
-            # also passing in the various fields from that ref?
+        if orig_volume_status == 'in-use':
             rpcapi.attach_volume(ctxt,
                                  volume,
                                  volume['instance_uuid'],
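Taken together, the manager-side hunks give migrate_volume_completion roughly the flow sketched below. The StubDB/StubRPC classes and the complete_migration function are illustrative stand-ins (not Cinder's API), and the extra attach_volume arguments that are truncated in the hunk above are omitted.

    # Condensed, runnable (Python 3) sketch of the completion flow after the
    # patch; the stub classes below are assumptions for illustration only.
    class StubDB(object):
        def finish_volume_migration(self, ctxt, volume_id, new_volume_id):
            print('swap volume records %s <-> %s' % (volume_id, new_volume_id))

        def volume_destroy(self, ctxt, volume_id):
            print('destroy %s' % volume_id)

        def volume_update(self, ctxt, volume_id, updates):
            print('update %s with %s' % (volume_id, updates))


    class StubRPC(object):
        def attach_volume(self, ctxt, volume, instance_uuid):
            # The real call passes further arguments that are truncated in
            # the hunk above, so they are left out of this sketch.
            print('re-attach %s to instance %s' % (volume['id'], instance_uuid))


    def complete_migration(ctxt, volume, new_volume_id, db, rpcapi,
                           detach_volume, delete_volume, orig_volume_status):
        volume_id = volume['id']

        # Detach first if the volume was attached, then delete the source.
        if orig_volume_status == 'in-use':
            detach_volume(ctxt, volume_id)
        delete_volume(ctxt, volume_id)

        # Swap the DB records and restore the original status.
        db.finish_volume_migration(ctxt, volume_id, new_volume_id)
        db.volume_destroy(ctxt, new_volume_id)
        if orig_volume_status == 'in-use':
            updates = {'migration_status': 'completing',
                       'status': orig_volume_status}
        else:
            updates = {'migration_status': None}
        db.volume_update(ctxt, volume_id, updates)

        # Re-attach to the original instance if the volume was in use.
        if orig_volume_status == 'in-use':
            rpcapi.attach_volume(ctxt, volume, volume['instance_uuid'])
        return volume_id


    complete_migration(None, {'id': 'vol-1', 'instance_uuid': 'inst-1'}, 'vol-2',
                       StubDB(), StubRPC(),
                       detach_volume=lambda ctxt, vid: print('detach %s' % vid),
                       delete_volume=lambda ctxt, vid: print('delete %s' % vid),
                       orig_volume_status='in-use')

The key change is that every branch keys off orig_volume_status, which is computed unconditionally, instead of a status_update dict that was only populated for retype.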