Fix retyping attached volumes requiring migration
Modifies the retype method so that the volume's DB entry is only updated if the volume was not migrated. The migrate_volume_completion method is made responsible for detaching the old volume and attaching the new one. This takes the responsibility away from Nova, because the calls must be made in a specific order that is not guaranteed when they are performed by Nova. attach_volume now marks the migration as complete (by clearing migration_status), because it is the final action performed by Cinder.

Change-Id: Ia15636893be4f0077a4f75e746a8ab1a8798c44b
Partial-Bug: 1316079
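For orientation, the ordering this change enforces looks roughly like the following. This is a minimal, self-contained sketch, not Cinder's actual API: the dict stands in for the volume's DB row, and detach_volume/attach_volume are illustrative stand-ins for the RPC calls the manager makes.

# Sketch of the detach -> swap -> attach ordering (assumed names, not
# Cinder's real API). The final attach is what clears migration_status.

def detach_volume(volume):
    # stand-in for the detach RPC: releases the source volume
    volume['attach_status'] = 'detached'

def attach_volume(volume):
    # stand-in for the attach RPC: attaching is the last step Cinder
    # performs, so it also marks the migration as complete by clearing
    # migration_status
    volume['attach_status'] = 'attached'
    if volume['migration_status']:
        volume['migration_status'] = None

def migrate_volume_completion(volume, status_update):
    # detach first, so the destination can be attached in its place
    if status_update:
        detach_volume(volume)
    # ... source volume deleted and DB rows swapped here ...
    if status_update:
        # leave the migration marked 'completing'; the attach finishes it
        volume['migration_status'] = 'completing'
        volume.update(status_update)
        attach_volume(volume)
    else:
        volume['migration_status'] = None
    return volume

if __name__ == '__main__':
    vol = {'attach_status': 'attached', 'migration_status': 'migrating',
           'status': 'retyping'}
    migrate_volume_completion(vol, {'status': 'in-use'})
    print(vol)  # migration_status is None only after the final attach

Because Cinder itself issues the detach and attach in sequence, the ordering no longer depends on when Nova happens to make the calls.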
--- a/cinder/tests/test_volume.py
+++ b/cinder/tests/test_volume.py
@@ -2566,7 +2566,10 @@ class VolumeTestCase(BaseVolumeTestCase):
                                            volume_type_id=old_vol_type['id'])
         if snap:
             self._create_snapshot(volume['id'], size=volume['size'])
-        host_obj = {'host': 'newhost', 'capabilities': {}}
+        if driver or diff_equal:
+            host_obj = {'host': CONF.host, 'capabilities': {}}
+        else:
+            host_obj = {'host': 'newhost', 'capabilities': {}}
 
         reserve_opts = {'volumes': 1, 'gigabytes': volume['size']}
         QUOTAS.add_volume_type_opts(self.context,
@@ -2607,10 +2610,15 @@ class VolumeTestCase(BaseVolumeTestCase):
             volumes_in_use = 0
 
         # check properties
-        if not exc:
+        if driver or diff_equal:
             self.assertEqual(volume['volume_type_id'], vol_type['id'])
             self.assertEqual(volume['status'], 'available')
-            self.assertEqual(volume['host'], 'newhost')
+            self.assertEqual(volume['host'], CONF.host)
+            self.assertEqual(volumes_in_use, 1)
+        elif not exc:
+            self.assertEqual(volume['volume_type_id'], old_vol_type['id'])
+            self.assertEqual(volume['status'], 'retyping')
+            self.assertEqual(volume['host'], CONF.host)
             self.assertEqual(volumes_in_use, 1)
         else:
             self.assertEqual(volume['volume_type_id'], old_vol_type['id'])
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -578,7 +578,8 @@ class VolumeManager(manager.SchedulerDependentManager):
                         volume_metadata.get('attached_mode') != mode):
                     msg = _("being attached by different mode")
                     raise exception.InvalidVolume(reason=msg)
-            elif volume['status'] != "available":
+            elif (not volume['migration_status'] and
+                  volume['status'] != "available"):
                 msg = _("status must be available or attaching")
                 raise exception.InvalidVolume(reason=msg)
 
@@ -633,6 +634,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                                       instance_uuid,
                                       host_name_sanitized,
                                       mountpoint)
+            if volume['migration_status']:
+                self.db.volume_update(context, volume_id,
+                                      {'migration_status': None})
             self._notify_about_volume_usage(context, volume, "attach.end")
         return do_attach()
 
@@ -1011,6 +1015,8 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         # Delete the source volume (if it fails, don't fail the migration)
         try:
+            if status_update['status'] == 'in-use':
+                self.detach_volume(ctxt, volume_id)
             self.delete_volume(ctxt, volume_id)
         except Exception as ex:
             msg = _("Failed to delete migration source vol %(vol)s: %(err)s")
@@ -1018,10 +1024,20 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
         self.db.volume_destroy(ctxt, new_volume_id)
-        updates = {'migration_status': None}
         if status_update:
+            updates = {'migration_status': 'completing'}
             updates.update(status_update)
+        else:
+            updates = {'migration_status': None}
         self.db.volume_update(ctxt, volume_id, updates)
+
+        if status_update:
+            rpcapi.attach_volume(ctxt,
+                                 volume,
+                                 volume['instance_uuid'],
+                                 volume['attached_host'],
+                                 volume['mountpoint'],
+                                 'rw')
         return volume['id']
 
     def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
@@ -1279,11 +1295,11 @@ class VolumeManager(manager.SchedulerDependentManager):
                 with excutils.save_and_reraise_exception():
                     _retype_error(context, volume_id, old_reservations,
                                   new_reservations, status_update)
-
-        self.db.volume_update(context, volume_id,
-                              {'volume_type_id': new_type_id,
-                               'host': host['host'],
-                               'status': status_update['status']})
+        else:
+            self.db.volume_update(context, volume_id,
+                                  {'volume_type_id': new_type_id,
+                                   'host': host['host'],
+                                   'status': status_update['status']})
 
         if old_reservations:
             QUOTAS.commit(context, old_reservations, project_id=project_id)