Merge "compute: Use source_bdms to reset attachment_ids during LM rollback" into stable/rocky

This commit is contained in:
Zuul 2020-09-19 16:58:07 +00:00 committed by Gerrit Code Review
commit 4fb32d4c6d
3 changed files with 34 additions and 24 deletions

View File

@@ -6492,7 +6492,7 @@ class ComputeManager(manager.Manager):
             # save current attachment so we can detach it on success,
             # or restore it on a rollback.
             # NOTE(mdbooth): This data is no longer used by the source
-            # host since change I0390c9ff. We can't remove it until we
+            # host since change Ibe9215c0. We can't remove it until we
            # are sure the source host has been upgraded.
            migrate_data.old_vol_attachment_ids[bdm.volume_id] = \
                bdm.attachment_id
@@ -7284,24 +7284,17 @@ class ComputeManager(manager.Manager):
                     self.compute_rpcapi.remove_volume_connection(
                         context, instance, bdm.volume_id, dest)
-                    if bdm.attachment_id:
-                        # 3.44 cinder api flow. Set the bdm's
-                        # attachment_id to the old attachment of the source
-                        # host. If old_attachments is not there, then
-                        # there was an error before the new attachment was made.
-                        # TODO(lyarwood): migrate_data.old_vol_attachment_ids can
-                        # be removed now as we can lookup the original
-                        # attachment_ids from the source_bdms list here.
-                        old_attachments = migrate_data.old_vol_attachment_ids \
-                            if 'old_vol_attachment_ids' in migrate_data else None
-                        if old_attachments and bdm.volume_id in old_attachments:
-                            self.volume_api.attachment_delete(context,
-                                                              bdm.attachment_id)
-                            bdm.attachment_id = old_attachments[bdm.volume_id]
+                    source_bdm = source_bdms_by_volid[bdm.volume_id]
+                    if bdm.attachment_id and source_bdm.attachment_id:
+                        # NOTE(lyarwood): 3.44 cinder api flow. Delete the
+                        # attachment used by the destination and reset the bdm
+                        # attachment_id to the old attachment of the source host.
+                        self.volume_api.attachment_delete(context,
+                                                          bdm.attachment_id)
+                        bdm.attachment_id = source_bdm.attachment_id
                     # NOTE(lyarwood): Rollback the connection_info stored within
                     # the BDM to that used by the source and not the destination.
-                    source_bdm = source_bdms_by_volid[bdm.volume_id]
                     bdm.connection_info = source_bdm.connection_info
                     bdm.save()

View File

@@ -109,7 +109,7 @@ class LiveMigrateData(obj_base.NovaObject):
         # old_vol_attachment_ids is a dict used to store the old attachment_ids
         # for each volume so they can be restored on a migration rollback. The
         # key is the volume_id, and the value is the attachment_id.
-        # TODO(mdbooth): This field was made redundant by change I0390c9ff. We
+        # TODO(mdbooth): This field was made redundant by change Ibe9215c0. We
         # should eventually remove it.
         'old_vol_attachment_ids': fields.DictOfStringsField(),
         # wait_for_vif_plugged is set in pre_live_migration on the destination

View File

@@ -6277,6 +6277,7 @@ class ComputeTestCase(BaseTestCase,
         # cleanup
         db.instance_destroy(c, instance['uuid'])

+    @mock.patch.object(cinder.API, 'attachment_delete')
     @mock.patch.object(fake.FakeDriver, 'get_instance_disk_info')
     @mock.patch.object(compute_rpcapi.ComputeAPI, 'pre_live_migration')
     @mock.patch.object(objects.ComputeNode,
@@ -6289,7 +6290,8 @@ class ComputeTestCase(BaseTestCase,
     def test_live_migration_exception_rolls_back(self, mock_save,
                                                  mock_rollback, mock_remove,
                                                  mock_get_bdms,
-                                                 mock_get_node, mock_pre, mock_get_disk):
+                                                 mock_get_node, mock_pre, mock_get_disk,
+                                                 mock_attachment_delete):
         # Confirm exception when pre_live_migration fails.
         c = context.get_admin_context()
@@ -6307,20 +6309,26 @@ class ComputeTestCase(BaseTestCase,
         # All the fake BDMs we've generated, in order
         fake_bdms = []

+        # A list of the attachment_ids returned by gen_fake_bdms
+        fake_attachment_ids = []
+
         def gen_fake_bdms(obj, instance):
-            # generate a unique fake connection_info every time we're called,
-            # simulating connection_info being mutated elsewhere.
+            # generate a unique fake connection_info and attachment_id every
+            # time we're called, simulating attachment_id and connection_info
+            # being mutated elsewhere.
             bdms = objects.BlockDeviceMappingList(objects=[
                 objects.BlockDeviceMapping(
-                    **fake_block_device.FakeDbBlockDeviceDict(
+                    **fake_block_device.AnonFakeDbBlockDeviceDict(
                         {'volume_id': uuids.volume_id_1,
+                         'attachment_id': uuidutils.generate_uuid(),
                          'source_type': 'volume',
                          'connection_info':
                              jsonutils.dumps(uuidutils.generate_uuid()),
                          'destination_type': 'volume'})),
                 objects.BlockDeviceMapping(
-                    **fake_block_device.FakeDbBlockDeviceDict(
+                    **fake_block_device.AnonFakeDbBlockDeviceDict(
                         {'volume_id': uuids.volume_id_2,
+                         'attachment_id': uuidutils.generate_uuid(),
                          'source_type': 'volume',
                          'connection_info':
                              jsonutils.dumps(uuidutils.generate_uuid()),
@@ -6328,6 +6336,7 @@ class ComputeTestCase(BaseTestCase,
             ])
             for bdm in bdms:
                 bdm.save = mock.Mock()
+                fake_attachment_ids.append(bdm.attachment_id)
             fake_bdms.append(bdms)
             return bdms
@@ -6379,10 +6388,13 @@ class ComputeTestCase(BaseTestCase,
         # BDMs with unique connection_info every time it's called. These are
         # stored in fake_bdms in the order they were generated. We assert here
         # that the last BDMs generated (in _rollback_live_migration) now have
-        # the same connection_info as the first BDMs generated (before calling
-        # pre_live_migration), and that we saved them.
+        # the same connection_info and attachment_id as the first BDMs
+        # generated (before calling pre_live_migration), and that we saved
+        # them.
         self.assertGreater(len(fake_bdms), 1)
         for source_bdm, final_bdm in zip(fake_bdms[0], fake_bdms[-1]):
+            self.assertEqual(source_bdm.attachment_id,
+                             final_bdm.attachment_id)
             self.assertEqual(source_bdm.connection_info,
                              final_bdm.connection_info)
             final_bdm.save.assert_called()
@ -6395,6 +6407,11 @@ class ComputeTestCase(BaseTestCase,
destroy_disks=True, destroy_disks=True,
migrate_data=test.MatchType( migrate_data=test.MatchType(
migrate_data_obj.XenapiLiveMigrateData)) migrate_data_obj.XenapiLiveMigrateData))
# Assert that the final attachment_ids returned by
# BlockDeviceMappingList.get_by_instance_uuid are then deleted.
mock_attachment_delete.assert_has_calls([
mock.call(c, fake_attachment_ids.pop()),
mock.call(c, fake_attachment_ids.pop())], any_order=True)
@mock.patch.object(compute_rpcapi.ComputeAPI, 'pre_live_migration') @mock.patch.object(compute_rpcapi.ComputeAPI, 'pre_live_migration')
@mock.patch.object(compute_rpcapi.ComputeAPI, @mock.patch.object(compute_rpcapi.ComputeAPI,