Browse Source

compute: Use source_bdms to reset attachment_ids during LM rollback

As we now provide the original source bdms during rollback, we can also
use them to reset any volume attachment ids to the ids used by the
source compute host.

Change-Id: Ibe9215c07a1ee00e0e121c69bcf7ee1b1b80fae0
(cherry picked from commit 0eea5041c4)
(cherry picked from commit 8b865378b8)
changes/24/749224/3
Lee Yarwood 3 years ago
parent
commit
b8a1f08bc8
  1. 25
      nova/compute/manager.py
  2. 2
      nova/objects/migrate_data.py
  3. 31
      nova/tests/unit/compute/test_compute.py

25
nova/compute/manager.py

@ -6492,7 +6492,7 @@ class ComputeManager(manager.Manager):
# save current attachment so we can detach it on success,
# or restore it on a rollback.
# NOTE(mdbooth): This data is no longer used by the source
# host since change I0390c9ff. We can't remove it until we
# host since change Ibe9215c0. We can't remove it until we
# are sure the source host has been upgraded.
migrate_data.old_vol_attachment_ids[bdm.volume_id] = \
bdm.attachment_id
@ -7284,24 +7284,17 @@ class ComputeManager(manager.Manager):
self.compute_rpcapi.remove_volume_connection(
context, instance, bdm.volume_id, dest)
if bdm.attachment_id:
# 3.44 cinder api flow. Set the bdm's
# attachment_id to the old attachment of the source
# host. If old_attachments is not there, then
# there was an error before the new attachment was made.
# TODO(lyarwood): migrate_data.old_vol_attachment_ids can
# be removed now as we can lookup the original
# attachment_ids from the source_bdms list here.
old_attachments = migrate_data.old_vol_attachment_ids \
if 'old_vol_attachment_ids' in migrate_data else None
if old_attachments and bdm.volume_id in old_attachments:
self.volume_api.attachment_delete(context,
bdm.attachment_id)
bdm.attachment_id = old_attachments[bdm.volume_id]
source_bdm = source_bdms_by_volid[bdm.volume_id]
if bdm.attachment_id and source_bdm.attachment_id:
# NOTE(lyarwood): 3.44 cinder api flow. Delete the
# attachment used by the destination and reset the bdm
# attachment_id to the old attachment of the source host.
self.volume_api.attachment_delete(context,
bdm.attachment_id)
bdm.attachment_id = source_bdm.attachment_id
# NOTE(lyarwood): Rollback the connection_info stored within
# the BDM to that used by the source and not the destination.
source_bdm = source_bdms_by_volid[bdm.volume_id]
bdm.connection_info = source_bdm.connection_info
bdm.save()

2
nova/objects/migrate_data.py

@ -109,7 +109,7 @@ class LiveMigrateData(obj_base.NovaObject):
# old_vol_attachment_ids is a dict used to store the old attachment_ids
# for each volume so they can be restored on a migration rollback. The
# key is the volume_id, and the value is the attachment_id.
# TODO(mdbooth): This field was made redundant by change I0390c9ff. We
# TODO(mdbooth): This field was made redundant by change Ibe9215c0. We
# should eventually remove it.
'old_vol_attachment_ids': fields.DictOfStringsField(),
# wait_for_vif_plugged is set in pre_live_migration on the destination

31
nova/tests/unit/compute/test_compute.py

@ -6277,6 +6277,7 @@ class ComputeTestCase(BaseTestCase,
# cleanup
db.instance_destroy(c, instance['uuid'])
@mock.patch.object(cinder.API, 'attachment_delete')
@mock.patch.object(fake.FakeDriver, 'get_instance_disk_info')
@mock.patch.object(compute_rpcapi.ComputeAPI, 'pre_live_migration')
@mock.patch.object(objects.ComputeNode,
@ -6289,7 +6290,8 @@ class ComputeTestCase(BaseTestCase,
def test_live_migration_exception_rolls_back(self, mock_save,
mock_rollback, mock_remove,
mock_get_bdms,
mock_get_node, mock_pre, mock_get_disk):
mock_get_node, mock_pre, mock_get_disk,
mock_attachment_delete):
# Confirm exception when pre_live_migration fails.
c = context.get_admin_context()
@ -6307,20 +6309,26 @@ class ComputeTestCase(BaseTestCase,
# All the fake BDMs we've generated, in order
fake_bdms = []
# A list of the attachment_ids returned by gen_fake_bdms
fake_attachment_ids = []
def gen_fake_bdms(obj, instance):
# generate a unique fake connection_info every time we're called,
# simulating connection_info being mutated elsewhere.
# generate a unique fake connection_info and attachment_id every
# time we're called, simulating attachment_id and connection_info
# being mutated elsewhere.
bdms = objects.BlockDeviceMappingList(objects=[
objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
**fake_block_device.AnonFakeDbBlockDeviceDict(
{'volume_id': uuids.volume_id_1,
'attachment_id': uuidutils.generate_uuid(),
'source_type': 'volume',
'connection_info':
jsonutils.dumps(uuidutils.generate_uuid()),
'destination_type': 'volume'})),
objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
**fake_block_device.AnonFakeDbBlockDeviceDict(
{'volume_id': uuids.volume_id_2,
'attachment_id': uuidutils.generate_uuid(),
'source_type': 'volume',
'connection_info':
jsonutils.dumps(uuidutils.generate_uuid()),
@ -6328,6 +6336,7 @@ class ComputeTestCase(BaseTestCase,
])
for bdm in bdms:
bdm.save = mock.Mock()
fake_attachment_ids.append(bdm.attachment_id)
fake_bdms.append(bdms)
return bdms
@ -6379,10 +6388,13 @@ class ComputeTestCase(BaseTestCase,
# BDMs with unique connection_info every time it's called. These are
# stored in fake_bdms in the order they were generated. We assert here
# that the last BDMs generated (in _rollback_live_migration) now have
# the same connection_info as the first BDMs generated (before calling
# pre_live_migration), and that we saved them.
# the same connection_info and attachment_id as the first BDMs
# generated (before calling pre_live_migration), and that we saved
# them.
self.assertGreater(len(fake_bdms), 1)
for source_bdm, final_bdm in zip(fake_bdms[0], fake_bdms[-1]):
self.assertEqual(source_bdm.attachment_id,
final_bdm.attachment_id)
self.assertEqual(source_bdm.connection_info,
final_bdm.connection_info)
final_bdm.save.assert_called()
@ -6395,6 +6407,11 @@ class ComputeTestCase(BaseTestCase,
destroy_disks=True,
migrate_data=test.MatchType(
migrate_data_obj.XenapiLiveMigrateData))
# Assert that the final attachment_ids returned by
# BlockDeviceMappingList.get_by_instance_uuid are then deleted.
mock_attachment_delete.assert_has_calls([
mock.call(c, fake_attachment_ids.pop()),
mock.call(c, fake_attachment_ids.pop())], any_order=True)
@mock.patch.object(compute_rpcapi.ComputeAPI, 'pre_live_migration')
@mock.patch.object(compute_rpcapi.ComputeAPI,

Loading…
Cancel
Save