Merge "Refactor volume connection cleanup out of _post_live_migration" into stable/train

This commit is contained in:
Zuul 2023-03-02 10:47:03 +00:00 committed by Gerrit Code Review
commit 44bc089ac6
3 changed files with 61 additions and 98 deletions

View File

@@ -7540,6 +7540,58 @@ class ComputeManager(manager.Manager):
return (do_cleanup, destroy_disks)
def _post_live_migration_remove_source_vol_connections(
        self, context, instance, source_bdms):
    """Disconnect volume connections from the source host during
    _post_live_migration.

    :param context: nova auth RequestContext
    :param instance: Instance object being live migrated
    :param source_bdms: BlockDeviceMappingList representing the attached
                        volumes with connection_info set for the source host
    """
    # Detaching volumes.
    connector = self.driver.get_volume_connector(instance)
    for source_bdm in source_bdms:
        if not source_bdm.is_volume:
            continue
        # Detaching volumes is a call to an external API that can fail.
        # If it does, we need to handle it gracefully so that the call
        # to post_live_migration_at_destination - where we set instance
        # host and task state - still happens. We need to rethink the
        # current approach of setting instance host and task state
        # AFTER a whole bunch of things that could fail in unhandled
        # ways, but that is left as a TODO(artom).
        legacy_attachment = source_bdm.attachment_id is None
        try:
            if legacy_attachment:
                # Prior to cinder v3.44:
                # We don't want to actually mark the volume detached,
                # or delete the bdm, just remove the connection from
                # this host.
                #
                # remove the volume connection without detaching from
                # hypervisor because the instance is not running
                # anymore on the current host
                self.volume_api.terminate_connection(
                    context, source_bdm.volume_id, connector)
            else:
                # cinder v3.44 api flow - delete the old attachment
                # for the source host
                self.volume_api.attachment_delete(
                    context, source_bdm.attachment_id)
        except Exception as ex:
            if legacy_attachment:
                LOG.error('Connection for volume %s not terminated on '
                          'source host %s during post_live_migration: '
                          '%s', source_bdm.volume_id, self.host,
                          six.text_type(ex), instance=instance)
            else:
                LOG.error('Volume attachment %s not deleted on source '
                          'host %s during post_live_migration: %s',
                          source_bdm.attachment_id, self.host,
                          six.text_type(ex), instance=instance)
@wrap_exception()
@wrap_instance_fault
def _post_live_migration(self, ctxt, instance, dest,
@@ -7571,47 +7623,9 @@
self.driver.post_live_migration(ctxt, instance, block_device_info,
migrate_data)
# Detaching volumes.
connector = self.driver.get_volume_connector(instance)
for bdm in source_bdms:
if bdm.is_volume:
# Detaching volumes is a call to an external API that can fail.
# If it does, we need to handle it gracefully so that the call
# to post_live_migration_at_destination - where we set instance
# host and task state - still happens. We need to rethink the
# current approach of setting instance host and task state
# AFTER a whole bunch of things that could fail in unhandled
# ways, but that is left as a TODO(artom).
try:
if bdm.attachment_id is None:
# Prior to cinder v3.44:
# We don't want to actually mark the volume detached,
# or delete the bdm, just remove the connection from
# this host.
#
# remove the volume connection without detaching from
# hypervisor because the instance is not running
# anymore on the current host
self.volume_api.terminate_connection(ctxt,
bdm.volume_id,
connector)
else:
# cinder v3.44 api flow - delete the old attachment
# for the source host
self.volume_api.attachment_delete(ctxt,
bdm.attachment_id)
except Exception as e:
if bdm.attachment_id is None:
LOG.error('Connection for volume %s not terminated on '
'source host %s during post_live_migration: '
'%s', bdm.volume_id, self.host,
six.text_type(e), instance=instance)
else:
LOG.error('Volume attachment %s not deleted on source '
'host %s during post_live_migration: %s',
bdm.attachment_id, self.host,
six.text_type(e), instance=instance)
# Disconnect volumes from this (the source) host.
self._post_live_migration_remove_source_vol_connections(
ctxt, instance, source_bdms)
# Releasing vlan.
# (not necessary in current implementation?)

View File

@@ -6658,9 +6658,6 @@ class ComputeTestCase(BaseTestCase,
'state_description': 'migrating',
'state': power_state.PAUSED},
ctxt=c)
instance.update({'task_state': task_states.MIGRATING,
'power_state': power_state.PAUSED})
instance.save()
bdms = block_device_obj.block_device_make_list(c,
[fake_block_device.FakeDbBlockDeviceDict({
@@ -6672,29 +6669,16 @@
])
with test.nested(
mock.patch.object(self.compute.network_api,
'migrate_instance_start'),
mock.patch.object(self.compute.compute_rpcapi,
'post_live_migration_at_destination'),
mock.patch.object(self.compute.network_api,
'setup_networks_on_host'),
mock.patch.object(self.compute.instance_events,
'clear_events_for_instance'),
mock.patch.object(self.compute,
'_get_instance_block_device_info'),
mock.patch.object(self.compute.driver, 'get_volume_connector'),
mock.patch.object(cinder.API, 'terminate_connection'),
) as (
migrate_instance_start, post_live_migration_at_destination,
setup_networks_on_host, clear_events_for_instance,
get_instance_volume_block_device_info, get_volume_connector,
get_volume_connector,
terminate_connection,
):
get_volume_connector.return_value = 'fake-connector'
self.compute._post_live_migration(c, instance, 'dest_host',
migrate_data=mock.MagicMock(),
source_bdms=bdms)
self.compute._post_live_migration_remove_source_vol_connections(
c, instance, bdms)
terminate_connection.assert_called_once_with(
c, uuids.volume_id, 'fake-connector')

View File

@@ -9387,52 +9387,17 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
'volume_id': volume_id, 'device_name': '/dev/vdb',
'instance_uuid': instance.uuid})
vol_bdm.attachment_id = uuids.attachment
migrate_data = migrate_data_obj.LiveMigrateData()
migrate_data.migration = objects.Migration(uuid=uuids.migration,
dest_node=instance.node,
source_node='src')
image_bdm.attachment_id = uuids.attachment3
@mock.patch.object(instance, 'get_network_info')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch('nova.objects.ConsoleAuthToken.'
'clean_console_auths_for_instance')
@mock.patch.object(migrate_data.migration, 'save',
new=lambda: None)
@mock.patch.object(self.compute.reportclient,
'get_allocations_for_consumer_by_provider')
@mock.patch.object(vol_bdm, 'save')
@mock.patch.object(self.compute, 'update_available_resource')
@mock.patch.object(self.compute.volume_api, 'attachment_delete')
@mock.patch.object(self.compute, '_get_instance_block_device_info')
@mock.patch.object(self.compute, 'compute_rpcapi')
@mock.patch.object(self.compute, 'driver')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
@mock.patch.object(self.compute, 'network_api')
def _test(mock_net_api, mock_notify, mock_driver,
mock_rpc, mock_get_bdm_info, mock_attach_delete,
mock_update_resource, mock_bdm_save, mock_ga,
mock_clean, mock_notify_action, mock_get_nw_info):
self._mock_rt()
def _test(mock_attach_delete):
bdms = objects.BlockDeviceMappingList(objects=[vol_bdm, image_bdm])
self.compute._post_live_migration(self.context,
instance,
dest_host,
migrate_data=migrate_data,
source_bdms=bdms)
self.compute._post_live_migration_remove_source_vol_connections(
self.context, instance, bdms)
mock_attach_delete.assert_called_once_with(
self.context, uuids.attachment)
mock_clean.assert_called_once_with(self.context, instance.uuid)
mock_notify_action.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='live_migration_post', phase='start'),
mock.call(self.context, instance, 'fake-mini',
action='live_migration_post', phase='end')])
self.assertEqual(2, mock_notify_action.call_count)
mock_get_nw_info.assert_called()
_test()