Merge "Revert "Revert resize: wait for events according to hybrid plug"" into stable/xena

commit af3d43d71d
Zuul, 2022-04-01 01:52:56 +00:00, committed by Gerrit Code Review
10 changed files with 52 additions and 286 deletions

View File

@@ -605,8 +605,7 @@
- nova-lvm
- nova-multi-cell
- nova-next
- nova-ovs-hybrid-plug:
voting: false
- nova-ovs-hybrid-plug
- nova-tox-validate-backport:
voting: false
- nova-tox-functional-centos8-py36

View File

@@ -4803,8 +4803,18 @@ class ComputeManager(manager.Manager):
self.host, instance=instance)
# TODO(mriedem): Calculate provider mappings when we support
# cross-cell resize/migrate with ports having resource requests.
self._finish_revert_resize_network_migrate_finish(
ctxt, instance, migration, provider_mappings=None)
# NOTE(hanrong): we need to change migration.dest_compute to the
# source host temporarily.
# "network_api.migrate_instance_finish" will set up the network
# for the instance on the destination host. For a revert resize,
# the instance goes back to the source host, so the network for
# the instance should be set up on the source host. So set
# migration.dest_compute to the source host here.
with utils.temporary_mutation(
migration, dest_compute=migration.source_compute
):
self.network_api.migrate_instance_finish(
ctxt, instance, migration, provider_mappings=None)
network_info = self.network_api.get_instance_nw_info(ctxt, instance)
# Remember that prep_snapshot_based_resize_at_source destroyed the
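For context on the pattern in the added lines above: utils.temporary_mutation() swaps attribute values for the duration of a with block and restores them on exit, so migration.dest_compute only points at the source host while migrate_instance_finish() runs. A minimal sketch of such a helper (illustrative only, not Nova's exact implementation):

```python
import contextlib

@contextlib.contextmanager
def temporary_mutation(obj, **kwargs):
    """Temporarily set attributes on obj, restoring the old values on exit."""
    originals = {attr: getattr(obj, attr) for attr in kwargs}
    for attr, value in kwargs.items():
        setattr(obj, attr, value)
    try:
        yield
    finally:
        # Restore even if the with-body raised.
        for attr, value in originals.items():
            setattr(obj, attr, value)
```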
@@ -4896,50 +4906,6 @@ class ComputeManager(manager.Manager):
self.compute_rpcapi.finish_revert_resize(context, instance,
migration, migration.source_compute, request_spec)
def _finish_revert_resize_network_migrate_finish(
self, context, instance, migration, provider_mappings):
"""Causes port binding to be updated. In some Neutron or port
configurations - see NetworkModel.get_bind_time_events() - we
expect the vif-plugged event from Neutron immediately and wait for it.
The rest of the time, the event is expected further along in the
virt driver, so we don't wait here.
:param context: The request context.
:param instance: The instance undergoing the revert resize.
:param migration: The Migration object of the resize being reverted.
:param provider_mappings: a dict of list of resource provider uuids
keyed by port uuid
:raises: eventlet.timeout.Timeout or
exception.VirtualInterfacePlugException.
"""
network_info = instance.get_network_info()
events = []
deadline = CONF.vif_plugging_timeout
if deadline and network_info:
events = network_info.get_bind_time_events(migration)
if events:
LOG.debug('Will wait for bind-time events: %s', events)
error_cb = self._neutron_failed_migration_callback
try:
with self.virtapi.wait_for_instance_event(instance, events,
deadline=deadline,
error_callback=error_cb):
# NOTE(hanrong): we need to change migration.dest_compute to the
# source host temporarily.
# "network_api.migrate_instance_finish" will set up the network
# for the instance on the destination host. For a revert resize,
# the instance goes back to the source host, so the network for
# the instance should be set up on the source host. So set
# migration.dest_compute to the source host here.
with utils.temporary_mutation(
migration, dest_compute=migration.source_compute):
self.network_api.migrate_instance_finish(
context, instance, migration, provider_mappings)
except eventlet.timeout.Timeout:
with excutils.save_and_reraise_exception():
LOG.error('Timeout waiting for Neutron events: %s', events,
instance=instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
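The removed helper above follows a register-then-wait pattern: the expected network-vif-plugged events are registered with the virt API before the port binding is updated, so an event that arrives quickly is not lost to a race, and a deadline bounds the wait. A self-contained sketch of that pattern, assuming a threaded event source (hypothetical names; Nova's wait_for_instance_event differs in detail):

```python
import contextlib
import threading

@contextlib.contextmanager
def expect_events(expected, deadline):
    """Register expected event names, then wait for them on exit."""
    done = threading.Event()
    pending = set(expected)

    def deliver(name):
        # Called by the notification listener when an event arrives.
        pending.discard(name)
        if not pending:
            done.set()

    # The with-body triggers the operation (e.g. the port binding
    # update) *after* registration, closing the race window.
    yield deliver
    if pending and not done.wait(timeout=deadline):
        raise TimeoutError('timed out waiting for: %s' % sorted(pending))
```

When `expected` is empty — the non-hybrid-plug case — exit is immediate, which matches the removed helper's behavior of not waiting when no bind-time events apply.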
@@ -4997,8 +4963,18 @@ class ComputeManager(manager.Manager):
self.network_api.setup_networks_on_host(context, instance,
migration.source_compute)
self._finish_revert_resize_network_migrate_finish(
context, instance, migration, provider_mappings)
# NOTE(hanrong): we need to change migration.dest_compute to the
# source host temporarily. "network_api.migrate_instance_finish"
# will set up the network for the instance on the destination host.
# For a revert resize, the instance goes back to the source host, so
# the network for the instance should be set up on the source host.
# So set migration.dest_compute to the source host here.
with utils.temporary_mutation(
migration, dest_compute=migration.source_compute):
self.network_api.migrate_instance_finish(context,
instance,
migration,
provider_mappings)
network_info = self.network_api.get_instance_nw_info(context,
instance)
@@ -5075,8 +5051,7 @@
# the provider mappings. If the instance has ports with
# resource request then the port update will fail in
# _update_port_binding_for_instance() called via
# _finish_revert_resize_network_migrate_finish() in
# finish_revert_resize.
# migrate_instance_finish() in finish_revert_resize.
provider_mappings = None
return provider_mappings
@@ -8287,8 +8262,8 @@ class ComputeManager(manager.Manager):
return migrate_data
@staticmethod
def _neutron_failed_migration_callback(event_name, instance):
msg = ('Neutron reported failure during migration '
def _neutron_failed_live_migration_callback(event_name, instance):
msg = ('Neutron reported failure during live migration '
'with %(event)s for instance %(uuid)s')
msg_args = {'event': event_name, 'uuid': instance.uuid}
if CONF.vif_plugging_is_fatal:
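The hunk cuts off inside the renamed callback; the usual shape of such a callback is to abort when VIF plugging failures are configured as fatal and otherwise just log. A hedged, self-contained sketch (VifPlugFailed and the fatal parameter are stand-ins for Nova's VirtualInterfacePlugException and CONF.vif_plugging_is_fatal):

```python
import logging

LOG = logging.getLogger(__name__)

class VifPlugFailed(Exception):
    """Stand-in for nova.exception.VirtualInterfacePlugException."""

def neutron_failed_live_migration_callback(event_name, instance_uuid,
                                           fatal=True):
    # On a network-vif-plugged failure event: raise if failures are
    # fatal, otherwise log a warning and let the migration proceed.
    msg = ('Neutron reported failure during live migration '
           'with %(event)s for instance %(uuid)s'
           % {'event': event_name, 'uuid': instance_uuid})
    if fatal:
        raise VifPlugFailed(msg)
    LOG.warning(msg)
```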
@@ -8384,7 +8359,7 @@ class ComputeManager(manager.Manager):
disk = None
deadline = CONF.vif_plugging_timeout
error_cb = self._neutron_failed_migration_callback
error_cb = self._neutron_failed_live_migration_callback
# In order to avoid a race with the vif plugging that the virt
# driver does on the destination host, we register our events
# to wait for before calling pre_live_migration. Then if the

View File

@@ -478,17 +478,6 @@ class VIF(Model):
'ips': ips}
return []
def has_bind_time_event(self, migration):
"""Returns whether this VIF's network-vif-plugged external event will
be sent by Neutron at "bind-time" - in other words, as soon as the port
binding is updated. This is in the context of updating the port binding
to a host that already has the instance in a shutoff state - in
practice, this means reverting either a cold migration or a
non-same-host resize.
"""
return (self.is_hybrid_plug_enabled() and not
migration.is_same_host())
@property
def has_live_migration_plug_time_event(self):
"""Returns whether this VIF's network-vif-plugged external event will
@@ -557,13 +546,6 @@ class NetworkInfo(list):
def json(self):
return jsonutils.dumps(self)
def get_bind_time_events(self, migration):
"""Returns a list of external events for any VIFs that have
"bind-time" events during cold migration.
"""
return [('network-vif-plugged', vif['id'])
for vif in self if vif.has_bind_time_event(migration)]
def get_live_migration_plug_time_events(self):
"""Returns a list of external events for any VIFs that have
"plug-time" events during live migration.
@@ -571,13 +553,6 @@ class NetworkInfo(list):
return [('network-vif-plugged', vif['id'])
for vif in self if vif.has_live_migration_plug_time_event]
def get_plug_time_events(self, migration):
"""Returns a list of external events for any VIFs that have
"plug-time" events during cold migration.
"""
return [('network-vif-plugged', vif['id'])
for vif in self if not vif.has_bind_time_event(migration)]
def has_port_with_allocation(self):
return any(vif.has_allocation() for vif in self)
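Taken together, the two removed helpers partitioned a NetworkInfo's VIFs into disjoint event sets: hybrid-plugged VIFs on a cross-host migration emit network-vif-plugged as soon as the port binding is updated (bind time), and all other VIFs emit it when the virt driver actually plugs them (plug time). A standalone sketch of that partition (plain dicts as simplified stand-ins for the VIF objects):

```python
def partition_vif_events(vifs, same_host):
    """Split VIFs into (bind_time, plug_time) network-vif-plugged events.

    vifs: iterable of dicts with 'id' and 'ovs_hybrid_plug' keys.
    same_host: whether the migration's source and dest hosts match.
    """
    bind_time, plug_time = [], []
    for vif in vifs:
        if vif['ovs_hybrid_plug'] and not same_host:
            # Neutron sends the event at port-binding update time.
            bind_time.append(('network-vif-plugged', vif['id']))
        else:
            # The event only arrives once the virt driver plugs the VIF.
            plug_time.append(('network-vif-plugged', vif['id']))
    return bind_time, plug_time
```

Every VIF lands in exactly one list, which is why the compute manager waited for the first set around the binding update while the libvirt driver waited for the second when creating the guest.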

View File

@@ -202,9 +202,6 @@ class Migration(base.NovaPersistentObject, base.NovaObject,
def instance(self, instance):
self._cached_instance = instance
def is_same_host(self):
return self.source_compute == self.dest_compute
@property
def is_live_migration(self):
return self.migration_type == fields.MigrationType.LIVE_MIGRATION

View File

@@ -5801,9 +5801,7 @@ class ComputeTestCase(BaseTestCase,
old_vm_state = vm_states.ACTIVE
else:
old_vm_state = vm_states.STOPPED
params = {'vm_state': old_vm_state,
'info_cache': objects.InstanceInfoCache(
network_info=network_model.NetworkInfo([]))}
params = {'vm_state': old_vm_state}
instance = self._create_fake_instance_obj(params)
request_spec = objects.RequestSpec()
@@ -5956,9 +5954,7 @@
def fake(*args, **kwargs):
pass
params = {'info_cache': objects.InstanceInfoCache(
network_info=network_model.NetworkInfo([]))}
instance = self._create_fake_instance_obj(params)
instance = self._create_fake_instance_obj()
request_spec = objects.RequestSpec()
self.stub_out('nova.virt.fake.FakeDriver.finish_migration', fake)

View File

@@ -6045,86 +6045,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, fake_instance, fake_bdm)
block_stats.assert_called_once_with(fake_instance, 'vda')
def _test_finish_revert_resize_network_migrate_finish(
self, vifs, events, migration=None):
instance = fake_instance.fake_instance_obj(self.context)
instance.info_cache = objects.InstanceInfoCache(
network_info=network_model.NetworkInfo(vifs))
if migration is None:
migration = objects.Migration(
source_compute='fake-source',
dest_compute='fake-dest')
def fake_migrate_instance_finish(
context, instance, migration, mapping):
# NOTE(artom) This looks weird, but it's checking that the
# temporary_mutation() context manager did its job.
self.assertEqual(migration.dest_compute, migration.source_compute)
with test.nested(
mock.patch.object(self.compute.virtapi,
'wait_for_instance_event'),
mock.patch.object(self.compute.network_api,
'migrate_instance_finish',
side_effect=fake_migrate_instance_finish)
) as (mock_wait, mock_migrate_instance_finish):
self.compute._finish_revert_resize_network_migrate_finish(
self.context, instance, migration, mock.sentinel.mapping)
mock_wait.assert_called_once_with(
instance, events, deadline=CONF.vif_plugging_timeout,
error_callback=self.compute._neutron_failed_migration_callback)
mock_migrate_instance_finish.assert_called_once_with(
self.context, instance, migration, mock.sentinel.mapping)
def test_finish_revert_resize_network_migrate_finish_wait(self):
"""Test that we wait for bind-time events if we have a hybrid-plugged
VIF.
"""
self._test_finish_revert_resize_network_migrate_finish(
[network_model.VIF(id=uuids.hybrid_vif,
details={'ovs_hybrid_plug': True}),
network_model.VIF(id=uuids.normal_vif,
details={'ovs_hybrid_plug': False})],
[('network-vif-plugged', uuids.hybrid_vif)])
def test_finish_revert_resize_network_migrate_finish_same_host(self):
"""Test that we're not waiting for any events if its a same host
resize revert.
"""
migration = objects.Migration(
source_compute='fake-source', dest_compute='fake-source')
self._test_finish_revert_resize_network_migrate_finish(
[network_model.VIF(id=uuids.hybrid_vif,
details={'ovs_hybrid_plug': True}),
network_model.VIF(id=uuids.normal_vif,
details={'ovs_hybrid_plug': False})],
[], migration=migration
)
def test_finish_revert_resize_network_migrate_finish_dont_wait(self):
"""Test that we're not waiting for any events if we don't have any
hybrid-plugged VIFs.
"""
self._test_finish_revert_resize_network_migrate_finish(
[network_model.VIF(id=uuids.hybrid_vif,
details={'ovs_hybrid_plug': False}),
network_model.VIF(id=uuids.normal_vif,
details={'ovs_hybrid_plug': False})],
[])
def test_finish_revert_resize_network_migrate_finish_no_vif_timeout(self):
"""Test that we're not waiting for any events if vif_plugging_timeout
is 0.
"""
self.flags(vif_plugging_timeout=0)
self._test_finish_revert_resize_network_migrate_finish(
[network_model.VIF(id=uuids.hybrid_vif,
details={'ovs_hybrid_plug': True}),
network_model.VIF(id=uuids.normal_vif,
details={'ovs_hybrid_plug': True})],
[])
@mock.patch('nova.compute.manager.LOG')
def test_cache_images_unsupported(self, mock_log):
r = self.compute.cache_images(self.context, ['an-image'])
@@ -8871,8 +8791,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
do_finish_revert_resize()
@mock.patch.object(objects.Instance, 'drop_migration_context')
@mock.patch('nova.compute.manager.ComputeManager.'
'_finish_revert_resize_network_migrate_finish')
@mock.patch('nova.network.neutron.API.migrate_instance_finish')
@mock.patch('nova.scheduler.utils.'
'fill_provider_mapping_based_on_allocation')
@mock.patch('nova.compute.manager.ComputeManager._revert_allocation')
@@ -8886,7 +8805,7 @@
def test_finish_revert_resize_recalc_group_rp_mapping(
self, mock_get_bdms, mock_notify_action, mock_notify_usage,
mock_set_instance_info, mock_instance_save, mock_revert_allocation,
mock_fill_provider_mapping, mock_network_migrate_finish,
mock_fill_provider_mapping, mock_migrate_instance_finish,
mock_drop_migration_context):
mock_get_bdms.return_value = objects.BlockDeviceMappingList()
@@ -8903,8 +8822,7 @@
mock.sentinel.allocation)
@mock.patch.object(objects.Instance, 'drop_migration_context')
@mock.patch('nova.compute.manager.ComputeManager.'
'_finish_revert_resize_network_migrate_finish')
@mock.patch('nova.network.neutron.API.migrate_instance_finish')
@mock.patch('nova.scheduler.utils.'
'fill_provider_mapping_based_on_allocation')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
@@ -8921,7 +8839,7 @@
self, mock_get_bdms, mock_notify_action, mock_notify_usage,
mock_set_instance_info, mock_instance_save, mock_revert_allocation,
mock_get_allocations, mock_fill_provider_mapping,
mock_network_migrate_finish, mock_drop_migration_context):
mock_migrate_instance_finish, mock_drop_migration_context):
mock_get_bdms.return_value = objects.BlockDeviceMappingList()
mock_get_allocations.return_value = mock.sentinel.allocation
@@ -8935,7 +8853,7 @@
mock_get_allocations.assert_not_called()
mock_fill_provider_mapping.assert_not_called()
mock_network_migrate_finish.assert_called_once_with(
mock_migrate_instance_finish.assert_called_once_with(
self.context, self.instance, self.migration, None)
def test_confirm_resize_deletes_allocations_and_update_scheduler(self):
@@ -12120,8 +12038,7 @@
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.manager.ComputeManager.'
'_update_volume_attachments')
@mock.patch('nova.compute.manager.ComputeManager.'
'_finish_revert_resize_network_migrate_finish')
@mock.patch('nova.network.neutron.API.migrate_instance_finish')
@mock.patch('nova.compute.manager.ComputeManager.'
'_get_instance_block_device_info')
@mock.patch('nova.objects.Instance.drop_migration_context')
@@ -12131,7 +12048,7 @@
'_complete_volume_attachments')
def test_finish_revert_snapshot_based_resize_at_source(
self, mock_complete_attachments, mock_update_after_spawn,
mock_drop_mig_context, mock_get_bdi, mock_net_migrate_finish,
mock_drop_mig_context, mock_get_bdi, mock_migrate_instance_finish,
mock_update_attachments, mock_get_bdms, mock_revert_allocs,
mock_inst_save):
"""Happy path test for finish_revert_snapshot_based_resize_at_source.
@@ -12171,7 +12088,7 @@
mock_update_attachments.assert_called_once_with(
self.context, self.instance, mock_get_bdms.return_value)
# Assert that port bindings were updated to point at the source host.
mock_net_migrate_finish.assert_called_once_with(
mock_migrate_instance_finish.assert_called_once_with(
self.context, self.instance, self.migration,
provider_mappings=None)
# Assert the driver finished reverting the migration.
@@ -12196,8 +12113,7 @@
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.manager.ComputeManager.'
'_update_volume_attachments')
@mock.patch('nova.compute.manager.ComputeManager.'
'_finish_revert_resize_network_migrate_finish')
@mock.patch('nova.network.neutron.API.migrate_instance_finish')
@mock.patch('nova.compute.manager.ComputeManager.'
'_get_instance_block_device_info')
@mock.patch('nova.objects.Instance.drop_migration_context')
@@ -12208,7 +12124,7 @@
side_effect=test.TestingException('vol complete failed'))
def test_finish_revert_snapshot_based_resize_at_source_driver_fails(
self, mock_complete_attachments, mock_update_after_spawn,
mock_drop_mig_context, mock_get_bdi, mock_net_migrate_finish,
mock_drop_mig_context, mock_get_bdi, mock_migrate_instance_finish,
mock_update_attachments, mock_get_bdms, mock_revert_allocs,
mock_inst_save):
"""Test for _finish_revert_snapshot_based_resize_at_source where the
@@ -12262,7 +12178,7 @@
mock_update_attachments.assert_called_once_with(
self.context, self.instance, mock_get_bdms.return_value)
# Assert that port bindings were updated to point at the source host.
mock_net_migrate_finish.assert_called_once_with(
mock_migrate_instance_finish.assert_called_once_with(
self.context, self.instance, self.migration,
provider_mappings=None)
# Assert final DB cleanup for the instance did not happen.

View File

@@ -19,7 +19,6 @@ from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
from nova.network import model
from nova import objects
from nova import test
from nova.tests.unit import fake_network_cache_model
from nova.virt import netutils
@@ -855,34 +854,6 @@ iface eth1 inet static
libvirt_virt_type='lxc')
self.assertEqual(expected, template)
def test_get_events(self):
network_info = model.NetworkInfo([
model.VIF(
id=uuids.hybrid_vif,
details={'ovs_hybrid_plug': True}),
model.VIF(
id=uuids.normal_vif,
details={'ovs_hybrid_plug': False})])
same_host = objects.Migration(source_compute='fake-host',
dest_compute='fake-host')
diff_host = objects.Migration(source_compute='fake-host1',
dest_compute='fake-host2')
# Same-host migrations will have all events be plug-time.
self.assertCountEqual(
[('network-vif-plugged', uuids.normal_vif),
('network-vif-plugged', uuids.hybrid_vif)],
network_info.get_plug_time_events(same_host))
# Same-host migrations will have no bind-time events.
self.assertEqual([], network_info.get_bind_time_events(same_host))
# Diff-host migration + OVS hybrid plug = bind-time events
self.assertEqual(
[('network-vif-plugged', uuids.hybrid_vif)],
network_info.get_bind_time_events(diff_host))
# Diff-host migration + normal OVS = plug-time events
self.assertEqual(
[('network-vif-plugged', uuids.normal_vif)],
network_info.get_plug_time_events(diff_host))
def test_has_port_with_allocation(self):
network_info = model.NetworkInfo([])
self.assertFalse(network_info.has_port_with_allocation())

View File

@@ -359,14 +359,6 @@ class _TestMigrationObject(object):
mig = objects.Migration.get_by_uuid(self.context, uuidsentinel.mig)
self.assertEqual(uuidsentinel.mig, mig.uuid)
def test_is_same_host(self):
same_host = objects.Migration(source_compute='fake-host',
dest_compute='fake-host')
diff_host = objects.Migration(source_compute='fake-host1',
dest_compute='fake-host2')
self.assertTrue(same_host.is_same_host())
self.assertFalse(diff_host.is_same_host())
class TestMigrationObject(test_objects._LocalTest,
_TestMigrationObject):

View File

@@ -19152,10 +19152,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(0, domain.resume.call_count)
def _test_create_guest_with_network__events(
self, neutron_failure=None, power_on=True, events=None,
self, neutron_failure=None, power_on=True,
):
generated_events = []
events_passed_to_prepare = []
def wait_timeout():
event = mock.MagicMock()
@@ -19173,7 +19172,6 @@
m.event_name = '%s-%s' % (name, tag)
m.wait.side_effect = wait_timeout
generated_events.append(m)
events_passed_to_prepare.append((name, tag))
return m
virtapi = manager.ComputeVirtAPI(mock.MagicMock())
@@ -19192,8 +19190,7 @@
def test_create(cleanup, create, plug_vifs):
domain = drvr._create_guest_with_network(self.context, 'xml',
instance, vifs, None,
power_on=power_on,
external_events=events)
power_on=power_on)
plug_vifs.assert_called_with(instance, vifs)
pause = self._get_pause_flag(drvr, vifs, power_on=power_on)
@@ -19208,9 +19205,7 @@
test_create()
if events and CONF.vif_plugging_timeout:
self.assertEqual(events_passed_to_prepare, events)
elif CONF.vif_plugging_timeout and power_on:
if CONF.vif_plugging_timeout and power_on:
prepare.assert_has_calls([
mock.call(instance, 'network-vif-plugged', uuids.vif_1),
mock.call(instance, 'network-vif-plugged', uuids.vif_2)])
@@ -19226,15 +19221,6 @@
def test_create_guest_with_network__events_neutron(self):
self._test_create_guest_with_network__events()
def test_create_guest_with_network__events_passed_in(self):
self._test_create_guest_with_network__events(
events=[('network-vif-plugged', uuids.fake_vif)])
def test_create_guest_with_network__events_passed_in_0_timeout(self):
self.flags(vif_plugging_timeout=0)
self._test_create_guest_with_network__events(
events=[('network-vif-plugged', uuids.fake_vif)])
def test_create_guest_with_network_events_neutron_power_off(self):
# Tests that we don't wait for events if we don't start the instance.
self._test_create_guest_with_network__events(power_on=False)
@@ -22222,7 +22208,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_restore_vtpm.assert_not_called()
mock_delete_vtpm.assert_not_called()
def _test_finish_revert_migration(self, power_on, migration):
def _test_finish_revert_migration(self, power_on):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_revert_migration.
"""
@@ -22239,14 +22225,11 @@
def fake_create_guest_with_network(
_self, context, xml, instance, network_info, block_device_info,
power_on=None, vifs_already_plugged=None, post_xml_callback=None,
external_events=None, cleanup_instance_dir=False,
cleanup_instance_disks=False,
cleanup_instance_dir=False, cleanup_instance_disks=False,
):
self.fake_create_guest_called = True
self.assertEqual(powered_on, power_on)
self.assertFalse(vifs_already_plugged)
self.assertEqual(self.events_passed_to_fake_create,
external_events)
return mock.MagicMock()
def fake_get_info(self, instance):
@@ -22285,51 +22268,22 @@
f = open(libvirt_xml_path, 'w')
f.close()
network_info = network_model.NetworkInfo(
[network_model.VIF(id=uuids.normal_vif),
network_model.VIF(id=uuids.hybrid_vif,
details={'ovs_hybrid_plug': True})])
if migration.is_same_host():
# Same host is all plug-time
self.events_passed_to_fake_create = [
('network-vif-plugged', uuids.normal_vif),
('network-vif-plugged', uuids.hybrid_vif)]
else:
# For a different-host migration, only non-hybrid-plug
# ("normal") VIFs "emit" plug-time events.
self.events_passed_to_fake_create = [
('network-vif-plugged', uuids.normal_vif)]
with mock.patch.object(
self.drvr, '_get_all_assigned_mediated_devices',
return_value={}
) as mock_get_a_mdevs:
self.drvr.finish_revert_migration(
self.context, instance, network_info, migration,
power_on=power_on)
self.context, instance, network_model.NetworkInfo(),
objects.Migration(), power_on=power_on)
self.assertTrue(self.fake_create_guest_called)
mock_get_a_mdevs.assert_called_once_with(mock.ANY)
def test_finish_revert_migration_power_on(self):
migration = objects.Migration(id=42, source_compute='fake-host1',
dest_compute='fake-host2')
self._test_finish_revert_migration(power_on=True, migration=migration)
self._test_finish_revert_migration(True)
def test_finish_revert_migration_power_off(self):
migration = objects.Migration(id=42, source_compute='fake-host1',
dest_compute='fake-host2')
self._test_finish_revert_migration(power_on=False, migration=migration)
def test_finish_revert_migration_same_host(self):
migration = objects.Migration(id=42, source_compute='fake-host',
dest_compute='fake-host')
self._test_finish_revert_migration(power_on=True, migration=migration)
def test_finish_revert_migration_diff_host(self):
migration = objects.Migration(id=42, source_compute='fake-host1',
dest_compute='fake-host2')
self._test_finish_revert_migration(power_on=True, migration=migration)
self._test_finish_revert_migration(False)
def _test_finish_revert_migration_after_crash(self, backup_made=True,
del_inst_failed=False):

View File

@@ -11238,18 +11238,9 @@ class LibvirtDriver(driver.ComputeDriver):
instance.image_meta,
block_device_info=block_device_info,
mdevs=mdevs)
# NOTE(artom) In some Neutron or port configurations we've already
# waited for vif-plugged events in the compute manager's
# _finish_revert_resize_network_migrate_finish(), right after updating
# the port binding. For any ports not covered by those "bind-time"
# events, we wait for "plug-time" events here.
events = network_info.get_plug_time_events(migration)
if events:
LOG.debug('Instance is using plug-time events: %s', events,
instance=instance)
self._create_guest_with_network(
context, xml, instance, network_info, block_device_info,
power_on=power_on, external_events=events)
power_on=power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(