libvirt: use dest host port bindings during pre_live_migration
This uses the destination host port binding information to plug VIFs on the destination host during pre_live_migration. This gets "turned on" later in the series when the conductor sets the migrate_data.vifs field.

Part of blueprint neutron-new-port-binding-api

Change-Id: Ia803978401a0a9e8e28af006ffda1f4897ecd9f0
This commit is contained in:
parent
5b554ed90d
commit
a81811e111
@ -12039,6 +12039,31 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
||||
mock_sleep.assert_has_calls([mock.call(1)] * 2)
|
||||
self.assertEqual(2, mock_sleep.call_count)
|
||||
|
||||
def test_pre_live_migration_plug_vifs_with_dest_port_bindings(self):
    """Verify that _pre_live_migration_plug_vifs plugs VIFs built from the
    destination host port bindings carried in
    LibvirtLiveMigrateData.vifs, rather than from the passed network_info.
    """
    # Source-host view of the port; should NOT be what gets plugged.
    src_vif = network_model.VIF(
        id=uuids.port_id,
        type=network_model.VIF_TYPE_OVS,
        vnic_type=network_model.VNIC_TYPE_NORMAL,
        details={'foo': 'bar'},
        profile={'binding:host_id': 'fake-source-host'})
    # Destination-host binding details for the same port.
    mig_vif = objects.VIFMigrateData(
        port_id=uuids.port_id,
        vnic_type=network_model.VNIC_TYPE_NORMAL,
        vif_type=network_model.VIF_TYPE_OVS,
        vif_details={'bar': 'baz'},
        profile={'binding:host_id': 'fake-dest-host'},
        host='fake-dest-host',
        source_vif=src_vif)
    migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
        vifs=[mig_vif])

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance()
    # NonCallableMock: blow up if the driver touches network_info, since
    # the dest-host bindings must be used instead.
    network_info = mock.NonCallableMock()
    with mock.patch.object(drvr, 'plug_vifs') as mock_plug_vifs:
        drvr._pre_live_migration_plug_vifs(
            instance, network_info, migrate_data)
    # plug_vifs must receive a NetworkInfo built from the destination
    # VIF, not the source-host network_info.
    mock_plug_vifs.assert_called_once_with(
        instance,
        network_model.NetworkInfo([mig_vif.get_dest_vif()]))
|
||||
|
||||
def test_pre_live_migration_image_not_created_with_shared_storage(self):
|
||||
migrate_data_set = [{'is_shared_block_storage': False,
|
||||
'is_shared_instance_path': True,
|
||||
|
@ -7562,16 +7562,27 @@ class LibvirtDriver(driver.ComputeDriver):
|
||||
if os.path.exists(instance_dir):
|
||||
shutil.rmtree(instance_dir)
|
||||
|
||||
# NOTE(review): this span is a rendered diff hunk (viewer noise lines
# '|' and '||||' interleaved), not plain source — both the removed and
# the added lines appear. The change: _pre_live_migration_plug_vifs
# gains a migrate_data parameter; when migrate_data.vifs is populated,
# a NetworkInfo is built from each migrate_vif.get_dest_vif() (the
# destination-host port bindings) and plugged instead of the caller's
# network_info. The hunk is truncated after 'if cnt == max_retry - 1:',
# so the tail of the retry handler is not visible here.
def _pre_live_migration_plug_vifs(self, instance, network_info):
|
||||
def _pre_live_migration_plug_vifs(self, instance, network_info,
|
||||
migrate_data):
|
||||
# We call plug_vifs before the compute manager calls
|
||||
# ensure_filtering_rules_for_instance, to ensure bridge is set up
|
||||
# Retry operation is necessary because continuously request comes,
|
||||
# concurrent request occurs to iptables, then it complains.
|
||||
LOG.debug('Plugging VIFs before live migration.', instance=instance)
|
||||
# NOTE(review): the "'vifs' in migrate_data" check presumably guards
# against older sources that send migrate_data without the vifs field
# set — confirm against the conductor change later in this series.
if 'vifs' in migrate_data and migrate_data.vifs:
|
||||
LOG.debug('Plugging VIFs using destination host port bindings '
|
||||
'before live migration.', instance=instance)
|
||||
# Plug VIFs using destination host port binding information.
|
||||
vif_plug_nw_info = network_model.NetworkInfo([])
|
||||
for migrate_vif in migrate_data.vifs:
|
||||
vif_plug_nw_info.append(migrate_vif.get_dest_vif())
|
||||
else:
|
||||
LOG.debug('Plugging VIFs before live migration.',
|
||||
instance=instance)
|
||||
# No dest-host bindings available: fall back to the source-host view.
vif_plug_nw_info = network_info
|
||||
# Retry plugging to ride out transient iptables contention (see the
# comment at the top of the function).
max_retry = CONF.live_migration_retry_count
|
||||
for cnt in range(max_retry):
|
||||
try:
|
||||
self.plug_vifs(instance, network_info)
|
||||
self.plug_vifs(instance, vif_plug_nw_info)
|
||||
break
|
||||
except processutils.ProcessExecutionError:
|
||||
if cnt == max_retry - 1:
|
||||
@ -7680,7 +7691,8 @@ class LibvirtDriver(driver.ComputeDriver):
|
||||
self._connect_volume(context, connection_info, instance,
|
||||
allow_native_luks=allow_native_luks)
|
||||
|
||||
self._pre_live_migration_plug_vifs(instance, network_info)
|
||||
self._pre_live_migration_plug_vifs(
|
||||
instance, network_info, migrate_data)
|
||||
|
||||
# Store server_listen and latest disk device info
|
||||
if not migrate_data:
|
||||
|
Loading…
Reference in New Issue
Block a user