Set migrate_data.vifs only when using multiple port bindings

In the Rocky cycle nova was enhanced to support the multiple port
binding live migration workflow when neutron supports the
binding-extended API extension. When the migrate_data object was
extended to support multiple port bindings, populating the vifs
field was used as a sentinel to indicate that the new workflow
should be used.

In the Train release,
I734cc01dce13f9e75a16639faf890ddb1661b7eb
(SR-IOV Live migration indirect port support)
broke the semantics of the migrate_data object by
unconditionally populating the vifs field.

This change restores the Rocky semantics, which several parts of
the code base depend on, by populating the vifs field only when
neutron supports multiple port bindings.

Co-Authored-By: Sean Mooney <work@seanmooney.info>
Change-Id: Ia00277ac8a68a635db85f9e0ce2c6d8df396e0d8
Closes-Bug: #1888395
Authored by root on 2020-07-18 00:32:54 -04:00; committed by Sean Mooney
parent 71bc6fc9b8
commit b8f3be6b3c
5 changed files with 110 additions and 23 deletions
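
To make the sentinel contract concrete, here is a minimal standalone sketch
(not nova source; FakeMigrateData and uses_multiple_port_bindings are
illustrative stand-ins) of the semantics this change restores: the vifs field
is only ever set when neutron exposes the binding-extended extension, so
later code can use its presence to pick between the single and multiple port
binding workflows, mirroring the 'vifs' in migrate_data check added to
pre_live_migration below.

# Minimal sketch only -- a stand-in for nova's LibvirtLiveMigrateData used to
# illustrate the sentinel behaviour; it is not part of this change.
class FakeMigrateData:
    def __init__(self, vifs=None):
        if vifs is not None:
            self.vifs = vifs

    def __contains__(self, name):
        # nova's versioned objects report unset fields as "not in" the
        # object; plain attribute presence models that well enough here.
        return hasattr(self, name)


def uses_multiple_port_bindings(migrate_data):
    # Same test the fix adds in pre_live_migration: the field must be both
    # set and non-empty for the multiple port binding workflow to be used.
    return 'vifs' in migrate_data and bool(migrate_data.vifs)


# Old neutron (no binding-extended): vifs never set -> single binding workflow.
assert not uses_multiple_port_bindings(FakeMigrateData())
# New neutron: vifs populated by the destination check -> new workflow.
assert uses_multiple_port_bindings(FakeMigrateData(vifs=['fake-vif']))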


@@ -7650,15 +7650,18 @@ class ComputeManager(manager.Manager):
                 LOG.info('Destination was ready for NUMA live migration, '
                          'but source is either too old, or is set to an '
                          'older upgrade level.', instance=instance)
-            # Create migrate_data vifs
-            migrate_data.vifs = \
-                migrate_data_obj.VIFMigrateData.create_skeleton_migrate_vifs(
-                    instance.get_network_info())
-            # Claim PCI devices for VIFs on destination (if needed)
-            port_id_to_pci = self._claim_pci_for_instance_vifs(ctxt, instance)
-            # Update migrate VIFs with the newly claimed PCI devices
-            self._update_migrate_vifs_profile_with_pci(migrate_data.vifs,
-                                                       port_id_to_pci)
+            if self.network_api.supports_port_binding_extension(ctxt):
+                # Create migrate_data vifs
+                migrate_data.vifs = \
+                    migrate_data_obj.\
+                    VIFMigrateData.create_skeleton_migrate_vifs(
+                        instance.get_network_info())
+                # Claim PCI devices for VIFs on destination (if needed)
+                port_id_to_pci = self._claim_pci_for_instance_vifs(
+                    ctxt, instance)
+                # Update migrate VIFs with the newly claimed PCI devices
+                self._update_migrate_vifs_profile_with_pci(
+                    migrate_data.vifs, port_id_to_pci)
         finally:
             self.driver.cleanup_live_migration_destination_check(ctxt,
                 dest_check_data)
@@ -7826,8 +7829,12 @@ class ComputeManager(manager.Manager):
         # determine if it should wait for a 'network-vif-plugged' event
         # from neutron before starting the actual guest transfer in the
         # hypervisor
+        using_multiple_port_bindings = (
+            'vifs' in migrate_data and migrate_data.vifs)
         migrate_data.wait_for_vif_plugged = (
-            CONF.compute.live_migration_wait_for_vif_plug)
+            CONF.compute.live_migration_wait_for_vif_plug and
+            using_multiple_port_bindings
+        )
 
         # NOTE(tr3buchet): setup networks on destination host
         self.network_api.setup_networks_on_host(context, instance,


@@ -12,6 +12,9 @@
+import fixtures
+from lxml import etree
+from urllib import parse as urlparse
 from nova import context
 from nova.network import constants as neutron_constants
 from nova.network import neutron
@@ -67,6 +70,40 @@ class TestLiveMigrationWithoutMultiplePortBindings(
                 kB_mem=10740000)})
         self.ctxt = context.get_admin_context()
+        # TODO(sean-k-mooney): remove this when it is part of ServersTestBase
+        self.useFixture(fixtures.MonkeyPatch(
+            'nova.tests.unit.virt.libvirt.fakelibvirt.Domain.migrateToURI3',
+            self._migrate_stub))
+
+    def _migrate_stub(self, domain, destination, params, flags):
+        """Stub out migrateToURI3."""
+        src_hostname = domain._connection.hostname
+        dst_hostname = urlparse.urlparse(destination).netloc
+
+        # In a real live migration, libvirt and QEMU on the source and
+        # destination talk it out, resulting in the instance starting to exist
+        # on the destination. Fakelibvirt cannot do that, so we have to
+        # manually create the "incoming" instance on the destination
+        # fakelibvirt.
+        dst = self.computes[dst_hostname]
+        dst.driver._host.get_connection().createXML(
+            params['destination_xml'],
+            'fake-createXML-doesnt-care-about-flags')
+
+        src = self.computes[src_hostname]
+        conn = src.driver._host.get_connection()
+
+        # because migrateToURI3 is spawned in a background thread, this method
+        # does not block the upper nova layers. Because we don't want nova to
+        # think the live migration has finished until this method is done, the
+        # last thing we do is make fakelibvirt's Domain.jobStats() return
+        # VIR_DOMAIN_JOB_COMPLETED.
+        server = etree.fromstring(
+            params['destination_xml']
+        ).find('./uuid').text
+        dom = conn.lookupByUUIDString(server)
+        dom.complete_job()
+
     def test_live_migrate(self):
         server = self._create_server(
@@ -86,14 +123,7 @@ class TestLiveMigrationWithoutMultiplePortBindings(
             }
         )
-        # FIXME(sean-k-mooney): this should succeed but because of bug #188395
-        # it will fail.
-        # self._wait_for_server_parameter(
-        #     server, {'OS-EXT-SRV-ATTR:host': 'end_host', 'status': 'ACTIVE'})
-        # because of the bug the migration will fail in pre_live_migrate so
-        # the vm should still be active on the start_host
         self._wait_for_server_parameter(
-            server, {'OS-EXT-SRV-ATTR:host': 'start_host', 'status': 'ACTIVE'})
+            server, {'OS-EXT-SRV-ATTR:host': 'end_host', 'status': 'ACTIVE'})
         msg = "NotImplementedError: Cannot load 'vif_type' in the base class"
-        self.assertIn(msg, self.stdlog.logger.output)
+        self.assertNotIn(msg, self.stdlog.logger.output)


@@ -18,6 +18,7 @@
 """Tests for compute service."""
 import datetime
+import fixtures as std_fixtures
 from itertools import chain
 import operator
 import sys
@@ -6133,13 +6134,19 @@ class ComputeTestCase(BaseTestCase,
             return fake_network.fake_get_instance_nw_info(self)
 
         self.stub_out('nova.network.neutron.API.get_instance_nw_info', stupid)
+        self.useFixture(
+            std_fixtures.MonkeyPatch(
+                'nova.network.neutron.API.supports_port_binding_extension',
+                lambda *args: True))
         # creating instance testdata
         instance = self._create_fake_instance_obj({'host': 'dummy'})
         c = context.get_admin_context()
         fake_notifier.NOTIFICATIONS = []
         migrate_data = objects.LibvirtLiveMigrateData(
             is_shared_instance_path=False)
+        vifs = migrate_data_obj.VIFMigrateData.create_skeleton_migrate_vifs(
+            stupid())
+        migrate_data.vifs = vifs
         mock_pre.return_value = migrate_data
 
         with mock.patch.object(self.compute.network_api,


@@ -15,6 +15,7 @@
 import contextlib
 import copy
 import datetime
+import fixtures as std_fixtures
 import time
 
 from cinderclient import exceptions as cinder_exception
@@ -3374,20 +3375,48 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
         mock_event.assert_called_once_with(
             self.context, 'compute_check_can_live_migrate_destination',
             CONF.host, instance.uuid, graceful_exit=False)
+        return result
 
     def test_check_can_live_migrate_destination_success(self):
+        self.useFixture(std_fixtures.MonkeyPatch(
+            'nova.network.neutron.API.supports_port_binding_extension',
+            lambda *args: True))
         self._test_check_can_live_migrate_destination()
 
     def test_check_can_live_migrate_destination_fail(self):
+        self.useFixture(std_fixtures.MonkeyPatch(
+            'nova.network.neutron.API.supports_port_binding_extension',
+            lambda *args: True))
         self.assertRaises(
-                test.TestingException,
-                self._test_check_can_live_migrate_destination,
-                do_raise=True)
+            test.TestingException,
+            self._test_check_can_live_migrate_destination,
+            do_raise=True)
+
+    def test_check_can_live_migrate_destination_contains_vifs(self):
+        self.useFixture(std_fixtures.MonkeyPatch(
+            'nova.network.neutron.API.supports_port_binding_extension',
+            lambda *args: True))
+        migrate_data = self._test_check_can_live_migrate_destination()
+        self.assertIn('vifs', migrate_data)
+        self.assertIsNotNone(migrate_data.vifs)
+
+    def test_check_can_live_migrate_destination_no_binding_extended(self):
+        self.useFixture(std_fixtures.MonkeyPatch(
+            'nova.network.neutron.API.supports_port_binding_extension',
+            lambda *args: False))
+        migrate_data = self._test_check_can_live_migrate_destination()
+        self.assertNotIn('vifs', migrate_data)
 
     def test_check_can_live_migrate_destination_src_numa_lm_false(self):
+        self.useFixture(std_fixtures.MonkeyPatch(
+            'nova.network.neutron.API.supports_port_binding_extension',
+            lambda *args: True))
         self._test_check_can_live_migrate_destination(src_numa_lm=False)
 
     def test_check_can_live_migrate_destination_src_numa_lm_true(self):
+        self.useFixture(std_fixtures.MonkeyPatch(
+            'nova.network.neutron.API.supports_port_binding_extension',
+            lambda *args: True))
         self._test_check_can_live_migrate_destination(src_numa_lm=True)
 
     def test_dest_can_numa_live_migrate(self):


@@ -0,0 +1,14 @@
+---
+fixes:
+  - |
+    In the Rocky (18.0.0) release support was added to nova to use neutron's
+    multiple port binding feature when the binding-extended API extension
+    is available. In the Train (20.0.0) release the SR-IOV live migration
+    feature broke the semantics of the vifs field in the ``migrate_data``
+    object that signals if the new multiple port binding workflow should
+    be used, by always populating it even when the ``binding-extended`` API
+    extension is not present. This broke live migration for any deployment
+    that did not support the optional ``binding-extended`` API extension.
+    The Rocky behavior has now been restored, enabling live migration
+    using the single port binding workflow when multiple port bindings
+    are not available.
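
As a condensed, standalone illustration of the behavior this note describes
(a sketch under assumptions, not nova source: FakeNetworkAPI, FakeMigrateData
and check_destination are stand-ins, while supports_port_binding_extension and
the skeleton-VIF population are the real nova calls shown in the diff above),
the destination check now writes the vifs sentinel only when neutron reports
the binding-extended extension:

# Illustrative sketch only: models the restored destination-side decision.
class FakeNetworkAPI:
    def __init__(self, has_binding_extended):
        self._has_binding_extended = has_binding_extended

    def supports_port_binding_extension(self, ctxt):
        # Real nova asks neutron whether the binding-extended API extension
        # is available; here it is just a stored flag.
        return self._has_binding_extended


class FakeMigrateData:
    """Stand-in for LibvirtLiveMigrateData; vifs stays unset by default."""


def check_destination(network_api, ctxt, migrate_data, network_info):
    # Rocky semantics restored: only populate the sentinel when the
    # extension exists, so its absence later selects the single port
    # binding workflow.
    if network_api.supports_port_binding_extension(ctxt):
        # Real nova builds skeleton VIFMigrateData objects here via
        # VIFMigrateData.create_skeleton_migrate_vifs(network_info).
        migrate_data.vifs = list(network_info)
    return migrate_data


without_ext = check_destination(
    FakeNetworkAPI(False), None, FakeMigrateData(), ['port-1'])
with_ext = check_destination(
    FakeNetworkAPI(True), None, FakeMigrateData(), ['port-1'])
assert not hasattr(without_ext, 'vifs')   # single port binding workflow
assert with_ext.vifs == ['port-1']        # multiple port binding workflow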