
Merge "Set migrate_data.vifs only when using multiple port bindings" into stable/train

stable/train
Authored by Zuul 1 week ago; committed by Gerrit Code Review
commit b095ed1cf3
7 changed files with 140 additions and 23 deletions
  1. nova/compute/manager.py (+17, -10)
  2. nova/tests/functional/regressions/test_bug_1888395.py (+50, -9)
  3. nova/tests/unit/compute/test_compute.py (+10, -0)
  4. nova/tests/unit/compute/test_compute_mgr.py (+32, -3)
  5. nova/tests/unit/virt/libvirt/fakelibvirt.py (+12, -1)
  6. nova/tests/unit/virt/test_virt_drivers.py (+5, -0)
  7. releasenotes/notes/restore-rocky-portbinding-semantics-48e9b1fa969cc5e9.yaml (+14, -0)

nova/compute/manager.py (+17, -10)

@@ -6832,15 +6832,18 @@ class ComputeManager(manager.Manager):
                 LOG.info('Destination was ready for NUMA live migration, '
                          'but source is either too old, or is set to an '
                          'older upgrade level.', instance=instance)
-            # Create migrate_data vifs
-            migrate_data.vifs = \
-                migrate_data_obj.VIFMigrateData.create_skeleton_migrate_vifs(
-                    instance.get_network_info())
-            # Claim PCI devices for VIFs on destination (if needed)
-            port_id_to_pci = self._claim_pci_for_instance_vifs(ctxt, instance)
-            # Update migrate VIFs with the newly claimed PCI devices
-            self._update_migrate_vifs_profile_with_pci(migrate_data.vifs,
-                                                       port_id_to_pci)
+            if self.network_api.supports_port_binding_extension(ctxt):
+                # Create migrate_data vifs
+                migrate_data.vifs = \
+                    migrate_data_obj.\
+                    VIFMigrateData.create_skeleton_migrate_vifs(
+                        instance.get_network_info())
+                # Claim PCI devices for VIFs on destination (if needed)
+                port_id_to_pci = self._claim_pci_for_instance_vifs(
+                    ctxt, instance)
+                # Update migrate VIFs with the newly claimed PCI devices
+                self._update_migrate_vifs_profile_with_pci(
+                    migrate_data.vifs, port_id_to_pci)
         finally:
             self.driver.cleanup_live_migration_destination_check(ctxt,
                 dest_check_data)

@@ -7008,8 +7011,12 @@ class ComputeManager(manager.Manager):
         # determine if it should wait for a 'network-vif-plugged' event
         # from neutron before starting the actual guest transfer in the
         # hypervisor
+        using_multiple_port_bindings = (
+            'vifs' in migrate_data and migrate_data.vifs)
         migrate_data.wait_for_vif_plugged = (
-            CONF.compute.live_migration_wait_for_vif_plug)
+            CONF.compute.live_migration_wait_for_vif_plug and
+            using_multiple_port_bindings
+        )

         # NOTE(tr3buchet): setup networks on destination host
         self.network_api.setup_networks_on_host(context, instance,

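Taken together, the two hunks restore the Rocky-era contract: migrate_data.vifs is only populated when Neutron's binding-extended extension is present, and its presence is what later enables waiting for network-vif-plugged events. Below is a minimal, self-contained sketch of that control flow; it uses stand-in names (FakeNetworkAPI, plain dicts) rather than Nova's real objects and RPC plumbing, so treat it as an illustration only.

# Illustrative only: mirrors the guarded vifs population and the
# wait_for_vif_plugged calculation from the hunks above, with toy types.

class FakeNetworkAPI(object):
    def __init__(self, has_binding_extended):
        self._has_binding_extended = has_binding_extended

    def supports_port_binding_extension(self, ctxt):
        return self._has_binding_extended


def check_destination(network_api, ctxt, migrate_data, network_info):
    # Only attach 'vifs' when the binding-extended extension exists, so its
    # presence can later signal the multiple-port-binding workflow.
    if network_api.supports_port_binding_extension(ctxt):
        migrate_data['vifs'] = [{'port_id': vif['id']} for vif in network_info]
    return migrate_data


def wait_for_vif_plugged(conf_value, migrate_data):
    # Mirrors the second hunk: only wait for network-vif-plugged events
    # when the multiple-port-binding workflow is actually in use.
    using_multiple_port_bindings = bool(migrate_data.get('vifs'))
    return conf_value and using_multiple_port_bindings


if __name__ == '__main__':
    nw_info = [{'id': 'port-1'}]
    with_ext = check_destination(FakeNetworkAPI(True), None, {}, nw_info)
    without_ext = check_destination(FakeNetworkAPI(False), None, {}, nw_info)
    print(wait_for_vif_plugged(True, with_ext))     # True
    print(wait_for_vif_plugged(True, without_ext))  # False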

nova/tests/functional/regressions/test_bug_1888395.py (+50, -9)

@@ -13,6 +13,9 @@
import fixtures
import mock
from lxml import etree
import six.moves.urllib.parse as urlparse
from nova import context
from nova.network.neutronv2 import api as neutron
from nova.network.neutronv2 import constants as neutron_constants
@@ -20,6 +23,7 @@ from nova.tests.functional import integrated_helpers
from nova.tests.functional.libvirt import base as libvirt_base
from nova.tests.unit.virt.libvirt import fake_os_brick_connector
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt.libvirt import guest as libvirt_guest
class TestLiveMigrationWithoutMultiplePortBindings(
@@ -83,8 +87,51 @@ class TestLiveMigrationWithoutMultiplePortBindings(
self.computes[host] = compute
self.ctxt = context.get_admin_context()
# TODO(sean-k-mooney): remove this when it is part of ServersTestBase
self.useFixture(fixtures.MonkeyPatch(
'nova.tests.unit.virt.libvirt.fakelibvirt.Domain.migrateToURI3',
self._migrate_stub))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.migration._update_serial_xml',
self._update_serial_xml_stub))
def _update_serial_xml_stub(self, xml_doc, migrate_data):
return xml_doc
def _migrate_stub(self, domain, destination, params, flags):
"""Stub out migrateToURI3."""
src_hostname = domain._connection.hostname
dst_hostname = urlparse.urlparse(destination).netloc
# In a real live migration, libvirt and QEMU on the source and
# destination talk it out, resulting in the instance starting to exist
# on the destination. Fakelibvirt cannot do that, so we have to
# manually create the "incoming" instance on the destination
# fakelibvirt.
dst = self.computes[dst_hostname]
dst.driver._host.get_connection().createXML(
params['destination_xml'],
'fake-createXML-doesnt-care-about-flags')
src = self.computes[src_hostname]
conn = src.driver._host.get_connection()
# because migrateToURI3 is spawned in a background thread, this method
# does not block the upper nova layers. Because we don't want nova to
# think the live migration has finished until this method is done, the
# last thing we do is make fakelibvirt's Domain.jobStats() return
# VIR_DOMAIN_JOB_COMPLETED.
server = etree.fromstring(
params['destination_xml']
).find('./uuid').text
dom = conn.lookupByUUIDString(server)
dom.complete_job()
def test_live_migrate(self):
@mock.patch('nova.virt.libvirt.guest.Guest.get_job_info')
def test_live_migrate(self, mock_get_job_info):
mock_get_job_info.return_value = libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED)
flavors = self.api.get_flavors()
flavor = flavors[0]
server_req = self._build_minimal_create_server_request(
@@ -108,15 +155,9 @@ class TestLiveMigrationWithoutMultiplePortBindings(
}
)
# FIXME(sean-k-mooney): this should succeed but because of bug #1888395
# it will fail.
# self._wait_for_server_parameter(
# server, {'OS-EXT-SRV-ATTR:host': 'end_host', 'status': 'ACTIVE'})
# because of the bug the migration will fail in pre_live_migrate so
# the vm should still be active on the start_host
self._wait_for_server_parameter(
self.api, server,
{'OS-EXT-SRV-ATTR:host': 'start_host', 'status': 'ACTIVE'})
{'OS-EXT-SRV-ATTR:host': 'end_host', 'status': 'ACTIVE'})
msg = "NotImplementedError: Cannot load 'vif_type' in the base class"
self.assertIn(msg, self.stdlog.logger.output)
self.assertNotIn(msg, self.stdlog.logger.output)

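The stub above has to do two jobs that real libvirt and QEMU would do on their own: bring the "incoming" guest into existence on the destination compute picked out of the migration URI, and let the migration monitor observe a finished job. Here is a stripped-down sketch of that routing-and-completion flow; the classes and the computes dict are stand-ins, not Nova's fixtures.

# Illustrative only: destination compute chosen from the migration URI's
# netloc, and a job flag the stub flips so a poller can see completion.
from urllib.parse import urlparse


class FakeDomain(object):
    def __init__(self):
        self.job_completed = False

    def complete_job(self):
        self.job_completed = True


computes = {'start_host': 'src-compute', 'end_host': 'dst-compute'}


def migrate_stub(domain, destination):
    # e.g. 'qemu+tcp://end_host/system' -> 'end_host'
    dst = computes[urlparse(destination).netloc]
    # the real stub also creates the guest on the destination connection here
    domain.complete_job()  # lets the monitoring side see a finished job
    return dst


dom = FakeDomain()
assert migrate_stub(dom, 'qemu+tcp://end_host/system') == 'dst-compute'
assert dom.job_completed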
nova/tests/unit/compute/test_compute.py (+10, -0)

@@ -18,6 +18,7 @@
"""Tests for compute service."""
import datetime
import fixtures as std_fixtures
from itertools import chain
import operator
import sys
@@ -6142,9 +6143,15 @@ class ComputeTestCase(BaseTestCase,
# Confirm setup_compute_volume is called when volume is mounted.
def stupid(*args, **kwargs):
return fake_network.fake_get_instance_nw_info(self)
if CONF.use_neutron:
self.stub_out(
'nova.network.neutronv2.api.API.get_instance_nw_info', stupid)
self.useFixture(
std_fixtures.MonkeyPatch(
'nova.network.neutronv2.api.'
'API.supports_port_binding_extension',
lambda *args: True))
else:
self.stub_out('nova.network.api.API.get_instance_nw_info', stupid)
@@ -6155,6 +6162,9 @@
fake_notifier.NOTIFICATIONS = []
migrate_data = objects.LibvirtLiveMigrateData(
is_shared_instance_path=False)
vifs = migrate_data_obj.VIFMigrateData.create_skeleton_migrate_vifs(
stupid())
migrate_data.vifs = vifs
mock_pre.return_value = migrate_data
with mock.patch.object(self.compute.network_api,


nova/tests/unit/compute/test_compute_mgr.py (+32, -3)

@@ -15,6 +15,7 @@
import contextlib
import copy
import datetime
import fixtures as std_fixtures
import time
from cinderclient import exceptions as cinder_exception
@@ -3195,20 +3196,48 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_event.assert_called_once_with(
self.context, 'compute_check_can_live_migrate_destination',
CONF.host, instance.uuid)
return result
def test_check_can_live_migrate_destination_success(self):
self.useFixture(std_fixtures.MonkeyPatch(
'nova.network.neutronv2.api.API.supports_port_binding_extension',
lambda *args: True))
self._test_check_can_live_migrate_destination()
def test_check_can_live_migrate_destination_fail(self):
self.useFixture(std_fixtures.MonkeyPatch(
'nova.network.neutronv2.api.API.supports_port_binding_extension',
lambda *args: True))
self.assertRaises(
test.TestingException,
self._test_check_can_live_migrate_destination,
do_raise=True)
test.TestingException,
self._test_check_can_live_migrate_destination,
do_raise=True)
def test_check_can_live_migrate_destination_contains_vifs(self):
self.useFixture(std_fixtures.MonkeyPatch(
'nova.network.neutronv2.api.API.supports_port_binding_extension',
lambda *args: True))
migrate_data = self._test_check_can_live_migrate_destination()
self.assertIn('vifs', migrate_data)
self.assertIsNotNone(migrate_data.vifs)
def test_check_can_live_migrate_destination_no_binding_extended(self):
self.useFixture(std_fixtures.MonkeyPatch(
'nova.network.neutronv2.api.API.supports_port_binding_extension',
lambda *args: False))
migrate_data = self._test_check_can_live_migrate_destination()
self.assertNotIn('vifs', migrate_data)
def test_check_can_live_migrate_destination_src_numa_lm_false(self):
self.useFixture(std_fixtures.MonkeyPatch(
'nova.network.neutronv2.api.API.supports_port_binding_extension',
lambda *args: True))
self._test_check_can_live_migrate_destination(src_numa_lm=False)
def test_check_can_live_migrate_destination_src_numa_lm_true(self):
self.useFixture(std_fixtures.MonkeyPatch(
'nova.network.neutronv2.api.API.supports_port_binding_extension',
lambda *args: True))
self._test_check_can_live_migrate_destination(src_numa_lm=True)
def test_dest_can_numa_live_migrate(self):


nova/tests/unit/virt/libvirt/fakelibvirt.py (+12, -1)

@@ -770,6 +770,7 @@ class Domain(object):
         self._has_saved_state = False
         self._snapshots = {}
         self._id = self._connection._id_counter
+        self._job_type = VIR_DOMAIN_JOB_UNBOUNDED

     def _parse_definition(self, xml):
         try:

@@ -1237,7 +1238,17 @@
         return [0] * 12

     def jobStats(self, flags=0):
-        return {}
+        # NOTE(artom) By returning VIR_DOMAIN_JOB_UNBOUNDED, we're pretending a
+        # job is constantly running. Tests are expected to call the
+        # complete_job or fail_job methods when they're ready for jobs (read:
+        # live migrations) to "complete".
+        return {'type': self._job_type}
+
+    def complete_job(self):
+        self._job_type = VIR_DOMAIN_JOB_COMPLETED
+
+    def fail_job(self):
+        self._job_type = VIR_DOMAIN_JOB_FAILED

     def injectNMI(self, flags=0):
         return 0

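A short usage sketch of the new hooks: a toy class with the same jobStats/complete_job/fail_job surface, plus a check standing in for Nova's live-migration monitor polling the job type. The class below is a stand-in, not fakelibvirt's Domain; the numeric constants follow libvirt's virDomainJobType ordering.

# Toy illustration of how a test interacts with the job hooks added above.
VIR_DOMAIN_JOB_UNBOUNDED = 2   # job still running
VIR_DOMAIN_JOB_COMPLETED = 3
VIR_DOMAIN_JOB_FAILED = 4


class ToyDomain(object):
    """Stand-in with the same jobStats/complete_job/fail_job surface."""

    def __init__(self):
        self._job_type = VIR_DOMAIN_JOB_UNBOUNDED

    def jobStats(self, flags=0):
        return {'type': self._job_type}

    def complete_job(self):
        self._job_type = VIR_DOMAIN_JOB_COMPLETED

    def fail_job(self):
        self._job_type = VIR_DOMAIN_JOB_FAILED


def job_finished(domain):
    # A monitor would poll jobStats() until the type is no longer UNBOUNDED,
    # i.e. until a test has called complete_job() or fail_job().
    return domain.jobStats()['type'] != VIR_DOMAIN_JOB_UNBOUNDED


dom = ToyDomain()
assert not job_finished(dom)   # the job "runs" until the test says otherwise
dom.complete_job()             # the test decides when the migration finishes
assert dom.jobStats()['type'] == VIR_DOMAIN_JOB_COMPLETED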

nova/tests/unit/virt/test_virt_drivers.py (+5, -0)

@@ -39,6 +39,7 @@ from nova.tests import fixtures as nova_fixtures
 from nova.tests.unit import fake_block_device
 from nova.tests.unit.image import fake as fake_image
 from nova.tests.unit import utils as test_utils
+from nova.tests.unit.virt.libvirt import fakelibvirt
 from nova.virt import block_device as driver_block_device
 from nova.virt import event as virtevent
 from nova.virt import fake

@@ -617,6 +618,10 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
         network_info = test_utils.get_test_network_info()
         self.connection.unfilter_instance(instance_ref, network_info)

+    @mock.patch(
+        'nova.tests.unit.virt.libvirt.fakelibvirt.Domain.jobStats',
+        new=mock.Mock(return_value={
+            'type': fakelibvirt.VIR_DOMAIN_JOB_COMPLETED}))
     def test_live_migration(self):
         instance_ref, network_info = self._get_running_instance()
         fake_context = context.RequestContext('fake', 'fake')


releasenotes/notes/restore-rocky-portbinding-semantics-48e9b1fa969cc5e9.yaml (+14, -0)

@@ -0,0 +1,14 @@
---
fixes:
  - |
    In the Rocky (18.0.0) release, support was added to nova to use neutron's
    multiple port binding feature when the ``binding-extended`` API extension
    is available. In the Train (20.0.0) release, the SR-IOV live migration
    feature broke the semantics of the ``vifs`` field in the ``migrate_data``
    object, which signals whether the new multiple port binding workflow
    should be used, by always populating it even when the ``binding-extended``
    API extension is not present. This broke live migration for any
    deployment that did not support the optional ``binding-extended`` API
    extension. The Rocky behavior has now been restored, enabling live
    migration using the single port binding workflow when multiple port
    bindings are not available.

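For operators wondering whether a given cloud is affected, one way to check is to list Neutron's API extensions and look for the ``binding-extended`` alias mentioned above. The snippet below is a rough sketch, not a supported client: the endpoint and token are placeholders for your deployment's values, and a standard client that lists network extensions gives the same information.

# Rough sketch: query Neutron's extension list and check for the
# 'binding-extended' alias referenced in the note above.
import json
import urllib.request

NEUTRON_ENDPOINT = 'http://controller:9696'   # placeholder
TOKEN = 'replace-with-a-keystone-token'       # placeholder

request = urllib.request.Request(
    NEUTRON_ENDPOINT + '/v2.0/extensions',
    headers={'X-Auth-Token': TOKEN})
with urllib.request.urlopen(request) as response:
    extensions = json.load(response)['extensions']

aliases = {extension['alias'] for extension in extensions}
print('binding-extended supported:', 'binding-extended' in aliases)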