Add source dev parsing for vdpa interfaces

This change extends the guest xml parsing such that
the source device path can be extracted from interface
elements of type vdpa.

This is required to identify the interface to remove when
detaching a vdpa port from a domain.

This change fixes a latent bug in the libvirt fixture
related to the domain xml generation for vdpa interfaces.

Change-Id: I5f41170e7038f4b872066de4b1ad509113034960
This commit is contained in:
Sean Mooney 2022-05-07 19:47:58 +03:00
parent ddcc286ee1
commit 6f1c7ab2e7
9 changed files with 135 additions and 71 deletions

View File

@ -5,6 +5,11 @@ Using ports vnic_type='vdpa'
Introduced support for vDPA.
.. versionadded:: 26.0.0 (Zed)
Added support for most instance move operations (except live migration),
and the interface attach/detach operations.
.. important::
The functionality described below is only supported by the
libvirt/KVM virt driver.
@ -63,7 +68,7 @@ vDPA support depends on kernel 5.7+, Libvirt 6.9.0+ and QEMU 5.1+.
vDPA lifecycle operations
~~~~~~~~~~~~~~~~~~~~~~~~~
At this time vDPA ports can only be added to a VM when it is first created.
To boot a VM with vDPA ports they must first be created in neutron.
To do this the normal SR-IOV workflow is used whereby the port is first created
in neutron and passed into nova as part of the server create request.
@ -71,22 +76,3 @@ in neutron and passed into nova as part of the server create request.
openstack port create --network <my network> --vnic-type vdpa vdpa-port
openstack server create --flavor <my-flavor> --image <my-image> --port <vdpa-port uuid> vdpa-vm
When vDPA support was first introduced, no move operations were supported.
As this documentation was added in the change that enabled some move operations,
the following should be interpreted both as a retrospective and forward-looking
viewpoint, and treated as a living document which will be updated as functionality evolves.
23.0.0: initial support is added for creating a VM with vDPA ports, move operations
are blocked in the API but implemented in code.
26.0.0: support for all move operation except live migration is tested and api blocks are removed.
25.x.y: (planned) api block removal backported to stable/Yoga
24.x.y: (planned) api block removal backported to stable/Xena
23.x.y: (planned) api block removal backported to stable/Wallaby
26.0.0: (in progress) interface attach/detach, suspend/resume and hot plug live migration
are implemented to fully support all lifecycle operations on instances with vDPA ports.
.. note::
The ``(planned)`` and ``(in progress)`` qualifiers will be removed when those items are
completed. If your current version of the document contains those qualifiers then those
lifecycle operations are unsupported.

View File

@ -118,6 +118,7 @@ MIN_COMPUTE_MOVE_WITH_EXTENDED_RESOURCE_REQUEST = 59
MIN_COMPUTE_INT_ATTACH_WITH_EXTENDED_RES_REQ = 60
SUPPORT_VNIC_TYPE_REMOTE_MANAGED = 61
MIN_COMPUTE_VDPA_ATTACH_DETACH = 62
# FIXME(danms): Keep a global cache of the cells we find the
# first time we look. This needs to be refreshed on a timer or
@ -277,7 +278,7 @@ def reject_vtpm_instances(operation):
return outer
def reject_vdpa_instances(operation):
def reject_vdpa_instances(operation, until=None):
"""Reject requests to decorated function if instance has vDPA interfaces.
Raise OperationNotSupportedForVDPAInterfaces if operations involves one or
@ -291,8 +292,18 @@ def reject_vdpa_instances(operation):
vif['vnic_type'] == network_model.VNIC_TYPE_VDPA
for vif in instance.get_network_info()
):
raise exception.OperationNotSupportedForVDPAInterface(
instance_uuid=instance.uuid, operation=operation)
reject = True
if until is not None:
min_ver = objects.service.get_minimum_version_all_cells(
nova_context.get_admin_context(), ['nova-compute']
)
if min_ver >= until:
reject = False
if reject:
raise exception.OperationNotSupportedForVDPAInterface(
instance_uuid=instance.uuid, operation=operation
)
return f(self, context, instance, *args, **kw)
return inner
return outer
@ -5351,9 +5362,14 @@ class API:
instance_uuid=instance.uuid)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED],
task_state=[None])
@reject_vdpa_instances(
instance_actions.ATTACH_INTERFACE, until=MIN_COMPUTE_VDPA_ATTACH_DETACH
)
@check_instance_state(
vm_state=[
vm_states.ACTIVE, vm_states.PAUSED, vm_states.STOPPED
], task_state=[None]
)
def attach_interface(self, context, instance, network_id, port_id,
requested_ip, tag=None):
"""Use hotplug to add an network adapter to an instance."""
@ -5366,12 +5382,6 @@ class API:
# port.resource_request field which only returned for admins
port = self.network_api.show_port(
context.elevated(), port_id)['port']
if port.get('binding:vnic_type', "normal") == "vdpa":
# FIXME(sean-k-mooney): Attach works but detach results in a
# QEMU error; blocked until this is resolved
raise exception.OperationNotSupportedForVDPAInterface(
instance_uuid=instance.uuid,
operation=instance_actions.ATTACH_INTERFACE)
if port.get('binding:vnic_type', 'normal') in (
network_model.VNIC_TYPE_ACCELERATOR_DIRECT,
@ -5390,37 +5400,24 @@ class API:
requested_ip=requested_ip, tag=tag)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED],
task_state=[None])
@reject_vdpa_instances(
instance_actions.DETACH_INTERFACE, until=MIN_COMPUTE_VDPA_ATTACH_DETACH
)
@check_instance_state(
vm_state=[
vm_states.ACTIVE, vm_states.PAUSED, vm_states.STOPPED
], task_state=[None]
)
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
# FIXME(sean-k-mooney): Detach currently results in a failure to remove
# the interface from the live libvirt domain, so while the networking
# is torn down on the host the vDPA device is still attached to the VM.
# This is likely a libvirt/qemu bug so block detach until that is
# resolved.
for vif in instance.get_network_info():
if vif['id'] == port_id:
if vif['vnic_type'] == 'vdpa':
raise exception.OperationNotSupportedForVDPAInterface(
instance_uuid=instance.uuid,
operation=instance_actions.DETACH_INTERFACE)
if vif['vnic_type'] in (
network_model.VNIC_TYPE_ACCELERATOR_DIRECT,
network_model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL):
raise exception.ForbiddenPortsWithAccelerator()
break
else:
# NOTE(sean-k-mooney) This should never happen but just in case the
# info cache does not have the port we are detaching we can fall
# back to neutron.
port = self.network_api.show_port(context, port_id)['port']
if port.get('binding:vnic_type', 'normal') == 'vdpa':
raise exception.OperationNotSupportedForVDPAInterface(
instance_uuid=instance.uuid,
operation=instance_actions.DETACH_INTERFACE)
self._record_action_start(
context, instance, instance_actions.DETACH_INTERFACE)

View File

@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
SERVICE_VERSION = 61
SERVICE_VERSION = 62
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@ -216,6 +216,9 @@ SERVICE_VERSION_HISTORY = (
# Version 61: Compute RPC v6.0:
# Add support for remotely-managed ports (vnic-type 'remote-managed')
{'compute_rpc': '6.0'},
# Version 62: Compute RPC v6.0:
# Add support for VDPA port attach/detach
{'compute_rpc': '6.0'},
)
# This is used to raise an error at service startup if older than N-1 computes

View File

@ -1434,20 +1434,28 @@ class Domain(object):
'Test attempts to add more than 8 PCI devices. This is '
'not supported by the fake libvirt implementation.')
nic['func'] = func
# this branch covers most interface types with a source
# such as linux bridge interfaces.
if 'source' in nic:
if nic['type'] in ('ethernet',):
# this branch covers kernel ovs interfaces
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
<source %(type)s='%(source)s'/>
<target dev='tap274487d1-6%(func)s'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x%(func)s'/>
</interface>''' % nic
elif nic['type'] in ('ethernet',):
# this branch covers kernel ovs interfaces
elif nic['type'] in ('vdpa',):
# this branch covers hardware offloaded ovs with vdpa
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
<source dev='%(source)s'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x%(func)s'/>
</interface>''' % nic
# this branch covers most interface types with a source
# such as linux bridge interfaces.
elif 'source' in nic:
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
<source %(type)s='%(source)s'/>
<target dev='tap274487d1-6%(func)s'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x%(func)s'/>

View File

@ -1246,22 +1246,53 @@ class VDPAServersTest(_PCIServersTestBase):
'not supported for instance with vDPA ports',
ex.response.text)
def test_attach_interface_service_version_61(self):
    """Assert attach is still rejected on pre-62 compute services.

    With the minimum nova-compute service version pinned to 61 (older
    than MIN_COMPUTE_VDPA_ATTACH_DETACH), attaching a vDPA port must
    still raise the "not supported" error checked by _test_common.
    """
    patcher = mock.patch(
        "nova.objects.service.get_minimum_version_all_cells",
        return_value=61,
    )
    with patcher:
        self._test_common(self._attach_interface, uuids.vdpa_port)
def test_attach_interface(self):
self.start_vdpa_compute()
hostname = self.start_vdpa_compute()
# create the port and a server, but don't attach the port to the server
# yet
vdpa_port = self.create_vdpa_port()
server = self._create_server(networks='none')
vdpa_port = self.create_vdpa_port()
# attempt to attach the port to the server
ex = self.assertRaises(
client.OpenStackApiException,
self._attach_interface, server, vdpa_port['id'])
self.assertIn(
'not supported for instance with vDPA ports',
ex.response.text)
self._attach_interface(server, vdpa_port['id'])
# ensure the binding details sent to "neutron" were correct
port = self.neutron.show_port(vdpa_port['id'])['port']
self.assertIn('binding:profile', port)
self.assertEqual(
{
'pci_vendor_info': '15b3:101e',
'pci_slot': '0000:06:00.4',
'physical_network': 'physnet4',
},
port['binding:profile'],
)
self.assertEqual(hostname, port['binding:host_id'])
self.assertEqual(server['id'], port['device_id'])
def test_detach_interface_service_version_61(self):
    """Assert detach is still rejected on pre-62 compute services.

    With the minimum nova-compute service version pinned to 61 (older
    than MIN_COMPUTE_VDPA_ATTACH_DETACH), detaching a vDPA port must
    still raise the "not supported" error checked by _test_common.
    """
    patcher = mock.patch(
        "nova.objects.service.get_minimum_version_all_cells",
        return_value=61,
    )
    with patcher:
        self._test_common(self._detach_interface, uuids.vdpa_port)
def test_detach_interface(self):
self._test_common(self._detach_interface, uuids.vdpa_port)
self.start_vdpa_compute()
vdpa_port, server = self._create_port_and_server()
# ensure the binding details sent to "neutron" were correct
port = self.neutron.show_port(vdpa_port['id'])['port']
self.assertEqual(server['id'], port['device_id'])
self._detach_interface(server, vdpa_port['id'])
# ensure the port is no longer owned by the vm
port = self.neutron.show_port(vdpa_port['id'])['port']
self.assertEqual('', port['device_id'])
self.assertEqual({}, port['binding:profile'])
def test_shelve_offload(self):
hostname = self.start_vdpa_compute()

View File

@ -404,9 +404,21 @@ class GuestTestCase(test.NoDBTestCase):
self.assertIsNotNone(
self.guest.get_interface_by_cfg(
cfg, from_persistent_config=True))
cfg = vconfig.LibvirtConfigGuestInterface()
# NOTE(sean-k-mooney): a default constructed object is not valid
# to pass to get_interface_by_cfg as so we just modify the xml to
# make it not match
cfg.parse_str("""
<interface type="wont_match">
<mac address="fa:16:3e:f9:af:ae"/>
<model type="virtio"/>
<driver name="qemu"/>
<source bridge="qbr84008d03-11"/>
<target dev="tap84008d03-11"/>
</interface>""")
self.assertIsNone(
self.guest.get_interface_by_cfg(
vconfig.LibvirtConfigGuestInterface(),
cfg,
from_persistent_config=True))
self.domain.XMLDesc.assert_has_calls(
[

View File

@ -1920,6 +1920,8 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
if self.net_type == 'direct':
self.source_dev = c.get('dev')
self.source_mode = c.get('mode', 'private')
elif self.net_type == 'vdpa':
self.source_dev = c.get('dev')
elif self.net_type == 'vhostuser':
self.vhostuser_type = c.get('type')
self.vhostuser_mode = c.get('mode')

View File

@ -254,8 +254,17 @@ class Guest(object):
"""
if cfg:
LOG.debug(f'looking for interface given config: {cfg}')
interfaces = self.get_all_devices(
type(cfg), from_persistent_config)
if not interfaces:
LOG.debug(f'No interface of type: {type(cfg)} found in domain')
return None
# FIXME(sean-k-mooney): we should be able to print the list of
# interfaces however some tests use incomplete objects that cant
# be printed due to incomplete mocks or defects in the libvirt
# fixture. Lets address this later.
# LOG.debug(f'within interfaces: {list(interfaces)}')
for interface in interfaces:
# NOTE(leehom) LibvirtConfigGuest get from domain and
# LibvirtConfigGuest generated by
@ -264,6 +273,16 @@ class Guest(object):
# equality check based on available information on nova side
if cfg == interface:
return interface
else:
# NOTE(sean-k-mooney): {list(interfaces)} could be used
# instead of self._domain.XMLDesc(0) once all tests have
# printable interfaces see the comment above ^.
# While the XML is more verbose it should always work
# for our current test suite and in production code.
LOG.debug(
f'interface for config: {cfg}'
f'not found in domain: {self._domain.XMLDesc(0)}'
)
return None
def get_vcpus_info(self):

View File

@ -34,7 +34,13 @@ extras =
hyperv
vmware
passenv =
OS_DEBUG GENERATE_HASHES
OS_DEBUG
GENERATE_HASHES
# NOTE(sean-k-mooney) optimization is enabled by default and when enabled
# asserts are compiled out. Disable optimization to allow asserts in
# nova to fire in unit and functional tests. This can be useful for
# debugging issues with fixtures and mocks.
PYTHONOPTIMIZE
# there is also secret magic in subunit-trace which lets you run in a fail only
# mode. To do this define the TRACE_FAILONLY environmental variable.
commands =