tests: Add functional test for vDPA device

Add a simple test to validate behavior with vDPA devices. Most of this
change fleshes out the fixtures we use to fake vDPA devices and makes
general tweaks to improve them.

Change-Id: I1423d8a9652751b667463f90c69eae1a054dd776
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
Co-authored-by: Sean Mooney <work@seanmooney.info>
Stephen Finucane 2021-03-11 15:11:05 +00:00
parent 45798adf5a
commit 22fcfcda23
6 changed files with 483 additions and 102 deletions
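
A quick sketch of how the reworked fixtures compose, before the diffs themselves. This is illustrative only: it mirrors the VDPAServersTest.start_compute helper added below, and the import path simply reflects where fakelibvirt lived at the time of this change. A VF is registered via HostPCIDevicesInfo.add_device and then passed as the parent of a HostVDPADevicesInfo entry, which renders the vDPA nodedev XML the driver will see.

# Rough sketch only: compose the fake vDPA fixtures added in this change.
# Vendor/product IDs mirror VDPAServersTest.start_compute below.
from nova.tests.unit.virt.libvirt import fakelibvirt

pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=0, num_pfs=0, num_vfs=0)
vdpa_info = fakelibvirt.HostVDPADevicesInfo()

# A PF at 0000:06:00.0 with a single child VF at 0000:06:00.1.
pci_info.add_device(
    dev_type='PF', bus=0x6, slot=0x0, function=0,
    iommu_group=40, numa_node=0, vf_ratio=1,
    vend_id='15b3', vend_name='Mellanox Technologies',
    prod_id='101d', prod_name='MT2892 Family [ConnectX-6 Dx]',
    driver_name='mlx5_core')
vf = pci_info.add_device(
    dev_type='VF', bus=0x6, slot=0x0, function=1,
    iommu_group=41, numa_node=0, vf_ratio=1, parent=(0x6, 0x0, 0),
    vend_id='15b3', vend_name='Mellanox Technologies',
    prod_id='101e', prod_name='ConnectX Family mlx5Gen Virtual Function',
    driver_name='mlx5_core')

# The vDPA device is backed by the VF and exposes /dev/vhost-vdpa-0.
vdpa = vdpa_info.add_device('vdpa_vdpa0', 0, vf)
print(vdpa.XMLDesc(0))  # nodedev XML with <capability type='vdpa'>

Passing pci_info and vdpa_info to start_compute (see the base class change below) wires both into the fake libvirt connection.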

View File

@ -441,7 +441,7 @@ class PciDeviceStats(object):
if after_count < before_count:
LOG.debug(
'Dropped %d devices due to mismatched PCI attribute(s)',
'Dropped %d device(s) due to mismatched PCI attribute(s)',
before_count - after_count
)
@ -458,7 +458,7 @@ class PciDeviceStats(object):
if after_count < before_count:
LOG.debug(
'Dropped %d devices as they are on the wrong NUMA node(s)',
'Dropped %d device(s) as they are on the wrong NUMA node(s)',
before_count - after_count
)
@ -474,7 +474,7 @@ class PciDeviceStats(object):
if after_count < before_count:
LOG.debug(
'Dropped %d devices as they are PFs which we have not '
'Dropped %d device(s) as they are PFs which we have not '
'requested',
before_count - after_count
)
@ -491,7 +491,7 @@ class PciDeviceStats(object):
if after_count < before_count:
LOG.debug(
'Dropped %d devices as they are VDPA devices which we have '
'Dropped %d device(s) as they are VDPA devices which we have '
'not requested',
before_count - after_count
)

View File

@ -413,6 +413,24 @@ class InstanceHelperMixin:
fake_notifier.wait_for_versioned_notifications('instance.reboot.end')
return self._wait_for_state_change(server, expected_state)
def _attach_interface(self, server, port_uuid):
"""attach a neutron port to a server."""
body = {
"interfaceAttachment": {
"port_id": port_uuid
}
}
attachment = self.api.attach_interface(server['id'], body)
fake_notifier.wait_for_versioned_notifications(
'instance.interface_attach.end')
return attachment
def _detach_interface(self, server, port_uuid):
"""detach a neutron port form a server."""
self.api.detach_interface(server['id'], port_uuid)
fake_notifier.wait_for_versioned_notifications(
'instance.interface_detach.end')
def _rebuild_server(self, server, image_uuid, expected_state='ACTIVE'):
"""Rebuild a server."""
self.api.post_server_action(

View File

@ -85,7 +85,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
return self.start_service('scheduler')
def _get_connection(
self, host_info=None, pci_info=None, mdev_info=None,
self, host_info=None, pci_info=None, mdev_info=None, vdpa_info=None,
libvirt_version=None, qemu_version=None, hostname=None,
):
if not host_info:
@ -107,12 +107,14 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
host_info=host_info,
pci_info=pci_info,
mdev_info=mdev_info,
vdpa_info=vdpa_info,
hostname=hostname)
return fake_connection
def start_compute(
self, hostname='compute1', host_info=None, pci_info=None,
mdev_info=None, libvirt_version=None, qemu_version=None,
mdev_info=None, vdpa_info=None, libvirt_version=None,
qemu_version=None,
):
"""Start a compute service.
@ -129,8 +131,8 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
def _start_compute(hostname, host_info):
fake_connection = self._get_connection(
host_info, pci_info, mdev_info, libvirt_version, qemu_version,
hostname,
host_info, pci_info, mdev_info, vdpa_info, libvirt_version,
qemu_version, hostname,
)
# This is fun. Firstly we need to do a global'ish mock so we can
# actually start the service.
@ -299,8 +301,8 @@ class LibvirtNeutronFixture(nova_fixtures.NeutronFixture):
'subnet_id': subnet_4['id']
}
],
'binding:vif_type': 'hw_veb',
'binding:vif_details': {'vlan': 42},
'binding:vif_type': 'hw_veb',
'binding:vnic_type': 'direct',
}

View File

@ -23,6 +23,7 @@ import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
import nova
@ -275,9 +276,7 @@ class SRIOVServersTest(_PCIServersTestBase):
self.assertNotIn('binding:profile', port)
# create a server using the VF via neutron
flavor_id = self._create_flavor()
self._create_server(
flavor_id=flavor_id,
networks=[
{'port': base.LibvirtNeutronFixture.network_4_port_1['id']},
],
@ -548,9 +547,7 @@ class SRIOVServersTest(_PCIServersTestBase):
self.neutron.create_port({'port': self.neutron.network_4_port_1})
# create a server using the VF via neutron
flavor_id = self._create_flavor()
self._create_server(
flavor_id=flavor_id,
networks=[
{'port': base.LibvirtNeutronFixture.network_4_port_1['id']},
],
@ -672,6 +669,222 @@ class SRIOVAttachDetachTest(_PCIServersTestBase):
self.neutron.sriov_pf_port2['id'])
class VDPAServersTest(_PCIServersTestBase):
# this is needed for os_compute_api:os-migrate-server:migrate policy
ADMIN_API = True
microversion = 'latest'
# Whitelist both the PF and VF; in reality, you probably wouldn't do this
# but we want to make sure that the PF is correctly taken off the table
# once any VF is used
PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
{
'vendor_id': '15b3',
'product_id': '101d',
'physical_network': 'physnet4',
},
{
'vendor_id': '15b3',
'product_id': '101e',
'physical_network': 'physnet4',
},
)]
# No need for aliases as these tests will request SRIOV via neutron
PCI_ALIAS = []
NUM_PFS = 1
NUM_VFS = 4
FAKE_LIBVIRT_VERSION = 6_009_000 # 6.9.0
FAKE_QEMU_VERSION = 5_001_000 # 5.1.0
def setUp(self):
super().setUp()
# The ultimate base class _IntegratedTestBase uses NeutronFixture but
# we need a bit more intelligent neutron for these tests. Applying the
# new fixture here means that we re-stub what the previous neutron
# fixture already stubbed.
self.neutron = self.useFixture(base.LibvirtNeutronFixture(self))
def start_compute(self):
vf_ratio = self.NUM_VFS // self.NUM_PFS
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=0, num_vfs=0)
vdpa_info = fakelibvirt.HostVDPADevicesInfo()
pci_info.add_device(
dev_type='PF',
bus=0x6,
slot=0x0,
function=0,
iommu_group=40, # totally arbitrary number
numa_node=0,
vf_ratio=vf_ratio,
vend_id='15b3',
vend_name='Mellanox Technologies',
prod_id='101d',
prod_name='MT2892 Family [ConnectX-6 Dx]',
driver_name='mlx5_core')
for idx in range(self.NUM_VFS):
vf = pci_info.add_device(
dev_type='VF',
bus=0x6,
slot=0x0,
function=idx + 1,
iommu_group=idx + 41, # totally arbitrary number + offset
numa_node=0,
vf_ratio=vf_ratio,
parent=(0x6, 0x0, 0),
vend_id='15b3',
vend_name='Mellanox Technologies',
prod_id='101e',
prod_name='ConnectX Family mlx5Gen Virtual Function',
driver_name='mlx5_core')
vdpa_info.add_device(f'vdpa_vdpa{idx}', idx, vf)
return super().start_compute(
pci_info=pci_info, vdpa_info=vdpa_info,
libvirt_version=self.FAKE_LIBVIRT_VERSION,
qemu_version=self.FAKE_QEMU_VERSION)
def create_vdpa_port(self):
vdpa_port = {
'id': uuids.vdpa_port,
'network_id': self.neutron.network_4['id'],
'status': 'ACTIVE',
'mac_address': 'b5:bc:2e:e7:51:ee',
'fixed_ips': [
{
'ip_address': '192.168.4.6',
'subnet_id': self.neutron.subnet_4['id']
}
],
'binding:vif_details': {},
'binding:vif_type': 'ovs',
'binding:vnic_type': 'vdpa',
}
# create the port
self.neutron.create_port({'port': vdpa_port})
return vdpa_port
def test_create_server(self):
"""Create an instance using a neutron-provisioned vDPA VIF."""
orig_create = nova.virt.libvirt.guest.Guest.create
def fake_create(cls, xml, host):
tree = etree.fromstring(xml)
elem = tree.find('./devices/interface/[@type="vdpa"]')
# compare source device
# the MAC address is derived from the neutron port, while the
# source dev path assumes we attach vDPA devs in order
expected = """
<interface type="vdpa">
<mac address="b5:bc:2e:e7:51:ee"/>
<model type="virtio"/>
<source dev="/dev/vhost-vdpa-3"/>
</interface>"""
actual = etree.tostring(elem, encoding='unicode')
self.assertXmlEqual(expected, actual)
return orig_create(xml, host)
self.stub_out(
'nova.virt.libvirt.guest.Guest.create',
fake_create,
)
hostname = self.start_compute()
num_pci = self.NUM_PFS + self.NUM_VFS
# both the PF and VF with vDPA capabilities (dev_type=vdpa) should have
# been counted
self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
# create the port
vdpa_port = self.create_vdpa_port()
# ensure the binding details are currently unset
port = self.neutron.show_port(vdpa_port['id'])['port']
self.assertNotIn('binding:profile', port)
# create a server using the vDPA device via neutron
self._create_server(networks=[{'port': vdpa_port['id']}])
# ensure there is one less VF available and that the PF is no longer
# usable
self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
# ensure the binding details sent to "neutron" were correct
port = self.neutron.show_port(vdpa_port['id'])['port']
self.assertIn('binding:profile', port)
self.assertEqual(
{
'pci_vendor_info': '15b3:101e',
'pci_slot': '0000:06:00.4',
'physical_network': 'physnet4',
},
port['binding:profile'],
)
def _test_common(self, op, *args, **kwargs):
self.start_compute()
# create the port and a server, with the port attached to the server
vdpa_port = self.create_vdpa_port()
server = self._create_server(networks=[{'port': vdpa_port['id']}])
# attempt the unsupported action and ensure it fails
ex = self.assertRaises(
client.OpenStackApiException,
op, server, *args, **kwargs)
self.assertIn(
'not supported for instance with vDPA ports',
ex.response.text)
def test_attach_interface(self):
self.start_compute()
# create the port and a server, but don't attach the port to the server
# yet
vdpa_port = self.create_vdpa_port()
server = self._create_server(networks='none')
# attempt to attach the port to the server
ex = self.assertRaises(
client.OpenStackApiException,
self._attach_interface, server, vdpa_port['id'])
self.assertIn(
'not supported for instance with vDPA ports',
ex.response.text)
def test_detach_interface(self):
self._test_common(self._detach_interface, uuids.vdpa_port)
def test_shelve(self):
self._test_common(self._shelve_server)
def test_suspend(self):
self._test_common(self._suspend_server)
def test_evacuate(self):
self._test_common(self._evacuate_server)
def test_resize(self):
flavor_id = self._create_flavor()
self._test_common(self._resize_server, flavor_id)
def test_cold_migrate(self):
self._test_common(self._migrate_server)
class PCIServersTest(_PCIServersTestBase):
ADMIN_API = True

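One note on the accounting asserted in test_create_server above: with NUM_PFS = 1 and NUM_VFS = 4 the fake host exposes five whitelisted PCI devices, and consuming a single VF for the vDPA port also withholds its parent PF from further use, which is why the free count drops by two rather than one. In sketch form:

# Illustration of the free-count assertion in test_create_server above.
NUM_PFS, NUM_VFS = 1, 4
num_pci = NUM_PFS + NUM_VFS  # 5 devices tracked for the host
consumed = 1 + 1             # the allocated VF plus its now-unusable parent PF
assert num_pci - consumed == 3  # matches assertPCIDeviceCounts(free=num_pci - 2)
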
View File

@ -185,7 +185,7 @@ VIR_CONNECT_LIST_DOMAINS_INACTIVE = 2
# virConnectListAllNodeDevices flags
VIR_CONNECT_LIST_NODE_DEVICES_CAP_PCI_DEV = 2
VIR_CONNECT_LIST_NODE_DEVICES_CAP_NET = 16
VIR_CONNECT_LIST_NODE_DEVICES_CAP_NET = 1 << 4
VIR_CONNECT_LIST_NODE_DEVICES_CAP_VDPA = 1 << 17
# secret type
@ -241,6 +241,11 @@ os_uname = collections.namedtuple(
)
def _get_libvirt_nodedev_name(bus, slot, function):
"""Convert an address to a libvirt device name string."""
return f'pci_0000_{bus:02x}_{slot:02x}_{function:d}'
class FakePCIDevice(object):
"""Generate a fake PCI device.
@ -255,22 +260,22 @@ class FakePCIDevice(object):
pci_default_parent = "pci_0000_80_01_0"
pci_device_template = textwrap.dedent("""
<device>
<name>pci_0000_81_%(slot)02x_%(function)d</name>
<path>/sys/devices/pci0000:80/0000:80:01.0/0000:81:%(slot)02x.%(function)d</path>
<name>pci_0000_%(bus)02x_%(slot)02x_%(function)d</name>
<path>/sys/devices/pci0000:80/0000:80:01.0/0000:%(bus)02x:%(slot)02x.%(function)d</path>
<parent>%(parent)s</parent>
<driver>
<name>%(driver)s</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>129</bus>
<bus>%(bus)d</bus>
<slot>%(slot)d</slot>
<function>%(function)d</function>
<product id='0x%(prod_id)s'>%(prod_name)s</product>
<vendor id='0x%(vend_id)s'>%(vend_name)s</vendor>
%(capability)s
<iommuGroup number='%(iommu_group)d'>
<address domain='0x0000' bus='0x81' slot='%(slot)#02x' function='0x%(function)d'/>
<address domain='0x0000' bus='%(bus)#02x' slot='%(slot)#02x' function='0x%(function)d'/>
</iommuGroup>
<numa node='%(numa_node)s'/>
<pci-express>
@ -280,7 +285,7 @@ class FakePCIDevice(object):
</capability>
</device>""".strip()) # noqa
cap_templ = "<capability type='%(cap_type)s'>%(addresses)s</capability>"
addr_templ = "<address domain='0x0000' bus='0x81' slot='%(slot)#02x' function='%(function)#02x'/>" # noqa
addr_templ = "<address domain='0x0000' bus='%(bus)#02x' slot='%(slot)#02x' function='%(function)#02x'/>" # noqa
mdevtypes_templ = textwrap.dedent("""
<type id='%(type_id)s'>
<name>GRID M60-0B</name><deviceAPI>vfio-pci</deviceAPI>
@ -289,22 +294,35 @@ class FakePCIDevice(object):
is_capable_of_mdevs = False
def __init__(self, dev_type, slot, function, iommu_group, numa_node,
vf_ratio=None, multiple_gpu_types=False, parent=None):
def __init__(
self, dev_type, bus, slot, function, iommu_group, numa_node, *,
vf_ratio=None, multiple_gpu_types=False, parent=None,
vend_id=None, vend_name=None, prod_id=None, prod_name=None,
driver_name=None,
):
"""Populate pci devices
:param dev_type: (string) Indicates the type of the device (PCI, PF,
VF).
:param dev_type: (str) Indicates the type of the device (PCI, PF, VF,
MDEV_TYPES).
:param bus: (int) Bus number of the device.
:param slot: (int) Slot number of the device.
:param function: (int) Function number of the device.
:param iommu_group: (int) IOMMU group ID.
:param numa_node: (int) NUMA node of the device.
:param vf_ratio: (int) Ratio of Virtual Functions on Physical. Only
applicable if ``dev_type`` is one of: ``PF``, ``VF``.
:param multiple_gpu_types: (bool) Supports different vGPU types
:param multiple_gpu_types: (bool) Supports different vGPU types.
:param parent: (int, int, int) A tuple of bus, slot and function
corresponding to the parent.
:param vend_id: (str) The vendor ID.
:param vend_name: (str) The vendor name.
:param prod_id: (str) The product ID.
:param prod_name: (str) The product name.
:param driver_name: (str) The driver name.
"""
self.dev_type = dev_type
self.bus = bus
self.slot = slot
self.function = function
self.iommu_group = iommu_group
@ -312,28 +330,49 @@ class FakePCIDevice(object):
self.vf_ratio = vf_ratio
self.multiple_gpu_types = multiple_gpu_types
self.parent = parent
self.vend_id = vend_id
self.vend_name = vend_name
self.prod_id = prod_id
self.prod_name = prod_name
self.driver_name = driver_name
self.generate_xml()
def generate_xml(self, skip_capability=False):
vend_id = PCI_VEND_ID
vend_name = PCI_VEND_NAME
# initial validation
assert self.dev_type in ('PCI', 'VF', 'PF', 'MDEV_TYPES'), (
f'got invalid dev_type {self.dev_type}')
if self.dev_type == 'PCI':
assert not self.vf_ratio, 'vf_ratio does not apply for PCI devices'
if self.dev_type in ('PF', 'VF'):
assert self.vf_ratio, 'require vf_ratio for PFs and VFs'
if self.dev_type == 'VF':
assert self.parent, 'require parent for VFs'
assert isinstance(self.parent, tuple), 'parent must be an address'
assert len(self.parent) == 3, 'parent must be an address'
vend_id = self.vend_id or PCI_VEND_ID
vend_name = self.vend_name or PCI_VEND_NAME
capability = ''
if self.dev_type == 'PCI':
if self.vf_ratio:
raise ValueError('vf_ratio does not apply for PCI devices')
prod_id = PCI_PROD_ID
prod_name = PCI_PROD_NAME
driver = PCI_DRIVER_NAME
prod_id = self.prod_id or PCI_PROD_ID
prod_name = self.prod_name or PCI_PROD_NAME
driver = self.driver_name or PCI_DRIVER_NAME
elif self.dev_type == 'PF':
prod_id = PF_PROD_ID
prod_name = PF_PROD_NAME
driver = PF_DRIVER_NAME
prod_id = self.prod_id or PF_PROD_ID
prod_name = self.prod_name or PF_PROD_NAME
driver = self.driver_name or PF_DRIVER_NAME
if not skip_capability:
capability = self.cap_templ % {
'cap_type': PF_CAP_TYPE,
'addresses': '\n'.join([
self.addr_templ % {
'bus': self.bus,
# these are the slot, function values of the child
# VFs, we can only assign 8 functions to a slot
# (0-7) so bump the slot each time we exceed this
@ -344,13 +383,14 @@ class FakePCIDevice(object):
} for x in range(1, self.vf_ratio + 1)])
}
elif self.dev_type == 'VF':
prod_id = VF_PROD_ID
prod_name = VF_PROD_NAME
driver = VF_DRIVER_NAME
prod_id = self.prod_id or VF_PROD_ID
prod_name = self.prod_name or VF_PROD_NAME
driver = self.driver_name or VF_DRIVER_NAME
if not skip_capability:
capability = self.cap_templ % {
'cap_type': VF_CAP_TYPE,
'addresses': self.addr_templ % {
'bus': self.bus,
# this is the slot, function value of the parent PF
# if we're e.g. device 8, we'll have a different slot
# to our parent so reverse this
@ -360,11 +400,11 @@ class FakePCIDevice(object):
}
}
elif self.dev_type == 'MDEV_TYPES':
prod_id = MDEV_CAPABLE_PROD_ID
prod_name = MDEV_CAPABLE_PROD_NAME
driver = MDEV_CAPABLE_DRIVER_NAME
vend_id = MDEV_CAPABLE_VEND_ID
vend_name = MDEV_CAPABLE_VEND_NAME
prod_id = self.prod_id or MDEV_CAPABLE_PROD_ID
prod_name = self.prod_name or MDEV_CAPABLE_PROD_NAME
driver = self.driver_name or MDEV_CAPABLE_DRIVER_NAME
vend_id = self.vend_id or MDEV_CAPABLE_VEND_ID
vend_name = self.vend_name or MDEV_CAPABLE_VEND_NAME
types = [self.mdevtypes_templ % {
'type_id': NVIDIA_11_VGPU_TYPE,
'instances': 16,
@ -380,10 +420,13 @@ class FakePCIDevice(object):
'addresses': '\n'.join(types)
}
self.is_capable_of_mdevs = True
else:
raise ValueError('Expected one of: PCI, PF, VF, MDEV_TYPES')
parent = self.pci_default_parent
if self.parent:
parent = _get_libvirt_nodedev_name(*self.parent)
self.pci_device = self.pci_device_template % {
'bus': self.bus,
'slot': self.slot,
'function': self.function,
'vend_id': vend_id,
@ -394,7 +437,7 @@ class FakePCIDevice(object):
'capability': capability,
'iommu_group': self.iommu_group,
'numa_node': self.numa_node,
'parent': self.parent or self.pci_default_parent
'parent': parent,
}
# -1 is the sentinel set in /sys/bus/pci/devices/*/numa_node
# for no NUMA affinity. When the numa_node is set to -1 on a device
@ -406,11 +449,12 @@ class FakePCIDevice(object):
return self.pci_device
# TODO(stephenfin): Remove all of these HostFooDevicesInfo objects in favour of
# a unified devices object
class HostPCIDevicesInfo(object):
"""Represent a pool of host PCI devices."""
TOTAL_NUMA_NODES = 2
pci_devname_template = 'pci_0000_81_%(slot)02x_%(function)d'
def __init__(self, num_pci=0, num_pfs=2, num_vfs=8, num_mdevcap=0,
numa_node=None, multiple_gpu_types=False):
@ -422,7 +466,6 @@ class HostPCIDevicesInfo(object):
:param num_vfs: (int) The number of PCI SR-IOV Virtual Functions.
:param num_mdevcap: (int) The number of PCI devices capable of creating
mediated devices.
:param iommu_group: (int) Initial IOMMU group ID.
:param numa_node: (int) NUMA node of the device; if set all of the
devices will be assigned to the specified node else they will be
split between ``$TOTAL_NUMA_NODES`` nodes.
@ -439,19 +482,16 @@ class HostPCIDevicesInfo(object):
if num_pfs and num_vfs % num_pfs:
raise ValueError('num_vfs must be a factor of num_pfs')
slot = 0
bus = 0x81
slot = 0x0
function = 0
iommu_group = 40 # totally arbitrary number
# Generate PCI devs
for dev in range(num_pci):
pci_dev_name = self.pci_devname_template % {
'slot': slot, 'function': function}
LOG.info('Generating PCI device %r', pci_dev_name)
self.devices[pci_dev_name] = FakePCIDevice(
self.add_device(
dev_type='PCI',
bus=bus,
slot=slot,
function=function,
iommu_group=iommu_group,
@ -462,13 +502,9 @@ class HostPCIDevicesInfo(object):
# Generate MDEV capable devs
for dev in range(num_mdevcap):
pci_dev_name = self.pci_devname_template % {
'slot': slot, 'function': function}
LOG.info('Generating MDEV capable device %r', pci_dev_name)
self.devices[pci_dev_name] = FakePCIDevice(
self.add_device(
dev_type='MDEV_TYPES',
bus=bus,
slot=slot,
function=function,
iommu_group=iommu_group,
@ -485,19 +521,16 @@ class HostPCIDevicesInfo(object):
function = 0
numa_node_pf = self._calc_numa_node(dev, numa_node)
pci_dev_name = self.pci_devname_template % {
'slot': slot, 'function': function}
LOG.info('Generating PF device %r', pci_dev_name)
self.devices[pci_dev_name] = FakePCIDevice(
self.add_device(
dev_type='PF',
bus=bus,
slot=slot,
function=function,
iommu_group=iommu_group,
numa_node=numa_node_pf,
vf_ratio=vf_ratio)
pf_dev_name = pci_dev_name
parent = (bus, slot, function)
# Generate VFs
for _ in range(vf_ratio):
function += 1
@ -508,22 +541,46 @@ class HostPCIDevicesInfo(object):
slot += 1
function = 0
pci_dev_name = self.pci_devname_template % {
'slot': slot, 'function': function}
LOG.info('Generating VF device %r', pci_dev_name)
self.devices[pci_dev_name] = FakePCIDevice(
self.add_device(
dev_type='VF',
bus=bus,
slot=slot,
function=function,
iommu_group=iommu_group,
numa_node=numa_node_pf,
vf_ratio=vf_ratio,
parent=pf_dev_name)
parent=parent)
slot += 1
def add_device(
self, dev_type, bus, slot, function, iommu_group, numa_node,
vf_ratio=None, multiple_gpu_types=False, parent=None,
vend_id=None, vend_name=None, prod_id=None, prod_name=None,
driver_name=None,
):
pci_dev_name = _get_libvirt_nodedev_name(bus, slot, function)
LOG.info('Generating %s device %r', dev_type, pci_dev_name)
dev = FakePCIDevice(
dev_type=dev_type,
bus=bus,
slot=slot,
function=function,
iommu_group=iommu_group,
numa_node=numa_node,
vf_ratio=vf_ratio,
multiple_gpu_types=multiple_gpu_types,
parent=parent,
vend_id=vend_id,
vend_name=vend_name,
prod_id=prod_id,
prod_name=prod_name,
driver_name=driver_name)
self.devices[pci_dev_name] = dev
return dev
@classmethod
def _calc_numa_node(cls, dev, numa_node):
return dev % cls.TOTAL_NUMA_NODES if numa_node is None else numa_node
@ -581,6 +638,68 @@ class HostMdevDevicesInfo(object):
return dev
class FakeVDPADevice:
template = textwrap.dedent("""
<device>
<name>%(name)s</name>
<path>%(path)s</path>
<parent>%(parent)s</parent>
<driver>
<name>vhost_vdpa</name>
</driver>
<capability type='vdpa'>
<chardev>/dev/vhost-vdpa-%(idx)d</chardev>
</capability>
</device>""".strip())
def __init__(self, name, idx, parent):
assert isinstance(parent, FakePCIDevice)
assert parent.dev_type == 'VF'
self.name = name
self.idx = idx
self.parent = parent
self.generate_xml()
def generate_xml(self):
pf_pci = self.parent.parent
vf_pci = (self.parent.bus, self.parent.slot, self.parent.function)
pf_addr = '0000:%02x:%02x.%d' % pf_pci
vf_addr = '0000:%02x:%02x.%d' % vf_pci
parent = _get_libvirt_nodedev_name(*vf_pci)
path = f'/sys/devices/pci0000:00/{pf_addr}/{vf_addr}/vdpa{self.idx}'
self.xml = self.template % {
'name': self.name,
'idx': self.idx,
'path': path,
'parent': parent,
}
def XMLDesc(self, flags):
return self.xml
class HostVDPADevicesInfo:
def __init__(self):
self.devices = {}
def get_all_devices(self):
return self.devices.keys()
def get_device_by_name(self, device_name):
dev = self.devices[device_name]
return dev
def add_device(self, name, idx, parent):
LOG.info('Generating vDPA device %r', name)
dev = FakeVDPADevice(name=name, idx=idx, parent=parent)
self.devices[name] = dev
return dev
class HostInfo(object):
def __init__(self, cpu_nodes=1, cpu_sockets=1, cpu_cores=2, cpu_threads=1,
@ -994,6 +1113,8 @@ class Domain(object):
pci_bus, pci_slot,
pci_function)
nic_info['source'] = pci_device
elif nic_info['type'] == 'vdpa':
nic_info['source'] = source.get('dev')
nics_info += [nic_info]
@ -1184,24 +1305,31 @@ class Domain(object):
nics = ''
for nic in self._def['devices']['nics']:
if 'source' in nic and nic['type'] != 'hostdev':
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
<source %(type)s='%(source)s'/>
<target dev='tap274487d1-60'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x0'/>
</interface>''' % nic
# this covers for direct nic type
else:
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
<source>
<address type='pci' domain='0x0000' bus='0x81' slot='0x00'
function='0x01'/>
</source>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>''' % nic # noqa
if 'source' in nic:
if nic['type'] == 'hostdev':
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
<source>
<address type='pci' domain='0x0000' bus='0x81' slot='0x00' function='0x01'/>
</source>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>''' % nic # noqa: E501
elif nic['type'] == 'vdpa':
# TODO(stephenfin): In real life, this would actually have
# an '<address>' element, but that requires information
# about the host that we're not passing through yet
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
<source dev='%(source)s'/>
<model type='virtio'/>
</interface>''' % nic
else:
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
<source %(type)s='%(source)s'/>
<target dev='tap274487d1-60'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>''' % nic # noqa: E501
hostdevs = ''
for hostdev in self._def['devices']['hostdevs']:
@ -1458,9 +1586,11 @@ class Secret(object):
class Connection(object):
def __init__(self, uri=None, readonly=False, version=FAKE_LIBVIRT_VERSION,
hv_version=FAKE_QEMU_VERSION, host_info=None, pci_info=None,
mdev_info=None, hostname=None):
def __init__(
self, uri=None, readonly=False, version=FAKE_LIBVIRT_VERSION,
hv_version=FAKE_QEMU_VERSION, hostname=None,
host_info=None, pci_info=None, mdev_info=None, vdpa_info=None,
):
if not uri or uri == '':
if allow_default_uri_connection:
uri = 'qemu:///session'
@ -1498,6 +1628,7 @@ class Connection(object):
num_pfs=0,
num_vfs=0)
self.mdev_info = mdev_info or HostMdevDevicesInfo(devices={})
self.vdpa_info = vdpa_info or HostVDPADevicesInfo()
self.hostname = hostname or 'compute1'
def _add_nodedev(self, nodedev):
@ -1791,6 +1922,9 @@ class Connection(object):
if name.startswith('mdev'):
return self.mdev_info.get_device_by_name(name)
if name.startswith('vdpa'):
return self.vdpa_info.get_device_by_name(name)
pci_dev = self.pci_info.get_device_by_name(name)
if pci_dev:
return pci_dev
@ -1810,6 +1944,8 @@ class Connection(object):
return self.mdev_info.get_all_devices()
if cap == 'mdev_types':
return self.pci_info.get_all_mdev_capable_devices()
if cap == 'vdpa':
return self.vdpa_info.get_all_devices()
else:
raise ValueError('Capability "%s" is not supported' % cap)
@ -1843,11 +1979,22 @@ class Connection(object):
return secret
def listAllDevices(self, flags):
# Note this is incomplete as we do not filter
# based on the flags however it is enough for our
# current testing.
return [NodeDevice(self, xml=dev.XMLDesc(0))
for dev in self.pci_info.devices.values()]
devices = []
if flags & VIR_CONNECT_LIST_NODE_DEVICES_CAP_PCI_DEV:
devices.extend(
NodeDevice(self, xml=dev.XMLDesc(0))
for dev in self.pci_info.devices.values()
)
if flags & VIR_CONNECT_LIST_NODE_DEVICES_CAP_NET:
# TODO(stephenfin): Implement fake netdevs so we can test the
# capability reporting
pass
if flags & VIR_CONNECT_LIST_NODE_DEVICES_CAP_VDPA:
devices.extend(
NodeDevice(self, xml=dev.XMLDesc(0))
for dev in self.vdpa_info.devices.values()
)
return devices
def openAuth(uri, auth, flags=0):

View File

@ -1406,7 +1406,8 @@ class Host(object):
raise
def list_all_devices(
self, flags: int = 0) -> ty.List['libvirt.virNodeDevice']:
self, flags: int = 0,
) -> ty.List['libvirt.virNodeDevice']:
"""Lookup devices.
:param flags: a bitmask of flags to filter the returned devices.
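
For context, a rough sketch of how the wrapper above can be driven once the vDPA capability flag is available. This is illustrative only: in Nova the Host object is owned by the libvirt driver rather than constructed directly like this, and it assumes libvirt-python built against libvirt >= 6.9.0, which defines the vDPA constant.

# Rough usage sketch: list PCI and vDPA node devices through the wrapper.
import libvirt

from nova.virt.libvirt import host as libvirt_host

host = libvirt_host.Host('qemu:///system')
flags = (libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_PCI_DEV |
         libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_VDPA)
for dev in host.list_all_devices(flags):
    print(dev.name())  # e.g. pci_0000_06_00_1, vdpa_vdpa0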