pci: Add vDPA vnic to PCI request mapping and filtering
This change extends the vnic type to PCI request dev type mapping to support the vDPA vnic type. This change extends the PCI stats module to filter out VDPA 'dev_type' pools if it is not explicitly requested. This change explicitly filters out the vDPA dev_type from the pci alias schema since that is not supported. Blueprint: libvirt-vdpa-support Change-Id: I91dd7993395f693c7d26c1caa44fa365f5cbec12
This commit is contained in:
parent
f55f5daed8
commit
ab04eb2196
|
@ -354,7 +354,9 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
|
|||
self._bulk_update_status(vfs_list,
|
||||
fields.PciDeviceStatus.UNCLAIMABLE)
|
||||
|
||||
elif self.dev_type == fields.PciDeviceType.SRIOV_VF:
|
||||
elif self.dev_type in (
|
||||
fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.VDPA
|
||||
):
|
||||
# Update VF status to CLAIMED if it's parent has not been
|
||||
# previously allocated or claimed
|
||||
# When claiming/allocating a VF, it's parent PF becomes
|
||||
|
@ -414,7 +416,9 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
|
|||
self._bulk_update_status(vfs_list,
|
||||
fields.PciDeviceStatus.UNAVAILABLE)
|
||||
|
||||
elif (self.dev_type == fields.PciDeviceType.SRIOV_VF):
|
||||
elif self.dev_type in (
|
||||
fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.VDPA
|
||||
):
|
||||
parent = self.parent_device
|
||||
if parent:
|
||||
if parent.status not in parent_ok_statuses:
|
||||
|
@ -473,7 +477,9 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
|
|||
self._bulk_update_status(vfs_list,
|
||||
fields.PciDeviceStatus.AVAILABLE)
|
||||
free_devs.extend(vfs_list)
|
||||
if self.dev_type == fields.PciDeviceType.SRIOV_VF:
|
||||
if self.dev_type in (
|
||||
fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.VDPA
|
||||
):
|
||||
# Set PF status to AVAILABLE if all of it's VFs are free
|
||||
parent = self.parent_device
|
||||
if not parent:
|
||||
|
|
|
@ -189,7 +189,9 @@ class PciDevTracker(object):
|
|||
if dev.dev_type == fields.PciDeviceType.SRIOV_PF:
|
||||
dev.child_devices = []
|
||||
parents[dev.address] = dev
|
||||
elif dev.dev_type == fields.PciDeviceType.SRIOV_VF:
|
||||
elif dev.dev_type in (
|
||||
fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.VDPA
|
||||
):
|
||||
dev.parent_device = parents.get(dev.parent_addr)
|
||||
if dev.parent_device:
|
||||
parents[dev.parent_addr].child_devices.append(dev)
|
||||
|
|
|
@ -56,7 +56,8 @@ PCI_TRUSTED_TAG = 'trusted'
|
|||
PCI_DEVICE_TYPE_TAG = 'dev_type'
|
||||
|
||||
DEVICE_TYPE_FOR_VNIC_TYPE = {
|
||||
network_model.VNIC_TYPE_DIRECT_PHYSICAL: obj_fields.PciDeviceType.SRIOV_PF
|
||||
network_model.VNIC_TYPE_DIRECT_PHYSICAL: obj_fields.PciDeviceType.SRIOV_PF,
|
||||
network_model.VNIC_TYPE_VDPA: obj_fields.PciDeviceType.VDPA,
|
||||
}
|
||||
|
||||
CONF = nova.conf.CONF
|
||||
|
|
|
@ -217,15 +217,18 @@ class PciDeviceStats(object):
|
|||
|
||||
In case the device is a PF, all of it's dependent VFs should
|
||||
be removed from pools count, if these are present.
|
||||
When the device is a VF, it's parent PF pool count should be
|
||||
decreased, unless it is no longer in a pool.
|
||||
When the device is a VF, or a VDPA device, it's parent PF
|
||||
pool count should be decreased, unless it is no longer in a pool.
|
||||
"""
|
||||
if pci_dev.dev_type == fields.PciDeviceType.SRIOV_PF:
|
||||
vfs_list = pci_dev.child_devices
|
||||
if vfs_list:
|
||||
for vf in vfs_list:
|
||||
self.remove_device(vf)
|
||||
elif pci_dev.dev_type == fields.PciDeviceType.SRIOV_VF:
|
||||
elif pci_dev.dev_type in (
|
||||
fields.PciDeviceType.SRIOV_VF,
|
||||
fields.PciDeviceType.VDPA,
|
||||
):
|
||||
try:
|
||||
parent = pci_dev.parent_device
|
||||
# Make sure not to decrease PF pool count if this parent has
|
||||
|
@ -387,6 +390,28 @@ class PciDeviceStats(object):
|
|||
]
|
||||
return pools
|
||||
|
||||
def _filter_pools_for_unrequested_vdpa_devices(self, pools, request):
|
||||
"""Filter out pools with VDPA devices, unless these are required.
|
||||
|
||||
This is necessary as vdpa devices require special handling and
|
||||
should not be allocated to generic pci device requests.
|
||||
|
||||
:param pools: A list of PCI device pool dicts
|
||||
:param request: An InstancePCIRequest object describing the type,
|
||||
quantity and required NUMA affinity of device(s) we want.
|
||||
:returns: A list of pools that can be used to support the request if
|
||||
this is possible.
|
||||
"""
|
||||
if all(
|
||||
spec.get('dev_type') != fields.PciDeviceType.VDPA
|
||||
for spec in request.spec
|
||||
):
|
||||
pools = [
|
||||
pool for pool in pools
|
||||
if not pool.get('dev_type') == fields.PciDeviceType.VDPA
|
||||
]
|
||||
return pools
|
||||
|
||||
def _filter_pools(self, pools, request, numa_cells):
|
||||
"""Determine if an individual PCI request can be met.
|
||||
|
||||
|
@ -421,7 +446,7 @@ class PciDeviceStats(object):
|
|||
)
|
||||
|
||||
if after_count < request.count:
|
||||
LOG.debug('Not enough PCI devices left to satify request')
|
||||
LOG.debug('Not enough PCI devices left to satisfy request')
|
||||
return None
|
||||
|
||||
# Next, let's exclude all devices that aren't on the correct NUMA node
|
||||
|
@ -438,10 +463,10 @@ class PciDeviceStats(object):
|
|||
)
|
||||
|
||||
if after_count < request.count:
|
||||
LOG.debug('Not enough PCI devices left to satify request')
|
||||
LOG.debug('Not enough PCI devices left to satisfy request')
|
||||
return None
|
||||
|
||||
# Finally, if we're not requesting PFs then we should not use these.
|
||||
# If we're not requesting PFs then we should not use these.
|
||||
# Exclude them.
|
||||
before_count = after_count
|
||||
pools = self._filter_pools_for_unrequested_pfs(pools, request)
|
||||
|
@ -455,7 +480,24 @@ class PciDeviceStats(object):
|
|||
)
|
||||
|
||||
if after_count < request.count:
|
||||
LOG.debug('Not enough PCI devices left to satify request')
|
||||
LOG.debug('Not enough PCI devices left to satisfy request')
|
||||
return None
|
||||
|
||||
# If we're not requesting VDPA devices then we should not use these
|
||||
# either. Exclude them.
|
||||
before_count = after_count
|
||||
pools = self._filter_pools_for_unrequested_vdpa_devices(pools, request)
|
||||
after_count = sum([pool['count'] for pool in pools])
|
||||
|
||||
if after_count < before_count:
|
||||
LOG.debug(
|
||||
'Dropped %d devices as they are VDPA devices which we have '
|
||||
'not requested',
|
||||
before_count - after_count
|
||||
)
|
||||
|
||||
if after_count < request.count:
|
||||
LOG.debug('Not enough PCI devices left to satisfy request')
|
||||
return None
|
||||
|
||||
return pools
|
||||
|
|
|
@ -5772,7 +5772,8 @@ class TestAPI(TestAPIBase):
|
|||
objects.NetworkRequest(port_id=uuids.portid_3),
|
||||
objects.NetworkRequest(port_id=uuids.portid_4),
|
||||
objects.NetworkRequest(port_id=uuids.portid_5),
|
||||
objects.NetworkRequest(port_id=uuids.trusted_port)])
|
||||
objects.NetworkRequest(port_id=uuids.trusted_port),
|
||||
objects.NetworkRequest(port_id=uuids.portid_vdpa)])
|
||||
pci_requests = objects.InstancePCIRequests(requests=[])
|
||||
# _get_port_vnic_info should be called for every NetworkRequest with a
|
||||
# port_id attribute (so six times)
|
||||
|
@ -5785,13 +5786,14 @@ class TestAPI(TestAPIBase):
|
|||
(model.VNIC_TYPE_DIRECT_PHYSICAL, None, 'netN', None, None),
|
||||
(model.VNIC_TYPE_DIRECT, True, 'netN',
|
||||
mock.sentinel.resource_request2, None),
|
||||
(model.VNIC_TYPE_VDPA, None, 'netN', None, None),
|
||||
]
|
||||
# _get_physnet_tunneled_info should be called for every NetworkRequest
|
||||
# (so seven times)
|
||||
mock_get_physnet_tunneled_info.side_effect = [
|
||||
('physnet1', False), ('physnet1', False), ('', True),
|
||||
('physnet1', False), ('physnet2', False), ('physnet3', False),
|
||||
('physnet4', False),
|
||||
('physnet4', False), ('physnet1', False)
|
||||
]
|
||||
api = neutronapi.API()
|
||||
|
||||
|
@ -5808,12 +5810,13 @@ class TestAPI(TestAPIBase):
|
|||
mock.sentinel.request_group1,
|
||||
mock.sentinel.request_group2],
|
||||
port_resource_requests)
|
||||
self.assertEqual(5, len(pci_requests.requests))
|
||||
self.assertEqual(6, len(pci_requests.requests))
|
||||
has_pci_request_id = [net.pci_request_id is not None for net in
|
||||
requested_networks.objects]
|
||||
self.assertEqual(pci_requests.requests[3].spec[0]["dev_type"],
|
||||
"type-PF")
|
||||
expected_results = [True, False, False, True, True, True, True]
|
||||
self.assertEqual(pci_requests.requests[5].spec[0]["dev_type"], "vdpa")
|
||||
expected_results = [True, False, False, True, True, True, True, True]
|
||||
self.assertEqual(expected_results, has_pci_request_id)
|
||||
# Make sure only the trusted VF has the 'trusted' tag set in the spec.
|
||||
for pci_req in pci_requests.requests:
|
||||
|
@ -5827,7 +5830,7 @@ class TestAPI(TestAPIBase):
|
|||
|
||||
# Only the port with a resource_request will have pci_req.requester_id.
|
||||
self.assertEqual(
|
||||
[None, None, None, None, uuids.trusted_port],
|
||||
[None, None, None, None, uuids.trusted_port, None],
|
||||
[pci_req.requester_id for pci_req in pci_requests.requests])
|
||||
|
||||
self.assertCountEqual(
|
||||
|
|
|
@ -523,18 +523,20 @@ class TestPciDeviceListObjectRemote(test_objects._RemoteTest,
|
|||
|
||||
class _TestSRIOVPciDeviceObject(object):
|
||||
def _create_pci_devices(self, vf_product_id=1515, pf_product_id=1528,
|
||||
num_pfs=2, num_vfs=8):
|
||||
num_pfs=2, num_vfs=8, num_vdpa=0):
|
||||
self.sriov_pf_devices = []
|
||||
for dev in range(num_pfs):
|
||||
pci_dev = {'compute_node_id': 1,
|
||||
'address': '0000:81:00.%d' % dev,
|
||||
'vendor_id': '8086',
|
||||
'product_id': '%d' % pf_product_id,
|
||||
'status': 'available',
|
||||
'request_id': None,
|
||||
'dev_type': fields.PciDeviceType.SRIOV_PF,
|
||||
'parent_addr': None,
|
||||
'numa_node': 0}
|
||||
pci_dev = {
|
||||
'compute_node_id': 1,
|
||||
'address': '0000:81:00.%d' % dev,
|
||||
'vendor_id': '8086',
|
||||
'product_id': '%d' % pf_product_id,
|
||||
'status': 'available',
|
||||
'request_id': None,
|
||||
'dev_type': fields.PciDeviceType.SRIOV_PF,
|
||||
'parent_addr': None,
|
||||
'numa_node': 0
|
||||
}
|
||||
pci_dev_obj = objects.PciDevice.create(None, pci_dev)
|
||||
pci_dev_obj.id = dev + 81
|
||||
pci_dev_obj.child_devices = []
|
||||
|
@ -542,21 +544,42 @@ class _TestSRIOVPciDeviceObject(object):
|
|||
|
||||
self.sriov_vf_devices = []
|
||||
for dev in range(num_vfs):
|
||||
pci_dev = {'compute_node_id': 1,
|
||||
'address': '0000:81:10.%d' % dev,
|
||||
'vendor_id': '8086',
|
||||
'product_id': '%d' % vf_product_id,
|
||||
'status': 'available',
|
||||
'request_id': None,
|
||||
'dev_type': fields.PciDeviceType.SRIOV_VF,
|
||||
'parent_addr': '0000:81:00.%d' % int(dev / 4),
|
||||
'numa_node': 0}
|
||||
pci_dev = {
|
||||
'compute_node_id': 1,
|
||||
'address': '0000:81:10.%d' % dev,
|
||||
'vendor_id': '8086',
|
||||
'product_id': '%d' % vf_product_id,
|
||||
'status': 'available',
|
||||
'request_id': None,
|
||||
'dev_type': fields.PciDeviceType.SRIOV_VF,
|
||||
'parent_addr': '0000:81:00.%d' % int(dev / 4),
|
||||
'numa_node': 0
|
||||
}
|
||||
pci_dev_obj = objects.PciDevice.create(None, pci_dev)
|
||||
pci_dev_obj.id = dev + 1
|
||||
pci_dev_obj.parent_device = self.sriov_pf_devices[int(dev / 4)]
|
||||
pci_dev_obj.parent_device.child_devices.append(pci_dev_obj)
|
||||
self.sriov_vf_devices.append(pci_dev_obj)
|
||||
|
||||
self.sriov_vdpa_devices = []
|
||||
for dev in range(num_vdpa):
|
||||
pci_dev = {
|
||||
'compute_node_id': 1,
|
||||
'address': '0000:81:11.%d' % dev,
|
||||
'vendor_id': '8086',
|
||||
'product_id': '%d' % vf_product_id,
|
||||
'status': 'available',
|
||||
'request_id': None,
|
||||
'dev_type': fields.PciDeviceType.VDPA,
|
||||
'parent_addr': '0000:81:00.%d' % (dev % num_pfs),
|
||||
'numa_node': 0
|
||||
}
|
||||
pci_dev_obj = objects.PciDevice.create(None, pci_dev)
|
||||
pci_dev_obj.id = dev + 1
|
||||
pci_dev_obj.parent_device = self.sriov_pf_devices[dev % num_pfs]
|
||||
pci_dev_obj.parent_device.child_devices.append(pci_dev_obj)
|
||||
self.sriov_vdpa_devices.append(pci_dev_obj)
|
||||
|
||||
def _create_fake_instance(self):
|
||||
self.inst = instance.Instance()
|
||||
self.inst.uuid = uuids.instance
|
||||
|
@ -587,26 +610,35 @@ class _TestSRIOVPciDeviceObject(object):
|
|||
self._create_pci_devices()
|
||||
devobj = self.sriov_pf_devices[0]
|
||||
devobj.claim(self.inst.uuid)
|
||||
self.assertEqual(devobj.status,
|
||||
fields.PciDeviceStatus.CLAIMED)
|
||||
self.assertEqual(devobj.instance_uuid,
|
||||
self.inst.uuid)
|
||||
self.assertEqual(devobj.status, fields.PciDeviceStatus.CLAIMED)
|
||||
self.assertEqual(devobj.instance_uuid, self.inst.uuid)
|
||||
self.assertEqual(len(self.inst.pci_devices), 0)
|
||||
# check if the all the dependants are UNCLAIMABLE
|
||||
self.assertTrue(all(
|
||||
[dev.status == fields.PciDeviceStatus.UNCLAIMABLE for
|
||||
dev in self._get_children_by_parent_address(
|
||||
self.sriov_pf_devices[0].address)]))
|
||||
[dev.status == fields.PciDeviceStatus.UNCLAIMABLE for
|
||||
dev in self._get_children_by_parent_address(
|
||||
self.sriov_pf_devices[0].address)]))
|
||||
|
||||
def test_claim_VF(self):
|
||||
self._create_fake_instance()
|
||||
self._create_pci_devices()
|
||||
devobj = self.sriov_vf_devices[0]
|
||||
devobj.claim(self.inst.uuid)
|
||||
self.assertEqual(devobj.status,
|
||||
fields.PciDeviceStatus.CLAIMED)
|
||||
self.assertEqual(devobj.instance_uuid,
|
||||
self.inst.uuid)
|
||||
self.assertEqual(devobj.status, fields.PciDeviceStatus.CLAIMED)
|
||||
self.assertEqual(devobj.instance_uuid, self.inst.uuid)
|
||||
self.assertEqual(len(self.inst.pci_devices), 0)
|
||||
|
||||
# check if parent device status has been changed to UNCLAIMABLE
|
||||
parent = self._get_parent_by_address(devobj.parent_addr)
|
||||
self.assertEqual(fields.PciDeviceStatus.UNCLAIMABLE, parent.status)
|
||||
|
||||
def test_claim_VDPA(self):
|
||||
self._create_fake_instance()
|
||||
self._create_pci_devices(num_pfs=1, num_vfs=0, num_vdpa=2)
|
||||
devobj = self.sriov_vdpa_devices[0]
|
||||
devobj.claim(self.inst.uuid)
|
||||
self.assertEqual(devobj.status, fields.PciDeviceStatus.CLAIMED)
|
||||
self.assertEqual(devobj.instance_uuid, self.inst.uuid)
|
||||
self.assertEqual(len(self.inst.pci_devices), 0)
|
||||
|
||||
# check if parent device status has been changed to UNCLAIMABLE
|
||||
|
@ -619,10 +651,8 @@ class _TestSRIOVPciDeviceObject(object):
|
|||
devobj = self.sriov_pf_devices[0]
|
||||
devobj.claim(self.inst.uuid)
|
||||
devobj.allocate(self.inst)
|
||||
self.assertEqual(devobj.status,
|
||||
fields.PciDeviceStatus.ALLOCATED)
|
||||
self.assertEqual(devobj.instance_uuid,
|
||||
self.inst.uuid)
|
||||
self.assertEqual(devobj.status, fields.PciDeviceStatus.ALLOCATED)
|
||||
self.assertEqual(devobj.instance_uuid, self.inst.uuid)
|
||||
self.assertEqual(len(self.inst.pci_devices), 1)
|
||||
# check if the all the dependants are UNAVAILABLE
|
||||
self.assertTrue(all(
|
||||
|
@ -636,10 +666,22 @@ class _TestSRIOVPciDeviceObject(object):
|
|||
devobj = self.sriov_vf_devices[0]
|
||||
devobj.claim(self.inst.uuid)
|
||||
devobj.allocate(self.inst)
|
||||
self.assertEqual(devobj.status,
|
||||
fields.PciDeviceStatus.ALLOCATED)
|
||||
self.assertEqual(devobj.instance_uuid,
|
||||
self.inst.uuid)
|
||||
self.assertEqual(devobj.status, fields.PciDeviceStatus.ALLOCATED)
|
||||
self.assertEqual(devobj.instance_uuid, self.inst.uuid)
|
||||
self.assertEqual(len(self.inst.pci_devices), 1)
|
||||
|
||||
# check if parent device status has been changed to UNAVAILABLE
|
||||
parent = self._get_parent_by_address(devobj.parent_addr)
|
||||
self.assertEqual(fields.PciDeviceStatus.UNAVAILABLE, parent.status)
|
||||
|
||||
def test_allocate_VDPA(self):
|
||||
self._create_fake_instance()
|
||||
self._create_pci_devices(num_pfs=1, num_vfs=0, num_vdpa=2)
|
||||
devobj = self.sriov_vdpa_devices[0]
|
||||
devobj.claim(self.inst.uuid)
|
||||
devobj.allocate(self.inst)
|
||||
self.assertEqual(devobj.status, fields.PciDeviceStatus.ALLOCATED)
|
||||
self.assertEqual(devobj.instance_uuid, self.inst.uuid)
|
||||
self.assertEqual(len(self.inst.pci_devices), 1)
|
||||
|
||||
# check if parent device status has been changed to UNAVAILABLE
|
||||
|
@ -652,8 +694,17 @@ class _TestSRIOVPciDeviceObject(object):
|
|||
devobj = self.sriov_pf_devices[0]
|
||||
self.sriov_vf_devices[0].status = fields.PciDeviceStatus.CLAIMED
|
||||
|
||||
self.assertRaises(exception.PciDeviceVFInvalidStatus,
|
||||
devobj.claim, self.inst)
|
||||
self.assertRaises(
|
||||
exception.PciDeviceVFInvalidStatus, devobj.claim, self.inst)
|
||||
|
||||
def test_claim_PF_fail_VDPA(self):
|
||||
self._create_fake_instance()
|
||||
self._create_pci_devices(num_pfs=1, num_vfs=0, num_vdpa=2)
|
||||
devobj = self.sriov_pf_devices[0]
|
||||
self.sriov_vdpa_devices[0].status = fields.PciDeviceStatus.CLAIMED
|
||||
|
||||
self.assertRaises(
|
||||
exception.PciDeviceVFInvalidStatus, devobj.claim, self.inst)
|
||||
|
||||
def test_claim_VF_fail(self):
|
||||
self._create_fake_instance()
|
||||
|
@ -662,8 +713,17 @@ class _TestSRIOVPciDeviceObject(object):
|
|||
parent = self._get_parent_by_address(devobj.parent_addr)
|
||||
parent.status = fields.PciDeviceStatus.CLAIMED
|
||||
|
||||
self.assertRaises(exception.PciDevicePFInvalidStatus,
|
||||
devobj.claim, self.inst)
|
||||
self.assertRaises(
|
||||
exception.PciDevicePFInvalidStatus, devobj.claim, self.inst)
|
||||
|
||||
def test_claim_VDPA_fail(self):
|
||||
self._create_fake_instance()
|
||||
self._create_pci_devices(num_pfs=1, num_vfs=0, num_vdpa=2)
|
||||
devobj = self.sriov_vdpa_devices[0]
|
||||
parent = self._get_parent_by_address(devobj.parent_addr)
|
||||
parent.status = fields.PciDeviceStatus.CLAIMED
|
||||
self.assertRaises(
|
||||
exception.PciDevicePFInvalidStatus, devobj.claim, self.inst)
|
||||
|
||||
def test_allocate_PF_fail(self):
|
||||
self._create_fake_instance()
|
||||
|
@ -671,8 +731,8 @@ class _TestSRIOVPciDeviceObject(object):
|
|||
devobj = self.sriov_pf_devices[0]
|
||||
self.sriov_vf_devices[0].status = fields.PciDeviceStatus.CLAIMED
|
||||
|
||||
self.assertRaises(exception.PciDeviceVFInvalidStatus,
|
||||
devobj.allocate, self.inst)
|
||||
self.assertRaises(
|
||||
exception.PciDeviceVFInvalidStatus, devobj.allocate, self.inst)
|
||||
|
||||
def test_allocate_VF_fail(self):
|
||||
self._create_fake_instance()
|
||||
|
@ -681,8 +741,27 @@ class _TestSRIOVPciDeviceObject(object):
|
|||
parent = self._get_parent_by_address(devobj.parent_addr)
|
||||
parent.status = fields.PciDeviceStatus.CLAIMED
|
||||
|
||||
self.assertRaises(exception.PciDevicePFInvalidStatus,
|
||||
devobj.allocate, self.inst)
|
||||
self.assertRaises(
|
||||
exception.PciDevicePFInvalidStatus, devobj.allocate, self.inst)
|
||||
|
||||
def test_allocate_PF_fail_VDPA(self):
|
||||
self._create_fake_instance()
|
||||
self._create_pci_devices(num_pfs=1, num_vfs=0, num_vdpa=2)
|
||||
devobj = self.sriov_pf_devices[0]
|
||||
self.sriov_vdpa_devices[0].status = fields.PciDeviceStatus.CLAIMED
|
||||
|
||||
self.assertRaises(
|
||||
exception.PciDeviceVFInvalidStatus, devobj.allocate, self.inst)
|
||||
|
||||
def test_allocate_VDPA_fail(self):
|
||||
self._create_fake_instance()
|
||||
self._create_pci_devices(num_pfs=1, num_vfs=0, num_vdpa=2)
|
||||
devobj = self.sriov_vdpa_devices[0]
|
||||
parent = self._get_parent_by_address(devobj.parent_addr)
|
||||
parent.status = fields.PciDeviceStatus.CLAIMED
|
||||
|
||||
self.assertRaises(
|
||||
exception.PciDevicePFInvalidStatus, devobj.allocate, self.inst)
|
||||
|
||||
def test_free_allocated_PF(self):
|
||||
self._create_fake_instance()
|
||||
|
@ -691,14 +770,13 @@ class _TestSRIOVPciDeviceObject(object):
|
|||
devobj.claim(self.inst.uuid)
|
||||
devobj.allocate(self.inst)
|
||||
devobj.free(self.inst)
|
||||
self.assertEqual(devobj.status,
|
||||
fields.PciDeviceStatus.AVAILABLE)
|
||||
self.assertEqual(devobj.status, fields.PciDeviceStatus.AVAILABLE)
|
||||
self.assertIsNone(devobj.instance_uuid)
|
||||
# check if the all the dependants are AVAILABLE
|
||||
self.assertTrue(all(
|
||||
[dev.status == fields.PciDeviceStatus.AVAILABLE for
|
||||
dev in self._get_children_by_parent_address(
|
||||
self.sriov_pf_devices[0].address)]))
|
||||
[dev.status == fields.PciDeviceStatus.AVAILABLE for
|
||||
dev in self._get_children_by_parent_address(
|
||||
self.sriov_pf_devices[0].address)]))
|
||||
|
||||
def test_free_allocated_VF(self):
|
||||
self._create_fake_instance()
|
||||
|
@ -708,20 +786,41 @@ class _TestSRIOVPciDeviceObject(object):
|
|||
for devobj in dependents:
|
||||
devobj.claim(self.inst.uuid)
|
||||
devobj.allocate(self.inst)
|
||||
self.assertEqual(devobj.status,
|
||||
fields.PciDeviceStatus.ALLOCATED)
|
||||
self.assertEqual(devobj.status, fields.PciDeviceStatus.ALLOCATED)
|
||||
for devobj in dependents[:-1]:
|
||||
devobj.free(self.inst)
|
||||
# check if parent device status is still UNAVAILABLE
|
||||
parent = self._get_parent_by_address(devobj.parent_addr)
|
||||
self.assertEqual(fields.PciDeviceStatus.UNAVAILABLE,
|
||||
parent.status)
|
||||
self.assertEqual(
|
||||
fields.PciDeviceStatus.UNAVAILABLE, parent.status)
|
||||
devobj = dependents[-1]
|
||||
devobj.free(self.inst)
|
||||
# check if parent device status is now AVAILABLE
|
||||
parent = self._get_parent_by_address(devobj.parent_addr)
|
||||
self.assertEqual(fields.PciDeviceStatus.AVAILABLE,
|
||||
parent.status)
|
||||
self.assertEqual(
|
||||
fields.PciDeviceStatus.AVAILABLE, parent.status)
|
||||
|
||||
def test_free_allocated_VDPA(self):
|
||||
self._create_fake_instance()
|
||||
self._create_pci_devices(num_pfs=2, num_vfs=0, num_vdpa=8)
|
||||
vdpa = self.sriov_vdpa_devices[0]
|
||||
dependents = self._get_children_by_parent_address(vdpa.parent_addr)
|
||||
for devobj in dependents:
|
||||
devobj.claim(self.inst.uuid)
|
||||
devobj.allocate(self.inst)
|
||||
self.assertEqual(devobj.status, fields.PciDeviceStatus.ALLOCATED)
|
||||
for devobj in dependents[:-1]:
|
||||
devobj.free(self.inst)
|
||||
# check if parent device status is still UNAVAILABLE
|
||||
parent = self._get_parent_by_address(devobj.parent_addr)
|
||||
self.assertEqual(
|
||||
fields.PciDeviceStatus.UNAVAILABLE, parent.status)
|
||||
for devobj in dependents[-1:]:
|
||||
devobj.free(self.inst)
|
||||
# check if parent device status is now AVAILABLE
|
||||
parent = self._get_parent_by_address(devobj.parent_addr)
|
||||
self.assertEqual(
|
||||
fields.PciDeviceStatus.AVAILABLE, parent.status)
|
||||
|
||||
|
||||
class TestSRIOVPciDeviceListObject(test_objects._LocalTest,
|
||||
|
|
|
@ -629,37 +629,75 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
|
|||
def _create_pci_devices(self, vf_product_id=1515, pf_product_id=1528):
|
||||
self.sriov_pf_devices = []
|
||||
for dev in range(2):
|
||||
pci_dev = {'compute_node_id': 1,
|
||||
'address': '0000:81:00.%d' % dev,
|
||||
'vendor_id': '8086',
|
||||
'product_id': '%d' % pf_product_id,
|
||||
'status': 'available',
|
||||
'request_id': None,
|
||||
'dev_type': fields.PciDeviceType.SRIOV_PF,
|
||||
'parent_addr': None,
|
||||
'numa_node': 0}
|
||||
pci_dev = {
|
||||
'compute_node_id': 1,
|
||||
'address': '0000:81:00.%d' % dev,
|
||||
'vendor_id': '8086',
|
||||
'product_id': '%d' % pf_product_id,
|
||||
'status': 'available',
|
||||
'request_id': None,
|
||||
'dev_type': fields.PciDeviceType.SRIOV_PF,
|
||||
'parent_addr': None,
|
||||
'numa_node': 0
|
||||
}
|
||||
dev_obj = objects.PciDevice.create(None, pci_dev)
|
||||
dev_obj.child_devices = []
|
||||
self.sriov_pf_devices.append(dev_obj)
|
||||
|
||||
self.sriov_vf_devices = []
|
||||
for dev in range(8):
|
||||
pci_dev = {'compute_node_id': 1,
|
||||
'address': '0000:81:10.%d' % dev,
|
||||
'vendor_id': '8086',
|
||||
'product_id': '%d' % vf_product_id,
|
||||
'status': 'available',
|
||||
'request_id': None,
|
||||
'dev_type': fields.PciDeviceType.SRIOV_VF,
|
||||
'parent_addr': '0000:81:00.%d' % int(dev / 4),
|
||||
'numa_node': 0}
|
||||
pci_dev = {
|
||||
'compute_node_id': 1,
|
||||
'address': '0000:81:10.%d' % dev,
|
||||
'vendor_id': '8086',
|
||||
'product_id': '%d' % vf_product_id,
|
||||
'status': 'available',
|
||||
'request_id': None,
|
||||
'dev_type': fields.PciDeviceType.SRIOV_VF,
|
||||
'parent_addr': '0000:81:00.%d' % int(dev / 4),
|
||||
'numa_node': 0
|
||||
}
|
||||
dev_obj = objects.PciDevice.create(None, pci_dev)
|
||||
dev_obj.parent_device = self.sriov_pf_devices[int(dev / 4)]
|
||||
dev_obj.parent_device.child_devices.append(dev_obj)
|
||||
self.sriov_vf_devices.append(dev_obj)
|
||||
|
||||
self.vdpa_devices = []
|
||||
for dev in range(8):
|
||||
pci_dev = {
|
||||
'compute_node_id': 1,
|
||||
'address': '0000:82:10.%d' % dev,
|
||||
'vendor_id': '8086',
|
||||
'product_id': '%d' % vf_product_id,
|
||||
'status': 'available',
|
||||
'request_id': None,
|
||||
'dev_type': fields.PciDeviceType.VDPA,
|
||||
'parent_addr': '0000:81:00.%d' % int(dev / 4),
|
||||
'numa_node': 0
|
||||
}
|
||||
dev_obj = objects.PciDevice.create(None, pci_dev)
|
||||
dev_obj.parent_device = self.sriov_pf_devices[int(dev / 4)]
|
||||
dev_obj.parent_device.child_devices.append(dev_obj)
|
||||
self.vdpa_devices.append(dev_obj)
|
||||
|
||||
list(map(self.pci_stats.add_device, self.sriov_pf_devices))
|
||||
list(map(self.pci_stats.add_device, self.sriov_vf_devices))
|
||||
list(map(self.pci_stats.add_device, self.vdpa_devices))
|
||||
|
||||
def test_consume_VDPA_requests(self):
|
||||
self._create_pci_devices()
|
||||
pci_requests = [
|
||||
objects.InstancePCIRequest(
|
||||
count=8, spec=[{'dev_type': 'vdpa'}])]
|
||||
devs = self.pci_stats.consume_requests(pci_requests)
|
||||
self.assertEqual(8, len(devs))
|
||||
self.assertEqual('vdpa', devs[0].dev_type)
|
||||
free_devs = self.pci_stats.get_free_devs()
|
||||
# Validate that the parents of these devs has been removed
|
||||
# from pools.
|
||||
for dev in devs:
|
||||
self.assertNotIn(dev.parent_addr,
|
||||
[free_dev.address for free_dev in free_devs])
|
||||
|
||||
def test_consume_VF_requests(self):
|
||||
self._create_pci_devices()
|
||||
|
|
Loading…
Reference in New Issue