Support SRIOV interface attach and detach

For attach:
* Generates an InstancePCIRequest for SR-IOV interface attach requests
* Claims and allocates a PciDevice for such a request

For detach:
* Frees the PciDevice and deletes the corresponding InstancePCIRequest

On the libvirt driver side the following small fixes were necessary:
* Fixes PCI address generation to avoid double 0x prefixes in
  LibvirtConfigGuestHostdevPCI (see the sketch after this list)
* Adds support for comparing LibvirtConfigGuestHostdevPCI objects
* Extends the comparison of LibvirtConfigGuestInterface to support
  macvtap interfaces where target_dev is only known by libvirt but not
  by nova
* Generalizes guest.get_interface_by_cfg() to work with both
  LibvirtConfigGuest[Interface|HostdevPCI] objects
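
A minimal sketch (illustrative only, not part of the change) of the
address normalization behind the two LibvirtConfigGuestHostdevPCI fixes
above: nova builds the domain/bus/slot/function fields without the 0x
prefix while libvirt reports them with it, so both the equality check
and the XML generation have to treat '0x81' and '81' as the same value.
The helper names below are made up for the example.

    from types import SimpleNamespace

    def _normalize(value):
        # int() with base 16 accepts both '0x81' (libvirt) and '81' (nova)
        return int(value, 16)

    def _with_prefix(value):
        # emit exactly one '0x' prefix into the guest XML <address> element
        return value if value.startswith('0x') else '0x' + value

    def hostdev_pci_addresses_equal(a, b):
        # compare the PCI address fields numerically, not as raw strings
        return all(
            _normalize(getattr(a, field)) == _normalize(getattr(b, field))
            for field in ('domain', 'bus', 'slot', 'function'))

    nova_side = SimpleNamespace(domain='0000', bus='81', slot='00', function='1')
    libvirt_side = SimpleNamespace(
        domain='0x0000', bus='0x81', slot='0x00', function='0x1')
    assert hostdev_pci_addresses_equal(nova_side, libvirt_side)
    assert _with_prefix(nova_side.bus) == _with_prefix(libvirt_side.bus) == '0x81'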

Implements: blueprint sriov-interface-attach-detach

Change-Id: I67504a37b0fe2ae5da3cba2f3122d9d0e18b9481
Balazs Gibizer 2020-07-14 16:50:20 +02:00 committed by Stephen Finucane
parent ee9ceca416
commit 1361ea5ad1
17 changed files with 864 additions and 80 deletions


@ -29,10 +29,10 @@ devices with potentially different capabilities.
**Limitations**
* Attaching SR-IOV ports to existing servers is not currently supported.
This is now rejected by the API but previously it failed later in the
process. See `bug 1708433 <https://bugs.launchpad.net/nova/+bug/1708433>`_
for details.
* Attaching SR-IOV ports to existing servers was not supported until the
22.0.0 Victoria release. Due to various bugs in libvirt and QEMU we
recommend using at least libvirt version 6.0.0 and QEMU version 4.2.
* Cold migration (resize) of servers with SR-IOV devices attached was not
supported until the 14.0.0 Newton release, see
`bug 1512880 <https://bugs.launchpad.net/nova/+bug/1512880>`_ for details.


@ -174,7 +174,8 @@ class InterfaceAttachmentController(wsgi.Controller):
exception.AttachInterfaceNotSupported,
exception.SecurityGroupCannotBeApplied,
exception.NetworkInterfaceTaggedAttachNotSupported,
exception.NetworksWithQoSPolicyNotSupported) as e:
exception.NetworksWithQoSPolicyNotSupported,
exception.InterfaceAttachPciClaimFailed) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except (exception.InstanceIsLocked,
exception.FixedIpAlreadyInUse,


@ -7472,8 +7472,9 @@ class ComputeManager(manager.Manager):
# cinder v3 api flow
self.volume_api.attachment_delete(context, bdm.attachment_id)
def _deallocate_port_for_instance(self, context, instance, port_id,
raise_on_failure=False):
def _deallocate_port_for_instance(
self, context, instance, port_id, raise_on_failure=False,
pci_device=None):
try:
result = self.network_api.deallocate_port_for_instance(
context, instance, port_id)
@ -7486,6 +7487,17 @@ class ComputeManager(manager.Manager):
{'port_id': port_id, 'error': ex},
instance=instance)
else:
if pci_device:
self.rt.unclaim_pci_devices(context, pci_device, instance)
# remove pci device from instance
instance.pci_devices.objects.remove(pci_device)
# remove pci request from instance
instance.pci_requests.requests = [
pci_req for pci_req in instance.pci_requests.requests
if pci_req.request_id != pci_device.request_id]
if port_allocation:
# Deallocate the resources in placement that were used by the
# detached port.
@ -7506,6 +7518,73 @@ class ComputeManager(manager.Manager):
{'port_id': port_id, 'error': ex},
instance=instance)
def _claim_pci_device_for_interface_attach(
self,
context: nova.context.RequestContext,
instance: 'objects.Instance',
requested_networks: 'objects.NetworkRequestsList'
) -> ty.Optional['objects.PciDevice']:
"""Claim PCI device if the requested interface needs it
If a PCI device is claimed then the requested_networks is updated
with the pci request id and the pci_requests and pci_devices fields
of the instance are also updated accordingly.
:param context: nova.context.RequestContext
:param instance: the objects.Instance to which the interface is being
attached
:param requested_networks: the objects.NetworkRequestList describing
the requested interface. The requested_networks.objects list shall
have a single item.
:raises ValueError: if there is more than one interface requested
:raises InterfaceAttachPciClaimFailed: if the PCI device claim fails
:returns: An objects.PciDevice describing the claimed PCI device for
the interface or None if no device is requested
"""
if len(requested_networks.objects) != 1:
raise ValueError(
"Only support one interface per interface attach request")
requested_network = requested_networks.objects[0]
pci_numa_affinity_policy = hardware.get_pci_numa_policy_constraint(
instance.flavor, instance.image_meta)
pci_reqs = objects.InstancePCIRequests(
requests=[], instance_uuid=instance.uuid)
self.network_api.create_resource_requests(
context, requested_networks, pci_reqs,
affinity_policy=pci_numa_affinity_policy)
if not pci_reqs.requests:
# The requested port does not require a PCI device so nothing to do
return None
if len(pci_reqs.requests) > 1:
raise ValueError(
"Only support one interface per interface attach request")
pci_req = pci_reqs.requests[0]
devices = self.rt.claim_pci_devices(
context, pci_reqs, instance.numa_topology)
if not devices:
LOG.info('Failed to claim PCI devices during interface attach '
'for PCI request %s', pci_req, instance=instance)
raise exception.InterfaceAttachPciClaimFailed(
instance_uuid=instance.uuid)
device = devices[0]
# Update the requested network with the pci request id
requested_network.pci_request_id = pci_req.request_id
instance.pci_requests.requests.append(pci_req)
instance.pci_devices.objects.append(device)
return device
# TODO(mriedem): There are likely race failures which can result in
# NotFound and QuotaError exceptions getting traced as well.
@messaging.expected_exceptions(
@ -7554,9 +7633,29 @@ class ComputeManager(manager.Manager):
phase=fields.NotificationPhase.START)
bind_host_id = self.driver.network_binding_host_id(context, instance)
network_info = self.network_api.allocate_port_for_instance(
context, instance, port_id, network_id, requested_ip,
bind_host_id=bind_host_id, tag=tag)
requested_networks = objects.NetworkRequestList(
objects=[
objects.NetworkRequest(
network_id=network_id,
port_id=port_id,
address=requested_ip,
tag=tag,
)
]
)
pci_device = self._claim_pci_device_for_interface_attach(
context, instance, requested_networks)
network_info = self.network_api.allocate_for_instance(
context,
instance,
requested_networks,
bind_host_id=bind_host_id,
attach=True,
)
if len(network_info) != 1:
LOG.error('allocate_port_for_instance returned %(ports)s '
'ports', {'ports': len(network_info)})
@ -7575,7 +7674,8 @@ class ComputeManager(manager.Manager):
"port %(port_id)s, reason: %(msg)s",
{'port_id': port_id, 'msg': ex},
instance=instance)
self._deallocate_port_for_instance(context, instance, port_id)
self._deallocate_port_for_instance(
context, instance, port_id, pci_device=pci_device)
compute_utils.notify_about_instance_action(
context, instance, self.host,
@ -7586,6 +7686,20 @@ class ComputeManager(manager.Manager):
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
if pci_device:
# NOTE(gibi): The _claim_pci_device_for_interface_attach() call
# found a pci device but it only marked the device as claimed. The
# periodic update_available_resource would move the device to
# allocated state. But as driver.attach_interface() has
# succeeded we now know that the interface is also allocated
# to (used by) the instance. So make sure the pci tracker also
# tracks this device as allocated. This way we can avoid a possible
# race condition when a detach arrives for a device that is only
# in claimed state.
self.rt.allocate_pci_devices_for_instance(context, instance)
instance.save()
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.INTERFACE_ATTACH,
@ -7620,6 +7734,26 @@ class ComputeManager(manager.Manager):
raise exception.PortNotFound(_("Port %s is not "
"attached") % port_id)
pci_device = None
pci_req = pci_req_module.get_instance_pci_request_from_vif(
context, instance, condemned)
pci_device = None
if pci_req:
pci_devices = [pci_device
for pci_device in instance.pci_devices.objects
if pci_device.request_id == pci_req.request_id]
if not pci_devices:
LOG.warning(
"Detach interface failed, port_id=%(port_id)s, "
"reason: PCI device not found for PCI request %(pci_req)s",
{'port_id': port_id, 'pci_req': pci_req})
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
pci_device = pci_devices[0]
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.INTERFACE_DETACH,
@ -7640,7 +7774,10 @@ class ComputeManager(manager.Manager):
raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid)
else:
self._deallocate_port_for_instance(
context, instance, port_id, raise_on_failure=True)
context, instance, port_id, raise_on_failure=True,
pci_device=pci_device)
instance.save()
compute_utils.notify_about_instance_action(
context, instance, self.host,


@ -1867,18 +1867,34 @@ class ResourceTracker(object):
self.stats[nodename].build_succeeded()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def claim_pci_devices(self, context, pci_requests):
def claim_pci_devices(
self, context, pci_requests, instance_numa_topology=None):
"""Claim instance PCI resources
:param context: security context
:param pci_requests: a list of nova.objects.InstancePCIRequests
:param instance_numa_topology: an InstanceNUMATopology object used to
ensure PCI devices are aligned with the NUMA topology of the
instance
:returns: a list of nova.objects.PciDevice objects
"""
result = self.pci_tracker.claim_instance(
context, pci_requests, None)
context, pci_requests, instance_numa_topology)
self.pci_tracker.save(context)
return result
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def unclaim_pci_devices(self, context, pci_device, instance):
"""Deallocate PCI devices
:param context: security context
:param pci_device: the objects.PciDevice describing the PCI device to
be freed
:param instance: the objects.Instance the PCI resources are freed from
"""
self.pci_tracker.free_device(pci_device, instance)
self.pci_tracker.save(context)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def allocate_pci_devices_for_instance(self, context, instance):
"""Allocate instance claimed PCI resources


@ -1353,6 +1353,11 @@ class InterfaceAttachFailedNoNetwork(Invalid):
"for project '%(project_id)s'.")
class InterfaceAttachPciClaimFailed(Invalid):
msg_fmt = _("Failed to claim PCI device for %(instance_uuid)s during "
"interface attach")
class InterfaceDetachFailed(Invalid):
msg_fmt = _("Failed to detach network adapter device from "
"%(instance_uuid)s")


@ -687,6 +687,7 @@ class API(base.Base):
# under user's zone.
self._reset_port_dns_name(network, port_id, neutron)
# TODO(gibi): remove unused attach flag
def _validate_requested_port_ids(self, context, instance, neutron,
requested_networks, attach=False):
"""Processes and validates requested networks for allocation.
@ -719,8 +720,6 @@ class API(base.Base):
attached to another instance.
:raises nova.exception.PortNotUsableDNS: If a requested port has a
value assigned to its dns_name attribute.
:raises nova.exception.AttachSRIOVPortNotSupported: If a requested port
is an SR-IOV port and ``attach=True``.
"""
ports = {}
ordered_networks = []
@ -758,16 +757,6 @@ class API(base.Base):
# Make sure the port is usable
_ensure_no_port_binding_failure(port)
# Make sure the port can be attached.
if attach:
# SR-IOV port attach is not supported.
vnic_type = port.get('binding:vnic_type',
network_model.VNIC_TYPE_NORMAL)
if vnic_type in network_model.VNIC_TYPES_SRIOV:
raise exception.AttachSRIOVPortNotSupported(
port_id=port['id'],
instance_uuid=instance.uuid)
# If requesting a specific port, automatically process
# the network for that port as if it were explicitly
# requested.
@ -1006,6 +995,7 @@ class API(base.Base):
return requests_and_created_ports
# TODO(gibi): remove the unused attach flag
def allocate_for_instance(self, context, instance,
requested_networks,
security_groups=None, bind_host_id=None,
@ -1697,6 +1687,7 @@ class API(base.Base):
update_instance_cache_with_nw_info(self, context, instance,
network_model.NetworkInfo([]))
# TODO(gibi): remove this unused function
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None,
bind_host_id=None, tag=None):


@ -1463,6 +1463,131 @@ class NeutronFixture(fixtures.Fixture):
'resource_request': {},
'binding:profile': {},
'binding:vnic_type': 'direct',
'binding:vif_type': 'hw_veb',
'binding:vif_details': {'vlan': 100},
'port_security_enabled': False,
}
sriov_port2 = {
'id': '3f675f19-8b2d-479d-9d42-054644a95a04',
'name': '',
'description': '',
'network_id': network_2['id'],
'admin_state_up': True,
'status': 'ACTIVE',
'mac_address': '52:54:00:1e:59:c5',
'fixed_ips': [
{
'ip_address': '192.168.13.2',
'subnet_id': subnet_2['id']
}
],
'tenant_id': tenant_id,
'project_id': tenant_id,
'device_id': '',
'resource_request': {},
'binding:profile': {},
'binding:vnic_type': 'direct',
'binding:vif_type': 'hw_veb',
'binding:vif_details': {'vlan': 100},
'port_security_enabled': False,
}
sriov_pf_port = {
'id': 'ce2a6ff9-573d-493e-9498-8100953e6f00',
'name': '',
'description': '',
'network_id': network_2['id'],
'admin_state_up': True,
'status': 'ACTIVE',
'mac_address': '52:54:00:1e:59:c6',
'fixed_ips': [
{
'ip_address': '192.168.13.2',
'subnet_id': subnet_2['id']
}
],
'tenant_id': tenant_id,
'project_id': tenant_id,
'device_id': '',
'resource_request': {},
'binding:profile': {},
'binding:vnic_type': 'direct-physical',
'binding:vif_type': 'hostdev_physical',
'binding:vif_details': {'vlan': 100},
'port_security_enabled': False,
}
sriov_pf_port2 = {
'id': 'ad2fd6c2-2c55-4c46-abdc-a8ec0d5f6a29',
'name': '',
'description': '',
'network_id': network_2['id'],
'admin_state_up': True,
'status': 'ACTIVE',
'mac_address': '52:54:00:1e:59:c7',
'fixed_ips': [
{
'ip_address': '192.168.13.2',
'subnet_id': subnet_2['id']
}
],
'tenant_id': tenant_id,
'project_id': tenant_id,
'device_id': '',
'resource_request': {},
'binding:profile': {},
'binding:vnic_type': 'direct-physical',
'binding:vif_type': 'hostdev_physical',
'binding:vif_details': {'vlan': 100},
'port_security_enabled': False,
}
macvtap_port = {
'id': '6eada1f1-6311-428c-a7a5-52b35cabc8fd',
'name': '',
'description': '',
'network_id': network_2['id'],
'admin_state_up': True,
'status': 'ACTIVE',
'mac_address': '52:54:00:1e:59:c8',
'fixed_ips': [
{
'ip_address': '192.168.13.4',
'subnet_id': subnet_2['id']
}
],
'tenant_id': tenant_id,
'project_id': tenant_id,
'device_id': '',
'binding:profile': {},
'binding:vnic_type': 'macvtap',
'binding:vif_type': 'hw_veb',
'binding:vif_details': {'vlan': 100},
'port_security_enabled': False,
}
macvtap_port2 = {
'id': 'fc79cc0c-93e9-4613-9f78-34c828d92e9f',
'name': '',
'description': '',
'network_id': network_2['id'],
'admin_state_up': True,
'status': 'ACTIVE',
'mac_address': '52:54:00:1e:59:c9',
'fixed_ips': [
{
'ip_address': '192.168.13.4',
'subnet_id': subnet_2['id']
}
],
'tenant_id': tenant_id,
'project_id': tenant_id,
'device_id': '',
'binding:profile': {},
'binding:vnic_type': 'macvtap',
'binding:vif_type': 'hw_veb',
'binding:vif_details': {'vlan': 100},
'port_security_enabled': False,
}


@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
import fixtures
import mock
@ -26,7 +27,10 @@ import nova
from nova import context
from nova import objects
from nova.objects import fields
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional.libvirt import base
from nova.tests.unit import fake_notifier
from nova.tests.unit.virt.libvirt import fakelibvirt
CONF = cfg.CONF
@ -198,6 +202,123 @@ class SRIOVServersTest(_PCIServersTestBase):
self.assertIsNone(diagnostics['nic_details'][1]['tx_packets'])
class SRIOVAttachDetachTest(_PCIServersTestBase):
# no need for aliases as these tests will request SR-IOV via neutron
PCI_ALIAS = []
PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PF_PROD_ID,
"physical_network": "physnet2",
},
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.VF_PROD_ID,
"physical_network": "physnet2",
},
)]
def setUp(self):
super().setUp()
self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
# add extra ports and the related network to the neutron fixture
# specifically for these tests. They cannot be added globally in the
# fixture init as that would add a second network, making the auto
# allocation based tests fail due to ambiguous networks.
self.neutron._networks[
self.neutron.network_2['id']] = self.neutron.network_2
self.neutron._subnets[
self.neutron.subnet_2['id']] = self.neutron.subnet_2
for port in [self.neutron.sriov_port, self.neutron.sriov_port2,
self.neutron.sriov_pf_port, self.neutron.sriov_pf_port2,
self.neutron.macvtap_port, self.neutron.macvtap_port2]:
self.neutron._ports[port['id']] = copy.deepcopy(port)
def _get_attached_port_ids(self, instance_uuid):
return [
attachment['port_id']
for attachment in self.api.get_port_interfaces(instance_uuid)]
def _detach_port(self, instance_uuid, port_id):
self.api.detach_interface(instance_uuid, port_id)
fake_notifier.wait_for_versioned_notifications(
'instance.interface_detach.end')
def _attach_port(self, instance_uuid, port_id):
self.api.attach_interface(
instance_uuid,
{'interfaceAttachment': {
'port_id': port_id}})
fake_notifier.wait_for_versioned_notifications(
'instance.interface_attach.end')
def _test_detach_attach(self, first_port_id, second_port_id):
# This test takes two ports that require a PCI claim.
# Starts a compute with one PF and one connected VF.
# Then starts a VM with the first port, detaches it, then
# re-attaches it; these are expected to succeed. Then tries to attach
# the second port and asserts that it fails as there is no free PCI
# device left on the host.
host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
cpu_cores=2, cpu_threads=2,
kB_mem=15740000)
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
fake_connection = self._get_connection(host_info, pci_info)
self.mock_conn.return_value = fake_connection
self.compute = self.start_service('compute', host='test_compute0')
# Create server with a port
server = self._create_server(networks=[{'port': first_port_id}])
updated_port = self.neutron.show_port(first_port_id)['port']
self.assertEqual('test_compute0', updated_port['binding:host_id'])
self.assertIn(first_port_id, self._get_attached_port_ids(server['id']))
self._detach_port(server['id'], first_port_id)
updated_port = self.neutron.show_port(first_port_id)['port']
self.assertIsNone(updated_port['binding:host_id'])
self.assertNotIn(
first_port_id,
self._get_attached_port_ids(server['id']))
# Attach back the port
self._attach_port(server['id'], first_port_id)
updated_port = self.neutron.show_port(first_port_id)['port']
self.assertEqual('test_compute0', updated_port['binding:host_id'])
self.assertIn(first_port_id, self._get_attached_port_ids(server['id']))
# Try to attach the second port but no free PCI device left
ex = self.assertRaises(
client.OpenStackApiException, self._attach_port, server['id'],
second_port_id)
self.assertEqual(400, ex.response.status_code)
self.assertIn('Failed to claim PCI device', str(ex))
attached_ports = self._get_attached_port_ids(server['id'])
self.assertIn(first_port_id, attached_ports)
self.assertNotIn(second_port_id, attached_ports)
def test_detach_attach_direct(self):
self._test_detach_attach(
self.neutron.sriov_port['id'], self.neutron.sriov_port2['id'])
def test_detach_macvtap(self):
self._test_detach_attach(
self.neutron.macvtap_port['id'],
self.neutron.macvtap_port2['id'])
def test_detach_direct_physical(self):
self._test_detach_attach(
self.neutron.sriov_pf_port['id'],
self.neutron.sriov_pf_port2['id'])
class PCIServersTest(_PCIServersTestBase):
ADMIN_API = True


@ -10271,13 +10271,16 @@ class ComputeAPITestCase(BaseTestCase):
req_ip = '1.2.3.4'
lock_name = 'interface-%s-%s' % (instance.uuid, port_id)
mock_allocate = mock.Mock(return_value=nwinfo)
self.compute.network_api.allocate_port_for_instance = mock_allocate
self.compute.network_api.allocate_for_instance = mock_allocate
with test.nested(
mock.patch.dict(self.compute.driver.capabilities,
supports_attach_interface=True),
mock.patch('oslo_concurrency.lockutils.lock')
) as (cap, mock_lock):
mock.patch('oslo_concurrency.lockutils.lock'),
mock.patch.object(self.compute,
'_claim_pci_device_for_interface_attach',
return_value=None)
) as (cap, mock_lock, mock_claim_pci):
vif = self.compute.attach_interface(self.context,
instance,
network_id,
@ -10285,8 +10288,15 @@ class ComputeAPITestCase(BaseTestCase):
req_ip, None)
self.assertEqual(vif['id'], port_id)
mock_allocate.assert_called_once_with(
self.context, instance, port_id, network_id, req_ip,
bind_host_id='fake-mini', tag=None)
self.context, instance,
test.MatchType(objects.NetworkRequestList),
bind_host_id='fake-mini', attach=True)
network_requests = mock_allocate.mock_calls[0][1][2]
self.assertEqual(1, len(network_requests.objects))
network_request = network_requests[0]
self.assertEqual(network_id, network_request.network_id)
self.assertEqual(port_id, network_request.port_id)
self.assertEqual(req_ip, str(network_request.address))
mock_notify.assert_has_calls([
mock.call(self.context, instance, self.compute.host,
action='interface_attach', phase='start'),
@ -10295,6 +10305,79 @@ class ComputeAPITestCase(BaseTestCase):
mock_lock.assert_called_once_with(lock_name, mock.ANY, mock.ANY,
mock.ANY, delay=mock.ANY, do_log=mock.ANY, fair=mock.ANY,
semaphores=mock.ANY)
mock_claim_pci.assert_called_once_with(
self.context, instance, network_requests)
return nwinfo, port_id
@mock.patch.object(compute_utils, 'notify_about_instance_action')
def test_attach_sriov_interface(self, mock_notify):
instance = self._create_fake_instance_obj()
instance.pci_requests = objects.InstancePCIRequests(requests=[])
instance.pci_devices = objects.PciDeviceList(objects=[])
instance.numa_topology = objects.InstanceNUMATopology()
nwinfo = [fake_network_cache_model.new_vif()]
network_id = nwinfo[0]['network']['id']
port_id = nwinfo[0]['id']
req_ip = '1.2.3.4'
mock_allocate = mock.Mock(return_value=nwinfo)
self.compute.network_api.allocate_for_instance = mock_allocate
with test.nested(
mock.patch.dict(self.compute.driver.capabilities,
supports_attach_interface=True),
mock.patch.object(self.compute.network_api,
'create_resource_requests'),
mock.patch.object(self.compute.rt, 'claim_pci_devices'),
mock.patch.object(self.compute.rt,
'allocate_pci_devices_for_instance'),
mock.patch.object(instance, 'save')
) as (
mock_capabilities, mock_create_resource_req, mock_claim_pci,
mock_allocate_pci, mock_save):
pci_req = objects.InstancePCIRequest(request_id=uuids.pci_req)
pci_device = objects.PciDevice(request_id=pci_req.request_id)
mock_claim_pci.return_value = [pci_device]
def create_resource_req(context, requested_networks,
pci_requests=None, affinity_policy=None):
# Simulate that the requested port is an SRIOV port
pci_requests.requests.append(pci_req)
mock_create_resource_req.side_effect = create_resource_req
vif = self.compute.attach_interface(
self.context, instance, network_id, port_id, req_ip, None)
self.assertEqual(vif['id'], port_id)
mock_allocate.assert_called_once_with(
self.context, instance,
test.MatchType(objects.NetworkRequestList),
bind_host_id='fake-mini', attach=True)
network_requests = mock_allocate.mock_calls[0][1][2]
self.assertEqual(1, len(network_requests.objects))
network_request = network_requests[0]
self.assertEqual(network_id, network_request.network_id)
self.assertEqual(port_id, network_request.port_id)
self.assertEqual(req_ip, str(network_request.address))
mock_create_resource_req.assert_called_once_with(
self.context, network_requests,
test.MatchType(objects.InstancePCIRequests),
affinity_policy=None)
mock_claim_pci.assert_called_once_with(
self.context, test.MatchType(objects.InstancePCIRequests),
instance.numa_topology)
pci_reqs = mock_claim_pci.mock_calls[0][1][1]
self.assertEqual([pci_req], pci_reqs.requests)
mock_allocate_pci.assert_called_once_with(self.context, instance)
mock_save.assert_called_once_with()
self.assertIn(pci_req, instance.pci_requests.requests)
self.assertIn(pci_device, instance.pci_devices.objects)
return nwinfo, port_id
@mock.patch.object(compute_utils, 'notify_about_instance_action')
@ -10305,11 +10388,16 @@ class ComputeAPITestCase(BaseTestCase):
port_id = nwinfo[0]['id']
req_ip = '1.2.3.4'
mock_allocate = mock.Mock(return_value=nwinfo)
self.compute.network_api.allocate_port_for_instance = mock_allocate
self.compute.network_api.allocate_for_instance = mock_allocate
with mock.patch.dict(self.compute.driver.capabilities,
with test.nested(
mock.patch.dict(self.compute.driver.capabilities,
supports_attach_interface=True,
supports_tagged_attach_interface=True):
supports_tagged_attach_interface=True),
mock.patch.object(self.compute,
'_claim_pci_device_for_interface_attach',
return_value=None)
) as (mock_capabilities, mock_claim_pci):
vif = self.compute.attach_interface(self.context,
instance,
network_id,
@ -10317,13 +10405,24 @@ class ComputeAPITestCase(BaseTestCase):
req_ip, tag='foo')
self.assertEqual(vif['id'], port_id)
mock_allocate.assert_called_once_with(
self.context, instance, port_id, network_id, req_ip,
bind_host_id='fake-mini', tag='foo')
self.context, instance,
test.MatchType(objects.NetworkRequestList),
bind_host_id='fake-mini', attach=True)
network_requests = mock_allocate.mock_calls[0][1][2]
self.assertEqual(1, len(network_requests.objects))
network_request = network_requests[0]
self.assertEqual(network_id, network_request.network_id)
self.assertEqual(port_id, network_request.port_id)
self.assertEqual(req_ip, str(network_request.address))
self.assertEqual('foo', network_request.tag)
mock_notify.assert_has_calls([
mock.call(self.context, instance, self.compute.host,
action='interface_attach', phase='start'),
mock.call(self.context, instance, self.compute.host,
action='interface_attach', phase='end')])
mock_claim_pci.assert_called_once_with(
self.context, instance, network_requests)
return nwinfo, port_id
def test_tagged_attach_interface_raises(self):
@ -10357,23 +10456,34 @@ class ComputeAPITestCase(BaseTestCase):
mock.patch.object(compute_utils, 'notify_about_instance_action'),
mock.patch.object(self.compute.driver, 'attach_interface'),
mock.patch.object(self.compute.network_api,
'allocate_port_for_instance'),
'allocate_for_instance'),
mock.patch.object(self.compute.network_api,
'deallocate_port_for_instance'),
mock.patch.dict(self.compute.driver.capabilities,
supports_attach_interface=True)) as (
supports_attach_interface=True),
mock.patch.object(self.compute,
'_claim_pci_device_for_interface_attach',
return_value=None),
) as (
mock_notify, mock_attach, mock_allocate, mock_deallocate,
mock_dict):
mock_dict, mock_claim_pci):
mock_allocate.return_value = nwinfo
mock_attach.side_effect = exception.NovaException("attach_failed")
self.assertRaises(exception.InterfaceAttachFailed,
self.compute.attach_interface, self.context,
instance, network_id, port_id, req_ip, None)
mock_allocate.assert_called_once_with(self.context, instance,
port_id, network_id, req_ip,
bind_host_id='fake-host',
tag=None)
mock_allocate.assert_called_once_with(
self.context, instance,
test.MatchType(objects.NetworkRequestList),
bind_host_id='fake-host', attach=True)
network_requests = mock_allocate.mock_calls[0][1][2]
self.assertEqual(1, len(network_requests.objects))
network_request = network_requests[0]
self.assertEqual(network_id, network_request.network_id)
self.assertEqual(port_id, network_request.port_id)
self.assertEqual(req_ip, str(network_request.address))
mock_deallocate.assert_called_once_with(self.context, instance,
port_id)
mock_notify.assert_has_calls([
@ -10383,6 +10493,139 @@ class ComputeAPITestCase(BaseTestCase):
action='interface_attach',
exception=mock_attach.side_effect,
phase='error')])
mock_claim_pci.assert_called_once_with(
self.context, instance, network_requests)
def test_attach_sriov_interface_failed_in_driver(self):
new_type = flavors.get_flavor_by_flavor_id('4')
instance = objects.Instance(
id=42,
uuid=uuids.interface_failed_instance,
image_ref='foo',
system_metadata={},
flavor=new_type,
host='fake-host',
pci_requests=objects.InstancePCIRequests(requests=[]),
pci_devices=objects.PciDeviceList(objects=[]),
numa_topology=objects.InstanceNUMATopology())
nwinfo = [fake_network_cache_model.new_vif()]
network_id = nwinfo[0]['network']['id']
port_id = nwinfo[0]['id']
req_ip = '1.2.3.4'
with test.nested(
mock.patch.object(compute_utils, 'notify_about_instance_action'),
mock.patch.object(self.compute.driver, 'attach_interface'),
mock.patch.object(self.compute.network_api,
'allocate_for_instance'),
mock.patch.object(self.compute.network_api,
'deallocate_port_for_instance',
return_value=(mock.sentinel.nw_info, {})),
mock.patch.dict(self.compute.driver.capabilities,
supports_attach_interface=True),
mock.patch.object(self.compute.network_api,
'create_resource_requests'),
mock.patch.object(self.compute.rt, 'claim_pci_devices'),
mock.patch.object(self.compute.rt, 'unclaim_pci_devices')
) as (
mock_notify, mock_attach, mock_allocate, mock_deallocate,
mock_dict, mock_create_resource_req, mock_claim_pci,
mock_unclaim_pci):
pci_req = objects.InstancePCIRequest(request_id=uuids.pci_req)
pci_device = objects.PciDevice(request_id=pci_req.request_id)
mock_claim_pci.return_value = [pci_device]
def create_resource_req(context, requested_networks,
pci_requests=None, affinity_policy=None):
# Simulate that the requested port is an SRIOV port
pci_requests.requests.append(pci_req)
mock_create_resource_req.side_effect = create_resource_req
mock_allocate.return_value = nwinfo
mock_attach.side_effect = exception.NovaException("attach_failed")
self.assertRaises(
exception.InterfaceAttachFailed, self.compute.attach_interface,
self.context, instance, network_id, port_id, req_ip, None)
mock_allocate.assert_called_once_with(
self.context, instance,
test.MatchType(objects.NetworkRequestList),
bind_host_id='fake-host', attach=True)
network_requests = mock_allocate.mock_calls[0][1][2]
self.assertEqual(1, len(network_requests.objects))
network_request = network_requests[0]
self.assertEqual(network_id, network_request.network_id)
self.assertEqual(port_id, network_request.port_id)
self.assertEqual(req_ip, str(network_request.address))
mock_deallocate.assert_called_once_with(
self.context, instance, port_id)
mock_create_resource_req.assert_called_once_with(
self.context, network_requests,
test.MatchType(objects.InstancePCIRequests),
affinity_policy=None)
mock_claim_pci.assert_called_once_with(
self.context, test.MatchType(objects.InstancePCIRequests),
instance.numa_topology)
pci_reqs = mock_claim_pci.mock_calls[0][1][1]
self.assertEqual([pci_req], pci_reqs.requests)
mock_unclaim_pci.assert_called_once_with(
self.context, pci_device, instance)
self.assertNotIn(pci_req, instance.pci_requests.requests)
self.assertNotIn(pci_device, instance.pci_devices.objects)
def test_attach_sriov_interface_pci_claim_fails(self):
instance = self._create_fake_instance_obj()
instance.pci_requests = objects.InstancePCIRequests(requests=[])
instance.pci_devices = objects.PciDeviceList(objects=[])
instance.numa_topology = objects.InstanceNUMATopology()
nwinfo = [fake_network_cache_model.new_vif()]
network_id = nwinfo[0]['network']['id']
port_id = nwinfo[0]['id']
req_ip = '1.2.3.4'
with test.nested(
mock.patch.dict(self.compute.driver.capabilities,
supports_attach_interface=True),
mock.patch.object(self.compute.network_api,
'create_resource_requests'),
mock.patch.object(self.compute.rt, 'claim_pci_devices',
return_value=[]),
) as (
mock_capabilities, mock_create_resource_req, mock_claim_pci):
pci_req = objects.InstancePCIRequest(request_id=uuids.pci_req)
def create_resource_req(context, requested_networks,
pci_requests=None, affinity_policy=None):
# Simulate that the requested port is an SRIOV port
pci_requests.requests.append(pci_req)
mock_create_resource_req.side_effect = create_resource_req
ex = self.assertRaises(
messaging.ExpectedException, self.compute.attach_interface,
self.context, instance, network_id, port_id, req_ip, None)
wrapped_exc = ex.exc_info[1]
self.assertEqual(
exception.InterfaceAttachPciClaimFailed, type(wrapped_exc))
mock_create_resource_req.assert_called_once_with(
self.context, test.MatchType(objects.NetworkRequestList),
test.MatchType(objects.InstancePCIRequests),
affinity_policy=None)
mock_claim_pci.assert_called_once_with(
self.context, test.MatchType(objects.InstancePCIRequests),
instance.numa_topology)
pci_reqs = mock_claim_pci.mock_calls[0][1][1]
self.assertEqual([pci_req], pci_reqs.requests)
self.assertNotIn(pci_req, instance.pci_requests.requests)
@mock.patch.object(compute_utils, 'notify_about_instance_action')
def test_detach_interface(self, mock_notify):
@ -10393,6 +10636,16 @@ class ComputeAPITestCase(BaseTestCase):
instance.info_cache.network_info = network_model.NetworkInfo.hydrate(
nwinfo)
lock_name = 'interface-%s-%s' % (instance.uuid, port_id)
pci_req = objects.InstancePCIRequest(request_id=uuids.pci_req_id)
other_pci_req = objects.InstancePCIRequest(
request_id=uuids.another_pci_req_id)
instance.pci_requests = objects.InstancePCIRequests(
requests=[pci_req, other_pci_req])
pci_dev = objects.PciDevice(request_id=uuids.pci_req_id)
another_pci_dev = objects.PciDevice(
request_id=uuids.another_pci_req_id)
instance.pci_devices = objects.PciDeviceList(
objects=[pci_dev, another_pci_dev])
port_allocation = {uuids.rp1: {'NET_BW_EGR_KILOBIT_PER_SEC': 10000}}
with test.nested(
@ -10402,8 +10655,14 @@ class ComputeAPITestCase(BaseTestCase):
mock.patch.object(self.compute.network_api,
'deallocate_port_for_instance',
return_value=([], port_allocation)),
mock.patch('oslo_concurrency.lockutils.lock')) as (
mock_remove_alloc, mock_deallocate, mock_lock):
mock.patch('oslo_concurrency.lockutils.lock'),
mock.patch('nova.pci.request.get_instance_pci_request_from_vif',
return_value=pci_req),
mock.patch.object(self.compute.rt, 'unclaim_pci_devices'),
mock.patch.object(instance, 'save')
) as (
mock_remove_alloc, mock_deallocate, mock_lock,
mock_get_pci_req, mock_unclaim_pci, mock_instance_save):
self.compute.detach_interface(self.context, instance, port_id)
mock_deallocate.assert_called_once_with(
@ -10420,6 +10679,11 @@ class ComputeAPITestCase(BaseTestCase):
mock_lock.assert_called_once_with(lock_name, mock.ANY, mock.ANY,
mock.ANY, delay=mock.ANY, do_log=mock.ANY, fair=mock.ANY,
semaphores=mock.ANY)
mock_unclaim_pci.assert_called_once_with(
self.context, pci_dev, instance)
self.assertNotIn(pci_req, instance.pci_requests.requests)
self.assertNotIn(pci_dev, instance.pci_devices.objects)
mock_instance_save.assert_called_once_with()
@mock.patch('nova.compute.manager.LOG.log')
def test_detach_interface_failed(self, mock_log):
@ -10483,6 +10747,46 @@ class ComputeAPITestCase(BaseTestCase):
mock.call(self.context, instance, self.compute.host,
action='interface_detach', phase='start')])
@mock.patch.object(compute_manager.LOG, 'warning')
def test_detach_sriov_interface_pci_device_not_found(self, mock_warning):
nwinfo, port_id = self.test_attach_interface()
instance = self._create_fake_instance_obj()
instance.info_cache = objects.InstanceInfoCache.new(
self.context, uuids.info_cache_instance)
instance.info_cache.network_info = network_model.NetworkInfo.hydrate(
nwinfo)
pci_req = objects.InstancePCIRequest(request_id=uuids.pci_req_id)
other_pci_req = objects.InstancePCIRequest(
request_id=uuids.another_pci_req_id)
instance.pci_requests = objects.InstancePCIRequests(
requests=[pci_req, other_pci_req])
another_pci_dev = objects.PciDevice(
request_id=uuids.another_pci_req_id)
instance.pci_devices = objects.PciDeviceList(
objects=[another_pci_dev])
with test.nested(
mock.patch.object(self.compute.network_api,
'deallocate_port_for_instance',
new=mock.NonCallableMock()),
mock.patch('oslo_concurrency.lockutils.lock'),
mock.patch('nova.pci.request.get_instance_pci_request_from_vif',
return_value=pci_req),
mock.patch.object(self.compute.rt, 'unclaim_pci_devices',
new=mock.NonCallableMock()),
) as (
mock_deallocate, mock_lock, mock_get_pci_req,
mock_unclaim_pci):
self.assertRaises(exception.InterfaceDetachFailed,
self.compute.detach_interface, self.context, instance, port_id)
self.assertIn(pci_req, instance.pci_requests.requests)
mock_warning.assert_called_once_with(
'Detach interface failed, port_id=%(port_id)s, reason: '
'PCI device not found for PCI request %(pci_req)s',
{'port_id': port_id, 'pci_req': pci_req})
def test_attach_volume_new_flow(self):
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'volume', 'destination_type': 'volume',


@ -2545,19 +2545,21 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
db_instance)
e = exception.InterfaceAttachFailed(instance_uuid=f_instance.uuid)
@mock.patch.object(self.compute,
'_claim_pci_device_for_interface_attach',
return_value=None)
@mock.patch.object(compute_utils, 'EventReporter')
@mock.patch.object(compute_utils, 'notify_about_instance_action')
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(self.compute.network_api,
'allocate_port_for_instance',
side_effect=e)
'allocate_for_instance', side_effect=e)
@mock.patch.object(self.compute, '_instance_update',
side_effect=lambda *a, **k: {})
def do_test(update, meth, add_fault, notify, event):
def do_test(update, meth, add_fault, notify, event, mock_claim_pci):
self.assertRaises(exception.InterfaceAttachFailed,
self.compute.attach_interface,
self.context, f_instance, 'net_id', 'port_id',
None, None)
self.context, f_instance, uuids.network_id,
uuids.port_id, None, None)
add_fault.assert_has_calls([
mock.call(self.context, f_instance, e,
mock.ANY)])


@ -3863,11 +3863,20 @@ class TestPciTrackerDelegationMethods(BaseTestCase):
pci_requests = objects.InstancePCIRequests(
requests=[request],
instance_uuid=self.instance.uuid)
self.rt.claim_pci_devices(self.context, pci_requests)
self.rt.claim_pci_devices(
self.context, pci_requests, mock.sentinel.numa_topology)
self.rt.pci_tracker.claim_instance.assert_called_once_with(
self.context, pci_requests, None)
self.context, pci_requests, mock.sentinel.numa_topology)
self.assertTrue(self.rt.pci_tracker.save.called)
def test_unclaim_pci_devices(self):
self.rt.unclaim_pci_devices(
self.context, mock.sentinel.pci_device, mock.sentinel.instance)
self.rt.pci_tracker.free_device.assert_called_once_with(
mock.sentinel.pci_device, mock.sentinel.instance)
self.rt.pci_tracker.save.assert_called_once_with(self.context)
def test_allocate_pci_devices_for_instance(self):
self.rt.allocate_pci_devices_for_instance(self.context, self.instance)
self.rt.pci_tracker.allocate_instance.assert_called_once_with(


@ -6964,12 +6964,6 @@ class TestAllocateForInstance(test.NoDBTestCase):
exception.PortBindingFailed,
{"binding:vif_type": model.VIF_TYPE_BINDING_FAILED})
def test_validate_requested_port_ids_raise_sriov(self):
self._assert_validate_requested_port_ids_raises(
exception.AttachSRIOVPortNotSupported,
{"binding:vnic_type": model.VNIC_TYPE_DIRECT},
attach=True)
def test_validate_requested_network_ids_success_auto_net(self):
requested_networks = []
ordered_networks = []


@ -1825,6 +1825,19 @@ class FakeLibvirtFixture(fixtures.Fixture):
'nova.pci.utils.get_ifname_by_pci_address',
return_value='fake_pf_interface_name'))
# libvirt calls out to sysfs to get the vfs ID during macvtap plug
self.useFixture(fixtures.MockPatch(
'nova.pci.utils.get_vf_num_by_pci_address', return_value=1))
# libvirt calls out to privsep to set the mac and vlan of a macvtap
self.useFixture(fixtures.MockPatch(
'nova.privsep.linux_net.set_device_macaddr_and_vlan'))
# libvirt calls out to privsep to set the port state during macvtap
# plug
self.useFixture(fixtures.MockPatch(
'nova.privsep.linux_net.set_device_macaddr'))
# Don't assume that the system running tests has a valid machine-id
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.driver.LibvirtDriver'


@ -607,6 +607,44 @@ class GuestTestCase(test.NoDBTestCase):
self.guest.get_interface_by_cfg(cfg))
self.assertIsNone(self.guest.get_interface_by_cfg(None))
def test_get_interface_by_cfg_hostdev_pci(self):
self.domain.XMLDesc.return_value = """<domain>
<devices>
<hostdev mode='subsystem' type='pci' managed='yes'>
<driver name='vfio'/>
<source>
<address domain='0x0000' bus='0x81' slot='0x00'
function='0x1'/>
</source>
<alias name='hostdev0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04'
function='0x0'/>
</hostdev>
</devices>
</domain>"""
cfg = vconfig.LibvirtConfigGuestHostdevPCI()
cfg.parse_str("""
<hostdev mode='subsystem' type='pci' managed='yes'>
<driver name='vfio'/>
<source>
<address domain='0x0000' bus='0x81' slot='0x00' function='0x1'/>
</source>
</hostdev>""")
self.assertIsNotNone(
self.guest.get_interface_by_cfg(cfg))
cfg.parse_str("""
<hostdev mode='subsystem' type='pci' managed='yes'>
<driver name='vfio'/>
<source>
<address domain='0000' bus='81' slot='00' function='1'/>
</source>
</hostdev>""")
self.assertIsNotNone(
self.guest.get_interface_by_cfg(cfg))
self.assertIsNone(self.guest.get_interface_by_cfg(None))
def test_get_info(self):
self.domain.info.return_value = (1, 2, 3, 4, 5)
self.domain.ID.return_value = 6


@ -1692,11 +1692,15 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
# NOTE(arches) Skip checking target_dev for vhostuser
# vif type; target_dev is not a valid value for vhostuser.
# NOTE(gibi): For macvtap cases the domain has a target_dev
# generated by libvirt. It is not set by the vif driver code,
# so it is not in the config returned by the vif driver and we
# should not match on it.
return (
self.mac_addr == other.mac_addr and
self.net_type == other.net_type and
self.source_dev == other.source_dev and
(self.net_type == 'vhostuser' or
(self.net_type == 'vhostuser' or not self.target_dev or
self.target_dev == other.target_dev) and
self.vhostuser_path == other.vhostuser_path)
@ -2127,19 +2131,39 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigGuestHostdev):
super(LibvirtConfigGuestHostdevPCI, self).\
__init__(mode='subsystem', type='pci',
**kwargs)
# These are returned from libvirt as hexadecimal strings with a 0x
# prefix even if they have a different meaningful range: domain 16 bit,
# bus 8 bit, slot 5 bit, and function 3 bit.
# On the other hand, nova generates these values without the 0x prefix
self.domain = None
self.bus = None
self.slot = None
self.function = None
def __eq__(self, other):
if not isinstance(other, LibvirtConfigGuestHostdevPCI):
return False
# NOTE(gibi): nova generates hex strings without the 0x prefix but
# libvirt uses that prefix when it returns the config, so we need
# to normalize the strings before comparison
return (
int(self.domain, 16) == int(other.domain, 16) and
int(self.bus, 16) == int(other.bus, 16) and
int(self.slot, 16) == int(other.slot, 16) and
int(self.function, 16) == int(other.function, 16))
def format_dom(self):
dev = super(LibvirtConfigGuestHostdevPCI, self).format_dom()
address = etree.Element("address",
domain='0x' + self.domain,
bus='0x' + self.bus,
slot='0x' + self.slot,
function='0x' + self.function)
address = etree.Element(
"address",
domain=self.domain if self.domain.startswith('0x')
else '0x' + self.domain,
bus=self.bus if self.bus.startswith('0x') else '0x' + self.bus,
slot=self.slot if self.slot.startswith('0x') else '0x' + self.slot,
function=self.function if self.function.startswith('0x')
else '0x' + self.function)
source = etree.Element("source")
source.append(address)
dev.append(source)


@ -229,24 +229,23 @@ class Guest(object):
return interfaces
def get_interface_by_cfg(self, cfg):
"""Lookup a full LibvirtConfigGuestInterface with
LibvirtConfigGuestInterface generated
"""Lookup a full LibvirtConfigGuestDevice with
LibvirtConfigGuestDevice generated
by nova.virt.libvirt.vif.get_config.
:param cfg: config object that represents the guest interface.
:type cfg: LibvirtConfigGuestInterface object
:returns: nova.virt.libvirt.config.LibvirtConfigGuestInterface instance
:type cfg: a subtype of LibvirtConfigGuestDevice object
:returns: nova.virt.libvirt.config.LibvirtConfigGuestDevice instance
if found, else None
"""
if cfg:
interfaces = self.get_all_devices(
vconfig.LibvirtConfigGuestInterface)
interfaces = self.get_all_devices(type(cfg))
for interface in interfaces:
# NOTE(leehom) LibvirtConfigGuestInterface get from domain and
# LibvirtConfigGuestInterface generated by
# NOTE(leehom) the LibvirtConfigGuestDevice got from the domain and
# the one generated by
# nova.virt.libvirt.vif.get_config must be identical.
# NOTE(gibi): LibvirtConfigGuestInterface does a custom
# NOTE(gibi): LibvirtConfigGuest subtypes do a custom
# equality check based on available information on nova side
if cfg == interface:
return interface


@ -0,0 +1,5 @@
---
features:
- |
Nova now supports attaching and detaching PCI device backed Neutron ports
to running servers.
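
As a usage illustration only (not part of this change), attaching a
pre-created SR-IOV port to a running server could look roughly like the
openstacksdk snippet below. The cloud name, network UUID and server UUID
are placeholders, and the exact SDK attribute and method names are
assumptions that may differ between SDK versions.

    import openstack

    conn = openstack.connect(cloud='mycloud')  # assumed clouds.yaml entry

    # Assumed: the network is backed by an SR-IOV capable physnet.
    port = conn.network.create_port(
        network_id='11111111-2222-3333-4444-555555555555',
        binding_vnic_type='direct',  # request an SR-IOV VF port
    )

    # Hot-plug the port into the running server; nova claims a PCI device
    # for it as part of the attach flow added by this change.
    conn.compute.create_server_interface(
        '66666666-7777-8888-9999-000000000000', port_id=port.id)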