diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py
index 65cc89baef70..54e9300236da 100644
--- a/nova/network/neutronv2/api.py
+++ b/nova/network/neutronv2/api.py
@@ -1492,6 +1492,12 @@ class API(base_api.NetworkAPI):
                     context, neutron, request_net.port_id)
             pci_request_id = None
             if vnic_type in network_model.VNIC_TYPES_SRIOV:
+                # TODO(moshele): To differentiate between the SR-IOV legacy
+                # and SR-IOV ovs hardware offload we will leverage the nic
+                # feature based scheduling in nova. This means we will need
+                # libvirt to expose the nic feature. At the moment
+                # there is a limitation that deployers cannot use both
+                # SR-IOV modes (legacy and ovs) in the same deployment.
                 spec = {pci_request.PCI_NET_TAG: phynet_name}
                 dev_type = pci_request.DEVICE_TYPE_FOR_VNIC_TYPE.get(vnic_type)
                 if dev_type:
diff --git a/nova/network/os_vif_util.py b/nova/network/os_vif_util.py
index 32b2465ee8ea..fb5d3c75a55e 100644
--- a/nova/network/os_vif_util.py
+++ b/nova/network/os_vif_util.py
@@ -21,6 +21,7 @@ versioned object model os_vif.objects.*
 import sys
 
 from os_vif import objects
+from os_vif.objects import fields as os_vif_fields
 from oslo_config import cfg
 from oslo_log import log as logging
 
@@ -277,9 +278,24 @@ def _nova_to_osvif_vif_bridge(vif):
 
 # VIF_TYPE_OVS = 'ovs'
 def _nova_to_osvif_vif_ovs(vif):
+    vnic_type = vif.get('vnic_type', model.VNIC_TYPE_NORMAL)
     profile = objects.vif.VIFPortProfileOpenVSwitch(
         interface_id=vif.get('ovs_interfaceid') or vif['id'])
-    if _is_firewall_required(vif) or vif.is_hybrid_plug_enabled():
+    if vnic_type == model.VNIC_TYPE_DIRECT:
+        profile = objects.vif.VIFPortProfileOVSRepresentor(
+            interface_id=vif.get('ovs_interfaceid') or vif['id'],
+            representor_name=_get_vif_name(vif),
+            representor_address=vif["profile"]['pci_slot'])
+        obj = _get_vif_instance(
+            vif,
+            objects.vif.VIFHostDevice,
+            port_profile=profile,
+            plugin="ovs",
+            dev_address=vif["profile"]['pci_slot'],
+            dev_type=os_vif_fields.VIFHostDeviceDevType.ETHERNET)
+        if vif["network"]["bridge"] is not None:
+            obj.network.bridge = vif["network"]["bridge"]
+    elif _is_firewall_required(vif) or vif.is_hybrid_plug_enabled():
         obj = _get_vif_instance(
             vif,
             objects.vif.VIFBridge,
diff --git a/nova/tests/unit/network/test_os_vif_util.py b/nova/tests/unit/network/test_os_vif_util.py
index 930c59b1ba11..0aadf14e5999 100644
--- a/nova/tests/unit/network/test_os_vif_util.py
+++ b/nova/tests/unit/network/test_os_vif_util.py
@@ -13,6 +13,7 @@
 # under the License.
 
 from os_vif import objects as osv_objects
+from os_vif.objects import fields as os_vif_fields
 
 from nova import exception
 from nova.network import model
@@ -667,6 +668,43 @@ class OSVIFUtilTestCase(test.NoDBTestCase):
 
         self.assertObjEqual(expect, actual)
 
+    def test_nova_to_osvif_ovs_with_vnic_direct(self):
+        vif = model.VIF(
+            id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
+            type=model.VIF_TYPE_OVS,
+            address="22:52:25:62:e2:aa",
+            vnic_type=model.VNIC_TYPE_DIRECT,
+            network=model.Network(
+                id="b82c1929-051e-481d-8110-4669916c7915",
+                label="Demo Net",
+                subnets=[]),
+            profile={'pci_slot': '0000:0a:00.1'}
+        )
+
+        actual = os_vif_util.nova_to_osvif_vif(vif)
+
+        expect = osv_objects.vif.VIFHostDevice(
+            id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
+            active=False,
+            address="22:52:25:62:e2:aa",
+            dev_address='0000:0a:00.1',
+            dev_type=os_vif_fields.VIFHostDeviceDevType.ETHERNET,
+            plugin="ovs",
+            port_profile=osv_objects.vif.VIFPortProfileOVSRepresentor(
+                interface_id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
+                representor_name="nicdc065497-3c",
+                representor_address="0000:0a:00.1"),
+            has_traffic_filtering=False,
+            preserve_on_delete=False,
+            network=osv_objects.network.Network(
+                id="b82c1929-051e-481d-8110-4669916c7915",
+                bridge_interface=None,
+                label="Demo Net",
+                subnets=osv_objects.subnet.SubnetList(
+                    objects=[])))
+
+        self.assertObjEqual(expect, actual)
+
     def test_nova_to_osvif_vhostuser_ovs(self):
         vif = model.VIF(
             id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py
index 48af5e564323..9a4d8c84ba76 100644
--- a/nova/tests/unit/virt/libvirt/test_vif.py
+++ b/nova/tests/unit/virt/libvirt/test_vif.py
@@ -142,6 +142,15 @@ class LibvirtVifTestCase(test.NoDBTestCase):
                                 devname='tap-xxx-yyy-zzz',
                                 ovs_interfaceid='aaa-bbb-ccc')
 
+    vif_ovs_direct = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                       address='ca:fe:de:ad:be:ef',
+                                       network=network_ovs,
+                                       vnic_type=network_model.VNIC_TYPE_DIRECT,
+                                       profile={'pci_slot': '0000:0a:00.1'},
+                                       type=network_model.VIF_TYPE_OVS,
+                                       details={'port_filter': False},
+                                       ovs_interfaceid='aaa-bbb-ccc')
+
     vif_ovs_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
                                        address='ca:fe:de:ad:be:ef',
                                        network=network_ovs,
@@ -1480,6 +1489,15 @@ class LibvirtVifTestCase(test.NoDBTestCase):
         self._assertMacEquals(node, self.vif_vhostuser_ovs)
         self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
 
+    def test_ovs_direct(self):
+        d = vif.LibvirtGenericVIFDriver()
+        xml = self._get_instance_xml(d, self.vif_ovs_direct)
+        node = self._get_node(xml)
+        self._assertTypeAndPciEquals(node,
+                                     "hostdev",
+                                     self.vif_ovs_direct)
+        self._assertMacEquals(node, self.vif_ovs_direct)
+
     def test_agilio_ovs_direct(self):
         d = vif.LibvirtGenericVIFDriver()
         xml = self._get_instance_xml(d, self.vif_agilio_ovs_direct)
diff --git a/releasenotes/notes/sriov-ovs-offload-1c3fe79e847f8c8f.yaml b/releasenotes/notes/sriov-ovs-offload-1c3fe79e847f8c8f.yaml
new file mode 100644
index 000000000000..fca3c4805333
--- /dev/null
+++ b/releasenotes/notes/sriov-ovs-offload-1c3fe79e847f8c8f.yaml
@@ -0,0 +1,13 @@
+---
+features:
+  - |
+    Adds support for the OVS vif type with a direct port (SR-IOV).
+    In order to use this OVS acceleration mode, ``openvswitch`` 2.8.0
+    and Linux kernel 4.8 are required. This feature allows an SR-IOV
+    virtual function (VF) to be controlled via the OpenFlow control plane
+    while gaining the improved performance of Open vSwitch. Please note
+    that in the Pike release Nova cannot differentiate between SR-IOV
+    legacy and SR-IOV OVS hardware offload on the same host. This
+    limitation should be resolved once the enable-sriov-nic-features
+    blueprint is completed. Until then, operators can use host aggregates
+    to ensure instances are scheduled on hosts with the required hardware.
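
For reviewers, the short Python sketch below (not part of the patch) exercises the new conversion path the same way the added unit test does: a network-model VIF of type ``ovs`` with ``vnic_type`` ``direct`` is translated by ``nova_to_osvif_vif()`` into an os-vif ``VIFHostDevice`` whose port profile is a ``VIFPortProfileOVSRepresentor``. It assumes a Nova source tree with os-vif installed and that the os-vif versioned objects still need explicit registration outside a running service; the UUID, MAC and PCI address are the placeholder values taken from the test above:

    from os_vif import objects as osv_objects

    from nova.network import model
    from nova.network import os_vif_util

    # Register the os-vif versioned object classes before converting
    # (assumed prerequisite when running outside nova-compute).
    osv_objects.register_all()

    vif = model.VIF(
        id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
        type=model.VIF_TYPE_OVS,
        address="22:52:25:62:e2:aa",
        vnic_type=model.VNIC_TYPE_DIRECT,
        network=model.Network(
            id="b82c1929-051e-481d-8110-4669916c7915",
            label="Demo Net",
            subnets=[]),
        # pci_slot identifies the VF; OVS plugs its representor port.
        profile={'pci_slot': '0000:0a:00.1'})

    os_vif_vif = os_vif_util.nova_to_osvif_vif(vif)

    print(os_vif_vif.obj_name())               # VIFHostDevice
    print(os_vif_vif.dev_address)              # 0000:0a:00.1
    print(os_vif_vif.port_profile.obj_name())  # VIFPortProfileOVSRepresentor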