Multiqueue support for OvsDpdkBond

This patch adds multiqueue support for DPDK bonds. For a DPDK bond, the
multiqueue setting has to be applied to each of the interfaces attached
to the bond.

Implements: blueprint ovs-2-6-features-dpdk
Signed-off-by: Karthik S <ksundara@redhat.com>
Change-Id: I21b46cee902a17f13df51d456648368e468aadb7
Karthik S 2017-05-31 05:29:33 -04:00
parent cc7ff987ca
commit c8901b8ac3
5 changed files with 97 additions and 2 deletions


@@ -7,6 +7,7 @@
         "type" : "ovs_dpdk_bond",
         "name" : "dpdkbond0",
         "mtu" : 9000,
+        "rx_queue": 4,
         "members": [
             {
                 "type" : "ovs_dpdk_port",


@@ -15,6 +15,14 @@ network_config:
         name: dpdkbond0
         # MTU is optional, e.g. for jumbo frames
         mtu: 9000
+        # rx_queue is optional, used for multi-queue option. It configures the
+        # maximum number of queues for each interface associated with the
+        # ovs_dpdk_bond. If not defined, the physical interfaces will have
+        # single queue.
+        # (rx_queue) x (Number of members in the ovs_dpdk_bond) should be less
+        # than the number of PMD cores, as each queue will have one PMD thread
+        # (CPU) associated with it.
+        rx_queue: 4
         members:
           -
             type: ovs_dpdk_port

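The sizing guideline in the comment above can be sanity-checked with a few lines of Python. This is only an illustration of the arithmetic; pmd_cores is an assumed figure for the CPU cores reserved for PMD threads (for example via OVS's pmd-cpu-mask), not something os-net-config reads:

    # Illustration of the guideline above: each queue on each bond member is
    # served by one PMD thread, so rx_queue x len(members) should stay below
    # the number of PMD cores. All values here are assumed examples.
    rx_queue = 4
    bond_members = ['dpdk0', 'dpdk1']
    pmd_cores = 10    # assumed: CPU cores reserved for PMD threads

    pmd_threads_needed = rx_queue * len(bond_members)    # 8
    if pmd_threads_needed >= pmd_cores:
        raise ValueError('rx_queue x members (%d) should be less than the '
                         'number of PMD cores (%d)'
                         % (pmd_threads_needed, pmd_cores))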

@@ -322,6 +322,11 @@ class IfcfgNetConfig(os_net_config.NetConfig):
                 for member in base_opt.members:
                     ovs_extra.append("set Interface %s mtu_request=$MTU" %
                                      member.name)
+            if base_opt.rx_queue:
+                data += "RX_QUEUE=%i\n" % base_opt.rx_queue
+                for member in base_opt.members:
+                    ovs_extra.append("set Interface %s options:n_rxq="
+                                     "$RX_QUEUE" % member.name)
             if base_opt.ovs_options:
                 data += "OVS_OPTIONS=\"%s\"\n" % base_opt.ovs_options
             ovs_extra.extend(base_opt.ovs_extra)

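For a two-member bond, the hunk above renders one RX_QUEUE variable into the ifcfg data plus one options:n_rxq clause per member, with the clauses joined into OVS_EXTRA by " -- " as the expected test output further below shows. A standalone sketch of that rendering (not code from the patch; the member names and rx_queue value are example inputs):

    # Standalone sketch of the rendering added above.
    members = ['dpdk0', 'dpdk1']    # example member names
    rx_queue = 4                    # example rx_queue value

    data = "RX_QUEUE=%i\n" % rx_queue
    ovs_extra = ["set Interface %s options:n_rxq=$RX_QUEUE" % name
                 for name in members]
    data += 'OVS_EXTRA="%s"\n' % " -- ".join(ovs_extra)
    print(data)
    # RX_QUEUE=4
    # OVS_EXTRA="set Interface dpdk0 options:n_rxq=$RX_QUEUE -- set Interface dpdk1 options:n_rxq=$RX_QUEUE"

The $RX_QUEUE reference is written out literally so that it can be expanded against the RX_QUEUE= line in the same file when the network scripts source the ifcfg file.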

@@ -1086,7 +1086,7 @@ class OvsDpdkBond(_BaseOpts):
                  routes=None, mtu=None, primary=False, members=None,
                  ovs_options=None, ovs_extra=None, nic_mapping=None,
                  persist_mapping=False, defroute=True, dhclient_args=None,
-                 dns_servers=None, nm_controlled=False):
+                 dns_servers=None, nm_controlled=False, rx_queue=None):
         super(OvsDpdkBond, self).__init__(name, use_dhcp, use_dhcpv6,
                                           addresses, routes, mtu, primary,
                                           nic_mapping, persist_mapping,
@@ -1095,6 +1095,7 @@ class OvsDpdkBond(_BaseOpts):
         self.members = members or []
         self.ovs_options = ovs_options
         self.ovs_extra = format_ovs_extra(self, ovs_extra)
+        self.rx_queue = rx_queue
 
         for member in self.members:
             if member.primary:
@@ -1117,6 +1118,7 @@ class OvsDpdkBond(_BaseOpts):
          persist_mapping, defroute, dhclient_args,
          dns_servers, nm_controlled) = _BaseOpts.base_opts_from_json(
             json, include_primary=False)
+        rx_queue = json.get('rx_queue', None)
         ovs_options = json.get('ovs_options')
         ovs_extra = json.get('ovs_extra', [])
         if not isinstance(ovs_extra, list):
@@ -1145,7 +1147,7 @@ class OvsDpdkBond(_BaseOpts):
                            persist_mapping=persist_mapping,
                            defroute=defroute, dhclient_args=dhclient_args,
                            dns_servers=dns_servers,
-                           nm_controlled=nm_controlled)
+                           nm_controlled=nm_controlled, rx_queue=rx_queue)
 
 
 class VppInterface(_BaseOpts):

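Taken together, the hunks above make rx_queue an optional constructor argument that from_json() fills from the 'rx_queue' JSON key, defaulting to None when the key is absent. A standalone sketch of that round trip, using a plain stand-in class rather than the real OvsDpdkBond so it runs without os-net-config installed:

    # Stand-in illustrating only the rx_queue handling added above: the key is
    # optional in JSON and defaults to None on the object.
    class DpdkBondSketch(object):
        def __init__(self, name, rx_queue=None):
            self.name = name
            self.rx_queue = rx_queue    # mirrors self.rx_queue = rx_queue

        @classmethod
        def from_json(cls, json_data):
            return cls(json_data['name'],
                       rx_queue=json_data.get('rx_queue', None))

    assert DpdkBondSketch.from_json({'name': 'dpdkbond0',
                                     'rx_queue': 4}).rx_queue == 4
    assert DpdkBondSketch.from_json({'name': 'dpdkbond0'}).rx_queue is None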

@@ -990,6 +990,85 @@ BOND_IFACES="dpdk0 dpdk1"
 MTU=9000
 OVS_EXTRA="set Interface dpdk0 mtu_request=$MTU \
 -- set Interface dpdk1 mtu_request=$MTU"
+"""
+        self.assertEqual(dpdk_bond_config,
+                         self.get_interface_config('dpdkbond0'))
+
+    def test_network_ovs_dpdk_bond_with_rx_queue(self):
+        nic_mapping = {'nic1': 'eth0', 'nic2': 'eth1', 'nic3': 'eth2'}
+        self.stubbed_mapped_nics = nic_mapping
+
+        iface0 = objects.Interface(name='nic2')
+        dpdk0 = objects.OvsDpdkPort(name='dpdk0', members=[iface0])
+        iface1 = objects.Interface(name='nic3')
+        dpdk1 = objects.OvsDpdkPort(name='dpdk1', members=[iface1])
+        bond = objects.OvsDpdkBond('dpdkbond0', rx_queue=4,
+                                   members=[dpdk0, dpdk1])
+        bridge = objects.OvsUserBridge('br-link', members=[bond])
+
+        def test_bind_dpdk_interfaces(ifname, driver, noop):
+            self.assertIn(ifname, ['eth1', 'eth2'])
+            self.assertEqual(driver, 'vfio-pci')
+        self.stubs.Set(utils, 'bind_dpdk_interfaces',
+                       test_bind_dpdk_interfaces)
+
+        self.provider.add_ovs_dpdk_bond(bond)
+        self.provider.add_ovs_user_bridge(bridge)
+
+        dpdk_bond_config = """# This file is autogenerated by os-net-config
+DEVICE=dpdkbond0
+ONBOOT=yes
+HOTPLUG=no
+NM_CONTROLLED=no
+PEERDNS=no
+DEVICETYPE=ovs
+TYPE=OVSDPDKBond
+OVS_BRIDGE=br-link
+BOND_IFACES="dpdk0 dpdk1"
+RX_QUEUE=4
+OVS_EXTRA="set Interface dpdk0 options:n_rxq=$RX_QUEUE \
+-- set Interface dpdk1 options:n_rxq=$RX_QUEUE"
+"""
+        self.assertEqual(dpdk_bond_config,
+                         self.get_interface_config('dpdkbond0'))
+
+    def test_network_ovs_dpdk_bond_with_mtu_and_rx_queue(self):
+        nic_mapping = {'nic1': 'eth0', 'nic2': 'eth1', 'nic3': 'eth2'}
+        self.stubbed_mapped_nics = nic_mapping
+
+        iface0 = objects.Interface(name='nic2')
+        dpdk0 = objects.OvsDpdkPort(name='dpdk0', members=[iface0])
+        iface1 = objects.Interface(name='nic3')
+        dpdk1 = objects.OvsDpdkPort(name='dpdk1', members=[iface1])
+        bond = objects.OvsDpdkBond('dpdkbond0', rx_queue=4, mtu=9000,
+                                   members=[dpdk0, dpdk1])
+        bridge = objects.OvsUserBridge('br-link', members=[bond])
+
+        def test_bind_dpdk_interfaces(ifname, driver, noop):
+            self.assertIn(ifname, ['eth1', 'eth2'])
+            self.assertEqual(driver, 'vfio-pci')
+        self.stubs.Set(utils, 'bind_dpdk_interfaces',
+                       test_bind_dpdk_interfaces)
+
+        self.provider.add_ovs_dpdk_bond(bond)
+        self.provider.add_ovs_user_bridge(bridge)
+
+        dpdk_bond_config = """# This file is autogenerated by os-net-config
+DEVICE=dpdkbond0
+ONBOOT=yes
+HOTPLUG=no
+NM_CONTROLLED=no
+PEERDNS=no
+DEVICETYPE=ovs
+TYPE=OVSDPDKBond
+OVS_BRIDGE=br-link
+BOND_IFACES="dpdk0 dpdk1"
+RX_QUEUE=4
+MTU=9000
+OVS_EXTRA="set Interface dpdk0 mtu_request=$MTU \
+-- set Interface dpdk1 mtu_request=$MTU \
+-- set Interface dpdk0 options:n_rxq=$RX_QUEUE \
+-- set Interface dpdk1 options:n_rxq=$RX_QUEUE"
 """
         self.assertEqual(dpdk_bond_config,
                          self.get_interface_config('dpdkbond0'))