From 04b6e9bd801beb006f97b5960f007df617681078 Mon Sep 17 00:00:00 2001
From: Andrey Zaikin
Date: Fri, 31 Jan 2020 11:31:34 +0300
Subject: [PATCH] Support DPDK application on bare-metal host

This patch adds a binding driver that copies the vhost-user port
socket into the container's directory, i.e. the directory that is
mounted into the container. It also contains the code to create a
proper VIF when the neutron OVS agent is configured to work with
vhostuser ports.

There is no port creation code here, because that is performed in
base.connect by os_vif.plug. That function creates (or recreates) an
OVS bridge of netdev type and then creates the port in this bridge.
It uses vif.network.bridge as the name of the integration bridge,
i.e. it does not use ovs_bridge from kuryr.conf; vif.network.bridge
is configured by the neutron OVS agent.

The vhostuser mode is defined by the neutron OVS agent, which obtains
it from the Open vSwitch configuration. The configuration can be
checked with:

    ovs-vsctl list Open_vSwitch | grep iface_types

If the neutron OVS agent finds dpdkvhostuserclient there, it sets
vhostuser_mode to VIFVHostUserMode.SERVER. This means the DPDK
application in the container acts as the server and OVS acts as the
client, so the DPDK application creates, binds and listens on the
vhostuser socket at a predefined path. This path is set by the
[vhostuser]/mount_point option in kuryr.conf.

When dpdkvhostuserclient is not in the OVS capability list, e.g.
because OVS is old or was built without dpdkvhostuserclient support,
the mode will be VIFVHostUserMode.CLIENT. In this case OVS creates,
binds and listens on the socket, so the socket file already exists
and has to be moved into the container's mount volume. At the moment
of copying, the OVS server must already be listening on it, otherwise
this approach does not work.
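
For reference, the configuration options added by this patch, with
their defaults, are:

    [vhostuser]
    mount_point = /var/cni/vhostuser
    ovs_vhu_path = /var/run/openvswitch/

For every pod interface the binding driver also writes a small JSON
file named <container_id>-<ifname> under mount_point; the DPDK
application inside the container can read it to find the vhost-user
port, e.g. (illustrative values only):

    {"vhostname": "vhu89eccf9d-7c", "vhostmac": "fa:16:3e:xx:xx:xx",
     "mode": "server"}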

Partially Implements: blueprint support-vhost-user-port-type-on-bm-installation

Change-Id: Ib9c22368e518815064282f4c3b9f9ddaf58dc622
Signed-off-by: Alexey Perevalov
Signed-off-by: Andrey Zaikin
Signed-off-by: Vladimir Kuramshin
---
 kuryr_kubernetes/cni/binding/vhostuser.py | 138 ++++++++++++++++++
 kuryr_kubernetes/config.py                |  12 ++
 .../controller/drivers/vif_pool.py        |   1 +
 kuryr_kubernetes/os_vif_util.py           |  38 ++++-
 .../tests/unit/cni/test_binding.py        |  94 ++++++++++++
 .../tests/unit/test_os_vif_util.py        |  90 +++++++++++-
 setup.cfg                                 |   2 +
 7 files changed, 371 insertions(+), 4 deletions(-)
 create mode 100644 kuryr_kubernetes/cni/binding/vhostuser.py

diff --git a/kuryr_kubernetes/cni/binding/vhostuser.py b/kuryr_kubernetes/cni/binding/vhostuser.py
new file mode 100644
index 000000000..8096f30d6
--- /dev/null
+++ b/kuryr_kubernetes/cni/binding/vhostuser.py
@@ -0,0 +1,138 @@
+# Copyright (c) 2020 Samsung Electronics Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import os.path
+import stat
+
+from kuryr.lib._i18n import _
+from os_vif.objects import fields as osv_fields
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+from vif_plug_ovs import constants
+from vif_plug_ovs import ovs
+
+from kuryr_kubernetes.cni.binding import base
+from kuryr_kubernetes import config
+from kuryr_kubernetes import exceptions as k_exc
+from kuryr_kubernetes.handlers import health
+
+LOG = log.getLogger(__name__)
+
+
+def _get_vhostport_type(vif):
+    if vif.mode == osv_fields.VIFVHostUserMode.SERVER:
+        return 'dpdkvhostuserclient'
+    elif vif.mode == osv_fields.VIFVHostUserMode.CLIENT:
+        return 'dpdkvhostuser'
+    raise k_exc.IntegrityError(
+        _("Unknown vhostuser mode %(mode)s for vif %(vif_id)s")
+        % {'mode': vif.mode, 'vif_id': vif.id})
+
+
+def _get_vhostuser_port_name(vif):
+    return ovs.OvsPlugin.gen_port_name(constants.OVS_VHOSTUSER_PREFIX, vif.id)
+
+
+def _get_vhu_sock(config_file_path):
+    with open(config_file_path, 'r') as f:
+        conf = jsonutils.load(f)
+    return conf['vhostname']
+
+
+def _check_sock_file(vhostuser_socket):
+    mode = os.stat(vhostuser_socket).st_mode
+    return stat.S_ISSOCK(mode)
+
+
+class VIFVHostUserDriver(health.HealthHandler, base.BaseBindingDriver):
+
+    def __init__(self):
+        super(VIFVHostUserDriver, self).__init__()
+        self.mount_path = config.CONF.vhostuser.mount_point
+        self.ovs_vu_path = config.CONF.vhostuser.ovs_vhu_path
+        if not self.mount_path:
+            raise cfg.RequiredOptError('mount_point', 'vhostuser')
+
+    def _write_config(self, container_id, ifname, port_name, vif):
+        """Write the vhostuser configuration file.
+
+        This function writes the configuration file that will be used
+        by the application inside the container and by the cleanup
+        procedure in disconnect().
+        """
+        vhost_conf = {}
+        vhost_conf["vhostname"] = port_name
+        vhost_conf["vhostmac"] = vif.address
+        vhost_conf["mode"] = vif.mode
+        with open(self._config_file_path(container_id, ifname), "w") as f:
+            jsonutils.dump(vhost_conf, f)
+
+    def _config_file_path(self, container_id, ifname):
+        return os.path.join(self.mount_path, f'{container_id}-{ifname}')
+
+    def connect(self, vif, ifname, netns, container_id):
+        port_name = _get_vhostuser_port_name(vif)
+        self._write_config(container_id, ifname, port_name, vif)
+        # No need to copy the socket in SERVER mode.
+        if vif.mode == osv_fields.VIFVHostUserMode.SERVER:
+            return
+
+        src_vhu_sock = os.path.join(self.ovs_vu_path, port_name)
+
+        if _check_sock_file(src_vhu_sock):
+            dst_vhu_sock = os.path.join(vif.path, port_name)
+            LOG.debug("Moving %s to %s while processing VIF %s", src_vhu_sock,
+                      dst_vhu_sock, vif.id)
+            os.rename(src_vhu_sock, dst_vhu_sock)
+        else:
+            error_msg = "Socket {} required for VIF {} doesn't exist".format(
+                src_vhu_sock, vif.id)
+            LOG.error(error_msg)
+            raise k_exc.CNIError(error_msg)
+
+    def disconnect(self, vif, ifname, netns, container_id):
+        # This function removes the configuration file and the
+        # corresponding socket file. Unfortunately the Open vSwitch
+        # daemon cannot remove the moved socket, so we have to do it here.
+        config_file_path = self._config_file_path(container_id, ifname)
+
+        if not os.path.exists(config_file_path):
+            LOG.warning("Configuration file: %s for VIF %s doesn't exist!",
+                        config_file_path, vif.id)
+            return
+        vhu_sock_path = os.path.join(self.mount_path,
+                                     _get_vhu_sock(config_file_path))
+        LOG.debug("remove: %s, %s", config_file_path, vhu_sock_path)
+        os.remove(vhu_sock_path)
+        os.remove(config_file_path)
+
+    def is_alive(self):
+        healthy = False
+        try:
+            healthy = (os.path.exists(self.ovs_vu_path)
+                       and os.path.exists(self.mount_path))
+        except Exception:
+            LOG.exception('Error when determining health status of vhostuser '
+                          'CNI driver.')
+
+        if not healthy:
+            LOG.error('Directory %s or %s does not exist or Kuryr has no '
+                      'permissions to access it. Marking vhostuser binding '
+                      'driver as unhealthy.', self.ovs_vu_path,
+                      self.mount_path)
+
+        return healthy
diff --git a/kuryr_kubernetes/config.py b/kuryr_kubernetes/config.py
index c8a59135b..ebe2c0ad6 100644
--- a/kuryr_kubernetes/config.py
+++ b/kuryr_kubernetes/config.py
@@ -313,6 +313,17 @@ sriov_opts = [
 ]
 
 
+vhostuser = [
+    cfg.StrOpt('mount_point',
+               help=_("Path where the vhost-user port will be created; "
+                      "it should also be a mount point for the pod"),
+               default='/var/cni/vhostuser'),
+    cfg.StrOpt('ovs_vhu_path',
+               help=_("Path where OVS keeps socket files for vhost-user "
+                      "ports"),
+               default='/var/run/openvswitch/')
+]
+
 CONF = cfg.CONF
 CONF.register_opts(kuryr_k8s_opts)
 CONF.register_opts(daemon_opts, group='cni_daemon')
@@ -322,6 +333,7 @@ CONF.register_opts(octavia_defaults, group='octavia_defaults')
 CONF.register_opts(cache_defaults, group='cache_defaults')
 CONF.register_opts(nested_vif_driver_opts, group='pod_vif_nested')
 CONF.register_opts(sriov_opts, group='sriov')
+CONF.register_opts(vhostuser, group='vhostuser')
 
 CONF.register_opts(lib_config.core_opts)
 CONF.register_opts(lib_config.binding_opts, 'binding')
diff --git a/kuryr_kubernetes/controller/drivers/vif_pool.py b/kuryr_kubernetes/controller/drivers/vif_pool.py
index 34eda4e26..98d544885 100644
--- a/kuryr_kubernetes/controller/drivers/vif_pool.py
+++ b/kuryr_kubernetes/controller/drivers/vif_pool.py
@@ -100,6 +100,7 @@ VIF_TYPE_TO_DRIVER_MAPPING = {
     'VIFMacvlanNested': 'nested-macvlan',
     'VIFSriov': 'sriov',
     'VIFDPDKNested': 'nested-dpdk',
+    'VIFVHostUser': 'neutron-vif',
 }
 
 
diff --git a/kuryr_kubernetes/os_vif_util.py b/kuryr_kubernetes/os_vif_util.py
index 2087f34fc..97b610acb 100644
--- a/kuryr_kubernetes/os_vif_util.py
+++ b/kuryr_kubernetes/os_vif_util.py
@@ -23,7 +23,9 @@ from os_vif.objects import route as osv_route
 from os_vif.objects import subnet as osv_subnet
 from os_vif.objects import vif as osv_vif
 from oslo_config import cfg as oslo_cfg
+from oslo_log import log as logging
 from stevedore import driver as stv_driver
+from vif_plug_ovs import constants as osv_const
 
 from kuryr_kubernetes import config
 from kuryr_kubernetes import constants as const
@@ -31,6 +33,8 @@ from kuryr_kubernetes import exceptions as k_exc
 from kuryr_kubernetes.objects import vif as k_vif
 
 
+LOG = logging.getLogger(__name__)
+
 # REVISIT(ivc): consider making this module part of kuryr-lib
 _VIF_TRANSLATOR_NAMESPACE = "kuryr_kubernetes.vif_translators"
 _VIF_MANAGERS = {}
@@ -193,6 +197,15 @@ def _make_vif_network(neutron_port, subnets):
     return network
 
 
+# TODO(a.perevalov): generalize this with get_veth_pair_names,
+# but that is only reasonable if we are going to add vhostuser
+# support to the kuryr project as well.
+def _get_vhu_vif_name(port_id):
+    ifname = osv_const.OVS_VHOSTUSER_PREFIX + port_id
+    ifname = ifname[:kl_const.NIC_NAME_LEN]
+    return ifname
+
+
 def _get_vif_name(neutron_port):
     """Gets a VIF device name for port.
 
@@ -249,8 +262,30 @@ def neutron_to_osvif_vif_ovs(vif_plugin, os_port, subnets):
 
     network = _make_vif_network(os_port, subnets)
     network.bridge = ovs_bridge
+    vhostuser_mode = details.get('vhostuser_mode', False)
 
-    if details.get('ovs_hybrid_plug'):
+    LOG.debug('Detected vhostuser_mode=%s for port %s', vhostuser_mode,
+              os_port.id)
+    if vhostuser_mode:
+        # TODO(a.perevalov): obtain mount point path from pod's mountVolumes
+        vhostuser_mount_point = config.CONF.vhostuser.mount_point
+        if not vhostuser_mount_point:
+            raise oslo_cfg.RequiredOptError('mount_point',
+                                            'vhostuser')
+        vif = osv_vif.VIFVHostUser(
+            id=os_port.id,
+            address=os_port.mac_address,
+            network=network,
+            has_traffic_filtering=details.get('port_filter', False),
+            preserve_on_delete=False,
+            active=_is_port_active(os_port),
+            port_profile=profile,
+            plugin='ovs',
+            path=vhostuser_mount_point,
+            mode=vhostuser_mode,
+            vif_name=_get_vhu_vif_name(os_port.id),
+            bridge_name=network.bridge)
+    elif details.get('ovs_hybrid_plug'):
         vif = osv_vif.VIFBridge(
             id=os_port.id,
             address=os_port.mac_address,
@@ -386,7 +421,6 @@ def neutron_to_osvif_vif(vif_translator, os_port, subnets):
     :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
     :return: os-vif VIF object
     """
-
     try:
         mgr = _VIF_MANAGERS[vif_translator]
     except KeyError:
diff --git a/kuryr_kubernetes/tests/unit/cni/test_binding.py b/kuryr_kubernetes/tests/unit/cni/test_binding.py
index da19af45e..c2c555ba4 100644
--- a/kuryr_kubernetes/tests/unit/cni/test_binding.py
+++ b/kuryr_kubernetes/tests/unit/cni/test_binding.py
@@ -13,13 +13,18 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 import mock
+import os
 import uuid
 
+
 from os_vif import objects as osv_objects
+from os_vif.objects import fields as osv_fields
 from oslo_config import cfg
+from oslo_utils import uuidutils
 
 from kuryr_kubernetes.cni.binding import base
 from kuryr_kubernetes.cni.binding import sriov
+from kuryr_kubernetes.cni.binding import vhostuser
 from kuryr_kubernetes import constants as k_const
 from kuryr_kubernetes import exceptions
 from kuryr_kubernetes import objects
@@ -402,3 +407,92 @@ class TestSriovDriver(TestDriverMixin, test_base.TestCase):
         cls._return_device_driver(m_driver, self.vif)
         m_driver._bind_device.assert_called_once_with(pci, old_driver,
                                                       new_driver)
+
+
+class TestVHostUserDriver(TestDriverMixin, test_base.TestCase):
+    def setUp(self):
+        super(TestVHostUserDriver, self).setUp()
+        self.vu_mount_point = '/var/run/cni'
+        self.vu_ovs_path = '/var/run/openvswitch'
+        CONF.set_override('mount_point', self.vu_mount_point,
+                          group='vhostuser')
+        CONF.set_override('ovs_vhu_path', self.vu_ovs_path,
+                          group='vhostuser')
+        self.vif = fake._fake_vif(osv_objects.vif.VIFVHostUser)
+        self.vif.path = self.vu_mount_point
+        self.vif.address = '64:0f:2b:5f:0c:1c'
+        self.port_name = vhostuser._get_vhostuser_port_name(self.vif)
+        self.cont_id = uuidutils.generate_uuid()
+
+    @mock.patch('kuryr_kubernetes.cni.binding.base._need_configure_l3')
+    @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.'
+                'K8sCNIRegistryPlugin.report_drivers_health')
+    @mock.patch('os.rename')
+    @mock.patch('os.path.exists', mock.Mock(return_value=True))
+    @mock.patch('kuryr_kubernetes.cni.binding.vhostuser.VIFVHostUserDriver.'
+                '_write_config')
+    @mock.patch('kuryr_kubernetes.cni.binding.vhostuser._check_sock_file')
+    @mock.patch('os_vif.plug')
+    def test_connect_client(self, m_vif_plug, m_check_sock, m_write_conf,
+                            m_os_rename, m_report, m_need_l3):
+        m_need_l3.return_value = False
+        self.vif.mode = osv_fields.VIFVHostUserMode.CLIENT
+        m_check_sock.return_value = True
+        base.connect(self.vif, self.instance_info, self.ifname, self.netns,
+                     m_report, container_id=self.cont_id)
+        vu_dst_socket = os.path.join(self.vu_mount_point, self.port_name)
+        vu_src_socket = os.path.join(self.vu_ovs_path, self.port_name)
+
+        m_vif_plug.assert_called_once_with(self.vif, self.instance_info)
+        m_os_rename.assert_called_once_with(vu_src_socket, vu_dst_socket)
+        m_write_conf.assert_called_once_with(self.cont_id, self.ifname,
+                                             self.port_name, self.vif)
+        m_report.assert_called_once()
+
+    @mock.patch('kuryr_kubernetes.cni.binding.base._need_configure_l3')
+    @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.'
+                'K8sCNIRegistryPlugin.report_drivers_health')
+    @mock.patch('kuryr_kubernetes.cni.binding.vhostuser.VIFVHostUserDriver.'
+                '_write_config')
+    @mock.patch('os_vif.plug')
+    def test_connect_server(self, m_vif_plug, m_write_conf,
+                            m_report, m_need_l3):
+        m_need_l3.return_value = False
+        self.vif.mode = osv_fields.VIFVHostUserMode.SERVER
+        base.connect(self.vif, self.instance_info, self.ifname, self.netns,
+                     m_report, container_id=self.cont_id)
+        m_vif_plug.assert_called_once_with(self.vif, self.instance_info)
+        m_write_conf.assert_called_once_with(self.cont_id, self.ifname,
+                                             self.port_name, self.vif)
+        m_report.assert_called_once()
+
+    @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.'
+                'K8sCNIRegistryPlugin.report_drivers_health')
+    @mock.patch('kuryr_kubernetes.cni.binding.vhostuser._check_sock_file',
+                mock.Mock(return_value=False))
+    @mock.patch('kuryr_kubernetes.cni.binding.vhostuser.VIFVHostUserDriver.'
+                '_write_config', mock.Mock())
+    @mock.patch('os_vif.plug')
+    def test_connect_nosocket(self, m_vif_plug, m_report):
+        self.vif.mode = osv_fields.VIFVHostUserMode.CLIENT
+        self.assertRaises(exceptions.CNIError, base.connect, self.vif,
+                          self.instance_info, self.ifname, self.netns,
+                          m_report, container_id=self.cont_id)
+
+    @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.'
+                'K8sCNIRegistryPlugin.report_drivers_health')
+    @mock.patch('kuryr_kubernetes.cni.binding.vhostuser._get_vhu_sock')
+    @mock.patch('os.remove')
+    @mock.patch('os.path.exists', mock.Mock(return_value=True))
+    @mock.patch('os_vif.unplug')
+    def test_disconnect(self, m_os_unplug, m_os_remove, m_get_vhu_sock,
+                        m_report):
+        m_get_vhu_sock.return_value = self.port_name
+        base.disconnect(self.vif, self.instance_info, self.ifname, self.netns,
+                        m_report, container_id=self.cont_id)
+        conf_file_path = '{}/{}-{}'.format(self.vu_mount_point,
+                                           self.cont_id, self.ifname)
+        vhu_sock_path = '{}/{}'.format(self.vu_mount_point,
+                                       self.port_name)
+        os_remove_calls = [mock.call(vhu_sock_path), mock.call(conf_file_path)]
+        m_os_remove.assert_has_calls(os_remove_calls)
diff --git a/kuryr_kubernetes/tests/unit/test_os_vif_util.py b/kuryr_kubernetes/tests/unit/test_os_vif_util.py
index 5457e09e3..9138a39a7 100644
--- a/kuryr_kubernetes/tests/unit/test_os_vif_util.py
+++ b/kuryr_kubernetes/tests/unit/test_os_vif_util.py
@@ -184,7 +184,7 @@ class TestOSVIFUtils(test_base.TestCase):
         port_profile = mock.sentinel.port_profile
         network = mock.sentinel.network
         port_active = mock.sentinel.port_active
-        vif_name = mock.sentinel.vif_name
+        vif_name = "vhu01234567-89"
         hybrid_bridge = mock.sentinel.hybrid_bridge
         vif = mock.sentinel.vif
         port = fake.get_port_obj(port_id=port_id,
@@ -240,7 +240,7 @@ class TestOSVIFUtils(test_base.TestCase):
         subnets = mock.sentinel.subnets
         network = mock.sentinel.network
-        vif_name = mock.sentinel.vif_name
+        vif_name = "vhu01234567-89"
         vif = mock.sentinel.vif
 
         m_mk_profile.return_value = port.profile
@@ -258,6 +258,92 @@ class TestOSVIFUtils(test_base.TestCase):
         self.assertEqual(network.bridge,
                          port.binding_vif_details['bridge_name'])
 
+    @mock.patch('kuryr_kubernetes.os_vif_util._get_vhu_vif_name')
+    @mock.patch('kuryr_kubernetes.os_vif_util._is_port_active')
+    @mock.patch('kuryr_kubernetes.os_vif_util._make_vif_network')
+    @mock.patch('os_vif.objects.vif.VIFVHostUser')
+    @mock.patch('os_vif.objects.vif.VIFPortProfileOpenVSwitch')
+    def test_neutron_to_osvif_vif_ovs_vu_client(self, m_mk_profile, m_mk_vif,
+                                                m_make_vif_network,
+                                                m_is_port_active,
+                                                m_get_vif_name):
+        vif_plugin = 'vhostuser'
+        o_cfg.CONF.set_override('mount_point',
+                                '/var/lib/cni/vhostuser',
+                                group='vhostuser')
+        port_id = mock.sentinel.port_id
+        mac_address = mock.sentinel.mac_address
+        ovs_bridge = mock.sentinel.ovs_bridge
+        subnets = mock.sentinel.subnets
+        port_profile = mock.sentinel.port_profile
+        network = mock.sentinel.network
+        port_active = mock.sentinel.port_active
+        vif_name = "vhu01234567-89"
+        vif = mock.sentinel.vif
+
+        m_mk_profile.return_value = port_profile
+        m_make_vif_network.return_value = network
+        m_is_port_active.return_value = port_active
+        m_get_vif_name.return_value = vif_name
+        m_mk_vif.return_value = vif
+
+        port = fake.get_port_obj(port_id=port_id,
+                                 vif_details={'ovs_hybrid_plug': False,
+                                              'bridge_name': ovs_bridge,
+                                              'vhostuser_mode': 'client'})
+        port.mac_address = mac_address
+
+        self.assertEqual(vif, ovu.neutron_to_osvif_vif_ovs(vif_plugin, port,
+                                                           subnets))
+        m_mk_profile.assert_called_once_with(interface_id=port_id)
+        m_make_vif_network.assert_called_once_with(port, subnets)
+        m_is_port_active.assert_called_once_with(port)
+        m_get_vif_name.assert_called_once_with(port_id)
+        self.assertEqual(ovs_bridge, network.bridge)
+
+    @mock.patch('kuryr_kubernetes.os_vif_util._get_vhu_vif_name')
+    @mock.patch('kuryr_kubernetes.os_vif_util._is_port_active')
+    @mock.patch('kuryr_kubernetes.os_vif_util._make_vif_network')
+    @mock.patch('os_vif.objects.vif.VIFVHostUser')
+    @mock.patch('os_vif.objects.vif.VIFPortProfileOpenVSwitch')
+    def test_neutron_to_osvif_vif_ovs_vu_server(self, m_mk_profile, m_mk_vif,
+                                                m_make_vif_network,
+                                                m_is_port_active,
+                                                m_get_vif_name):
+        vif_plugin = 'vhostuser'
+        o_cfg.CONF.set_override('mount_point',
+                                '/var/lib/cni/vhostuser',
+                                group='vhostuser')
+        port_id = mock.sentinel.port_id
+        mac_address = mock.sentinel.mac_address
+        ovs_bridge = mock.sentinel.ovs_bridge
+        subnets = mock.sentinel.subnets
+        port_profile = mock.sentinel.port_profile
+        network = mock.sentinel.network
+        port_active = mock.sentinel.port_active
+        vif_name = mock.sentinel.vif_name
+        vif = mock.sentinel.vif
+
+        m_mk_profile.return_value = port_profile
+        m_make_vif_network.return_value = network
+        m_is_port_active.return_value = port_active
+        m_get_vif_name.return_value = vif_name
+        m_mk_vif.return_value = vif
+
+        port = fake.get_port_obj(port_id=port_id,
+                                 vif_details={'ovs_hybrid_plug': False,
+                                              'bridge_name': ovs_bridge,
+                                              'vhostuser_mode': 'server'})
+        port.mac_address = mac_address
+
+        self.assertEqual(vif, ovu.neutron_to_osvif_vif_ovs(vif_plugin, port,
+                                                           subnets))
+        m_mk_profile.assert_called_once_with(interface_id=port_id)
+        m_make_vif_network.assert_called_once_with(port, subnets)
+        m_is_port_active.assert_called_once_with(port)
+        m_get_vif_name.assert_called_once_with(port_id)
+        self.assertEqual(ovs_bridge, network.bridge)
+
     @mock.patch('kuryr_kubernetes.os_vif_util._get_vif_name')
     @mock.patch('kuryr_kubernetes.os_vif_util._is_port_active')
     @mock.patch('kuryr_kubernetes.os_vif_util._make_vif_network')
diff --git a/setup.cfg b/setup.cfg
index 3472f497b..b26c27358 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -35,11 +35,13 @@ console_scripts =
 kuryr_kubernetes.vif_translators =
     ovs = kuryr_kubernetes.os_vif_util:neutron_to_osvif_vif_ovs
     sriov = kuryr_kubernetes.os_vif_util:neutron_to_osvif_vif_sriov
+    vhostuser = kuryr_kubernetes.os_vif_util:neutron_to_osvif_vif_ovs
 
 kuryr_kubernetes.cni.binding =
     VIFBridge = kuryr_kubernetes.cni.binding.bridge:BridgeDriver
     VIFOpenVSwitch = kuryr_kubernetes.cni.binding.bridge:VIFOpenVSwitchDriver
     VIFDPDKNested = kuryr_kubernetes.cni.binding.dpdk:DpdkDriver
+    VIFVHostUser = kuryr_kubernetes.cni.binding.vhostuser:VIFVHostUserDriver
     VIFVlanNested = kuryr_kubernetes.cni.binding.nested:VlanDriver
     VIFMacvlanNested = kuryr_kubernetes.cni.binding.nested:MacvlanDriver
     VIFSriov = kuryr_kubernetes.cni.binding.sriov:VIFSriovDriver