From a1eca36557d773123d1096edcbfaa55dba63de15 Mon Sep 17 00:00:00 2001
From: Sahid Orentino Ferdjaoui
Date: Mon, 16 Apr 2018 05:09:27 -0400
Subject: [PATCH] libvirt: add support for virtio-net rx/tx queue sizes

Add support for configuring the `rx_queue_size` and `tx_queue_size`
options in the virtio-net driver by way of nova.conf.

Currently, valid values for the ring buffer sizes are 256, 512, and
1024.

Configuring rx requires libvirt v2.3 and QEMU v2.7.
Configuring tx requires libvirt v3.7 and QEMU v2.10.

Signed-off-by: Sahid Orentino Ferdjaoui
Co-Authored-By: Nicolas Simonds
DocImpact
Implements: bp libvirt-virtio-set-queue-sizes
Change-Id: Ib6d4a2d6b9072db42d11ecdde0950cf7a7781944
---
 nova/conf/libvirt.py                          | 28 +++++++
 nova/tests/unit/virt/libvirt/test_config.py   | 25 +++++++
 nova/tests/unit/virt/libvirt/test_designer.py | 29 ++++++-
 nova/tests/unit/virt/libvirt/test_vif.py      | 66 +++++++++++++++-
 nova/virt/libvirt/config.py                   | 14 +++-
 nova/virt/libvirt/designer.py                 | 16 +++-
 nova/virt/libvirt/vif.py                      | 75 ++++++++++++++++++-
 ...rtio-set-queue-sizes-6c54a2ce3dc30d18.yaml | 15 ++++
 8 files changed, 257 insertions(+), 11 deletions(-)
 create mode 100644 releasenotes/notes/bp-libvirt-virtio-set-queue-sizes-6c54a2ce3dc30d18.yaml

diff --git a/nova/conf/libvirt.py b/nova/conf/libvirt.py
index 5053d0a1668e..c86532550611 100644
--- a/nova/conf/libvirt.py
+++ b/nova/conf/libvirt.py
@@ -1134,6 +1134,33 @@ Related options:
     ),
 ]
 
+
+# The queue size value must be a power of two in the [256, 1024]
+# range.
+# https://libvirt.org/formatdomain.html#elementsDriverBackendOptions
+QueueSizeType = types.Integer(choices=(256, 512, 1024))
+
+libvirt_virtio_queue_sizes = [
+    cfg.Opt('rx_queue_size',
+            type=QueueSizeType,
+            help="""
+Configure virtio rx queue size.
+
+This option is only usable for virtio-net devices with the vhost and
+vhost-user backends. Available only with QEMU/KVM. Requires libvirt
+v2.3 and QEMU v2.7."""),
+    cfg.Opt('tx_queue_size',
+            type=QueueSizeType,
+            help="""
+Configure virtio tx queue size.
+
+This option is only usable for virtio-net devices with the vhost-user
+backend. Available only with QEMU/KVM.
Requires libvirt v3.7 QEMU +v2.10."""), + +] + + ALL_OPTS = list(itertools.chain( libvirt_general_opts, libvirt_imagebackend_opts, @@ -1151,6 +1178,7 @@ ALL_OPTS = list(itertools.chain( libvirt_volume_smbfs_opts, libvirt_remotefs_opts, libvirt_volume_vzstorage_opts, + libvirt_virtio_queue_sizes, )) diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py index 92d7090fe83f..3ffa3b71c9f4 100644 --- a/nova/tests/unit/virt/libvirt/test_config.py +++ b/nova/tests/unit/virt/libvirt/test_config.py @@ -1959,6 +1959,31 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest): obj2.parse_str(xml) self.assertXmlEqual(xml, obj2.to_xml()) + def test_config_vhostuser_queue_size(self): + obj = config.LibvirtConfigGuestInterface() + obj.net_type = "vhostuser" + obj.vhostuser_type = "unix" + obj.vhostuser_mode = "server" + obj.mac_addr = "DE:AD:BE:EF:CA:FE" + obj.vhostuser_path = "/vhost-user/test.sock" + obj.vhost_rx_queue_size = 512 + obj.vhost_tx_queue_size = 1024 + obj.model = "virtio" + xml = obj.to_xml() + self.assertXmlEqual(xml, """ + + + + + + """) + + # parse the xml from the first object into a new object and make sure + # they are the same + obj2 = config.LibvirtConfigGuestInterface() + obj2.parse_str(xml) + self.assertXmlEqual(xml, obj2.to_xml()) + def test_config_interface_address(self): xml = """ diff --git a/nova/tests/unit/virt/libvirt/test_designer.py b/nova/tests/unit/virt/libvirt/test_designer.py index 18d2e2d32260..7f3323a1a724 100644 --- a/nova/tests/unit/virt/libvirt/test_designer.py +++ b/nova/tests/unit/virt/libvirt/test_designer.py @@ -40,11 +40,23 @@ class DesignerTestCase(test.NoDBTestCase): conf = config.LibvirtConfigGuestInterface() designer.set_vif_guest_frontend_config(conf, 'fake-mac', 'fake-model', 'fake-driver', - 'fake-queues') + 'fake-queues', None) self.assertEqual('fake-mac', conf.mac_addr) self.assertEqual('fake-model', conf.model) self.assertEqual('fake-driver', conf.driver_name) self.assertEqual('fake-queues', conf.vhost_queues) + self.assertIsNone(conf.vhost_rx_queue_size) + + def test_set_vif_guest_frontend_config_rx_queue_size(self): + conf = config.LibvirtConfigGuestInterface() + designer.set_vif_guest_frontend_config(conf, 'fake-mac', + 'fake-model', 'fake-driver', + 'fake-queues', 1024) + self.assertEqual('fake-mac', conf.mac_addr) + self.assertEqual('fake-model', conf.model) + self.assertEqual('fake-driver', conf.driver_name) + self.assertEqual('fake-queues', conf.vhost_queues) + self.assertEqual(1024, conf.vhost_rx_queue_size) def test_set_vif_host_backend_bridge_config(self): conf = config.LibvirtConfigGuestInterface() @@ -175,8 +187,21 @@ class DesignerTestCase(test.NoDBTestCase): def test_set_vif_host_backend_vhostuser_config(self): conf = config.LibvirtConfigGuestInterface() designer.set_vif_host_backend_vhostuser_config(conf, 'fake-mode', - 'fake-path') + 'fake-path', None, None) self.assertEqual('vhostuser', conf.net_type) self.assertEqual('unix', conf.vhostuser_type) self.assertEqual('fake-mode', conf.vhostuser_mode) self.assertEqual('fake-path', conf.vhostuser_path) + self.assertIsNone(conf.vhost_rx_queue_size) + self.assertIsNone(conf.vhost_tx_queue_size) + + def test_set_vif_host_backend_vhostuser_config_queue_size(self): + conf = config.LibvirtConfigGuestInterface() + designer.set_vif_host_backend_vhostuser_config(conf, 'fake-mode', + 'fake-path', 512, 1024) + self.assertEqual('vhostuser', conf.net_type) + self.assertEqual('unix', conf.vhostuser_type) + self.assertEqual('fake-mode', 
conf.vhostuser_mode) + self.assertEqual('fake-path', conf.vhostuser_path) + self.assertEqual(512, conf.vhost_rx_queue_size) + self.assertEqual(1024, conf.vhost_tx_queue_size) diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py index baf2fce4fb67..5866931f41d1 100644 --- a/nova/tests/unit/virt/libvirt/test_vif.py +++ b/nova/tests/unit/virt/libvirt/test_vif.py @@ -681,6 +681,44 @@ class LibvirtVifTestCase(test.NoDBTestCase): self.assertIsNone(conf.vhost_queues) self.assertIsNone(conf.driver_name) + def _test_virtio_config_queue_sizes(self): + self.flags(rx_queue_size=512, group='libvirt') + self.flags(tx_queue_size=1024, group='libvirt') + hostimpl = host.Host("qemu:///system") + v = vif.LibvirtGenericVIFDriver() + conf = v.get_base_config( + None, 'ca:fe:de:ad:be:ef', {}, objects.Flavor(), 'kvm', 'normal', + hostimpl) + return hostimpl, v, conf + + @mock.patch.object(host.Host, "has_min_version", return_value=True) + def test_virtio_vhost_queue_sizes(self, has_min_version): + _, _, conf = self._test_virtio_config_queue_sizes() + self.assertEqual(512, conf.vhost_rx_queue_size) + self.assertIsNone(conf.vhost_tx_queue_size) + + @mock.patch.object(host.Host, "has_min_version", return_value=False) + def test_virtio_vhost_queue_sizes_nover(self, has_min_version): + _, _, conf = self._test_virtio_config_queue_sizes() + self.assertIsNone(conf.vhost_rx_queue_size) + self.assertIsNone(conf.vhost_tx_queue_size) + + @mock.patch.object(host.Host, "has_min_version", return_value=True) + def test_virtio_vhostuser_osvif_queue_sizes(self, has_min_version): + hostimpl, v, conf = self._test_virtio_config_queue_sizes() + v._set_config_VIFVHostUser(self.instance, self.os_vif_vhostuser, + conf, hostimpl) + self.assertEqual(512, conf.vhost_rx_queue_size) + self.assertEqual(1024, conf.vhost_tx_queue_size) + + @mock.patch.object(host.Host, "has_min_version", return_value=False) + def test_virtio_vhostuser_osvif_queue_sizes_ver_err(self, has_min_version): + hostimpl, v, conf = self._test_virtio_config_queue_sizes() + v._set_config_VIFVHostUser(self.instance, self.os_vif_vhostuser, + conf, hostimpl) + self.assertIsNone(conf.vhost_rx_queue_size) + self.assertIsNone(conf.vhost_tx_queue_size) + def test_multiple_nics(self): conf = self._get_conf() # Tests multiple nic configuration and that target_dev is @@ -789,7 +827,7 @@ class LibvirtVifTestCase(test.NoDBTestCase): d.get_base_config(None, 'ca:fe:de:ad:be:ef', image_meta, None, 'kvm', 'normal', hostimpl) mock_set.assert_called_once_with(mock.ANY, 'ca:fe:de:ad:be:ef', - 'virtio', None, None) + 'virtio', None, None, None) @mock.patch.object(vif.designer, 'set_vif_guest_frontend_config') def test_model_sriov_multi_queue_not_set(self, mock_set): @@ -806,7 +844,7 @@ class LibvirtVifTestCase(test.NoDBTestCase): conf = d.get_base_config(None, 'ca:fe:de:ad:be:ef', image_meta, None, 'kvm', 'direct', hostimpl) mock_set.assert_called_once_with(mock.ANY, 'ca:fe:de:ad:be:ef', - 'virtio', None, None) + 'virtio', None, None, None) self.assertIsNone(conf.vhost_queues) self.assertIsNone(conf.driver_name) @@ -1429,6 +1467,30 @@ class LibvirtVifTestCase(test.NoDBTestCase): self._assertMacEquals(node, self.vif_vhostuser) self._assertModel(xml, network_model.VIF_MODEL_VIRTIO) + def test_vhostuser_driver_queue_sizes(self): + self.flags(rx_queue_size=512, group='libvirt') + self.flags(tx_queue_size=1024, group='libvirt') + d = vif.LibvirtGenericVIFDriver() + xml = self._get_instance_xml(d, self.vif_vhostuser) + self._assertXmlEqual(""" + + 
fake-uuid + fake-name + 102400 + 4 + + None + + + + + + + + + + """, xml) + def test_vhostuser_no_queues(self): d = vif.LibvirtGenericVIFDriver() image_meta = objects.ImageMeta.from_dict( diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py index bd194cf0d6b7..8ab1216d8719 100644 --- a/nova/virt/libvirt/config.py +++ b/nova/virt/libvirt/config.py @@ -1320,6 +1320,8 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice): self.vhostuser_path = None self.vhostuser_type = None self.vhost_queues = None + self.vhost_rx_queue_size = None + self.vhost_tx_queue_size = None self.vif_inbound_peak = None self.vif_inbound_burst = None self.vif_inbound_average = None @@ -1349,10 +1351,16 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice): if drv_elem is not None: if self.vhost_queues is not None: drv_elem.set('queues', str(self.vhost_queues)) + if self.vhost_rx_queue_size is not None: + drv_elem.set('rx_queue_size', str(self.vhost_rx_queue_size)) + if self.vhost_tx_queue_size is not None: + drv_elem.set('tx_queue_size', str(self.vhost_tx_queue_size)) - if drv_elem.get('name') or drv_elem.get('queues'): + if (drv_elem.get('name') or drv_elem.get('queues') or + drv_elem.get('rx_queue_size') or + drv_elem.get('tx_queue_size')): # Append the driver element into the dom only if name - # or queues attributes are set. + # or queues or tx/rx attributes are set. dev.append(drv_elem) if self.net_type == "ethernet": @@ -1444,6 +1452,8 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice): elif c.tag == 'driver': self.driver_name = c.get('name') self.vhost_queues = c.get('queues') + self.vhost_rx_queue_size = c.get('rx_queue_size') + self.vhost_tx_queue_size = c.get('tx_queue_size') elif c.tag == 'source': if self.net_type == 'direct': self.source_dev = c.get('dev') diff --git a/nova/virt/libvirt/designer.py b/nova/virt/libvirt/designer.py index cef2ee48d68a..488b9aef89ff 100644 --- a/nova/virt/libvirt/designer.py +++ b/nova/virt/libvirt/designer.py @@ -25,9 +25,12 @@ from nova.pci import utils as pci_utils MIN_LIBVIRT_ETHERNET_SCRIPT_PATH_NONE = (1, 3, 3) -def set_vif_guest_frontend_config(conf, mac, model, driver, queues=None): +def set_vif_guest_frontend_config(conf, mac, model, driver, queues, + rx_queue_size): """Populate a LibvirtConfigGuestInterface instance with guest frontend details. + + NOTE: @model, @driver, @queues and @rx_queue_size can be None. """ conf.mac_addr = mac if model is not None: @@ -36,6 +39,8 @@ def set_vif_guest_frontend_config(conf, mac, model, driver, queues=None): conf.driver_name = driver if queues is not None: conf.vhost_queues = queues + if rx_queue_size: + conf.vhost_rx_queue_size = rx_queue_size def set_vif_host_backend_bridge_config(conf, brname, tapname=None): @@ -148,14 +153,21 @@ def set_vif_host_backend_direct_config(conf, devname, mode="passthrough"): conf.model = "virtio" -def set_vif_host_backend_vhostuser_config(conf, mode, path): +def set_vif_host_backend_vhostuser_config(conf, mode, path, rx_queue_size, + tx_queue_size): """Populate a LibvirtConfigGuestInterface instance with host backend details for vhostuser socket. 
+
+    NOTE: @rx_queue_size and @tx_queue_size can be None.
     """
     conf.net_type = "vhostuser"
     conf.vhostuser_type = "unix"
     conf.vhostuser_mode = mode
     conf.vhostuser_path = path
+    if rx_queue_size:
+        conf.vhost_rx_queue_size = rx_queue_size
+    if tx_queue_size:
+        conf.vhost_tx_queue_size = tx_queue_size
 
 
 def set_vif_bandwidth_config(conf, inst_type):
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 348f435e6939..af9385cd45ae 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -39,8 +39,10 @@ from nova import profiler
 from nova import utils
 from nova.virt.libvirt import config as vconfig
 from nova.virt.libvirt import designer
+from nova.virt.libvirt import utils as libvirt_utils
 from nova.virt import osinfo
 
+
 LOG = logging.getLogger(__name__)
 
 CONF = nova.conf.CONF
@@ -50,6 +52,13 @@ MIN_LIBVIRT_VHOSTUSER_MQ = (1, 2, 17)
 # vlan tag for macvtap passthrough mode on SRIOV VFs
 MIN_LIBVIRT_MACVTAP_PASSTHROUGH_VLAN = (1, 3, 5)
 
+# virtio-net.rx_queue_size support
+MIN_LIBVIRT_RX_QUEUE_SIZE = (2, 3, 0)
+MIN_QEMU_RX_QUEUE_SIZE = (2, 7, 0)
+# virtio-net.tx_queue_size support
+MIN_LIBVIRT_TX_QUEUE_SIZE = (3, 7, 0)
+MIN_QEMU_TX_QUEUE_SIZE = (2, 10, 0)
+
 
 def is_vif_model_valid_for_virt(virt_type, vif_model):
     valid_models = {
@@ -105,6 +114,10 @@ class LibvirtGenericVIFDriver(object):
     def get_base_config(self, instance, mac, image_meta, inst_type,
                         virt_type, vnic_type, host):
+        # TODO(sahid): We should rewrite this method; it handles too
+        # many unrelated things. We probably need specific virtio,
+        # vhost and vhostuser functions.
+
         conf = vconfig.LibvirtConfigGuestInterface()
 
         # Default to letting libvirt / the hypervisor choose the model
         model = None
@@ -138,10 +151,30 @@ class LibvirtGenericVIFDriver(object):
                 vnic_type not in network_model.VNIC_TYPES_SRIOV):
             vhost_drv, vhost_queues = self._get_virtio_mq_settings(image_meta,
                                                                    inst_type)
+            # TODO(sahid): It seems that we return driver 'vhost' even
+            # for vhostuser interfaces, where the driver should be
+            # 'vhost-user'. That currently does not create any issue,
+            # since QEMU ignores the driver argument for vhostuser
+            # interfaces, but we should probably fix it anyway. We
+            # should also enforce that the driver is 'vhost' and not
+            # None.
             driver = vhost_drv or driver
 
+            rx_queue_size = None
+            if driver == 'vhost' or driver is None:
+                # The vhost backend only supports setting the RX queue size
+                rx_queue_size, _ = self._get_virtio_queue_sizes(host)
+                if rx_queue_size:
+                    # TODO(sahid): Specifically force the driver to be
+                    # 'vhost' because, if it is None, we do not
+                    # generate the XML driver element needed to carry
+                    # the queue size attribute. This can be removed
+                    # once get_base_config is fixed and rewritten to
+                    # set the correct backend.
+                    driver = 'vhost'
+
         designer.set_vif_guest_frontend_config(
-            conf, mac, model, driver, vhost_queues)
+            conf, mac, model, driver, vhost_queues, rx_queue_size)
 
         return conf
 
@@ -438,7 +471,10 @@ class LibvirtGenericVIFDriver(object):
         conf.driver_name = None
 
         mode, sock_path = self._get_vhostuser_settings(vif)
-        designer.set_vif_host_backend_vhostuser_config(conf, mode, sock_path)
+        rx_queue_size, tx_queue_size = self._get_virtio_queue_sizes(host)
+        designer.set_vif_host_backend_vhostuser_config(
+            conf, mode, sock_path, rx_queue_size, tx_queue_size)
+
         # (vladikr) Not setting up driver and queues for vhostuser
         # as queues are not supported in Libvirt until version 1.2.17
         if not host.has_min_version(MIN_LIBVIRT_VHOSTUSER_MQ):
@@ -447,6 +483,38 @@ class LibvirtGenericVIFDriver(object):
 
         return conf
 
+    def _get_virtio_queue_sizes(self, host):
+        """Returns the configured rx/tx queue sizes, or (None, None).
+
+        Based on the tx/rx queue sizes configured in nova.conf, the
+        method checks whether the libvirt and QEMU versions on the
+        host are recent enough for each option.
+        """
+        # TODO(sahid): For vhostuser interfaces this function is called
+        # from get_base_config and also from the method responsible for
+        # configuring the vhostuser interface, so the warnings can be
+        # logged twice. In the future we want to rewrite get_base_config.
+        rx, tx = CONF.libvirt.rx_queue_size, CONF.libvirt.tx_queue_size
+        if rx and not host.has_min_version(
+                MIN_LIBVIRT_RX_QUEUE_SIZE, MIN_QEMU_RX_QUEUE_SIZE):
+            LOG.warning('Setting RX queue size requires libvirt %s and QEMU '
+                        '%s version or greater.',
+                        libvirt_utils.version_to_string(
+                            MIN_LIBVIRT_RX_QUEUE_SIZE),
+                        libvirt_utils.version_to_string(
+                            MIN_QEMU_RX_QUEUE_SIZE))
+            rx = None
+        if tx and not host.has_min_version(
+                MIN_LIBVIRT_TX_QUEUE_SIZE, MIN_QEMU_TX_QUEUE_SIZE):
+            LOG.warning('Setting TX queue size requires libvirt %s and QEMU '
+                        '%s version or greater.',
+                        libvirt_utils.version_to_string(
+                            MIN_LIBVIRT_TX_QUEUE_SIZE),
+                        libvirt_utils.version_to_string(
+                            MIN_QEMU_TX_QUEUE_SIZE))
+            tx = None
+        return rx, tx
+
     def get_config_ib_hostdev(self, instance, vif, image_meta,
                               inst_type, virt_type, host):
         return self.get_base_hostdev_pci_config(vif)
@@ -485,8 +553,9 @@ class LibvirtGenericVIFDriver(object):
         # and rewrite to set the correct backend.
         conf.driver_name = None
 
+        rx_queue_size, tx_queue_size = self._get_virtio_queue_sizes(host)
         designer.set_vif_host_backend_vhostuser_config(
-            conf, vif.mode, vif.path)
+            conf, vif.mode, vif.path, rx_queue_size, tx_queue_size)
         if not host.has_min_version(MIN_LIBVIRT_VHOSTUSER_MQ):
             LOG.debug('Queues are not a vhostuser supported feature.')
             conf.vhost_queues = None
diff --git a/releasenotes/notes/bp-libvirt-virtio-set-queue-sizes-6c54a2ce3dc30d18.yaml b/releasenotes/notes/bp-libvirt-virtio-set-queue-sizes-6c54a2ce3dc30d18.yaml
new file mode 100644
index 000000000000..638754437291
--- /dev/null
+++ b/releasenotes/notes/bp-libvirt-virtio-set-queue-sizes-6c54a2ce3dc30d18.yaml
@@ -0,0 +1,15 @@
+---
+features:
+  - |
+    libvirt: add support for virtio-net rx/tx queue sizes
+
+    Add support for configuring the ``rx_queue_size`` and
+    ``tx_queue_size`` options in the QEMU virtio-net driver by way of
+    nova.conf. Only supported for vhost/vhostuser interfaces.
+
+    Currently, valid values for the ring buffer sizes are 256, 512,
+    and 1024.
+
+    Adjusting the RX queue size requires QEMU 2.7.0 and libvirt 2.3.0
+    (or newer). Adjusting the TX queue size requires QEMU 2.10.0 and
+    libvirt 3.7.0 (or newer).
\ No newline at end of file
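
Reviewer note (not part of the patch): for operators, the change boils down to two
new options in the [libvirt] section of nova.conf. A minimal illustrative
configuration might look like the following; the values are examples only, and
each option accepts just 256, 512 or 1024:

    [libvirt]
    # Applies to vhost and vhost-user backends; needs libvirt >= 2.3
    # and QEMU >= 2.7 on the compute host.
    rx_queue_size = 512
    # Applies to the vhost-user backend only; needs libvirt >= 3.7
    # and QEMU >= 2.10.
    tx_queue_size = 1024

With both options set, guests booted on a new-enough host get the sizes
reflected as rx_queue_size/tx_queue_size attributes on the interface's driver
element; on an older host the corresponding option is ignored and a warning is
logged.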
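
The version gating in _get_virtio_queue_sizes() deliberately degrades rather
than fails: a size the host cannot honour is dropped and a warning is logged.
Below is a standalone sketch of that decision logic, with a hypothetical helper
name and version tuples passed in explicitly instead of being read from CONF
and host.has_min_version():

    # Illustrative sketch only; mirrors the gating in
    # LibvirtGenericVIFDriver._get_virtio_queue_sizes().
    MIN_LIBVIRT_RX_QUEUE_SIZE = (2, 3, 0)
    MIN_QEMU_RX_QUEUE_SIZE = (2, 7, 0)
    MIN_LIBVIRT_TX_QUEUE_SIZE = (3, 7, 0)
    MIN_QEMU_TX_QUEUE_SIZE = (2, 10, 0)

    def gate_queue_sizes(rx, tx, libvirt_version, qemu_version):
        """Return (rx, tx) with any size the host cannot honour dropped."""
        if rx and (libvirt_version < MIN_LIBVIRT_RX_QUEUE_SIZE or
                   qemu_version < MIN_QEMU_RX_QUEUE_SIZE):
            rx = None  # host too old to set rx_queue_size
        if tx and (libvirt_version < MIN_LIBVIRT_TX_QUEUE_SIZE or
                   qemu_version < MIN_QEMU_TX_QUEUE_SIZE):
            tx = None  # host too old to set tx_queue_size
        return rx, tx

    # A libvirt 3.2 / QEMU 2.9 host keeps rx=512 but drops tx=1024.
    assert gate_queue_sizes(512, 1024, (3, 2, 0), (2, 9, 0)) == (512, None)

The real method additionally emits a LOG.warning() naming the required
versions before dropping a value.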
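
For completeness, the new LibvirtConfigGuestInterface attributes can be
exercised directly, mirroring the test_config_vhostuser_queue_size unit test
added above. The XML shown in the trailing comment is indicative only; the
exact element ordering comes from format_dom() and is best confirmed against
the test itself:

    # Run inside a nova tree containing this patch; values match the
    # new unit test in test_config.py.
    from nova.virt.libvirt import config

    conf = config.LibvirtConfigGuestInterface()
    conf.net_type = "vhostuser"
    conf.vhostuser_type = "unix"
    conf.vhostuser_mode = "server"
    conf.mac_addr = "DE:AD:BE:EF:CA:FE"
    conf.vhostuser_path = "/vhost-user/test.sock"
    conf.vhost_rx_queue_size = 512
    conf.vhost_tx_queue_size = 1024
    conf.model = "virtio"

    print(conf.to_xml())
    # Expected to be equivalent to something like:
    #   <interface type="vhostuser">
    #     <mac address="DE:AD:BE:EF:CA:FE"/>
    #     <model type="virtio"/>
    #     <driver rx_queue_size="512" tx_queue_size="1024"/>
    #     <source type="unix" mode="server" path="/vhost-user/test.sock"/>
    #   </interface>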