libvirt: place emulator threads on CONF.compute.cpu_shared_set
Some workloads run best when the hypervisor overhead processes (emulator
threads in libvirt/QEMU) can be placed on different physical host CPUs
than other guest CPU resources. This allows those workloads to prevent
latency spikes for guest vCPU threads.

To ensure emulator threads are placed on a different set of physical CPUs
than those running guest dedicated vCPUs, set the
``CONF.compute.cpu_shared_set`` configuration option to the set of host
CPUs that should be used for best-effort CPU resources. Then set the
flavor extra spec ``hw:emulator_threads_policy=share`` to instruct nova
to place that workload's emulator threads on that set of host CPUs.

implement: bp/overhead-pin-set

Signed-off-by: Sahid Orentino Ferdjaoui <sahid.ferdjaoui@redhat.com>
Change-Id: I0e63ab37d584ee3d7fde6553efaa61bfc866e67d
This commit is contained in:
parent 04469a5181
commit 9724ec118b

Changed paths: doc/source/user, nova, releasenotes/notes
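For illustration, the first step lives in the compute node's configuration
file; the ``48-50`` range below is the same one exercised by the tests in
this change:

    # /etc/nova/nova.conf (compute node)
    [compute]
    cpu_shared_set = 48-50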
@@ -559,8 +559,11 @@ Emulator threads policy

 Valid THREAD-POLICY values are:

-- ``share``: (default) The emulator threads float across the pCPUs associated
-  to the guest.
+- ``share``: (default) The emulator threads float across the pCPUs
+  associated to the guest. To place a workload's emulator threads on
+  a set of isolated physical CPUs, set ``share`` and the
+  ``[compute]/cpu_shared_set`` configuration option to the set of
+  host CPUs that should be used for best-effort CPU resources.

 - ``isolate``: The emulator threads are isolated on a single pCPU.
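As a usage sketch for the ``share`` policy documented above (the flavor
name here is illustrative, not part of this change):

    $ openstack flavor set shared-emul.flavor \
        --property hw:emulator_threads_policy=share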
@@ -3020,8 +3020,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
         # which are 6, 7
         self.assertEqual(set([2, 3]), cfg.cputune.vcpusched[0].vcpus)

-    def test_get_guest_config_numa_host_instance_isolated_emulator_threads(
-            self):
+    def test_get_guest_config_numa_host_instance_isolated_emulthreads(self):
         instance_topology = objects.InstanceNUMATopology(
             emulator_threads_policy=(
                 fields.CPUEmulatorThreadsPolicy.ISOLATE),
@@ -3075,6 +3074,116 @@ class LibvirtConnTestCase(test.NoDBTestCase,
         self.assertEqual(set([7]), cfg.cputune.vcpupin[2].cpuset)
         self.assertEqual(set([8]), cfg.cputune.vcpupin[3].cpuset)

+    def test_get_guest_config_numa_host_instance_shared_emulthreads_err(
+            self):
+        self.flags(cpu_shared_set="48-50", group="compute")
+        instance_topology = objects.InstanceNUMATopology(
+            emulator_threads_policy=(
+                fields.CPUEmulatorThreadsPolicy.SHARE),
+            cells=[
+                objects.InstanceNUMACell(
+                    id=0, cpuset=set([0, 1]),
+                    memory=1024, pagesize=2048,
+                    cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+                    cpu_pinning={0: 4, 1: 5},
+                    cpuset_reserved=set([6])),
+                objects.InstanceNUMACell(
+                    id=1, cpuset=set([2, 3]),
+                    memory=1024, pagesize=2048,
+                    cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+                    cpu_pinning={2: 7, 3: 8})])
+
+        instance_ref = objects.Instance(**self.test_instance)
+        instance_ref.numa_topology = instance_topology
+        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+
+        caps = vconfig.LibvirtConfigCaps()
+        caps.host = vconfig.LibvirtConfigCapsHost()
+        caps.host.cpu = vconfig.LibvirtConfigCPU()
+        caps.host.cpu.arch = "x86_64"
+        caps.host.topology = fakelibvirt.NUMATopology()
+
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref, image_meta)
+
+        with test.nested(
+                mock.patch.object(
+                    objects.InstanceNUMATopology, "get_by_instance_uuid",
+                    return_value=instance_topology),
+                mock.patch.object(host.Host, 'has_min_version',
+                                  return_value=True),
+                mock.patch.object(host.Host, "get_capabilities",
+                                  return_value=caps),
+                mock.patch.object(
+                    hardware, 'get_vcpu_pin_set',
+                    return_value=set([4, 5, 6, 7, 8])),
+                mock.patch.object(host.Host, 'get_online_cpus',
+                                  return_value=set(range(10))),
+                ):
+            # pCPUs [48-50] are not online
+            self.assertRaises(exception.Invalid, drvr._get_guest_config,
+                              instance_ref, [], image_meta, disk_info)
+
+    def test_get_guest_config_numa_host_instance_shared_emulator_threads(
+            self):
+        self.flags(cpu_shared_set="48-50", group="compute")
+        instance_topology = objects.InstanceNUMATopology(
+            emulator_threads_policy=(
+                fields.CPUEmulatorThreadsPolicy.SHARE),
+            cells=[
+                objects.InstanceNUMACell(
+                    id=0, cpuset=set([0, 1]),
+                    memory=1024, pagesize=2048,
+                    cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+                    cpu_pinning={0: 4, 1: 5},
+                    cpuset_reserved=set([6])),
+                objects.InstanceNUMACell(
+                    id=1, cpuset=set([2, 3]),
+                    memory=1024, pagesize=2048,
+                    cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+                    cpu_pinning={2: 7, 3: 8})])
+
+        instance_ref = objects.Instance(**self.test_instance)
+        instance_ref.numa_topology = instance_topology
+        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+
+        caps = vconfig.LibvirtConfigCaps()
+        caps.host = vconfig.LibvirtConfigCapsHost()
+        caps.host.cpu = vconfig.LibvirtConfigCPU()
+        caps.host.cpu.arch = "x86_64"
+        caps.host.topology = fakelibvirt.NUMATopology()
+
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref, image_meta)
+
+        with test.nested(
+                mock.patch.object(
+                    objects.InstanceNUMATopology, "get_by_instance_uuid",
+                    return_value=instance_topology),
+                mock.patch.object(host.Host, 'has_min_version',
+                                  return_value=True),
+                mock.patch.object(host.Host, "get_capabilities",
+                                  return_value=caps),
+                mock.patch.object(
+                    hardware, 'get_vcpu_pin_set',
+                    return_value=set([4, 5, 6, 7, 8])),
+                mock.patch.object(host.Host, 'get_online_cpus',
+                                  return_value=set(list(range(10)) +
+                                                   [48, 50])),
+                ):
+            cfg = drvr._get_guest_config(instance_ref, [],
+                                         image_meta, disk_info)
+
+            # cpu_shared_set is configured with [48, 49, 50] but only
+            # [48, 50] are online.
+            self.assertEqual(set([48, 50]), cfg.cputune.emulatorpin.cpuset)
+            self.assertEqual(set([4]), cfg.cputune.vcpupin[0].cpuset)
+            self.assertEqual(set([5]), cfg.cputune.vcpupin[1].cpuset)
+            self.assertEqual(set([7]), cfg.cputune.vcpupin[2].cpuset)
+            self.assertEqual(set([8]), cfg.cputune.vcpupin[3].cpuset)
+
     def test_get_cpu_numa_config_from_instance(self):
         topology = objects.InstanceNUMATopology(cells=[
             objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
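The tests above configure ``cpu_shared_set="48-50"`` and expect it to
expand to pCPUs [48, 49, 50] before being intersected with the online
CPUs. A minimal Python sketch of that expansion (illustrative only;
nova's own parser, ``nova.virt.hardware.parse_cpu_spec``, additionally
supports ``^`` exclusions):

    def expand_cpu_set(spec):
        # Expand a range string such as "48-50,52" into {48, 49, 50, 52}.
        cpus = set()
        for chunk in spec.split(','):
            if '-' in chunk:
                start, end = chunk.split('-')
                cpus.update(range(int(start), int(end) + 1))
            else:
                cpus.add(int(chunk))
        return cpus

    assert expand_cpu_set("48-50") == {48, 49, 50}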
@@ -4290,20 +4290,37 @@ class LibvirtDriver(driver.ComputeDriver):

         a) If emulator threads policy is isolated, we pin emulator threads
            to one cpu we have reserved for it.
-        b) Otherwise;
-           b1) If realtime IS NOT enabled, the emulator threads are
+        b) If emulator threads policy is shared and CONF.cpu_shared_set is
+           defined, we pin emulator threads on the set of pCPUs defined by
+           CONF.cpu_shared_set.
+        c) Otherwise;
+           c1) If realtime IS NOT enabled, the emulator threads are
               allowed to float across all the pCPUs associated with
               the guest vCPUs.
-           b2) If realtime IS enabled, at least 1 vCPU is required
+           c2) If realtime IS enabled, at least 1 vCPU is required
               to be set aside for non-realtime usage. The emulator
               threads are allowed to float across the pCPUs that
               are associated with the non-realtime VCPUs.
         """
         emulatorpin_cpuset = set([])
+        shared_ids = hardware.get_cpu_shared_set()

         if emulator_threads_policy == fields.CPUEmulatorThreadsPolicy.ISOLATE:
             if object_numa_cell.cpuset_reserved:
                 emulatorpin_cpuset = object_numa_cell.cpuset_reserved
+        elif ((emulator_threads_policy ==
+               fields.CPUEmulatorThreadsPolicy.SHARE) and
+              shared_ids):
+            online_pcpus = self._host.get_online_cpus()
+            cpuset = shared_ids & online_pcpus
+            if not cpuset:
+                msg = (_("Invalid cpu_shared_set config, one or more of the "
+                         "specified cpuset is not online. Online cpuset(s): "
+                         "%(online)s, requested cpuset(s): %(req)s") %
+                       {'online': sorted(online_pcpus),
+                        'req': sorted(shared_ids)})
+                raise exception.Invalid(msg)
+            emulatorpin_cpuset = cpuset
         elif not wants_realtime or vcpu not in vcpus_rt:
             emulatorpin_cpuset = pin_cpuset.cpuset
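The docstring's decision tree above can be condensed into a standalone
sketch; the function name and signature here are illustrative, not the
driver's actual helper:

    def pick_emulatorpin_cpuset(policy, reserved, shared_ids, online_pcpus,
                                guest_pcpus):
        # (a) isolate: pin to the pCPU reserved for overhead threads.
        if policy == 'isolate' and reserved:
            return reserved
        # (b) share + CONF.compute.cpu_shared_set: use the shared set,
        # restricted to pCPUs that are actually online.
        if policy == 'share' and shared_ids:
            cpuset = shared_ids & online_pcpus
            if not cpuset:
                raise ValueError("no pCPU in cpu_shared_set is online")
            return cpuset
        # (c) otherwise: float across the guest's (non-realtime) pCPUs.
        return guest_pcpus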
@@ -0,0 +1,15 @@
+---
+features:
+  - |
+    Introduces ``[compute]/cpu_shared_set`` option for compute nodes.
+    Some workloads run best when the hypervisor overhead processes
+    (emulator threads in libvirt/QEMU) can be placed on different
+    physical host CPUs than other guest CPU resources. This allows
+    those workloads to prevent latency spikes for guest vCPU threads.
+
+    To place a workload's emulator threads on a set of isolated
+    physical CPUs, set the ``[compute]/cpu_shared_set`` configuration
+    option to the set of host CPUs that should be used for best-effort
+    CPU resources. Then set a flavor extra spec to
+    ``hw:emulator_threads_policy=share`` to instruct nova to place
+    that workload's emulator threads on that set of host CPUs.