numa: update numa usage to include reserved CPUs
We now have reserved pCPUs, so we should update the host topology according to that reservation. Implements blueprint libvirt-emulator-threads-policy. Change-Id: I7a801a5544e1139d932ea32a19ff70bcf3365ece
This commit is contained in:
parent
d58515c905
commit
f6f1bac379
|
@ -3055,3 +3055,48 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
|
|||
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
|
||||
self.assertEqual({1: 2, 2: 3}, inst_topo.cells[1].cpu_pinning)
|
||||
self.assertIsNone(inst_topo.cells[1].cpuset_reserved)
|
||||
|
||||
def test_isolate_usage(self):
    """Check host NUMA usage accounting for the ISOLATE emulator policy.

    Builds an instance topology with one dedicated guest vCPU pinned to
    host pCPU 0 and one reserved pCPU (cpuset_reserved={1}) for the
    isolated emulator threads, then verifies that
    hw.numa_usage_from_instances() charges BOTH pCPUs against host
    cell 0 while host cell 1 stays untouched.
    """
    # Host fixture with (at least) two NUMA cells; exact pCPU layout is
    # defined by the test class helper — presumably cell 0 owns pCPUs
    # 0-1 and cell 1 owns others. TODO(review): confirm against
    # _host_topology() in the full source.
    host_topo = self._host_topology()
    inst_topo = objects.InstanceNUMATopology(
        # ISOLATE: emulator threads get their own pCPU instead of
        # floating over the vCPUs' pCPUs.
        emulator_threads_policy=(
            fields.CPUEmulatorThreadsPolicy.ISOLATE),
        cells=[objects.InstanceNUMACell(
            id=0,
            cpuset=set([0]), memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            # Guest vCPU 0 pinned to host pCPU 0.
            cpu_pinning={0: 0},
            # Host pCPU 1 reserved for the emulator threads overhead.
            cpuset_reserved=set([1]))])

    host_topo = hw.numa_usage_from_instances(
        host_topo, [inst_topo])
    # One pinned vCPU + one reserved emulator pCPU = usage of 2.
    self.assertEqual(2, host_topo.cells[0].cpu_usage)
    # The reserved pCPU must also appear as pinned on the host cell.
    self.assertEqual(set([0, 1]), host_topo.cells[0].pinned_cpus)
    # The second host cell hosts no guest cell: no usage, no pinning.
    self.assertEqual(0, host_topo.cells[1].cpu_usage)
    self.assertEqual(set([]), host_topo.cells[1].pinned_cpus)
|
||||
|
||||
def test_isolate_full_usage(self):
    """Check ISOLATE usage accounting with one instance per host cell.

    Two single-vCPU instances, each with the ISOLATE emulator threads
    policy, land on different host NUMA cells (guest cell ids 0 and 1).
    Each instance must charge its pinned pCPU plus its reserved
    emulator-thread pCPU against its own host cell.
    """
    host_topo = self._host_topology()
    # First instance: guest cell 0 -> host cell 0, vCPU pinned to
    # pCPU 0, emulator threads reserved on pCPU 1.
    inst_topo1 = objects.InstanceNUMATopology(
        emulator_threads_policy=(
            fields.CPUEmulatorThreadsPolicy.ISOLATE),
        cells=[objects.InstanceNUMACell(
            id=0,
            cpuset=set([0]), memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_pinning={0: 0},
            cpuset_reserved=set([1]))])
    # Second instance: guest cell id 1 -> host cell 1, vCPU pinned to
    # pCPU 2, emulator threads reserved on pCPU 3.
    inst_topo2 = objects.InstanceNUMATopology(
        emulator_threads_policy=(
            fields.CPUEmulatorThreadsPolicy.ISOLATE),
        cells=[objects.InstanceNUMACell(
            id=1,
            cpuset=set([0]), memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_pinning={0: 2},
            cpuset_reserved=set([3]))])

    host_topo = hw.numa_usage_from_instances(
        host_topo, [inst_topo1, inst_topo2])
    # Cell 0: pinned vCPU (pCPU 0) + reserved emulator pCPU (pCPU 1).
    self.assertEqual(2, host_topo.cells[0].cpu_usage)
    self.assertEqual(set([0, 1]), host_topo.cells[0].pinned_cpus)
    # NOTE(review): the diff view appears truncated here; the original
    # test likely continues with the mirror assertions for host cell 1
    # (usage 2, pinned {2, 3}) — confirm against the full source file.
|
||||
|
|
|
@ -1589,7 +1589,7 @@ def numa_usage_from_instances(host, instances, free=False):
|
|||
pinned_cpus=hostcell.pinned_cpus, siblings=hostcell.siblings)
|
||||
|
||||
for instance in instances:
|
||||
for instancecell in instance.cells:
|
||||
for cellid, instancecell in enumerate(instance.cells):
|
||||
if instancecell.id == hostcell.id:
|
||||
memory_usage = (
|
||||
memory_usage + sign * instancecell.memory)
|
||||
|
@ -1600,11 +1600,22 @@ def numa_usage_from_instances(host, instances, free=False):
|
|||
cpu_usage_diff *= max(map(len, hostcell.siblings))
|
||||
cpu_usage += sign * cpu_usage_diff
|
||||
|
||||
if (cellid == 0
|
||||
and instance.emulator_threads_isolated):
|
||||
# The emulator threads policy when defined
|
||||
# with 'isolate' makes the instance to consume
|
||||
# an additional pCPU as overhead. That pCPU is
|
||||
# mapped on the host NUMA node related to the
|
||||
# guest NUMA node 0.
|
||||
cpu_usage += sign * len(instancecell.cpuset_reserved)
|
||||
|
||||
if instancecell.pagesize and instancecell.pagesize > 0:
|
||||
newcell.mempages = _numa_pagesize_usage_from_cell(
|
||||
hostcell, instancecell, sign)
|
||||
if instance.cpu_pinning_requested:
|
||||
pinned_cpus = set(instancecell.cpu_pinning.values())
|
||||
if instancecell.cpuset_reserved:
|
||||
pinned_cpus |= instancecell.cpuset_reserved
|
||||
if free:
|
||||
if (instancecell.cpu_thread_policy ==
|
||||
fields.CPUThreadAllocationPolicy.ISOLATE):
|
||||
|
|
Loading…
Reference in New Issue