hardware: stop using instance cell topology in CPU pinning logic

Currently we consider the existing topology of the instance NUMA cell
when deciding how to fit instance CPUs onto the host. This is no
longer correct after https://review.openstack.org/#/c/198312/: we do
not need to consider the requested topology during CPU fitting, as the
code that decides on the final CPU topology already takes all of this
into account.

We still need to expose the topology so that the spawning process can
use it to calculate the final CPU topology for the instance, but we
should not treat this information as relevant in any subsequent CPU
pinning fit recalculations.
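
To illustrate the resulting behaviour, here is a minimal sketch that
mirrors the updated tests below. It assumes a Nova tree with this
change applied; outside the test harness, objects.register_all() must
be called before the versioned objects are usable:

    from nova import objects
    from nova.virt import hardware as hw

    objects.register_all()

    # Host cell with two hyperthread pairs; the instance cell no longer
    # carries a requested cpu_topology into the fitting code.
    host = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                            memory=2048, memory_usage=0,
                            siblings=[set([0, 1]), set([2, 3])],
                            mempages=[], pinned_cpus=set([]))
    inst = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), memory=2048)

    inst = hw._numa_fit_instance_cell_with_pinning(host, inst)
    # The exposed topology is now derived purely from how the cell was
    # packed onto the host:
    print(inst.cpu_topology)  # sockets=1, cores=2, threads=2
    print(inst.cpu_pinning)   # {0: 0, 1: 1, 2: 2, 3: 3}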

Change-Id: I9a947a984bf8ec6413e9b4c45d61e8989bf903a1
Co-Authored-By: Stephen Finucane <stephen.finucane@intel.com>
Related-bug: 1501358
Related-bug: 1467927
Author:       Nikola Dipanov, 2015-09-30 17:21:49 +01:00
Committed-by: Stephen Finucane
Commit:       cc500b2c98 (parent d460d58a0d)
2 changed files with 55 additions and 51 deletions

nova/tests/unit/virt/test_hardware.py

@@ -1938,6 +1938,10 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
         inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
         self.assertInstanceCellPinned(inst_pin)
+        got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=3)
+        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+        got_pinning = {x: x for x in range(0, 3)}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

     def test_get_pinning_no_sibling_fits_w_usage(self):
         host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
@@ -1948,31 +1952,53 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
         inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
         self.assertInstanceCellPinned(inst_pin)
+        got_pinning = {0: 0, 1: 2, 2: 3}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

     def test_get_pinning_instance_siblings_fits(self):
         host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                     memory=2048, memory_usage=0, siblings=[],
                                     mempages=[], pinned_cpus=set([]))
-        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
         inst_pin = objects.InstanceNUMACell(
-            cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)
+            cpuset=set([0, 1, 2, 3]), memory=2048)
         inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
         self.assertInstanceCellPinned(inst_pin)
-        self.assertEqualTopology(topo, inst_pin.cpu_topology)
+        got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4)
+        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+        got_pinning = {x: x for x in range(0, 4)}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

     def test_get_pinning_instance_siblings_host_siblings_fits_empty(self):
         host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                     memory=2048, memory_usage=0,
                                     siblings=[set([0, 1]), set([2, 3])],
                                     mempages=[], pinned_cpus=set([]))
-        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
         inst_pin = objects.InstanceNUMACell(
-            cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)
+            cpuset=set([0, 1, 2, 3]), memory=2048)
         inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
         self.assertInstanceCellPinned(inst_pin)
-        self.assertEqualTopology(topo, inst_pin.cpu_topology)
+        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
+        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+        got_pinning = {x: x for x in range(0, 4)}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
+
+    def test_get_pinning_instance_siblings_host_siblings_fits_empty_2(self):
+        host_pin = objects.NUMACell(
+            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
+            memory=4096, memory_usage=0,
+            siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])],
+            mempages=[], pinned_cpus=set([]))
+        inst_pin = objects.InstanceNUMACell(
+            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048)
+        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
+        self.assertInstanceCellPinned(inst_pin)
+        got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=2)
+        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+        got_pinning = {x: x for x in range(0, 8)}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

     def test_get_pinning_instance_siblings_host_siblings_fits_w_usage(self):
         host_pin = objects.NUMACell(
@@ -1982,27 +2008,15 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
             pinned_cpus=set([1, 2, 5, 6]),
             siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])],
             mempages=[])
-        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
         inst_pin = objects.InstanceNUMACell(
-            cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)
+            cpuset=set([0, 1, 2, 3]), memory=2048)
         inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
         self.assertInstanceCellPinned(inst_pin)
-        self.assertEqualTopology(topo, inst_pin.cpu_topology)
-
-    def test_get_pinning_instance_siblings_host_siblings_fails(self):
-        host_pin = objects.NUMACell(
-            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
-            memory=4096, memory_usage=0,
-            siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])],
-            mempages=[], pinned_cpus=set([]))
-        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4)
-        inst_pin = objects.InstanceNUMACell(
-            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048,
-            cpu_topology=topo)
-        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
-        self.assertIsNone(inst_pin)
+        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
+        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+        got_pinning = {0: 0, 1: 3, 2: 4, 3: 7}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

     def test_get_pinning_host_siblings_fit_single_core(self):
         host_pin = objects.NUMACell(
@@ -2017,6 +2031,8 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
         self.assertInstanceCellPinned(inst_pin)
         got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4)
         self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+        got_pinning = {x: x + 4 for x in range(0, 4)}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

     def test_get_pinning_host_siblings_fit(self):
         host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
@@ -2029,6 +2045,8 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
         self.assertInstanceCellPinned(inst_pin)
         got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
         self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+        got_pinning = {x: x for x in range(0, 4)}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)


 class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):

nova/virt/hardware.py

@@ -689,8 +689,6 @@ def _pack_instance_onto_cores(available_siblings, instance_cell, host_cell_id):
         if threads_per_core * len(cores_list) < len(instance_cell):
             return False
-        if instance_cell.siblings:
-            return instance_cell.cpu_topology.threads <= threads_per_core
-        else:
-            return len(instance_cell) % threads_per_core == 0
+        return len(instance_cell) % threads_per_core == 0
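
In effect the fit check reduces to arithmetic on counts. A standalone
sketch of the simplified check (a hypothetical helper in plain Python,
taking a vCPU count instead of an InstanceNUMACell):

    def can_pack(num_vcpus, threads_per_core, cores_list):
        # Not enough free threads across the candidate cores at all.
        if threads_per_core * len(cores_list) < num_vcpus:
            return False
        # The vCPUs must fill a whole number of cores at this thread count.
        return num_vcpus % threads_per_core == 0

    print(can_pack(4, 2, [{0, 1}, {2, 3}]))  # True: two full 2-thread cores
    print(can_pack(3, 2, [{0, 1}, {2, 3}]))  # False: 3 vCPUs leave a half core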
@@ -701,17 +699,12 @@ def _pack_instance_onto_cores(available_siblings, instance_cell, host_cell_id):
         if _can_pack_instance_cell(instance_cell,
                                    cores_per_sib, sib_list):
             sliced_sibs = map(lambda s: list(s)[:cores_per_sib], sib_list)
-            if instance_cell.siblings:
-                pinning = zip(itertools.chain(*instance_cell.siblings),
-                              itertools.chain(*sliced_sibs))
-            else:
-                pinning = zip(sorted(instance_cell.cpuset),
-                              itertools.chain(*sliced_sibs))
-            topology = (instance_cell.cpu_topology or
-                        objects.VirtCPUTopology(sockets=1,
-                                                cores=len(sliced_sibs),
-                                                threads=cores_per_sib))
+            pinning = zip(sorted(instance_cell.cpuset),
+                          itertools.chain(*sliced_sibs))
+            topology = objects.VirtCPUTopology(sockets=1,
+                                               cores=len(sliced_sibs),
+                                               threads=cores_per_sib)
             instance_cell.pin_vcpus(*pinning)
             instance_cell.cpu_topology = topology
             instance_cell.id = host_cell_id
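
The pinning itself is an ordered zip of the instance vCPUs onto the
threads of the sliced sibling sets, and the exposed topology is read
off the packing. A worked example in plain Python with illustrative
values:

    import itertools

    cpuset = set([0, 1, 2, 3])      # instance vCPU ids
    sliced_sibs = [[4, 5], [6, 7]]  # two host cores, two free threads each
    cores_per_sib = 2

    pinning = dict(zip(sorted(cpuset), itertools.chain(*sliced_sibs)))
    print(pinning)  # {0: 4, 1: 5, 2: 6, 3: 7}

    # The exposed topology is derived from the packing, never from the
    # request: sockets=1, cores=len(sliced_sibs)=2, threads=cores_per_sib=2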
@@ -736,24 +729,17 @@ def _numa_fit_instance_cell_with_pinning(host_cell, instance_cell):
         return
     if host_cell.siblings:
         # Instance requires hyperthreading in it's topology
-        if instance_cell.cpu_topology and instance_cell.siblings:
+        if instance_cell.siblings:
             return _pack_instance_onto_cores(host_cell.free_siblings,
                                              instance_cell, host_cell.id)
-        else:
-            # Try to pack the instance cell in one core
-            largest_free_sibling_set = sorted(
-                host_cell.free_siblings, key=len)[-1]
-            if (len(instance_cell.cpuset) <=
-                    len(largest_free_sibling_set)):
-                return _pack_instance_onto_cores(
-                    [largest_free_sibling_set], instance_cell, host_cell.id)
-            # We can't to pack it onto one core so try with avail siblings
-            else:
-                return _pack_instance_onto_cores(
-                    host_cell.free_siblings, instance_cell, host_cell.id)
+
+        # Try to pack the instance cell in one core
+        largest_free_sibling_set = sorted(
+            host_cell.free_siblings, key=len)[-1]
+        if len(instance_cell.cpuset) <= len(largest_free_sibling_set):
+            return _pack_instance_onto_cores(
+                [largest_free_sibling_set], instance_cell, host_cell.id)
+        # We can't to pack it onto one core so try with avail siblings
+        return _pack_instance_onto_cores(
+            host_cell.free_siblings, instance_cell, host_cell.id)
     else:
         # Straightforward to pin to available cpus when there is no
         # hyperthreading on the host
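
Putting the pieces together: the decision now depends only on what the
host offers and on whether the instance cell has siblings. A condensed,
self-contained sketch of how the candidate sibling sets are chosen
(hypothetical names; the final no-hyperthreading branch paraphrases
code beyond the end of this hunk):

    def candidate_sibling_sets(host_has_siblings, free_siblings, free_cpus,
                               instance_cpuset, instance_has_siblings):
        """Pick the host thread sets that packing would be attempted on."""
        if host_has_siblings:
            if instance_has_siblings:
                # Instance requires hyperthreading: use all free siblings.
                return free_siblings
            largest = sorted(free_siblings, key=len)[-1]
            if len(instance_cpuset) <= len(largest):
                # Prefer squeezing the whole cell onto a single core.
                return [largest]
            # Too big for one core: fall back to all free siblings.
            return free_siblings
        # No hyperthreading on the host: every free CPU is its own "core".
        return [set([cpu]) for cpu in free_cpus]

    print(candidate_sibling_sets(True, [{0, 1}, {2, 3}], [0, 1, 2, 3],
                                 {0, 1}, False))  # [{2, 3}]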