Merge "hardware: stop using instance cell topology in CPU pinning logic"

Jenkins (committed by Gerrit Code Review)
2016-01-05 16:46:31 +00:00
2 changed files with 55 additions and 51 deletions
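The change drops every code path that consulted a pre-attached `instance_cell.cpu_topology` during CPU pinning; the virtual CPU topology exposed to the instance is now always derived from how the cell's vCPUs were packed onto host thread siblings. A minimal standalone sketch of that idea (the helper name and plain-dict topology are illustrative, not Nova's API):

    # Sketch only: derive the topology from the packing itself, never
    # from a topology pre-attached to the instance cell.
    import itertools

    def pack_onto_siblings(vcpus, sibling_sets, threads_per_core):
        # Take threads_per_core threads from each host sibling set...
        sliced = [sorted(s)[:threads_per_core] for s in sibling_sets]
        # ...and pin the sorted vCPUs across them in order.
        pinning = dict(zip(sorted(vcpus), itertools.chain(*sliced)))
        topology = {'sockets': 1, 'cores': len(sliced),
                    'threads': threads_per_core}
        return pinning, topology

    # Four vCPUs over two sibling pairs -> 1 socket, 2 cores, 2 threads.
    print(pack_onto_siblings({0, 1, 2, 3}, [{0, 1}, {2, 3}], 2))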


@@ -1938,6 +1938,10 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
         inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
         self.assertInstanceCellPinned(inst_pin)
+        got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=3)
+        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+        got_pinning = {x: x for x in range(0, 3)}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

     def test_get_pinning_no_sibling_fits_w_usage(self):
         host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
@@ -1948,31 +1952,53 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
         inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
         self.assertInstanceCellPinned(inst_pin)
+        got_pinning = {0: 0, 1: 2, 2: 3}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

     def test_get_pinning_instance_siblings_fits(self):
         host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                     memory=2048, memory_usage=0, siblings=[],
                                     mempages=[], pinned_cpus=set([]))
-        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
         inst_pin = objects.InstanceNUMACell(
-            cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)
+            cpuset=set([0, 1, 2, 3]), memory=2048)
         inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
         self.assertInstanceCellPinned(inst_pin)
-        self.assertEqualTopology(topo, inst_pin.cpu_topology)
+        got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4)
+        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+        got_pinning = {x: x for x in range(0, 4)}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

     def test_get_pinning_instance_siblings_host_siblings_fits_empty(self):
         host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                     memory=2048, memory_usage=0,
                                     siblings=[set([0, 1]), set([2, 3])],
                                     mempages=[], pinned_cpus=set([]))
-        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
         inst_pin = objects.InstanceNUMACell(
-            cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)
+            cpuset=set([0, 1, 2, 3]), memory=2048)
         inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
         self.assertInstanceCellPinned(inst_pin)
-        self.assertEqualTopology(topo, inst_pin.cpu_topology)
+        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
+        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+        got_pinning = {x: x for x in range(0, 4)}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
+
+    def test_get_pinning_instance_siblings_host_siblings_fits_empty_2(self):
+        host_pin = objects.NUMACell(
+            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
+            memory=4096, memory_usage=0,
+            siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])],
+            mempages=[], pinned_cpus=set([]))
+        inst_pin = objects.InstanceNUMACell(
+            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048)
+        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
+        self.assertInstanceCellPinned(inst_pin)
+        got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=2)
+        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+        got_pinning = {x: x for x in range(0, 8)}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

     def test_get_pinning_instance_siblings_host_siblings_fits_w_usage(self):
         host_pin = objects.NUMACell(
@@ -1982,27 +2008,15 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
             pinned_cpus=set([1, 2, 5, 6]),
             siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])],
             mempages=[])
-        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
         inst_pin = objects.InstanceNUMACell(
-            cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)
+            cpuset=set([0, 1, 2, 3]), memory=2048)
         inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
         self.assertInstanceCellPinned(inst_pin)
-        self.assertEqualTopology(topo, inst_pin.cpu_topology)
-
-    def test_get_pinning_instance_siblings_host_siblings_fails(self):
-        host_pin = objects.NUMACell(
-            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
-            memory=4096, memory_usage=0,
-            siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])],
-            mempages=[], pinned_cpus=set([]))
-        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4)
-        inst_pin = objects.InstanceNUMACell(
-            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048,
-            cpu_topology=topo)
-        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
-        self.assertIsNone(inst_pin)
+        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
+        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+        got_pinning = {0: 0, 1: 3, 2: 4, 3: 7}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

     def test_get_pinning_host_siblings_fit_single_core(self):
         host_pin = objects.NUMACell(
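The new `got_pinning` in the `_w_usage` test above is the least obvious expectation, and it can be re-derived by hand: with host threads {1, 2, 5, 6} already pinned, the free sibling sets are {0, 3} and {4, 7}, so the instance's four vCPUs land on host CPUs 0, 3, 4 and 7, two threads per core. A throwaway check with plain sets in place of Nova objects:

    import itertools

    siblings = [{0, 1, 2, 3}, {4, 5, 6, 7}]
    pinned = {1, 2, 5, 6}
    free_siblings = [s - pinned for s in siblings]   # [{0, 3}, {4, 7}]
    sliced = [sorted(s)[:2] for s in free_siblings]  # two threads per core
    pinning = dict(zip(sorted({0, 1, 2, 3}), itertools.chain(*sliced)))
    assert pinning == {0: 0, 1: 3, 2: 4, 3: 7}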
@@ -2017,6 +2031,8 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
         self.assertInstanceCellPinned(inst_pin)
         got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4)
         self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+        got_pinning = {x: x + 4 for x in range(0, 4)}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

     def test_get_pinning_host_siblings_fit(self):
         host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
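In the single-core test the instance's four vCPUs fit inside one free sibling set, so they are packed onto a single host core. The hunk does not show the test's host setup; assuming the only free sibling set is {4, 5, 6, 7}, the expected pinning shifts every vCPU up by four and the topology collapses to one core with four threads:

    import itertools

    free_siblings = [{4, 5, 6, 7}]    # assumption: only this set is free
    sliced = [sorted(s)[:4] for s in free_siblings]   # all four threads
    pinning = dict(zip(sorted({0, 1, 2, 3}), itertools.chain(*sliced)))
    assert pinning == {x: x + 4 for x in range(0, 4)}  # 1 core, 4 threads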
@@ -2029,6 +2045,8 @@ class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
         self.assertInstanceCellPinned(inst_pin)
         got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
         self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
+        got_pinning = {x: x for x in range(0, 4)}
+        self.assertEqual(got_pinning, inst_pin.cpu_pinning)

 class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):


@@ -689,8 +689,6 @@ def _pack_instance_onto_cores(available_siblings, instance_cell, host_cell_id):
         if threads_per_core * len(cores_list) < len(instance_cell):
             return False
-        if instance_cell.siblings:
-            return instance_cell.cpu_topology.threads <= threads_per_core
         else:
             return len(instance_cell) % threads_per_core == 0
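What survives of `_can_pack_instance_cell` is a pure capacity test: enough thread slots overall, and a vCPU count that splits evenly into threads_per_core-sized cores. Restated standalone with hypothetical names:

    def can_pack(num_vcpus, threads_per_core, num_cores):
        if threads_per_core * num_cores < num_vcpus:
            return False
        else:
            return num_vcpus % threads_per_core == 0

    assert can_pack(4, 2, 2)        # 4 vCPUs on 2 cores x 2 threads
    assert not can_pack(3, 2, 2)    # 3 won't split into 2-thread cores
    assert not can_pack(8, 2, 2)    # not enough thread slots in total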
@@ -701,17 +699,12 @@ def _pack_instance_onto_cores(available_siblings, instance_cell, host_cell_id):
         if _can_pack_instance_cell(instance_cell,
                                    cores_per_sib, sib_list):
             sliced_sibs = map(lambda s: list(s)[:cores_per_sib], sib_list)
-            if instance_cell.siblings:
-                pinning = zip(itertools.chain(*instance_cell.siblings),
-                              itertools.chain(*sliced_sibs))
-            else:
-                pinning = zip(sorted(instance_cell.cpuset),
-                              itertools.chain(*sliced_sibs))
-            topology = (instance_cell.cpu_topology or
-                        objects.VirtCPUTopology(sockets=1,
-                                                cores=len(sliced_sibs),
-                                                threads=cores_per_sib))
+            pinning = zip(sorted(instance_cell.cpuset),
+                          itertools.chain(*sliced_sibs))
+            topology = objects.VirtCPUTopology(sockets=1,
+                                               cores=len(sliced_sibs),
+                                               threads=cores_per_sib)
             instance_cell.pin_vcpus(*pinning)
             instance_cell.cpu_topology = topology
             instance_cell.id = host_cell_id
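With the `instance_cell.siblings` branch gone, the pinning is always the sorted instance cpuset zipped against the first `cores_per_sib` threads of each candidate sibling set, and the topology is read straight off that slicing. A small worked example with plain data in place of Nova objects:

    import itertools

    cores_per_sib = 2
    sib_list = [[8, 9], [10, 11]]    # free threads on two host cores
    sliced_sibs = [s[:cores_per_sib] for s in sib_list]
    pinning = list(zip(sorted({0, 1, 2, 3}),
                       itertools.chain(*sliced_sibs)))
    assert pinning == [(0, 8), (1, 9), (2, 10), (3, 11)]
    # Exposed topology: 1 socket, len(sliced_sibs) == 2 cores,
    # cores_per_sib == 2 threads.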
@@ -736,24 +729,17 @@ def _numa_fit_instance_cell_with_pinning(host_cell, instance_cell):
         return

     if host_cell.siblings:
-        # Instance requires hyperthreading in it's topology
-        if instance_cell.cpu_topology and instance_cell.siblings:
-            return _pack_instance_onto_cores(host_cell.free_siblings,
-                                             instance_cell, host_cell.id)
-        else:
-            # Try to pack the instance cell in one core
-            largest_free_sibling_set = sorted(
-                host_cell.free_siblings, key=len)[-1]
-            if (len(instance_cell.cpuset) <=
-                    len(largest_free_sibling_set)):
-                return _pack_instance_onto_cores(
-                    [largest_free_sibling_set], instance_cell, host_cell.id)
-            # We can't to pack it onto one core so try with avail siblings
-            else:
-                return _pack_instance_onto_cores(
-                    host_cell.free_siblings, instance_cell, host_cell.id)
+        # Try to pack the instance cell onto one core
+        largest_free_sibling_set = sorted(
+            host_cell.free_siblings, key=len)[-1]
+        if len(instance_cell.cpuset) <= len(largest_free_sibling_set):
+            return _pack_instance_onto_cores(
+                [largest_free_sibling_set], instance_cell, host_cell.id)
+        # We can't pack it onto one core, so try with the available siblings
+        else:
+            return _pack_instance_onto_cores(
+                host_cell.free_siblings, instance_cell, host_cell.id)
     else:
         # Straightforward to pin to available cpus when there is no
         # hyperthreading on the host
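The restructured fitting logic first tries to keep the whole instance cell on a single host core by picking the largest free sibling set, and only spreads across all free siblings when the cell does not fit. The selection step, sketched with plain sets:

    free_siblings = [{0, 3}, {4, 5, 6, 7}]        # example free thread sets
    largest = sorted(free_siblings, key=len)[-1]  # -> {4, 5, 6, 7}

    cpuset = {0, 1, 2, 3}
    if len(cpuset) <= len(largest):
        candidates = [largest]        # pack everything onto one core
    else:
        candidates = free_siblings    # fall back to all free siblings
    assert candidates == [{4, 5, 6, 7}]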