Merge "Add methods for calculating CPU pinning"

This commit is contained in:
Jenkins 2015-01-06 17:24:18 +00:00 committed by Gerrit Code Review
commit 7666227798
4 changed files with 258 additions and 1 deletions

View File

@ -30,7 +30,7 @@ class InstanceNUMACell(base.NovaObject,
VERSION = '1.2'
fields = {
'id': obj_fields.IntegerField(read_only=True),
'id': obj_fields.IntegerField(),
'cpuset': obj_fields.SetOfIntegersField(),
'memory': obj_fields.IntegerField(),
'pagesize': obj_fields.IntegerField(nullable=True),

View File

@ -54,6 +54,19 @@ class NUMACell(base.NovaObject,
def free_cpus(self):
return self.cpuset - self.pinned_cpus or set()
@property
def free_siblings(self):
return [sibling_set & self.free_cpus
for sibling_set in self.siblings]
    @property
    def avail_cpus(self):
        # Number of CPUs on this cell that are not pinned by any instance
        # (free_cpus is cpuset minus pinned_cpus).
        return len(self.free_cpus)
    @property
    def avail_memory(self):
        # Memory on this cell not yet claimed: total minus tracked usage.
        return self.memory - self.memory_usage
def _to_dict(self):
return {
'id': self.id,

View File

@ -1570,3 +1570,144 @@ class VirtMemoryPagesTestCase(test.NoDBTestCase):
self.assertEqual(
2048,
hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
class _CPUPinningTestCaseBase(object):
    """Mixin providing shared assertion helpers for CPU pinning tests."""

    def assertEqualTopology(self, expected, got):
        """Assert two CPU topologies agree on sockets, cores and threads."""
        for field in ('sockets', 'cores', 'threads'):
            self.assertEqual(getattr(expected, field), getattr(got, field),
                             "Mismatch on %s" % field)

    def assertInstanceCellPinned(self, instance_cell, cell_ids=None):
        """Assert the instance cell was pinned to an expected host cell.

        Checks that a cell was returned, that its id is the expected one
        (host cell 0 by default, or a member of cell_ids), and that every
        vcpu in the cpuset received a pinning entry.
        """
        default_cell_id = 0

        self.assertIsNotNone(instance_cell)
        if cell_ids is None:
            self.assertEqual(default_cell_id, instance_cell.id)
        else:
            self.assertIn(instance_cell.id, cell_ids)

        self.assertEqual(len(instance_cell.cpuset),
                         len(instance_cell.cpu_pinning))
class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
    """Tests for pinning an instance cell onto a single host NUMA cell
    via hw._numa_fit_instance_cell_with_pinning.
    """

    def test_get_pinning_inst_too_large_cpu(self):
        # Instance wants 4 CPUs but the host cell only has 3 -> no fit.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]),
                                    memory=2048, memory_usage=0)
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)

        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)

    def test_get_pinning_inst_too_large_mem(self):
        # Host cell has only 1024 free memory, instance wants 2048 -> no fit
        # (no memory oversubscription for pinned instances).
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]),
                                    memory=2048, memory_usage=1024)
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]),
                                            memory=2048)

        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)

    def test_get_pinning_inst_not_avail(self):
        # One host CPU is already pinned, leaving only 3 free for a
        # 4-CPU instance -> no fit (no CPU oversubscription).
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    pinned_cpus=set([0]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)

        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)

    def test_get_pinning_no_sibling_fits_empty(self):
        # Host without sibling info and nothing pinned: instance fits
        # exactly onto the free CPUs.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]),
                                    memory=2048, memory_usage=0)
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=2048)

        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)

    def test_get_pinning_no_sibling_fits_w_usage(self):
        # Host without sibling info, one CPU already pinned: the three
        # remaining free CPUs still accommodate the instance.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    pinned_cpus=set([1]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=1024)

        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)

    def test_get_pinning_instance_siblings_fits(self):
        # Instance requests an explicit threaded topology; host exposes
        # no sibling info, so the requested topology is kept as-is.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0)
        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)

        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        self.assertEqualTopology(topo, inst_pin.cpu_topology)

    def test_get_pinning_instance_siblings_host_siblings_fits_empty(self):
        # Instance threads match the host's thread siblings exactly.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])])
        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)

        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        self.assertEqualTopology(topo, inst_pin.cpu_topology)

    def test_get_pinning_instance_siblings_host_siblings_fits_w_usage(self):
        # Some host threads are pinned already; the free siblings of each
        # core still provide enough threads for the requested topology.
        host_pin = objects.NUMACell(
            id=0,
            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            pinned_cpus=set([1, 2, 5, 6]),
            siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])])
        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)

        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        self.assertEqualTopology(topo, inst_pin.cpu_topology)

    def test_get_pinning_instance_siblings_host_siblings_fails(self):
        # Instance wants 4 threads per core but host cores only have
        # 2 thread siblings each -> cannot satisfy the topology.
        host_pin = objects.NUMACell(
            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])])
        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4)
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048,
            cpu_topology=topo)

        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)

    def test_get_pinning_host_siblings_fit_single_core(self):
        # No topology requested; the whole instance fits onto one host
        # core's sibling set, so a 1-core/4-thread topology is computed.
        host_pin = objects.NUMACell(
            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])])
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)

        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)

    def test_get_pinning_host_siblings_fit(self):
        # No topology requested and no single core big enough; instance
        # is spread over two cores -> 2-core/2-thread topology computed.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])])
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)

        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)

View File

@ -657,6 +657,109 @@ def _numa_cell_supports_pagesize_request(host_cell, inst_cell):
return verify_pagesizes(host_cell, inst_cell, [inst_cell.pagesize])
def _pack_instance_onto_cores(available_siblings, instance_cell, host_cell_id):
"""Pack an instance onto a set of siblings
:param available_siblings: list of sets of CPU id's - available
siblings per core
:param instance_cell: An instance of objects.InstanceNUMACell describing
the pinning requirements of the instance
:returns: An instance of objects.InstanceNUMACell containing the pinning
information, and potentially a new topology to be exposed to the
instance. None if there is no valid way to satisfy the sibling
requirements for the instance.
This method will calculate the pinning for the given instance and it's
topology, making sure that hyperthreads of the instance match up with
those of the host when the pinning takes effect.
"""
# We build up a data structure 'can_pack' that answers the question:
# 'Given the number of threads I want to pack, give me a list of all
# the available sibling sets that can accomodate it'
can_pack = collections.defaultdict(list)
for sib in available_siblings:
for threads_no in range(1, len(sib) + 1):
can_pack[threads_no].append(sib)
def _can_pack_instance_cell(instance_cell, threads_per_core, cores_list):
"""Determines if instance cell can fit an avail set of cores."""
if threads_per_core * len(cores_list) < len(instance_cell):
return False
if instance_cell.siblings:
return instance_cell.cpu_topology.threads <= threads_per_core
else:
return len(instance_cell) % threads_per_core == 0
# We iterate over the can_pack dict in descending order of cores that
# can be packed - an attempt to get even distribution over time
for cores_per_sib, sib_list in sorted(
(t for t in can_pack.items()), reverse=True):
if _can_pack_instance_cell(instance_cell,
cores_per_sib, sib_list):
sliced_sibs = map(lambda s: list(s)[:cores_per_sib], sib_list)
if instance_cell.siblings:
pinning = zip(itertools.chain(*instance_cell.siblings),
itertools.chain(*sliced_sibs))
else:
pinning = zip(sorted(instance_cell.cpuset),
itertools.chain(*sliced_sibs))
topology = (instance_cell.cpu_topology or
objects.VirtCPUTopology(sockets=1,
cores=len(sliced_sibs),
threads=cores_per_sib))
instance_cell.pin_vcpus(*pinning)
instance_cell.cpu_topology = topology
instance_cell.id = host_cell_id
return instance_cell
def _numa_fit_instance_cell_with_pinning(host_cell, instance_cell):
    """Figure out if cells can be pinned to a host cell and return details

    :param host_cell: objects.NUMACell instance - the host cell that
                      the instance should be pinned to
    :param instance_cell: objects.InstanceNUMACell instance without any
                          pinning information

    :returns: objects.InstanceNUMACell instance with pinning information,
              or None if instance cannot be pinned to the given host
    """
    if (host_cell.avail_cpus < len(instance_cell.cpuset) or
        host_cell.avail_memory < instance_cell.memory):
        # If we do not have enough CPUs available or not enough memory
        # on the host cell, we quit early (no oversubscription).
        return

    if host_cell.siblings:
        # An instance with an explicit topology and sibling info requires
        # hyperthreading, so match its siblings to the host's.
        if instance_cell.cpu_topology and instance_cell.siblings:
            return _pack_instance_onto_cores(host_cell.free_siblings,
                                             instance_cell, host_cell.id)
        else:
            # Try to pack the instance cell in one core
            largest_free_sibling_set = sorted(
                host_cell.free_siblings, key=len)[-1]
            if (len(instance_cell.cpuset) <=
                    len(largest_free_sibling_set)):
                return _pack_instance_onto_cores(
                    [largest_free_sibling_set], instance_cell, host_cell.id)

            else:
                # We cannot pack it onto a single core, so try with all
                # of the available sibling sets instead.
                return _pack_instance_onto_cores(
                    host_cell.free_siblings, instance_cell, host_cell.id)
    else:
        # Straightforward to pin to available cpus when there is no
        # hyperthreading on the host
        return _pack_instance_onto_cores(
            [host_cell.free_cpus], instance_cell, host_cell.id)
def _numa_fit_instance_cell(host_cell, instance_cell, limit_cell=None):
"""Check if a instance cell can fit and set it's cell id