Add method for getting the CPU pinning constraint

We add a method that, when passed a flavor whose extra specs contain the
hw:cpu_policy key, and an optional NUMA topology instance,
calculates the CPU pinning constraint for the instance (or None if
the instance is not to be pinned) and sets the cpu_pinning attribute of
the passed NUMA topology (or a constructed one).

If the admin does not specify a shared pinning policy on the flavor,
users are free to set it with the image metadata 'hw_cpu_policy' key;
otherwise, attempts to override it with the image key will result in
an error.

This will be done at boot time and then persisted to be used later
in scheduling.

This patch concludes the work on the following blueprint.

Blueprint: virt-driver-cpu-pinning

Change-Id: I17b4eab368fca6ddb7e000f9c24368bbde18534d
This commit is contained in:
Nikola Dipanov
2014-12-09 18:08:01 +01:00
parent d4706b88e8
commit c05fcdf516
3 changed files with 121 additions and 5 deletions

View File

@@ -1827,3 +1827,8 @@ class MemoryPageSizeNotSupported(Invalid):
class CPUPinningInvalid(Invalid):
    """Raised when a requested set of CPUs cannot be pinned or unpinned
    given the set of CPUs that are already pinned.
    """
    msg_fmt = _("Cannot pin/unpin cpus %(requested)s from the following "
                "pinned set %(pinned)s")
class ImageCPUPinningForbidden(Invalid):
    """Raised when an image tries to override, via the 'hw_cpu_policy'
    property, a CPU pinning policy the flavor has already fixed.
    """
    msg_fmt = _("Image property 'hw_cpu_policy' is not permitted to override "
                "CPU pinning policy set against the flavor")

View File

@@ -971,6 +971,82 @@ class NUMATopologyTest(test.NoDBTestCase):
},
"expect": exception.ImageNUMATopologyForbidden,
},
{
# NUMA + CPU pinning requested in the flavor
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2, "hw:cpu_policy": "dedicated"
}),
"image": {
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]), memory=1024,
cpu_pinning={}),
objects.InstanceNUMACell(
id=1, cpuset=set([2, 3]), memory=1024,
cpu_pinning={})])
},
{
# no NUMA + CPU pinning requested in the flavor
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={
"hw:cpu_policy": "dedicated"
}),
"image": {
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
cpu_pinning={})])
},
{
# NUMA + CPU pinning requested in the image
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2
}),
"image": {
"properties": {
"hw_cpu_policy": "dedicated"}
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]), memory=1024,
cpu_pinning={}),
objects.InstanceNUMACell(
id=1, cpuset=set([2, 3]), memory=1024,
cpu_pinning={})])
},
{
# no NUMA + CPU pinning requested in the image
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={}),
"image": {
"properties": {
"hw_cpu_policy": "dedicated"}
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
cpu_pinning={})])
},
{
# Invalid CPU pinning override
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2, "hw:cpu_policy": "shared"
}),
"image": {
"properties": {
"hw_cpu_policy": "dedicated"}
},
"expect": exception.ImageCPUPinningForbidden,
},
]
for testitem in testdata:
@@ -989,12 +1065,16 @@ class NUMATopologyTest(test.NoDBTestCase):
self.assertEqual(len(testitem["expect"].cells),
len(topology.cells))
for i in range(len(topology.cells)):
self.assertEqual(testitem["expect"].cells[i].id,
topology.cells[i].id)
self.assertEqual(testitem["expect"].cells[i].cpuset,
topology.cells[i].cpuset)
self.assertEqual(testitem["expect"].cells[i].memory,
topology.cells[i].memory)
self.assertEqual(testitem["expect"].cells[i].pagesize,
topology.cells[i].pagesize)
self.assertEqual(testitem["expect"].cells[i].cpu_pinning,
topology.cells[i].cpu_pinning)
def test_host_usage_contiguous(self):
hpages0_4K = objects.NUMAPagesTopology(size_kb=4, total=256, used=0)

View File

@@ -1050,6 +1050,37 @@ def _numa_get_constraints_auto(nodes, flavor, image_meta):
return objects.InstanceNUMATopology(cells=cells)
def _add_cpu_pinning_constraint(flavor, image_meta, numa_topology):
flavor_pinning = flavor.get('extra_specs', {}).get("hw:cpu_policy")
image_pinning = image_meta.get('properties', {}).get("hw_cpu_policy")
if flavor_pinning == "dedicated":
requested = True
elif flavor_pinning == "shared":
if image_pinning == "dedicated":
raise exception.ImageCPUPinningForbidden()
requested = False
else:
requested = image_pinning == "dedicated"
if not requested:
return numa_topology
if numa_topology:
# NOTE(ndipanov) Setting the cpu_pinning attribute to a non-None value
# means CPU pinning was requested
for cell in numa_topology.cells:
cell.cpu_pinning = {}
return numa_topology
else:
single_cell = objects.InstanceNUMACell(
id=0,
cpuset=set(range(flavor['vcpus'])),
memory=flavor['memory_mb'],
cpu_pinning={})
numa_topology = objects.InstanceNUMATopology(cells=[single_cell])
return numa_topology
# TODO(sahid): Move numa related to hardward/numa.py
def numa_get_constraints(flavor, image_meta):
"""Return topology related to input request
@@ -1064,7 +1095,7 @@ def numa_get_constraints(flavor, image_meta):
pagesize = _numa_get_pagesize_constraints(
flavor, image_meta)
topology = None
numa_topology = None
if nodes or pagesize:
nodes = nodes and int(nodes) or 1
# We'll pick what path to go down based on whether
@@ -1074,16 +1105,16 @@ def numa_get_constraints(flavor, image_meta):
flavor, image_meta, "numa_cpus.0") is None
if auto:
topology = _numa_get_constraints_auto(
numa_topology = _numa_get_constraints_auto(
nodes, flavor, image_meta)
else:
topology = _numa_get_constraints_manual(
numa_topology = _numa_get_constraints_manual(
nodes, flavor, image_meta)
# We currently support same pagesize for all cells.
[setattr(c, 'pagesize', pagesize) for c in topology.cells]
[setattr(c, 'pagesize', pagesize) for c in numa_topology.cells]
return topology
return _add_cpu_pinning_constraint(flavor, image_meta, numa_topology)
class VirtNUMALimitTopology(VirtNUMATopology):