objects: Replace 'cpu_pinning_requested' helper

This helper avoided a slightly long-winded check for what was, until
now, an essentially binary condition. With the future introduction of a
'mixed' policy, a simple yes/no answer is no longer going to be
suitable. Replace the helper with explicit checks against the policy
types.

Part of blueprint use-pcpu-and-vcpu-in-one-instance

Change-Id: If7512c998bca770889d81012a17a1a2978cae68e
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
Stephen Finucane 2020-03-11 16:01:34 +00:00
parent dff70b3bce
commit 690ce37e72
7 changed files with 94 additions and 42 deletions
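For context on the rationale: the removed helper reduced the instance's CPU policy to a boolean, which stops being expressive once a third policy exists. A minimal standalone sketch of the problem follows; the 'mixed' value is the policy planned by the blueprint, not something this commit introduces, and the string values are assumptions rather than nova code:

    # Sketch only: mirrors the logic of the removed helper.
    class CPUAllocationPolicy:
        SHARED = 'shared'
        DEDICATED = 'dedicated'
        MIXED = 'mixed'  # planned by the blueprint; not defined in this commit

    def cpu_pinning_requested(cpu_policy):
        # The old helper collapsed the policy to a yes/no answer
        return cpu_policy == CPUAllocationPolicy.DEDICATED

    # With only SHARED and DEDICATED this is fine, but a MIXED instance
    # also has pinned CPUs, so a single boolean becomes misleading:
    assert cpu_pinning_requested(CPUAllocationPolicy.DEDICATED) is True
    assert cpu_pinning_requested(CPUAllocationPolicy.MIXED) is False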

nova/compute/manager.py

@@ -828,9 +828,11 @@ class ComputeManager(manager.Manager):
             # if this is an unpinned instance and the host only has
             # 'cpu_dedicated_set' configured, we need to tell the operator to
             # correct their configuration
-            if not (instance.numa_topology and
-                    instance.numa_topology.cpu_pinning_requested):
+            if not instance.numa_topology or (
+                instance.numa_topology.cpu_policy in (
+                    None, fields.CPUAllocationPolicy.SHARED
+                )
+            ):
                 # we don't need to check 'vcpu_pin_set' since it can't coexist
                 # alongside 'cpu_dedicated_set'
                 if (CONF.compute.cpu_dedicated_set and
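The rewritten condition treats an instance as unpinned when it has no NUMA topology at all, no explicit policy, or an explicitly shared policy. A rough standalone approximation of those three cases (FakeTopology and is_unpinned are illustrative stand-ins, not nova code):

    class CPUAllocationPolicy:
        SHARED = 'shared'
        DEDICATED = 'dedicated'

    class FakeTopology:
        def __init__(self, cpu_policy):
            self.cpu_policy = cpu_policy

    def is_unpinned(numa_topology):
        # mirrors: not instance.numa_topology or cpu_policy in (None, SHARED)
        return not numa_topology or numa_topology.cpu_policy in (
            None, CPUAllocationPolicy.SHARED)

    assert is_unpinned(None)                                      # no NUMA topology
    assert is_unpinned(FakeTopology(None))                        # no explicit policy
    assert is_unpinned(FakeTopology(CPUAllocationPolicy.SHARED))  # shared policy
    assert not is_unpinned(FakeTopology(CPUAllocationPolicy.DEDICATED))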

nova/objects/instance_numa.py

@@ -82,10 +82,6 @@ class InstanceNUMACell(base.NovaEphemeralObject,
         return list(map(set, zip(*[iter(cpu_list)] * threads)))

-    @property
-    def cpu_pinning_requested(self):
-        return self.cpu_policy == obj_fields.CPUAllocationPolicy.DEDICATED
-
     def pin(self, vcpu, pcpu):
         if vcpu not in self.cpuset:
             return
@@ -205,6 +201,19 @@ class InstanceNUMATopology(base.NovaObject,
         """Defined so that boolean testing works the same as for lists."""
         return len(self.cells)

+    # TODO(stephenfin): We should add a real 'cpu_policy' field on this object
+    # and deprecate the one found on the cell
+    @property
+    def cpu_policy(self):
+        cpu_policy = set(cell.cpu_policy for cell in self.cells)
+        if len(cpu_policy) > 1:
+            # NOTE(stephenfin): This should never happen in real life; it's to
+            # prevent programmer error.
+            raise exception.InternalError(
+                'Instance NUMA cells must have the same CPU policy.'
+            )
+        return cpu_policy.pop()
+
     @property
     def cpu_pinning(self):
         """Return a set of all host CPUs this NUMATopology is pinned to."""
@@ -212,10 +221,6 @@ class InstanceNUMATopology(base.NovaObject,
             cell.cpu_pinning.values() for cell in self.cells
             if cell.cpu_pinning]))

-    @property
-    def cpu_pinning_requested(self):
-        return all(cell.cpu_pinning_requested for cell in self.cells)
-
     def clear_host_pinning(self):
         """Clear any data related to how instance is pinned to the host.

nova/tests/unit/objects/test_instance_numa.py

@@ -10,11 +10,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import copy
-
 import mock
 from oslo_utils.fixture import uuidsentinel as uuids
 from oslo_versionedobjects import base as ovo_base
+import testtools

 from nova import exception
 from nova import objects
@@ -94,13 +93,6 @@ class _TestInstanceNUMACell(object):
         inst_cell.pin_vcpus((0, 14), (1, 15), (2, 16), (3, 17))
         self.assertEqual({0: 14, 1: 15, 2: 16, 3: 17}, inst_cell.cpu_pinning)

-    def test_cpu_pinning_requested(self):
-        inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
-                                             cpu_pinning=None)
-        self.assertFalse(inst_cell.cpu_pinning_requested)
-        inst_cell.cpu_policy = fields.CPUAllocationPolicy.DEDICATED
-        self.assertTrue(inst_cell.cpu_pinning_requested)
-
     def test_cpu_pinning(self):
         topo_obj = get_fake_obj_numa_topology(self.context)
         self.assertEqual(set(), topo_obj.cpu_pinning)
@@ -192,12 +184,46 @@ class _TestInstanceNUMATopology(object):
             objects.InstanceNUMATopology.get_by_instance_uuid,
             self.context, 'fake_uuid')

-    def test_cpu_pinning_requested(self):
-        fake_topo_obj = copy.deepcopy(fake_obj_numa_topology)
-        self.assertFalse(fake_topo_obj.cpu_pinning_requested)
-        for cell in fake_topo_obj.cells:
-            cell.cpu_policy = fields.CPUAllocationPolicy.DEDICATED
-        self.assertTrue(fake_topo_obj.cpu_pinning_requested)
+    def test_cpu_policy(self):
+        cpu_policy = fields.CPUAllocationPolicy.SHARED
+        topology = objects.InstanceNUMATopology(
+            instance_uuid=fake_instance_uuid,
+            cells=[
+                objects.InstanceNUMACell(
+                    cpuset=set([0, 1, 2, 3]),
+                    cpu_pinning=None,
+                    cpu_policy=cpu_policy,
+                ),
+                objects.InstanceNUMACell(
+                    cpuset=set([4, 5, 6, 7]),
+                    cpu_pinning=None,
+                    cpu_policy=cpu_policy,
+                ),
+            ],
+        )
+        self.assertEqual(cpu_policy, topology.cpu_policy)
+
+    def test_cpu_policy__error(self):
+        """Ensure we raise an error if cells have different values."""
+        topology = objects.InstanceNUMATopology(
+            instance_uuid=fake_instance_uuid,
+            cells=[
+                objects.InstanceNUMACell(
+                    cpuset=set([0, 1, 2, 3]),
+                    cpu_pinning=None,
+                    cpu_policy=None,
+                ),
+                objects.InstanceNUMACell(
+                    cpuset=set([4, 5, 6, 7]),
+                    cpu_pinning=None,
+                    cpu_policy=fields.CPUAllocationPolicy.SHARED
+                ),
+            ],
+        )
+        with testtools.ExpectedException(exception.InternalError):
+            topology.cpu_policy

     def test_cpuset_reserved(self):
         topology = objects.InstanceNUMATopology(

nova/tests/unit/scheduler/filters/test_numa_topology_filters.py

@@ -12,7 +12,6 @@

 import itertools

-import mock
 from oslo_utils.fixture import uuidsentinel as uuids

 from nova import objects
@@ -128,16 +127,24 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
         self.assertEqual(limits.cpu_allocation_ratio, 21)
         self.assertEqual(limits.ram_allocation_ratio, 1.3)

-    @mock.patch('nova.objects.instance_numa.InstanceNUMACell'
-                '.cpu_pinning_requested',
-                return_value=True)
     def _do_test_numa_topology_filter_cpu_policy(
-            self, numa_topology, cpu_policy, cpu_thread_policy, passes,
-            mock_pinning_requested):
-        instance_topology = objects.InstanceNUMATopology(
-            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
-                   objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
-                   ])
+            self, numa_topology, cpu_policy, cpu_thread_policy, passes):
+        instance_topology = objects.InstanceNUMATopology(cells=[
+            objects.InstanceNUMACell(
+                id=0,
+                cpuset=set([1]),
+                memory=512,
+                cpu_policy=cpu_policy,
+                cpu_thread_policy=cpu_thread_policy,
+            ),
+            objects.InstanceNUMACell(
+                id=1,
+                cpuset=set([3]),
+                memory=512,
+                cpu_policy=cpu_policy,
+                cpu_thread_policy=cpu_thread_policy,
+            ),
+        ])
         spec_obj = objects.RequestSpec(numa_topology=instance_topology,
                                        pci_requests=None,
                                        instance_uuid=uuids.fake)

nova/virt/hardware.py

@@ -1126,7 +1126,7 @@ def _numa_fit_instance_cell(host_cell, instance_cell, limit_cell=None,
     # NOTE(stephenfin): As with memory, do not allow an instance to overcommit
     # against itself on any NUMA cell
-    if instance_cell.cpu_pinning_requested:
+    if instance_cell.cpu_policy == fields.CPUAllocationPolicy.DEDICATED:
         # TODO(stephenfin): Is 'cpuset_reserved' present if consuming emulator
         # threads from shared CPU pools? If so, we don't want to add this here
         required_cpus = len(instance_cell.cpuset) + cpuset_reserved
@@ -1151,7 +1151,7 @@ def _numa_fit_instance_cell(host_cell, instance_cell, limit_cell=None,
             })
             return

-    if instance_cell.cpu_pinning_requested:
+    if instance_cell.cpu_policy == fields.CPUAllocationPolicy.DEDICATED:
         LOG.debug('Pinning has been requested')
         new_instance_cell = _numa_fit_instance_cell_with_pinning(
             host_cell, instance_cell, cpuset_reserved)
@@ -2256,7 +2256,9 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
             memory_usage = memory_usage + sign * instance_cell.memory

-            if not instance_cell.cpu_pinning_requested:
+            if instance_cell.cpu_policy != (
+                fields.CPUAllocationPolicy.DEDICATED
+            ):
                 shared_cpus_usage += sign * len(instance_cell.cpuset)
                 continue
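For usage accounting the behaviour is unchanged for the two existing policies: any cell whose policy is not DEDICATED still counts against the shared pool. A toy illustration of that split (count_cpu_usage is a made-up helper with a simplified signature, not the real numa_usage_from_instance_numa):

    def count_cpu_usage(cells, sign=1):
        """Toy version of the shared/dedicated split shown above."""
        shared = dedicated = 0
        for cell in cells:
            if cell['cpu_policy'] != 'dedicated':
                # no policy, 'shared', or any future non-dedicated value
                shared += sign * len(cell['cpuset'])
                continue
            dedicated += sign * len(cell['cpuset'])
        return shared, dedicated

    cells = [
        {'cpu_policy': None, 'cpuset': {0, 1}},         # counts as shared
        {'cpu_policy': 'dedicated', 'cpuset': {2, 3}},  # counts as dedicated
    ]
    assert count_cpu_usage(cells) == (2, 2)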

nova/virt/hyperv/vmops.py

@@ -464,7 +464,13 @@ class VMOps(object):
         memory_per_numa_node = instance_topology.cells[0].memory
         cpus_per_numa_node = len(instance_topology.cells[0].cpuset)

-        if instance_topology.cpu_pinning_requested:
+        # TODO(stephenfin): We can avoid this check entirely if we rely on the
+        # 'supports_pcpus' driver capability (via a trait), but we need to drop
+        # support for the legacy 'vcpu_pin_set' path in the libvirt driver
+        # first
+        if instance_topology.cpu_policy not in (
+            None, fields.CPUAllocationPolicy.SHARED,
+        ):
             raise exception.InstanceUnacceptable(
                 reason=_("Hyper-V does not support CPU pinning."),
                 instance_id=instance.uuid)
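Note the inverted polarity here compared to the other call sites: rather than rejecting DEDICATED instances, the driver now accepts only unpinned policies, so a future 'mixed' policy would be rejected without further changes. A sketch of that guard (hyperv_accepts is hypothetical, and 'mixed' is the planned future value, not one this commit defines):

    def hyperv_accepts(cpu_policy):
        # mirrors the new 'not in (None, SHARED)' guard, written as an
        # allow-list so unknown future policies fail safe
        return cpu_policy in (None, 'shared')

    assert hyperv_accepts(None)
    assert hyperv_accepts('shared')
    assert not hyperv_accepts('dedicated')
    assert not hyperv_accepts('mixed')  # future policy rejected for free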

nova/virt/libvirt/driver.py

@@ -8187,7 +8187,9 @@ class LibvirtDriver(driver.ComputeDriver):
             if not instance.numa_topology:
                 continue

-            if not instance.numa_topology.cpu_pinning_requested:
+            if instance.numa_topology.cpu_policy != (
+                fields.CPUAllocationPolicy.DEDICATED
+            ):
                 continue

             allocations_needing_reshape.append(instance.uuid)
@@ -8209,7 +8211,9 @@ class LibvirtDriver(driver.ComputeDriver):
             if not instance.numa_topology:
                 continue

-            if not instance.numa_topology.cpu_pinning_requested:
+            if instance.numa_topology.cpu_policy != (
+                fields.CPUAllocationPolicy.DEDICATED
+            ):
                 continue

             allocations_needing_reshape.append(migration.uuid)