From 690ce37e72f3a189100ab9839c2233e1e4951ba0 Mon Sep 17 00:00:00 2001
From: Stephen Finucane
Date: Wed, 11 Mar 2020 16:01:34 +0000
Subject: [PATCH] objects: Replace 'cpu_pinning_requested' helper

This helper avoided a slightly long-winded check for what was
previously a binary condition. With the future introduction of a
'mixed' policy, that check is no longer always going to be suitable.
Replace the helper with explicit checks against the instance's CPU
policy.

Part of blueprint use-pcpu-and-vcpu-in-one-instance

Change-Id: If7512c998bca770889d81012a17a1a2978cae68e
Signed-off-by: Stephen Finucane
---
 nova/compute/manager.py                       |  8 ++-
 nova/objects/instance_numa.py                 | 21 ++++---
 nova/tests/unit/objects/test_instance_numa.py | 56 ++++++++++++++-----
 .../filters/test_numa_topology_filters.py     | 27 +++++----
 nova/virt/hardware.py                         |  8 ++-
 nova/virt/hyperv/vmops.py                     |  8 ++-
 nova/virt/libvirt/driver.py                   |  8 ++-
 7 files changed, 94 insertions(+), 42 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 68047738a243..9840b6bd2f7f 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -828,9 +828,11 @@ class ComputeManager(manager.Manager):
         # if this is an unpinned instance and the host only has
         # 'cpu_dedicated_set' configured, we need to tell the operator to
         # correct their configuration
-        if not (instance.numa_topology and
-                instance.numa_topology.cpu_pinning_requested):
-
+        if not instance.numa_topology or (
+            instance.numa_topology.cpu_policy in (
+                None, fields.CPUAllocationPolicy.SHARED
+            )
+        ):
             # we don't need to check 'vcpu_pin_set' since it can't coexist
             # alongside 'cpu_dedicated_set'
             if (CONF.compute.cpu_dedicated_set and
diff --git a/nova/objects/instance_numa.py b/nova/objects/instance_numa.py
index 62866b328ceb..cb36c1d1ddf8 100644
--- a/nova/objects/instance_numa.py
+++ b/nova/objects/instance_numa.py
@@ -82,10 +82,6 @@ class InstanceNUMACell(base.NovaEphemeralObject,
 
         return list(map(set, zip(*[iter(cpu_list)] * threads)))
 
-    @property
-    def cpu_pinning_requested(self):
-        return self.cpu_policy == obj_fields.CPUAllocationPolicy.DEDICATED
-
     def pin(self, vcpu, pcpu):
         if vcpu not in self.cpuset:
             return
@@ -205,6 +201,19 @@ class InstanceNUMATopology(base.NovaObject,
         """Defined so that boolean testing works the same as for lists."""
         return len(self.cells)
 
+    # TODO(stephenfin): We should add a real 'cpu_policy' field on this object
+    # and deprecate the one found on the cell
+    @property
+    def cpu_policy(self):
+        cpu_policy = set(cell.cpu_policy for cell in self.cells)
+        if len(cpu_policy) > 1:
+            # NOTE(stephenfin): This should never happen in real life; it's to
+            # prevent programmer error.
+            raise exception.InternalError(
+                'Instance NUMA cells must have the same CPU policy.'
+            )
+        return cpu_policy.pop()
+
     @property
     def cpu_pinning(self):
         """Return a set of all host CPUs this NUMATopology is pinned to."""
@@ -212,10 +221,6 @@
             cell.cpu_pinning.values() for cell in self.cells
             if cell.cpu_pinning]))
 
-    @property
-    def cpu_pinning_requested(self):
-        return all(cell.cpu_pinning_requested for cell in self.cells)
-
     def clear_host_pinning(self):
         """Clear any data related to how instance is pinned to the host.
 
diff --git a/nova/tests/unit/objects/test_instance_numa.py b/nova/tests/unit/objects/test_instance_numa.py
index 8278815a7525..59c7289b812d 100644
--- a/nova/tests/unit/objects/test_instance_numa.py
+++ b/nova/tests/unit/objects/test_instance_numa.py
@@ -10,11 +10,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import copy
-
 import mock
 from oslo_utils.fixture import uuidsentinel as uuids
 from oslo_versionedobjects import base as ovo_base
+import testtools
 
 from nova import exception
 from nova import objects
@@ -94,13 +93,6 @@ class _TestInstanceNUMACell(object):
         inst_cell.pin_vcpus((0, 14), (1, 15), (2, 16), (3, 17))
         self.assertEqual({0: 14, 1: 15, 2: 16, 3: 17}, inst_cell.cpu_pinning)
 
-    def test_cpu_pinning_requested(self):
-        inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
-                                             cpu_pinning=None)
-        self.assertFalse(inst_cell.cpu_pinning_requested)
-        inst_cell.cpu_policy = fields.CPUAllocationPolicy.DEDICATED
-        self.assertTrue(inst_cell.cpu_pinning_requested)
-
     def test_cpu_pinning(self):
         topo_obj = get_fake_obj_numa_topology(self.context)
         self.assertEqual(set(), topo_obj.cpu_pinning)
@@ -192,12 +184,46 @@ class _TestInstanceNUMATopology(object):
                           objects.InstanceNUMATopology.get_by_instance_uuid,
                           self.context, 'fake_uuid')
 
-    def test_cpu_pinning_requested(self):
-        fake_topo_obj = copy.deepcopy(fake_obj_numa_topology)
-        self.assertFalse(fake_topo_obj.cpu_pinning_requested)
-        for cell in fake_topo_obj.cells:
-            cell.cpu_policy = fields.CPUAllocationPolicy.DEDICATED
-        self.assertTrue(fake_topo_obj.cpu_pinning_requested)
+    def test_cpu_policy(self):
+        cpu_policy = fields.CPUAllocationPolicy.SHARED
+        topology = objects.InstanceNUMATopology(
+            instance_uuid=fake_instance_uuid,
+            cells=[
+                objects.InstanceNUMACell(
+                    cpuset=set([0, 1, 2, 3]),
+                    cpu_pinning=None,
+                    cpu_policy=cpu_policy,
+                ),
+                objects.InstanceNUMACell(
+                    cpuset=set([4, 5, 6, 7]),
+                    cpu_pinning=None,
+                    cpu_policy=cpu_policy,
+                ),
+            ],
+        )
+
+        self.assertEqual(cpu_policy, topology.cpu_policy)
+
+    def test_cpu_policy__error(self):
+        """Ensure we raise an error if cells have different values."""
+        topology = objects.InstanceNUMATopology(
+            instance_uuid=fake_instance_uuid,
+            cells=[
+                objects.InstanceNUMACell(
+                    cpuset=set([0, 1, 2, 3]),
+                    cpu_pinning=None,
+                    cpu_policy=None,
+                ),
+                objects.InstanceNUMACell(
+                    cpuset=set([4, 5, 6, 7]),
+                    cpu_pinning=None,
+                    cpu_policy=fields.CPUAllocationPolicy.SHARED
+                ),
+            ],
+        )
+
+        with testtools.ExpectedException(exception.InternalError):
+            topology.cpu_policy
 
     def test_cpuset_reserved(self):
         topology = objects.InstanceNUMATopology(
diff --git a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
index 5b7cc716112b..758eb8564155 100644
--- a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
@@ -12,7 +12,6 @@
 
 import itertools
 
-import mock
 from oslo_utils.fixture import uuidsentinel as uuids
 
 from nova import objects
@@ -128,16 +127,24 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
         self.assertEqual(limits.cpu_allocation_ratio, 21)
         self.assertEqual(limits.ram_allocation_ratio, 1.3)
 
-    @mock.patch('nova.objects.instance_numa.InstanceNUMACell'
-                '.cpu_pinning_requested',
-                return_value=True)
     def _do_test_numa_topology_filter_cpu_policy(
-            self, numa_topology, cpu_policy, cpu_thread_policy, passes,
-            mock_pinning_requested):
-        instance_topology = objects.InstanceNUMATopology(
-            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
-                   objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
-            ])
+            self, numa_topology, cpu_policy, cpu_thread_policy, passes):
+        instance_topology = objects.InstanceNUMATopology(cells=[
+            objects.InstanceNUMACell(
+                id=0,
+                cpuset=set([1]),
+                memory=512,
+                cpu_policy=cpu_policy,
+                cpu_thread_policy=cpu_thread_policy,
+            ),
+            objects.InstanceNUMACell(
+                id=1,
+                cpuset=set([3]),
+                memory=512,
+                cpu_policy=cpu_policy,
+                cpu_thread_policy=cpu_thread_policy,
+            ),
+        ])
         spec_obj = objects.RequestSpec(numa_topology=instance_topology,
                                        pci_requests=None,
                                        instance_uuid=uuids.fake)
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index 37dd6d1618eb..4ca867e0308f 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -1126,7 +1126,7 @@ def _numa_fit_instance_cell(host_cell, instance_cell, limit_cell=None,
 
     # NOTE(stephenfin): As with memory, do not allow an instance to overcommit
     # against itself on any NUMA cell
-    if instance_cell.cpu_pinning_requested:
+    if instance_cell.cpu_policy == fields.CPUAllocationPolicy.DEDICATED:
         # TODO(stephenfin): Is 'cpuset_reserved' present if consuming emulator
         # threads from shared CPU pools? If so, we don't want to add this here
         required_cpus = len(instance_cell.cpuset) + cpuset_reserved
@@ -1151,7 +1151,7 @@ def _numa_fit_instance_cell(host_cell, instance_cell, limit_cell=None,
         })
         return
 
-    if instance_cell.cpu_pinning_requested:
+    if instance_cell.cpu_policy == fields.CPUAllocationPolicy.DEDICATED:
         LOG.debug('Pinning has been requested')
         new_instance_cell = _numa_fit_instance_cell_with_pinning(
             host_cell, instance_cell, cpuset_reserved)
@@ -2256,7 +2256,9 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
 
             memory_usage = memory_usage + sign * instance_cell.memory
 
-            if not instance_cell.cpu_pinning_requested:
+            if instance_cell.cpu_policy != (
+                fields.CPUAllocationPolicy.DEDICATED
+            ):
                 shared_cpus_usage += sign * len(instance_cell.cpuset)
                 continue
 
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 8f38588b4fb1..6b30a39211f5 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -464,7 +464,13 @@ class VMOps(object):
         memory_per_numa_node = instance_topology.cells[0].memory
         cpus_per_numa_node = len(instance_topology.cells[0].cpuset)
 
-        if instance_topology.cpu_pinning_requested:
+        # TODO(stephenfin): We can avoid this check entirely if we rely on the
+        # 'supports_pcpus' driver capability (via a trait), but we need to drop
+        # support for the legacy 'vcpu_pin_set' path in the libvirt driver
+        # first
+        if instance_topology.cpu_policy not in (
+            None, fields.CPUAllocationPolicy.SHARED,
+        ):
             raise exception.InstanceUnacceptable(
                 reason=_("Hyper-V does not support CPU pinning."),
                 instance_id=instance.uuid)
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index a3fb4387656d..d343d4b75ab1 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -8187,7 +8187,9 @@ class LibvirtDriver(driver.ComputeDriver):
             if not instance.numa_topology:
                 continue
 
-            if not instance.numa_topology.cpu_pinning_requested:
+            if instance.numa_topology.cpu_policy != (
+                fields.CPUAllocationPolicy.DEDICATED
+            ):
                 continue
 
             allocations_needing_reshape.append(instance.uuid)
@@ -8209,7 +8211,9 @@ class LibvirtDriver(driver.ComputeDriver):
             if not instance.numa_topology:
                 continue
 
-            if not instance.numa_topology.cpu_pinning_requested:
+            if instance.numa_topology.cpu_policy != (
+                fields.CPUAllocationPolicy.DEDICATED
+            ):
                 continue
 
             allocations_needing_reshape.append(migration.uuid)
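
For reference, the consolidation behaviour the new InstanceNUMATopology.cpu_policy property gives callers can be sketched outside of a Nova tree as follows. The Cell, Topology and InternalError names below are simplified stand-ins rather than the real Nova objects; only the policy constants and the set-based consolidation mirror the patch.

# Standalone sketch of the semantics of the new 'cpu_policy' property.
# 'Cell', 'Topology' and 'InternalError' are simplified stand-ins, not
# the real Nova classes touched by this patch.

DEDICATED = 'dedicated'  # stands in for fields.CPUAllocationPolicy.DEDICATED
SHARED = 'shared'        # stands in for fields.CPUAllocationPolicy.SHARED


class InternalError(Exception):
    """Stand-in for nova.exception.InternalError."""


class Cell(object):
    def __init__(self, cpu_policy=None):
        self.cpu_policy = cpu_policy


class Topology(object):
    def __init__(self, cells):
        self.cells = cells

    @property
    def cpu_policy(self):
        # Collapse the per-cell policies; every cell is expected to agree.
        policies = set(cell.cpu_policy for cell in self.cells)
        if len(policies) > 1:
            # Mismatched per-cell policies indicate programmer error.
            raise InternalError(
                'Instance NUMA cells must have the same CPU policy.')
        return policies.pop()


# Callers now compare against the policy explicitly rather than using the
# removed boolean 'cpu_pinning_requested' helper.
pinned = Topology([Cell(DEDICATED), Cell(DEDICATED)])
unpinned = Topology([Cell(None), Cell(None)])

assert pinned.cpu_policy == DEDICATED   # previously: cpu_pinning_requested
assert unpinned.cpu_policy is None      # previously: not cpu_pinning_requested

try:
    Topology([Cell(None), Cell(SHARED)]).cpu_policy
except InternalError as exc:
    print(exc)

Unlike the old boolean helper, the explicit comparison leaves room for the planned 'mixed' policy, which is the motivation given in the commit message.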