Merge "objects: Replace 'cpu_pinning_requested' helper"

This commit is contained in:
Zuul 2020-06-25 01:20:22 +00:00 committed by Gerrit Code Review
commit 6bc1352744
7 changed files with 94 additions and 42 deletions

@@ -827,9 +827,11 @@ class ComputeManager(manager.Manager):
         # if this is an unpinned instance and the host only has
         # 'cpu_dedicated_set' configured, we need to tell the operator to
         # correct their configuration
-        if not (instance.numa_topology and
-                instance.numa_topology.cpu_pinning_requested):
+        if not instance.numa_topology or (
+            instance.numa_topology.cpu_policy in (
+                None, fields.CPUAllocationPolicy.SHARED
+            )
+        ):
             # we don't need to check 'vcpu_pin_set' since it can't coexist
             # alongside 'cpu_dedicated_set'
             if (CONF.compute.cpu_dedicated_set and
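
The old and new guards are equivalent for the policies that exist at this point (None, 'shared', 'dedicated'), because the removed helper was simply 'cpu_policy == DEDICATED' (see the instance_numa hunk below). A minimal standalone sketch, plain Python rather than Nova code and with 'FakeTopology' as a stand-in, exercising all four cases:

    SHARED, DEDICATED = 'shared', 'dedicated'

    class FakeTopology:
        def __init__(self, cpu_policy):
            self.cpu_policy = cpu_policy

        @property
        def cpu_pinning_requested(self):
            # the removed helper, per the instance_numa hunk below
            return self.cpu_policy == DEDICATED

    for topo in (None, FakeTopology(None), FakeTopology(SHARED),
                 FakeTopology(DEDICATED)):
        old = not (topo and topo.cpu_pinning_requested)
        new = not topo or topo.cpu_policy in (None, SHARED)
        assert old == new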

@@ -82,10 +82,6 @@ class InstanceNUMACell(base.NovaEphemeralObject,
         return list(map(set, zip(*[iter(cpu_list)] * threads)))

-    @property
-    def cpu_pinning_requested(self):
-        return self.cpu_policy == obj_fields.CPUAllocationPolicy.DEDICATED
-
     def pin(self, vcpu, pcpu):
         if vcpu not in self.cpuset:
             return
@@ -205,6 +201,19 @@ class InstanceNUMATopology(base.NovaObject,
         """Defined so that boolean testing works the same as for lists."""
         return len(self.cells)

+    # TODO(stephenfin): We should add a real 'cpu_policy' field on this object
+    # and deprecate the one found on the cell
+    @property
+    def cpu_policy(self):
+        cpu_policy = set(cell.cpu_policy for cell in self.cells)
+        if len(cpu_policy) > 1:
+            # NOTE(stephenfin): This should never happen in real life; it's to
+            # prevent programmer error.
+            raise exception.InternalError(
+                'Instance NUMA cells must have the same CPU policy.'
+            )
+        return cpu_policy.pop()
+
     @property
     def cpu_pinning(self):
         """Return a set of all host CPUs this NUMATopology is pinned to."""
@@ -212,10 +221,6 @@ class InstanceNUMATopology(base.NovaObject,
             cell.cpu_pinning.values() for cell in self.cells
             if cell.cpu_pinning]))

-    @property
-    def cpu_pinning_requested(self):
-        return all(cell.cpu_pinning_requested for cell in self.cells)
-
     def clear_host_pinning(self):
         """Clear any data related to how instance is pinned to the host.

@@ -10,11 +10,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import copy
-
 import mock
 from oslo_utils.fixture import uuidsentinel as uuids
 from oslo_versionedobjects import base as ovo_base
+import testtools

 from nova import exception
 from nova import objects
@@ -94,13 +93,6 @@ class _TestInstanceNUMACell(object):
         inst_cell.pin_vcpus((0, 14), (1, 15), (2, 16), (3, 17))
         self.assertEqual({0: 14, 1: 15, 2: 16, 3: 17}, inst_cell.cpu_pinning)

-    def test_cpu_pinning_requested(self):
-        inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
-                                             cpu_pinning=None)
-        self.assertFalse(inst_cell.cpu_pinning_requested)
-        inst_cell.cpu_policy = fields.CPUAllocationPolicy.DEDICATED
-        self.assertTrue(inst_cell.cpu_pinning_requested)
-
     def test_cpu_pinning(self):
         topo_obj = get_fake_obj_numa_topology(self.context)
         self.assertEqual(set(), topo_obj.cpu_pinning)
@@ -192,12 +184,46 @@ class _TestInstanceNUMATopology(object):
             objects.InstanceNUMATopology.get_by_instance_uuid,
             self.context, 'fake_uuid')

-    def test_cpu_pinning_requested(self):
-        fake_topo_obj = copy.deepcopy(fake_obj_numa_topology)
-        self.assertFalse(fake_topo_obj.cpu_pinning_requested)
-        for cell in fake_topo_obj.cells:
-            cell.cpu_policy = fields.CPUAllocationPolicy.DEDICATED
-        self.assertTrue(fake_topo_obj.cpu_pinning_requested)
+    def test_cpu_policy(self):
+        cpu_policy = fields.CPUAllocationPolicy.SHARED
+        topology = objects.InstanceNUMATopology(
+            instance_uuid=fake_instance_uuid,
+            cells=[
+                objects.InstanceNUMACell(
+                    cpuset=set([0, 1, 2, 3]),
+                    cpu_pinning=None,
+                    cpu_policy=cpu_policy,
+                ),
+                objects.InstanceNUMACell(
+                    cpuset=set([4, 5, 6, 7]),
+                    cpu_pinning=None,
+                    cpu_policy=cpu_policy,
+                ),
+            ],
+        )
+        self.assertEqual(cpu_policy, topology.cpu_policy)
+
+    def test_cpu_policy__error(self):
+        """Ensure we raise an error if cells have different values."""
+        topology = objects.InstanceNUMATopology(
+            instance_uuid=fake_instance_uuid,
+            cells=[
+                objects.InstanceNUMACell(
+                    cpuset=set([0, 1, 2, 3]),
+                    cpu_pinning=None,
+                    cpu_policy=None,
+                ),
+                objects.InstanceNUMACell(
+                    cpuset=set([4, 5, 6, 7]),
+                    cpu_pinning=None,
+                    cpu_policy=fields.CPUAllocationPolicy.SHARED
+                ),
+            ],
+        )
+        with testtools.ExpectedException(exception.InternalError):
+            topology.cpu_policy
+
     def test_cpuset_reserved(self):
         topology = objects.InstanceNUMATopology(

@@ -12,7 +12,6 @@
 import itertools

-import mock
 from oslo_utils.fixture import uuidsentinel as uuids

 from nova import objects
@@ -128,16 +127,24 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
         self.assertEqual(limits.cpu_allocation_ratio, 21)
         self.assertEqual(limits.ram_allocation_ratio, 1.3)

-    @mock.patch('nova.objects.instance_numa.InstanceNUMACell'
-                '.cpu_pinning_requested',
-                return_value=True)
     def _do_test_numa_topology_filter_cpu_policy(
-            self, numa_topology, cpu_policy, cpu_thread_policy, passes,
-            mock_pinning_requested):
-        instance_topology = objects.InstanceNUMATopology(
-            cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
-                   objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
-            ])
+            self, numa_topology, cpu_policy, cpu_thread_policy, passes):
+        instance_topology = objects.InstanceNUMATopology(cells=[
+            objects.InstanceNUMACell(
+                id=0,
+                cpuset=set([1]),
+                memory=512,
+                cpu_policy=cpu_policy,
+                cpu_thread_policy=cpu_thread_policy,
+            ),
+            objects.InstanceNUMACell(
+                id=1,
+                cpuset=set([3]),
+                memory=512,
+                cpu_policy=cpu_policy,
+                cpu_thread_policy=cpu_thread_policy,
+            ),
+        ])
         spec_obj = objects.RequestSpec(numa_topology=instance_topology,
                                        pci_requests=None,
                                        instance_uuid=uuids.fake)

@@ -1126,7 +1126,7 @@ def _numa_fit_instance_cell(host_cell, instance_cell, limit_cell=None,
     # NOTE(stephenfin): As with memory, do not allow an instance to overcommit
     # against itself on any NUMA cell
-    if instance_cell.cpu_pinning_requested:
+    if instance_cell.cpu_policy == fields.CPUAllocationPolicy.DEDICATED:
         # TODO(stephenfin): Is 'cpuset_reserved' present if consuming emulator
         # threads from shared CPU pools? If so, we don't want to add this here
         required_cpus = len(instance_cell.cpuset) + cpuset_reserved

@@ -1151,7 +1151,7 @@ def _numa_fit_instance_cell(host_cell, instance_cell, limit_cell=None,
         })
         return

-    if instance_cell.cpu_pinning_requested:
+    if instance_cell.cpu_policy == fields.CPUAllocationPolicy.DEDICATED:
         LOG.debug('Pinning has been requested')
         new_instance_cell = _numa_fit_instance_cell_with_pinning(
             host_cell, instance_cell, cpuset_reserved)

@@ -2256,7 +2256,9 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
         memory_usage = memory_usage + sign * instance_cell.memory

-        if not instance_cell.cpu_pinning_requested:
+        if instance_cell.cpu_policy != (
+            fields.CPUAllocationPolicy.DEDICATED
+        ):
             shared_cpus_usage += sign * len(instance_cell.cpuset)
             continue
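
The usage-accounting hunk above applies the same substitution: any cell whose policy is not 'dedicated' counts against the shared pool. A minimal sketch of that pattern, plain Python with dictionaries standing in for InstanceNUMACell:

    DEDICATED = 'dedicated'

    def cpu_usage(cells, sign=1):
        """Tally shared vs. dedicated CPU consumption; sign=-1 frees."""
        shared = dedicated = 0
        for cell in cells:
            if cell['cpu_policy'] != DEDICATED:
                shared += sign * len(cell['cpuset'])
                continue
            dedicated += sign * len(cell['cpuset'])
        return shared, dedicated

    # one unpinned and one pinned cell, two CPUs each
    assert cpu_usage([{'cpu_policy': None, 'cpuset': {0, 1}},
                      {'cpu_policy': DEDICATED, 'cpuset': {2, 3}}]) == (2, 2)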

@@ -464,7 +464,13 @@ class VMOps(object):
         memory_per_numa_node = instance_topology.cells[0].memory
         cpus_per_numa_node = len(instance_topology.cells[0].cpuset)

-        if instance_topology.cpu_pinning_requested:
+        # TODO(stephenfin): We can avoid this check entirely if we rely on the
+        # 'supports_pcpus' driver capability (via a trait), but we need to drop
+        # support for the legacy 'vcpu_pin_set' path in the libvirt driver
+        # first
+        if instance_topology.cpu_policy not in (
+            None, fields.CPUAllocationPolicy.SHARED,
+        ):
             raise exception.InstanceUnacceptable(
                 reason=_("Hyper-V does not support CPU pinning."),
                 instance_id=instance.uuid)

@@ -8187,7 +8187,9 @@ class LibvirtDriver(driver.ComputeDriver):
             if not instance.numa_topology:
                 continue

-            if not instance.numa_topology.cpu_pinning_requested:
+            if instance.numa_topology.cpu_policy != (
+                fields.CPUAllocationPolicy.DEDICATED
+            ):
                 continue

             allocations_needing_reshape.append(instance.uuid)

@@ -8209,7 +8211,9 @@ class LibvirtDriver(driver.ComputeDriver):
             if not instance.numa_topology:
                 continue

-            if not instance.numa_topology.cpu_pinning_requested:
+            if instance.numa_topology.cpu_policy != (
+                fields.CPUAllocationPolicy.DEDICATED
+            ):
                 continue

             allocations_needing_reshape.append(migration.uuid)