hardware: Remove handling of pre-Train compute nodes

Now that we're into Ussuri, we no longer need to worry about receiving
objects from pre-Train compute nodes. Remove the code that was handling
this along with a couple of other TODOs that have either been resolved
or will never be resolved.

Part of blueprint use-pcpu-and-vcpu-in-one-instance

Change-Id: I771117f18bfab7d73de8e431168c964a96d419e8
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
This commit is contained in:
Stephen Finucane 2020-03-16 16:58:02 +00:00
parent 0a0c174fbc
commit 184a2cadf0
2 changed files with 5 additions and 63 deletions

View File

@@ -1918,6 +1918,7 @@ class NUMATopologyTest(test.NoDBTestCase):
objects.NUMACell(
id=0,
cpuset=set([0, 1]),
pcpuset=set(),
memory=512,
cpu_usage=0,
memory_usage=0,
@@ -1929,6 +1930,7 @@ class NUMATopologyTest(test.NoDBTestCase):
objects.NUMACell(
id=1,
cpuset=set([2, 3]),
pcpuset=set(),
memory=512,
cpu_usage=0,
memory_usage=0,
@@ -1959,7 +1961,7 @@ class NUMATopologyTest(test.NoDBTestCase):
self.assertEqual({2048: 64}, reserved[6])
self.assertEqual({1048576: 1}, reserved[9])
def test_reserved_hugepgaes_success(self):
def test_reserved_hugepages_success(self):
self.flags(reserved_huge_pages=[
{'node': 0, 'size': 2048, 'count': 128},
{'node': 1, 'size': 1048576, 'count': 1}])
@@ -3177,41 +3179,6 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
# TODO(stephenfin): Remove in U
def test_host_numa_fit_instance_to_host_legacy_object(self):
"""Check that we're able to fit an instance NUMA topology to a legacy
host NUMA topology that doesn't have the 'pcpuset' field present.
"""
host_topo = objects.NUMATopology(cells=[
objects.NUMACell(
id=0,
cpuset=set([0, 1]),
# we are explicitly not setting pcpuset here
memory=2048,
memory_usage=0,
pinned_cpus=set(),
mempages=[],
siblings=[set([0]), set([1])]),
objects.NUMACell(
id=1,
cpuset=set([2, 3]),
# we are explicitly not setting pcpuset here
memory=2048,
memory_usage=0,
pinned_cpus=set(),
mempages=[],
siblings=[set([2]), set([3])])
])
inst_topo = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
cpuset=set([0, 1]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
def test_host_numa_fit_instance_to_host_single_cell_w_usage(self):
host_topo = objects.NUMATopology(cells=[
objects.NUMACell(

View File

@@ -1116,14 +1116,6 @@ def _numa_fit_instance_cell(host_cell, instance_cell, limit_cell=None,
'actual': host_cell.memory})
return
# The 'pcpuset' field is only set by newer compute nodes, so if it's
# not present then we've received this object from a pre-Train compute
# node and need to query against the 'cpuset' field instead until the
# compute node has been upgraded and starts reporting things properly.
# TODO(stephenfin): Remove in U
if 'pcpuset' not in host_cell:
host_cell.pcpuset = host_cell.cpuset
# NOTE(stephenfin): As with memory, do not allow an instance to overcommit
# against itself on any NUMA cell
if instance_cell.cpu_policy == fields.CPUAllocationPolicy.DEDICATED:
@@ -1781,7 +1773,6 @@ def get_pci_numa_policy_constraint(flavor, image_meta):
return policy
# TODO(sahid): Move numa related to hardware/numa.py
def numa_get_constraints(flavor, image_meta):
"""Return topology related to input request.
@@ -1872,7 +1863,6 @@ def numa_get_constraints(flavor, image_meta):
requested_vcpus, requested_pcpus = _get_vcpu_pcpu_resources(flavor)
if cpu_policy and (requested_vcpus or requested_pcpus):
# TODO(stephenfin): Make these custom exceptions
raise exception.InvalidRequest(
"It is not possible to use the 'resources:VCPU' or "
"'resources:PCPU' extra specs in combination with the "
@@ -2086,8 +2076,6 @@ def numa_fit_instance_to_host(
host_cells = sorted(host_cells, key=lambda cell: cell.id in [
pool['numa_node'] for pool in pci_stats.pools])
# TODO(ndipanov): We may want to sort permutations differently
# depending on whether we want packing/spreading over NUMA nodes
for host_cell_perm in itertools.permutations(
host_cells, len(instance_topology)):
chosen_instance_cells: ty.List['objects.InstanceNUMACell'] = []
@@ -2216,23 +2204,10 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
memory_usage = host_cell.memory_usage
shared_cpus_usage = host_cell.cpu_usage
# The 'pcpuset' field is only set by newer compute nodes, so if it's
# not present then we've received this object from a pre-Train compute
# node and need to dual-report all CPUS listed therein as both
# dedicated and shared until the compute node has been upgraded and
# starts reporting things properly.
# TODO(stephenfin): Remove in U
if 'pcpuset' not in host_cell:
shared_cpus = host_cell.cpuset
dedicated_cpus = host_cell.cpuset
else:
shared_cpus = host_cell.cpuset
dedicated_cpus = host_cell.pcpuset
new_cell = objects.NUMACell(
id=host_cell.id,
cpuset=shared_cpus,
pcpuset=dedicated_cpus,
cpuset=host_cell.cpuset,
pcpuset=host_cell.pcpuset,
memory=host_cell.memory,
cpu_usage=0,
memory_usage=0,