Merge "Handle zero pinned CPU in a cell with mixed policy"
@@ -391,34 +391,30 @@ class NUMAServersTest(NUMAServersTestBase):
         }
         flavor_id = self._create_flavor(
             vcpu=3, memory_mb=1024, extra_spec=extra_spec)
 
+        expected_usage = {
+            'DISK_GB': 20, 'MEMORY_MB': 1024, 'PCPU': 2, 'VCPU': 1,
+        }
         # The only possible solution (ignoring the order of vCPU1,2):
         # vCPU 0 => pCPU 0, NUMA0, shared
         # vCPU 1 => pCPU 6, NUMA1, dedicated
         # vCPU 2 => pCPU 7, NUMA1, dedicated
-        # This is bug 1994526 as the scheduling fails
-        self._run_build_test(flavor_id, end_status='ERROR')
+        server = self._run_build_test(
+            flavor_id, expected_usage=expected_usage)
 
-        # # After bug 1994526 is fixed, this should pass
-        # expected_usage = {
-        #     'DISK_GB': 20, 'MEMORY_MB': 1024, 'PCPU': 2, 'VCPU': 1,
-        # }
-        # server = self._run_build_test(
-        #     flavor_id, expected_usage=expected_usage)
-        #
-        # # sanity check the instance topology
-        # inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
-        # self.assertEqual(2, len(inst.numa_topology.cells))
-        #
-        # self.assertEqual({0}, inst.numa_topology.cells[0].cpuset)
-        # self.assertEqual(set(), inst.numa_topology.cells[0].pcpuset)
-        # self.assertEqual(None, inst.numa_topology.cells[0].cpu_pinning)
-        #
-        # self.assertEqual(set(), inst.numa_topology.cells[1].cpuset)
-        # self.assertEqual({1, 2}, inst.numa_topology.cells[1].pcpuset)
-        # self.assertEqual(
-        #     {6, 7},
-        #     set(inst.numa_topology.cells[1].cpu_pinning.values())
-        # )
+        # sanity check the instance topology
+        inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+        self.assertEqual(2, len(inst.numa_topology.cells))
+
+        self.assertEqual({0}, inst.numa_topology.cells[0].cpuset)
+        self.assertEqual(set(), inst.numa_topology.cells[0].pcpuset)
+        self.assertIsNone(inst.numa_topology.cells[0].cpu_pinning)
+
+        self.assertEqual(set(), inst.numa_topology.cells[1].cpuset)
+        self.assertEqual({1, 2}, inst.numa_topology.cells[1].pcpuset)
+        self.assertEqual(
+            {6, 7},
+            set(inst.numa_topology.cells[1].cpu_pinning.values())
+        )
 
     def test_create_server_with_dedicated_policy_old_configuration(self):
         """Create a server using the legacy extra spec and configuration.
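With the fix in place, the test builds the server instead of expecting an ERROR state, then sanity-checks the guest NUMA topology. Restated as plain data (an illustrative sketch, not nova objects; `pinned_host_cpus` is a made-up key standing in for `set(cpu_pinning.values())`), the shape the assertions check is:

    # Cell 0 carries the single shared vCPU: its pcpuset is empty and its
    # cpu_pinning is None. This is the "zero pinned CPU in a cell" case
    # named in the commit title. Cell 1 carries the two dedicated vCPUs,
    # pinned to host pCPUs 6 and 7.
    expected_cells = [
        {'cpuset': {0}, 'pcpuset': set(), 'pinned_host_cpus': set()},
        {'cpuset': set(), 'pcpuset': {1, 2}, 'pinned_host_cpus': {6, 7}},
    ]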
@@ -869,7 +869,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
                 instance_cell.pcpuset)
             cpuset_reserved = _get_reserved(
                 sibling_sets[1], pinning, num_cpu_reserved=num_cpu_reserved)
-            if not pinning or (num_cpu_reserved and not cpuset_reserved):
+            if pinning is None or (num_cpu_reserved and not cpuset_reserved):
                 continue
             break
 
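The one-word change above is the core of the fix. `_get_pinning()` presumably returns None when no pinning solution exists, but under the mixed CPU policy a NUMA cell with no dedicated vCPUs legitimately produces an empty mapping, and Python treats both as falsy. A minimal standalone sketch of the difference (plain Python, not nova code; the variable names are made up):

    no_solution = None   # assumed: _get_pinning() found no fit on this cell
    zero_to_pin = {}     # assumed: valid result for a cell with no dedicated vCPUs

    # Old guard: both values are falsy, so the valid empty mapping was
    # rejected exactly like a failed pinning attempt.
    assert not no_solution
    assert not zero_to_pin

    # New guard: only the genuine failure is caught.
    assert no_solution is None
    assert zero_to_pin is not None

With `pinning is None`, the loop only retries when pinning genuinely failed.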
@@ -895,7 +895,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
         cpuset_reserved = _get_reserved(
             sibling_set, pinning, num_cpu_reserved=num_cpu_reserved)
 
-    if not pinning or (num_cpu_reserved and not cpuset_reserved):
+    if pinning is None or (num_cpu_reserved and not cpuset_reserved):
         return
     LOG.debug('Selected cores for pinning: %s, in cell %s', pinning,
               host_cell.id)
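The hunk above covers the second of the two `_get_pinning()` call sites in `_pack_instance_onto_cores` with the same guard: an empty but valid pinning is no longer rejected, so it falls through to the `LOG.debug` call and the placement can complete instead of returning early.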
@@ -2608,8 +2608,10 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
                 None, fields.CPUAllocationPolicy.SHARED,
             ):
                 continue
 
-            pinned_cpus = set(instance_cell.cpu_pinning.values())
+            if instance_cell.cpu_pinning:
+                pinned_cpus = set(instance_cell.cpu_pinning.values())
+            else:
+                pinned_cpus = set()
             if instance_cell.cpuset_reserved:
                 pinned_cpus |= instance_cell.cpuset_reserved
 
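The final hunk hardens the usage accounting in `numa_usage_from_instance_numa` for the same case: a mixed-policy cell whose vCPUs are all shared passes the policy check above yet may carry no `cpu_pinning` at all, so the old unconditional `.values()` call could presumably fail on None. A small sketch of the hardened pattern, with `pinned_cpus_of` as a hypothetical stand-in for the inlined logic:

    def pinned_cpus_of(cpu_pinning, cpuset_reserved):
        # cpu_pinning may be a populated dict, an empty dict, or None.
        pinned = set(cpu_pinning.values()) if cpu_pinning else set()
        if cpuset_reserved:
            pinned |= cpuset_reserved
        return pinned

    assert pinned_cpus_of(None, None) == set()
    assert pinned_cpus_of({}, {4}) == {4}
    assert pinned_cpus_of({1: 6, 2: 7}, None) == {6, 7}

Note that reserved host CPUs are still accounted even when nothing is pinned, which is what the unchanged `cpuset_reserved` lines preserve.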