scheduler: Don't modify RequestSpec.numa_topology
The 'NUMATopologyFilter' makes a call to 'numa_fit_instance_to_host' in order to determine whether an instance with a sample topology could fit on a given host. This function is provided with an InstanceNUMATopology object, which was extracted from the RequestSpec provided to the filter. However, the 'numa_fit_instance_to_host' call has the side effect of modifying a couple of fields on this InstanceNUMATopology object, most notably the pinning information, and these changes are then propagated to subsequent calls of the filter. The reason for this propagation is presumably Python's "call-by-object" model [1].

While this doesn't cause any issues currently, it is a latent bug that has caused issues downstream. Resolve the issue by copying the entire RequestSpec object, thus ensuring any changes to this or the contained NUMA topology are not stored and cannot affect future calls to this or other filters.

[1] https://jeffknupp.com/blog/2012/11/13/is-python-callbyvalue-or-callbyreference-neither/

Change-Id: If26cbdd5189c53891554c8d128be9b90578616aa
Closes-Bug: #1655979
This commit is contained in:
parent
5ba04f4335
commit
bff2030ece
|
@ -60,6 +60,15 @@ class NUMATopologyFilter(filters.BaseHostFilter):
|
|||
return True
|
||||
|
||||
def host_passes(self, host_state, spec_obj):
|
||||
# TODO(stephenfin): The 'numa_fit_instance_to_host' function has the
|
||||
# unfortunate side effect of modifying 'spec_obj.numa_topology' - an
|
||||
# InstanceNUMATopology object - by populating the 'cpu_pinning' field.
|
||||
# This is rather rude and said function should be reworked to avoid
|
||||
# doing this. That's a large, non-backportable cleanup however, so for
|
||||
# now we just duplicate spec_obj to prevent changes propagating to
|
||||
# future filter calls.
|
||||
spec_obj = spec_obj.obj_clone()
|
||||
|
||||
ram_ratio = host_state.ram_allocation_ratio
|
||||
cpu_ratio = host_state.cpu_allocation_ratio
|
||||
extra_specs = spec_obj.flavor.extra_specs
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
|
||||
import itertools
|
||||
|
||||
import mock
|
||||
|
||||
from nova import objects
|
||||
from nova.objects import fields
|
||||
from nova.scheduler.filters import numa_topology_filter
|
||||
|
@ -121,8 +123,12 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
|
|||
self.assertEqual(limits.cpu_allocation_ratio, 21)
|
||||
self.assertEqual(limits.ram_allocation_ratio, 1.3)
|
||||
|
||||
@mock.patch('nova.objects.instance_numa_topology.InstanceNUMACell'
|
||||
'.cpu_pinning_requested',
|
||||
return_value=True)
|
||||
def _do_test_numa_topology_filter_cpu_policy(
|
||||
self, numa_topology, cpu_policy, cpu_thread_policy, passes):
|
||||
self, numa_topology, cpu_policy, cpu_thread_policy, passes,
|
||||
mock_pinning_requested):
|
||||
instance_topology = objects.InstanceNUMATopology(
|
||||
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
|
||||
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
|
||||
|
@ -166,6 +172,7 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
|
|||
spec_obj.flavor = fake_flavor
|
||||
|
||||
assertion(self.filt_cls.host_passes(host, spec_obj))
|
||||
self.assertIsNone(spec_obj.numa_topology.cells[0].cpu_pinning)
|
||||
|
||||
def test_numa_topology_filter_fail_cpu_thread_policy_require(self):
|
||||
cpu_policy = fields.CPUAllocationPolicy.DEDICATED
|
||||
|
|
Loading…
Reference in New Issue