Set instance NUMA topology on HostState
NUMATopologyFilter tries to fit an instance onto every host (each represented by a HostState instance), so assigning the resulting instance topology to the instance dict makes no sense: we end up with only the topology calculated during the last filter run. This in turn breaks consume_from_instance, which would then consume the NUMA topology calculated for the last host the filter was run on, not for the host actually chosen by the scheduler. This patch stashes the calculated NUMA topology onto the HostState instance passed to the filter, and makes consume_from_instance use that stashed value when updating the host's usage. Change-Id: Ifacccadf73dc114e50f46b8e6087ffb2b2fc9d6b Closes-Bug: #1405359
This commit is contained in:
parent
c11cd4fa26
commit
c206d162fe
|
@ -51,7 +51,7 @@ class NUMATopologyFilter(filters.BaseHostFilter):
|
|||
if not instance_topology:
|
||||
return False
|
||||
host_state.limits['numa_topology'] = limits.to_json()
|
||||
instance['numa_topology'] = instance_topology
|
||||
host_state.instance_numa_topology = instance_topology
|
||||
return True
|
||||
elif requested_topology:
|
||||
return False
|
||||
|
|
|
@ -125,6 +125,7 @@ class HostState(object):
|
|||
self.vcpus_total = 0
|
||||
self.vcpus_used = 0
|
||||
self.numa_topology = None
|
||||
self.instance_numa_topology = None
|
||||
|
||||
# Additional host information from the compute node stats:
|
||||
self.num_instances = 0
|
||||
|
@ -204,6 +205,7 @@ class HostState(object):
|
|||
self.vcpus_used = compute.vcpus_used
|
||||
self.updated = compute.updated_at
|
||||
self.numa_topology = compute.numa_topology
|
||||
self.instance_numa_topology = None
|
||||
if compute.pci_device_pools is not None:
|
||||
self.pci_stats = pci_stats.PciDeviceStats(
|
||||
compute.pci_device_pools)
|
||||
|
@ -267,6 +269,7 @@ class HostState(object):
|
|||
instance_cells)
|
||||
|
||||
# Calculate the numa usage
|
||||
instance['numa_topology'] = self.instance_numa_topology
|
||||
updated_numa_topology = hardware.get_host_numa_usage_from_instance(
|
||||
self, instance)
|
||||
self.numa_topology = updated_numa_topology
|
||||
|
|
|
@ -43,6 +43,8 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
|
|||
{'numa_topology': fakes.NUMA_TOPOLOGY,
|
||||
'pci_stats': None})
|
||||
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
|
||||
self.assertIsInstance(host.instance_numa_topology,
|
||||
objects.InstanceNUMATopology)
|
||||
|
||||
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
|
||||
instance_topology = objects.InstanceNUMATopology(
|
||||
|
@ -58,6 +60,7 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
|
|||
obj_base.obj_to_primitive(instance))}}
|
||||
host = fakes.FakeHostState('host1', 'node1', {'pci_stats': None})
|
||||
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
|
||||
self.assertIsNone(host.instance_numa_topology)
|
||||
|
||||
def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
|
||||
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
|
||||
|
@ -69,6 +72,7 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
|
|||
host = fakes.FakeHostState('host1', 'node1',
|
||||
{'numa_topology': fakes.NUMA_TOPOLOGY})
|
||||
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
|
||||
self.assertIsNone(host.instance_numa_topology)
|
||||
|
||||
def test_numa_topology_filter_fail_fit(self):
|
||||
instance_topology = objects.InstanceNUMATopology(
|
||||
|
@ -86,6 +90,7 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
|
|||
{'numa_topology': fakes.NUMA_TOPOLOGY,
|
||||
'pci_stats': None})
|
||||
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
|
||||
self.assertIsNone(host.instance_numa_topology)
|
||||
|
||||
def test_numa_topology_filter_fail_memory(self):
|
||||
self.flags(ram_allocation_ratio=1)
|
||||
|
@ -105,6 +110,7 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
|
|||
{'numa_topology': fakes.NUMA_TOPOLOGY,
|
||||
'pci_stats': None})
|
||||
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
|
||||
self.assertIsNone(host.instance_numa_topology)
|
||||
|
||||
def test_numa_topology_filter_fail_cpu(self):
|
||||
self.flags(cpu_allocation_ratio=1)
|
||||
|
@ -123,6 +129,7 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
|
|||
{'numa_topology': fakes.NUMA_TOPOLOGY,
|
||||
'pci_stats': None})
|
||||
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
|
||||
self.assertIsNone(host.instance_numa_topology)
|
||||
|
||||
def test_numa_topology_filter_pass_set_limit(self):
|
||||
self.flags(cpu_allocation_ratio=21)
|
||||
|
@ -142,6 +149,8 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
|
|||
{'numa_topology': fakes.NUMA_TOPOLOGY,
|
||||
'pci_stats': None})
|
||||
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
|
||||
self.assertIsInstance(host.instance_numa_topology,
|
||||
objects.InstanceNUMATopology)
|
||||
limits_topology = hardware.VirtNUMALimitTopology.from_json(
|
||||
host.limits['numa_topology'])
|
||||
self.assertEqual(limits_topology.cells[0].cpu_limit, 42)
|
||||
|
|
|
@ -521,6 +521,7 @@ class HostStateTestCase(test.NoDBTestCase):
|
|||
def test_stat_consumption_from_instance(self, numa_usage_mock):
|
||||
numa_usage_mock.return_value = 'fake-consumed-once'
|
||||
host = host_manager.HostState("fakehost", "fakenode")
|
||||
host.instance_numa_topology = 'fake-instance-topology'
|
||||
|
||||
instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
|
||||
project_id='12345', vm_state=vm_states.BUILDING,
|
||||
|
@ -529,6 +530,7 @@ class HostStateTestCase(test.NoDBTestCase):
|
|||
host.consume_from_instance(instance)
|
||||
numa_usage_mock.assert_called_once_with(host, instance)
|
||||
self.assertEqual('fake-consumed-once', host.numa_topology)
|
||||
self.assertEqual('fake-instance-topology', instance['numa_topology'])
|
||||
|
||||
numa_usage_mock.return_value = 'fake-consumed-twice'
|
||||
instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
|
||||
|
@ -536,6 +538,7 @@ class HostStateTestCase(test.NoDBTestCase):
|
|||
task_state=None, os_type='Linux',
|
||||
uuid='fake-uuid', numa_topology=None)
|
||||
host.consume_from_instance(instance)
|
||||
self.assertEqual('fake-instance-topology', instance['numa_topology'])
|
||||
|
||||
self.assertEqual(2, host.num_instances)
|
||||
self.assertEqual(1, host.num_io_ops)
|
||||
|
@ -577,3 +580,21 @@ class HostStateTestCase(test.NoDBTestCase):
|
|||
self.assertEqual('string2', host.metrics['res2'].value)
|
||||
self.assertEqual('source2', host.metrics['res2'].source)
|
||||
self.assertIsInstance(host.numa_topology, six.string_types)
|
||||
|
||||
def test_update_from_compute_node_resets_stashed_numa(self):
|
||||
hyper_ver_int = utils.convert_version_to_int('6.0.0')
|
||||
compute = objects.ComputeNode(
|
||||
memory_mb=0, free_disk_gb=0, local_gb=0, metrics=None,
|
||||
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
|
||||
disk_available_least=None,
|
||||
updated_at=None, host_ip='127.0.0.1',
|
||||
hypervisor_type='htype',
|
||||
hypervisor_hostname='hostname', cpu_info='cpu_info',
|
||||
supported_hv_specs=[],
|
||||
hypervisor_version=hyper_ver_int,
|
||||
numa_topology=fakes.NUMA_TOPOLOGY._to_json(),
|
||||
stats=None, pci_device_pools=None)
|
||||
host = host_manager.HostState("fakehost", "fakenode")
|
||||
host.instance_numa_topology = 'fake-instance-topology'
|
||||
host.update_from_compute_node(compute)
|
||||
self.assertIsNone(host.instance_numa_topology)
|
||||
|
|
Loading…
Reference in New Issue