Expose numa_topology to the resource tracker
This patch makes all the current drivers that implement get_available_resource() return an additional 'numa_topology' field in the resources dict. The libvirt driver will actually return useful data, while the other drivers only return 'None' at this time. We also make the resource tracker check for the presence of this field. The intent is that it be mandatory for all drivers to expose it, but to avoid breaking out-of-tree virt drivers, if the field is missing it will be set to None.

Blueprint: virt-driver-numa-placement
Change-Id: I75e4aeea4636532cfd724f718e136fac5bb2d15e
This commit is contained in:
parent
42e4c04e9e
commit
0954fb18d6
@ -306,6 +306,11 @@ class ResourceTracker(object):
|
||||
return
|
||||
resources['host_ip'] = CONF.my_ip
|
||||
|
||||
# TODO(berrange): remove this once all virt drivers are updated
|
||||
# to report topology
|
||||
if "numa_topology" not in resources:
|
||||
resources["numa_topology"] = None
|
||||
|
||||
self._verify_resources(resources)
|
||||
|
||||
self._report_hypervisor_resource_view(resources)
|
||||
@ -703,7 +708,8 @@ class ResourceTracker(object):
|
||||
|
||||
def _verify_resources(self, resources):
|
||||
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
|
||||
"vcpus_used", "memory_mb_used", "local_gb_used"]
|
||||
"vcpus_used", "memory_mb_used", "local_gb_used",
|
||||
"numa_topology"]
|
||||
|
||||
missing_keys = [k for k in resource_keys if k not in resources]
|
||||
if missing_keys:
|
||||
|
@ -109,6 +109,7 @@ class FakeVirtDriver(driver.ComputeDriver):
|
||||
'hypervisor_version': 0,
|
||||
'hypervisor_hostname': 'fakehost',
|
||||
'cpu_info': '',
|
||||
'numa_topology': None,
|
||||
}
|
||||
if self.pci_support:
|
||||
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
|
||||
|
@ -9511,6 +9511,11 @@ class HostStateTestCase(test.TestCase):
|
||||
"vendor_id": '8086',
|
||||
"dev_type": 'type-PF',
|
||||
"phys_function": None}]
|
||||
numa_topology = hardware.VirtNUMAHostTopology(
|
||||
cells=[hardware.VirtNUMATopologyCellUsage(
|
||||
1, set([1, 2]), 1024),
|
||||
hardware.VirtNUMATopologyCellUsage(
|
||||
2, set([3, 4]), 1024)])
|
||||
|
||||
class FakeConnection(object):
|
||||
"""Fake connection object."""
|
||||
@ -9558,6 +9563,9 @@ class HostStateTestCase(test.TestCase):
|
||||
def _get_pci_passthrough_devices(self):
|
||||
return jsonutils.dumps(HostStateTestCase.pci_devices)
|
||||
|
||||
def _get_host_numa_topology(self):
|
||||
return HostStateTestCase.numa_topology
|
||||
|
||||
def test_update_status(self):
|
||||
hs = libvirt_driver.HostState(self.FakeConnection())
|
||||
stats = hs._stats
|
||||
@ -9581,6 +9589,10 @@ class HostStateTestCase(test.TestCase):
|
||||
self.assertEqual(stats["disk_available_least"], 80)
|
||||
self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
|
||||
HostStateTestCase.pci_devices)
|
||||
self.assertThat(hardware.VirtNUMAHostTopology.from_json(
|
||||
stats['numa_topology'])._to_dict(),
|
||||
matchers.DictMatches(
|
||||
HostStateTestCase.numa_topology._to_dict()))
|
||||
|
||||
|
||||
class NWFilterFakes:
|
||||
|
@ -395,7 +395,8 @@ class FakeDriver(driver.ComputeDriver):
|
||||
'hypervisor_hostname': nodename,
|
||||
'disk_available_least': 0,
|
||||
'cpu_info': '?',
|
||||
'supported_instances': jsonutils.dumps([(None, 'fake', None)])
|
||||
'supported_instances': jsonutils.dumps([(None, 'fake', None)]),
|
||||
'numa_topology': None,
|
||||
}
|
||||
return dic
|
||||
|
||||
|
@ -131,7 +131,8 @@ class HostOps(object):
|
||||
'cpu_info': jsonutils.dumps(cpu_info),
|
||||
'supported_instances': jsonutils.dumps(
|
||||
[(arch.I686, 'hyperv', 'hvm'),
|
||||
(arch.X86_64, 'hyperv', 'hvm')])
|
||||
(arch.X86_64, 'hyperv', 'hvm')]),
|
||||
'numa_topology': None,
|
||||
}
|
||||
|
||||
return dic
|
||||
|
@ -6103,6 +6103,12 @@ class HostState(object):
|
||||
data['pci_passthrough_devices'] = \
|
||||
self.driver._get_pci_passthrough_devices()
|
||||
|
||||
numa_topology = self.driver._get_host_numa_topology()
|
||||
if numa_topology:
|
||||
data['numa_topology'] = numa_topology.to_json()
|
||||
else:
|
||||
data['numa_topology'] = None
|
||||
|
||||
self._stats = data
|
||||
|
||||
return data
|
||||
|
@ -385,6 +385,7 @@ class VMwareVCDriver(driver.ComputeDriver):
|
||||
'cpu_info': jsonutils.dumps(host_stats['cpu_info']),
|
||||
'supported_instances': jsonutils.dumps(
|
||||
host_stats['supported_instances']),
|
||||
'numa_topology': None,
|
||||
}
|
||||
|
||||
def get_available_resource(self, nodename):
|
||||
|
@ -465,7 +465,8 @@ class XenAPIDriver(driver.ComputeDriver):
|
||||
'supported_instances': jsonutils.dumps(
|
||||
host_stats['supported_instances']),
|
||||
'pci_passthrough_devices': jsonutils.dumps(
|
||||
host_stats['pci_passthrough_devices'])}
|
||||
host_stats['pci_passthrough_devices']),
|
||||
'numa_topology': None}
|
||||
|
||||
return dic
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user