libvirt: start tracking NUMACell.socket for hosts

This patch adds a `socket` field to NUMACell, and the libvirt driver
starts populating it. For testing, we need to fix how fakelibvirt's
HostInfo handles sockets: it previously assumed one or more sockets
within a NUMA node, but we want the reverse - one or more NUMA nodes
within a socket.

Implements: blueprint pci-socket-affinity
Change-Id: Ie4deb265f6093558ab86dc69f6ffab9da62ca15d
This commit is contained in:
Artom Lifshitz 2020-12-10 13:45:49 -05:00
parent ede0147abd
commit 95b9481aa4
8 changed files with 59 additions and 27 deletions

View File

@ -28,7 +28,8 @@ class NUMACell(base.NovaObject):
# Version 1.2: Added mempages field
# Version 1.3: Add network_metadata field
# Version 1.4: Add pcpuset
VERSION = '1.4'
# Version 1.5: Add socket
VERSION = '1.5'
fields = {
'id': obj_fields.IntegerField(read_only=True),
@ -41,11 +42,14 @@ class NUMACell(base.NovaObject):
'siblings': obj_fields.ListOfSetsOfIntegersField(),
'mempages': obj_fields.ListOfObjectsField('NUMAPagesTopology'),
'network_metadata': obj_fields.ObjectField('NetworkMetadata'),
'socket': obj_fields.IntegerField(nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
super(NUMACell, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 5):
primitive.pop('socket', None)
if target_version < (1, 4):
primitive.pop('pcpuset', None)
if target_version < (1, 3):

View File

@ -118,12 +118,12 @@ class NUMALiveMigrationPositiveBase(NUMALiveMigrationBase):
self.start_compute(
hostname='host_a',
host_info=fakelibvirt.HostInfo(
cpu_nodes=1, cpu_sockets=4, cpu_cores=1, cpu_threads=1,
cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=1,
kB_mem=10740000))
self.start_compute(
hostname='host_b',
host_info=fakelibvirt.HostInfo(
cpu_nodes=1, cpu_sockets=4, cpu_cores=1, cpu_threads=1,
cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=1,
kB_mem=10740000))
# Create a 2-CPU flavor
@ -471,12 +471,12 @@ class NUMALiveMigrationLegacyBase(NUMALiveMigrationPositiveBase):
self.start_compute(
hostname='source',
host_info=fakelibvirt.HostInfo(
cpu_nodes=1, cpu_sockets=2, cpu_cores=1, cpu_threads=1,
cpu_nodes=1, cpu_sockets=1, cpu_cores=2, cpu_threads=1,
kB_mem=10740000))
self.start_compute(
hostname='dest',
host_info=fakelibvirt.HostInfo(
cpu_nodes=1, cpu_sockets=2, cpu_cores=1, cpu_threads=1,
cpu_nodes=1, cpu_sockets=1, cpu_cores=2, cpu_threads=1,
kB_mem=10740000))
ctxt = context.get_admin_context()
@ -597,12 +597,12 @@ class NUMALiveMigrationNegativeTests(NUMALiveMigrationBase):
self.start_compute(
hostname='host_a',
host_info=fakelibvirt.HostInfo(
cpu_nodes=1, cpu_sockets=3, cpu_cores=1, cpu_threads=1,
cpu_nodes=1, cpu_sockets=1, cpu_cores=3, cpu_threads=1,
kB_mem=10740000))
self.start_compute(
hostname='host_b',
host_info=fakelibvirt.HostInfo(
cpu_nodes=2, cpu_sockets=2, cpu_cores=1, cpu_threads=1,
cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=1,
kB_mem=10740000))
extra_spec = {'hw:numa_nodes': 1,
@ -638,14 +638,14 @@ class NUMALiveMigrationNegativeTests(NUMALiveMigrationBase):
self.start_compute(
hostname='host_a',
host_info=fakelibvirt.HostInfo(
cpu_nodes=1, cpu_sockets=2, cpu_cores=1, cpu_threads=1,
cpu_nodes=1, cpu_sockets=1, cpu_cores=2, cpu_threads=1,
kB_mem=1024000, mempages={
0: fakelibvirt.create_mempages([(4, 256000), (1024, 1000)])
}))
self.start_compute(
hostname='host_b',
host_info=fakelibvirt.HostInfo(
cpu_nodes=1, cpu_sockets=2, cpu_cores=1, cpu_threads=1,
cpu_nodes=1, cpu_sockets=1, cpu_cores=2, cpu_threads=1,
kB_mem=1024000, mempages={
0: fakelibvirt.create_mempages([(4, 256000), (2048, 500)]),
}))

View File

@ -342,6 +342,7 @@ class _TestNUMACell(object):
physnets=set(['foo', 'bar']), tunneled=True)
cell = objects.NUMACell(
id=0,
socket=0,
cpuset=set([1, 2]),
pcpuset=set([3, 4]),
memory=32,
@ -351,10 +352,14 @@ class _TestNUMACell(object):
network_metadata=network_metadata)
versions = ovo_base.obj_tree_get_versions('NUMACell')
primitive = cell.obj_to_primitive(target_version='1.5',
version_manifest=versions)
self.assertIn('socket', primitive['nova_object.data'])
primitive = cell.obj_to_primitive(target_version='1.4',
version_manifest=versions)
self.assertIn('pcpuset', primitive['nova_object.data'])
self.assertNotIn('socket', primitive['nova_object.data'])
primitive = cell.obj_to_primitive(target_version='1.3',
version_manifest=versions)

View File

@ -1105,7 +1105,7 @@ object_data = {
'MigrationList': '1.5-36793f8d65bae421bd5564d09a4de7be',
'MonitorMetric': '1.1-53b1db7c4ae2c531db79761e7acc52ba',
'MonitorMetricList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'NUMACell': '1.4-7695303e820fa855d76954be2eb2680e',
'NUMACell': '1.5-2592de3c926a7840d763bcc85f81afa7',
'NUMAPagesTopology': '1.1-edab9fa2dc43c117a38d600be54b4542',
'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
'NUMATopologyLimits': '1.1-4235c5da7a76c7e36075f0cd2f5cf922',

View File

@ -608,15 +608,16 @@ class NUMATopology(vconfig.LibvirtConfigCapsNUMATopology):
super(NUMATopology, self).__init__(**kwargs)
cpu_count = 0
for cell_count in range(cpu_nodes):
cell = vconfig.LibvirtConfigCapsNUMACell()
cell.id = cell_count
cell.memory = kb_mem // cpu_nodes
for socket_count in range(cpu_sockets):
cell_count = 0
for socket_count in range(cpu_sockets):
for cell_num in range(cpu_nodes):
cell = vconfig.LibvirtConfigCapsNUMACell()
cell.id = cell_count
cell.memory = kb_mem // (cpu_nodes * cpu_sockets)
for cpu_num in range(cpu_cores * cpu_threads):
cpu = vconfig.LibvirtConfigCapsNUMACPU()
cpu.id = cpu_count
cpu.socket_id = cell_count
cpu.socket_id = socket_count
cpu.core_id = cpu_num // cpu_threads
cpu.siblings = set([cpu_threads *
(cpu_count // cpu_threads) + thread
@ -625,13 +626,15 @@ class NUMATopology(vconfig.LibvirtConfigCapsNUMATopology):
cpu_count += 1
# If no mempages are provided, use only the default 4K pages
if mempages:
cell.mempages = mempages[cell_count]
else:
cell.mempages = create_mempages([(4, cell.memory // 4)])
# If no mempages are provided, use only the default 4K pages
if mempages:
cell.mempages = mempages[cell_count]
else:
cell.mempages = create_mempages([(4, cell.memory // 4)])
self.cells.append(cell)
self.cells.append(cell)
cell_count += 1
def create_mempages(mappings):

View File

@ -17309,8 +17309,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(host.Host, 'has_min_version',
new=mock.Mock(return_value=True))
def _test_get_host_numa_topology(self):
nodes = 4
sockets = 1
nodes = 1
sockets = 4
cores = 1
threads = 2
total_cores = nodes * sockets * cores * threads
@ -17352,6 +17352,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(set([]), got_topo.cells[2].pinned_cpus)
self.assertEqual(set([]), got_topo.cells[3].pinned_cpus)
# Each cell should be in its own socket
self.assertEqual(0, got_topo.cells[0].socket)
self.assertEqual(1, got_topo.cells[1].socket)
self.assertEqual(2, got_topo.cells[2].socket)
self.assertEqual(3, got_topo.cells[3].socket)
# return to caller for further checks
return got_topo

View File

@ -390,10 +390,9 @@ class FakeLibvirtTests(test.NoDBTestCase):
</topology>
"""
host_topology = libvirt.NUMATopology(
cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
cpu_nodes=1, cpu_sockets=2, cpu_cores=2, cpu_threads=2,
kb_mem=15740000)
self.assertEqual(host_topology.to_xml(),
topology)
self.assertEqual(topology, host_topology.to_xml())
def test_pci_devices_generation(self):
def _cmp_pci_dev_addr(dev_xml, cmp_addr):

View File

@ -7575,6 +7575,20 @@ class LibvirtDriver(driver.ComputeDriver):
for cell in topology.cells:
cpus = set(cpu.id for cpu in cell.cpus)
# NOTE(artom) We assume we'll never see hardware with multiple
# sockets in a single NUMA node - IOW, the socket_id for all CPUs
# in a single cell will be the same. To make that assumption
# explicit, we leave the cell's socket_id as None if that's the
# case.
socket_id = None
sockets = set([cpu.socket_id for cpu in cell.cpus])
if len(sockets) == 1:
socket_id = sockets.pop()
else:
LOG.warning('This host appears to have multiple sockets per '
'NUMA node. The `socket` PCI NUMA affinity '
'will not be supported.')
cpuset = cpus & available_shared_cpus
pcpuset = cpus & available_dedicated_cpus
@ -7609,6 +7623,7 @@ class LibvirtDriver(driver.ComputeDriver):
# loops through all instances and calculated usage accordingly
cell = objects.NUMACell(
id=cell.id,
socket=socket_id,
cpuset=cpuset,
pcpuset=pcpuset,
memory=cell.memory / units.Ki,