objects: Introduce 'pcpuset' field for InstanceNUMACell

Introduce the 'pcpuset' field to the 'InstanceNUMACell' object to track
the instance's pinned CPUs. The 'InstanceNUMACell.cpuset' field is
switched to keep only the instance's unpinned CPUs. As a result, the
vCPUs of a dedicated instance are tracked in the NUMA cell object's
'pcpuset', and the vCPUs of a shared instance are put into the 'cpuset'
field.

This introduces an object data migration task for any existing instance
using the 'dedicated' CPU allocation policy, since all of its CPUs are
1:1 pinned with host CPUs: the content of 'InstanceNUMACell.cpuset'
must be cleared and moved to the 'InstanceNUMACell.pcpuset' field.

Part of blueprint use-pcpu-and-vcpu-in-one-instance

Change-Id: I901fbd7df00e45196395ff4c69e7b8aa3359edf6
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
Signed-off-by: Wang Huaqiang <huaqiang.wang@intel.com>
This commit is contained in:
Wang Huaqiang 2020-05-11 11:09:00 +08:00
parent 55ff751775
commit 867d447101
22 changed files with 732 additions and 407 deletions

View File

@ -53,7 +53,7 @@ class ServerTopologyController(wsgi.Controller):
for cell_ in instance.numa_topology.cells:
cell = {}
cell['vcpu_set'] = cell_.cpuset
cell['vcpu_set'] = cell_.total_cpus
cell['siblings'] = cell_.siblings
cell['memory_mb'] = cell_.memory

View File

@ -33,12 +33,23 @@ class InstanceNUMACell(base.NovaEphemeralObject,
# Version 1.2: Add cpu_pinning_raw and topology fields
# Version 1.3: Add cpu_policy and cpu_thread_policy fields
# Version 1.4: Add cpuset_reserved field
VERSION = '1.4'
# Version 1.5: Add pcpuset field
VERSION = '1.5'
def obj_make_compatible(self, primitive, target_version):
super(InstanceNUMACell, self).obj_make_compatible(primitive,
target_version)
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
# NOTE(huaqiang): Since version 1.5, 'cpuset' is modified to track the
# unpinned CPUs only, with pinned CPUs tracked via 'pcpuset' instead.
# For backward compatibility, move the 'dedicated' instance CPU list
# back from 'pcpuset' to 'cpuset'.
if target_version < (1, 5):
if (primitive['cpu_policy'] ==
obj_fields.CPUAllocationPolicy.DEDICATED):
primitive['cpuset'] = primitive['pcpuset']
primitive.pop('pcpuset', None)
if target_version < (1, 4):
primitive.pop('cpuset_reserved', None)
@ -49,6 +60,10 @@ class InstanceNUMACell(base.NovaEphemeralObject,
fields = {
'id': obj_fields.IntegerField(),
'cpuset': obj_fields.SetOfIntegersField(),
'pcpuset': obj_fields.SetOfIntegersField(),
# These physical CPUs are reserved for use by the hypervisor
'cpuset_reserved': obj_fields.SetOfIntegersField(nullable=True,
default=None),
'memory': obj_fields.IntegerField(),
'pagesize': obj_fields.IntegerField(nullable=True,
default=None),
@ -60,19 +75,20 @@ class InstanceNUMACell(base.NovaEphemeralObject,
default=None),
'cpu_thread_policy': obj_fields.CPUThreadAllocationPolicyField(
nullable=True, default=None),
# These physical CPUs are reserved for use by the hypervisor
'cpuset_reserved': obj_fields.SetOfIntegersField(nullable=True,
default=None),
}
cpu_pinning = obj_fields.DictProxyField('cpu_pinning_raw')
def __len__(self):
return len(self.cpuset)
return len(self.total_cpus)
@property
def total_cpus(self):
return self.cpuset | self.pcpuset
@property
def siblings(self):
cpu_list = sorted(list(self.cpuset))
cpu_list = sorted(list(self.total_cpus))
threads = 0
if ('cpu_topology' in self) and self.cpu_topology:
@ -83,7 +99,7 @@ class InstanceNUMACell(base.NovaEphemeralObject,
return list(map(set, zip(*[iter(cpu_list)] * threads)))
def pin(self, vcpu, pcpu):
if vcpu not in self.cpuset:
if vcpu not in self.pcpuset:
return
pinning_dict = self.cpu_pinning or {}
pinning_dict[vcpu] = pcpu
@ -115,7 +131,7 @@ class InstanceNUMATopology(base.NovaObject,
def obj_make_compatible(self, primitive, target_version):
super(InstanceNUMATopology, self).obj_make_compatible(primitive,
target_version)
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 3):
primitive.pop('emulator_threads_policy', None)
@ -136,11 +152,43 @@ class InstanceNUMATopology(base.NovaObject,
if 'nova_object.name' in primitive:
obj = cls.obj_from_primitive(primitive)
cls._migrate_legacy_dedicated_instance_cpuset(
context, instance_uuid, obj)
else:
obj = cls._migrate_legacy_object(context, instance_uuid, primitive)
return obj
# TODO(huaqiang): Remove after Wallaby once we are sure these objects have
# been loaded at least once.
@classmethod
def _migrate_legacy_dedicated_instance_cpuset(cls, context, instance_uuid,
obj):
# NOTE(huaqiang): We may encounter a topology object containing
# old-version 'InstanceNUMACell' cells. In that case the 'dedicated'
# CPUs are kept in the 'InstanceNUMACell.cpuset' field, but since
# Victoria they should be kept in the 'InstanceNUMACell.pcpuset'
# field. Upgrade the object and persist it to the database.
update_db = False
for cell in obj.cells:
if len(cell.cpuset) == 0:
continue
if cell.cpu_policy != obj_fields.CPUAllocationPolicy.DEDICATED:
continue
cell.pcpuset = cell.cpuset
cell.cpuset = set()
update_db = True
if update_db:
db_obj = jsonutils.dumps(obj.obj_to_primitive())
values = {
'numa_topology': db_obj,
}
db.instance_extra_update_by_uuid(context, instance_uuid,
values)
# TODO(stephenfin): Remove in X or later, once this has bedded in
@classmethod
def _migrate_legacy_object(cls, context, instance_uuid, primitive):
@ -161,6 +209,7 @@ class InstanceNUMATopology(base.NovaObject,
InstanceNUMACell(
id=cell.get('id'),
cpuset=hardware.parse_cpu_spec(cell.get('cpus', '')),
pcpuset=set(),
memory=cell.get('mem', {}).get('total', 0),
pagesize=cell.get('pagesize'),
) for cell in primitive.get('cells', [])

View File

@ -22,12 +22,14 @@ def fake_get_numa():
cell_0 = numa.InstanceNUMACell(node=0, memory=1024, pagesize=4, id=0,
cpu_topology=cpu_topology,
cpu_pinning={0: 0, 1: 5},
cpuset=set([0, 1]))
cpuset=set(),
pcpuset=set([0, 1]))
cell_1 = numa.InstanceNUMACell(node=1, memory=2048, pagesize=4, id=1,
cpu_topology=cpu_topology,
cpu_pinning={2: 1, 3: 8},
cpuset=set([2, 3]))
cpuset=set(),
pcpuset=set([2, 3]))
return numa.InstanceNUMATopology(cells=[cell_0, cell_1])

View File

@ -40,9 +40,8 @@ class ServerTopologyTestV278(test.NoDBTestCase):
def _fake_numa(self, cpu_pinning=None):
ce0 = numa.InstanceNUMACell(node=0, memory=1024, pagesize=4, id=0,
cpu_topology=None,
cpu_pinning=cpu_pinning,
cpuset=set([0, 1]))
cpu_topology=None, cpu_pinning=cpu_pinning,
cpuset=set([0, 1]), pcpuset=set())
return numa.InstanceNUMATopology(cells=[ce0])

View File

@ -186,13 +186,14 @@ class ClaimTestCase(test.NoDBTestCase):
def test_numa_topology_no_limit(self):
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
id=1, cpuset=set([1, 2]), pcpuset=set(), memory=512)])
self._claim(numa_topology=huge_instance)
def test_numa_topology_fails(self):
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2, 3, 4, 5]), memory=2048)])
id=1, cpuset=set([1, 2, 3, 4, 5]), pcpuset=set(),
memory=2048)])
limit_topo = objects.NUMATopologyLimits(
cpu_allocation_ratio=1, ram_allocation_ratio=1)
self.assertRaises(exception.ComputeResourcesUnavailable,
@ -203,7 +204,7 @@ class ClaimTestCase(test.NoDBTestCase):
def test_numa_topology_passes(self):
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
id=1, cpuset=set([1, 2]), pcpuset=set(), memory=512)])
limit_topo = objects.NUMATopologyLimits(
cpu_allocation_ratio=1, ram_allocation_ratio=1)
self._claim(limits={'numa_topology': limit_topo},
@ -230,7 +231,7 @@ class ClaimTestCase(test.NoDBTestCase):
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
id=1, cpuset=set([1, 2]), pcpuset=set(), memory=512)])
self._claim(requests=requests, numa_topology=huge_instance)
@ -265,7 +266,7 @@ class ClaimTestCase(test.NoDBTestCase):
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
id=1, cpuset=set([1, 2]), pcpuset=set(), memory=512)])
self.assertRaises(exception.ComputeResourcesUnavailable,
self._claim,
@ -294,7 +295,7 @@ class ClaimTestCase(test.NoDBTestCase):
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
id=1, cpuset=set([1, 2]), pcpuset=set(), memory=512)])
self._claim(requests=requests, numa_topology=huge_instance)
@ -381,11 +382,12 @@ class LiveMigrationClaimTestCase(ClaimTestCase):
instance_type = self._fake_instance_type()
instance = self._fake_instance()
instance.numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=1, cpuset=set([1, 2]),
memory=512, pagesize=2)])
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]),
pcpuset=set(), memory=512, pagesize=2)])
claimed_numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=1, cpuset=set([1, 2]),
memory=512, pagesize=1)])
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), pcpuset=set(), memory=512, pagesize=1)])
with mock.patch('nova.virt.hardware.numa_fit_instance_to_host',
return_value=claimed_numa_topology):
self.assertRaisesRegex(
@ -402,8 +404,9 @@ class LiveMigrationClaimTestCase(ClaimTestCase):
# This topology cannot fit in self.compute_node
# (see _fake_compute_node())
numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=1, cpuset=set([1, 2, 3]),
memory=1024)])
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2, 3]), pcpuset=set(),
memory=1024)])
with test.nested(
mock.patch('nova.virt.hardware.numa_get_constraints',
return_value=numa_topology),

View File

@ -5682,7 +5682,8 @@ class ComputeTestCase(BaseTestCase,
old_inst_topology = objects.InstanceNUMATopology(
instance_uuid=instance.uuid, cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([1, 2]), memory=512, pagesize=2048,
id=0, cpuset=set(), pcpuset=set([1, 2]), memory=512,
pagesize=2048,
cpu_policy=obj_fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={'0': 1, '1': 2})
])
@ -5691,7 +5692,8 @@ class ComputeTestCase(BaseTestCase,
new_inst_topology = objects.InstanceNUMATopology(
instance_uuid=instance.uuid, cells=[
objects.InstanceNUMACell(
id=1, cpuset=set([3, 4]), memory=512, pagesize=2048,
id=1, cpuset=set(), pcpuset=set([3, 4]), memory=512,
pagesize=2048,
cpu_policy=obj_fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={'0': 3, '1': 4})
])
@ -8694,9 +8696,10 @@ class ComputeAPITestCase(BaseTestCase):
def test_create_with_numa_topology(self, numa_constraints_mock):
numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([1, 2]), memory=512),
id=0, cpuset=set([1, 2]), pcpuset=set(),
memory=512),
objects.InstanceNUMACell(
id=1, cpuset=set([3, 4]), memory=512)])
id=1, cpuset=set([3, 4]), pcpuset=set(), memory=512)])
numa_topology.obj_reset_changes()
numa_constraints_mock.return_value = numa_topology

View File

@ -1692,7 +1692,8 @@ class _ComputeAPIUnitTestMixIn(object):
fake_reqspec.flavor = fake_inst.flavor
fake_numa_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0]), memory=512, pagesize=None,
id=0, cpuset=set([0]), pcpuset=set(), memory=512,
pagesize=None,
cpu_pinning_raw=None, cpuset_reserved=None, cpu_policy=None,
cpu_thread_policy=None)])
@ -1852,7 +1853,8 @@ class _ComputeAPIUnitTestMixIn(object):
fake_inst = self._create_instance_obj(params=params)
fake_numa_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0]), memory=512, pagesize=None,
id=0, cpuset=set([0]), pcpuset=set(), memory=512,
pagesize=None,
cpu_pinning_raw=None, cpuset_reserved=None, cpu_policy=None,
cpu_thread_policy=None)])

View File

@ -1206,14 +1206,18 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
numa_wo_pinning = test_instance_numa.get_fake_obj_numa_topology(
self.context)
numa_wo_pinning.cells[0].pcpuset = set()
numa_wo_pinning.cells[1].pcpuset = set()
instance_2.numa_topology = numa_wo_pinning
numa_w_pinning = test_instance_numa.get_fake_obj_numa_topology(
self.context)
numa_w_pinning.cells[0].pin_vcpus((1, 10), (2, 11))
numa_w_pinning.cells[0].cpuset = set()
numa_w_pinning.cells[0].cpu_policy = (
fields.CPUAllocationPolicy.DEDICATED)
numa_w_pinning.cells[1].pin_vcpus((3, 0), (4, 1))
numa_w_pinning.cells[1].cpuset = set()
numa_w_pinning.cells[1].cpu_policy = (
fields.CPUAllocationPolicy.DEDICATED)
instance_3.numa_topology = numa_w_pinning

View File

@ -153,9 +153,9 @@ _2MB = 2 * units.Mi / units.Ki
_INSTANCE_NUMA_TOPOLOGIES = {
'2mb': objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=_2MB, pagesize=0),
id=0, cpuset=set([1]), pcpuset=set(), memory=_2MB, pagesize=0),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=_2MB, pagesize=0)]),
id=1, cpuset=set([3]), pcpuset=set(), memory=_2MB, pagesize=0)]),
}
_NUMA_LIMIT_TOPOLOGIES = {

View File

@ -214,9 +214,10 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
@mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
def test_check_instance_has_no_numa_passes_non_kvm(self, mock_get):
self.flags(enable_numa_live_migration=False, group='workarounds')
self.task.instance.numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([0]),
memory=1024)])
self.task.instance.numa_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
])
mock_get.return_value = objects.ComputeNode(
uuid=uuids.cn1, hypervisor_type='xen')
self.task._check_instance_has_no_numa()
@ -227,9 +228,10 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
def test_check_instance_has_no_numa_passes_workaround(
self, mock_get_min_ver, mock_get):
self.flags(enable_numa_live_migration=True, group='workarounds')
self.task.instance.numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([0]),
memory=1024)])
self.task.instance.numa_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
])
mock_get.return_value = objects.ComputeNode(
uuid=uuids.cn1, hypervisor_type='qemu')
self.task._check_instance_has_no_numa()
@ -243,9 +245,10 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.flags(enable_numa_live_migration=False, group='workarounds')
mock_get.return_value = objects.ComputeNode(
uuid=uuids.cn1, hypervisor_type='qemu')
self.task.instance.numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([0]),
memory=1024)])
self.task.instance.numa_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
])
self.assertRaises(exception.MigrationPreCheckError,
self.task._check_instance_has_no_numa)
mock_get_min_ver.assert_called_once_with(self.context, 'nova-compute')
@ -258,9 +261,10 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.flags(enable_numa_live_migration=False, group='workarounds')
mock_get.return_value = objects.ComputeNode(
uuid=uuids.cn1, hypervisor_type='qemu')
self.task.instance.numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([0]),
memory=1024)])
self.task.instance.numa_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
])
self.task._check_instance_has_no_numa()
mock_get_min_ver.assert_called_once_with(self.context, 'nova-compute')

View File

@ -20,8 +20,10 @@ from nova.tests.unit import fake_flavor
INSTANCE_NUMA_TOPOLOGY = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=512)])
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1, 2]),
pcpuset=set(), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4]),
pcpuset=set(), memory=512)])
INSTANCE_NUMA_TOPOLOGY.obj_reset_changes(recursive=True)
IMAGE_META = objects.ImageMeta.from_dict(

View File

@ -597,8 +597,11 @@ class _TestInstanceObject(object):
def test_save_updates_numa_topology(self, mock_fdo, mock_update,
mock_extra_update):
fake_obj_numa_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([0]), memory=128),
objects.InstanceNUMACell(id=1, cpuset=set([1]), memory=128)])
objects.InstanceNUMACell(id=0, cpuset=set([0]), pcpuset=set(),
memory=128),
objects.InstanceNUMACell(id=1, cpuset=set([1]), pcpuset=set(),
memory=128),
])
fake_obj_numa_topology.instance_uuid = uuids.instance
jsonified = fake_obj_numa_topology._to_json()

View File

@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import base as ovo_base
@ -24,12 +25,14 @@ from nova.tests.unit.objects import test_objects
fake_instance_uuid = uuids.fake
fake_obj_numa_topology = objects.InstanceNUMATopology(
instance_uuid = fake_instance_uuid,
instance_uuid=fake_instance_uuid,
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([1, 2]), memory=512, pagesize=2048),
id=0, cpuset=set(), pcpuset=set([1, 2]), memory=512,
pagesize=2048),
objects.InstanceNUMACell(
id=1, cpuset=set([3, 4]), memory=512, pagesize=2048)
id=1, cpuset=set(), pcpuset=set([3, 4]), memory=512,
pagesize=2048),
])
fake_db_topology = {
@ -52,47 +55,95 @@ def get_fake_obj_numa_topology(context):
class _TestInstanceNUMACell(object):
def test_siblings(self):
# The default thread number of VirtCPUTopology is one; a single
# thread means no threading and therefore no siblings
inst_cell = objects.InstanceNUMACell(
cpuset=set([0, 1, 2]))
cpuset=set([0, 1, 2]), pcpuset=set())
self.assertEqual([], inst_cell.siblings)
inst_cell = objects.InstanceNUMACell(
cpuset=set([0, 1, 2]), pcpuset=set([4, 5, 6]))
self.assertEqual([], inst_cell.siblings)
# 'threads=0' means no sibling
topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=0)
inst_cell = objects.InstanceNUMACell(
cpuset=set([0, 1, 2]), cpu_topology=topo)
cpuset=set([0, 1, 2]), pcpuset=set(), cpu_topology=topo)
self.assertEqual([], inst_cell.siblings)
inst_cell = objects.InstanceNUMACell(
cpuset=set(), pcpuset=set([0, 1, 2]), cpu_topology=topo)
self.assertEqual([], inst_cell.siblings)
# One thread actually means no threads
topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1)
inst_cell = objects.InstanceNUMACell(
cpuset=set([0, 1, 2]), cpu_topology=topo)
cpuset=set([0, 1, 2]), pcpuset=set(), cpu_topology=topo)
self.assertEqual([], inst_cell.siblings)
inst_cell = objects.InstanceNUMACell(
cpuset=set(), pcpuset=set([0, 1, 2]), cpu_topology=topo)
self.assertEqual([], inst_cell.siblings)
# 2 threads per virtual core, and numa node has only one type CPU
# pinned and un-pinned.
topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
inst_cell = objects.InstanceNUMACell(
cpuset=set([0, 1, 2, 3]), cpu_topology=topo)
cpuset=set([0, 1, 2, 3]), pcpuset=set(), cpu_topology=topo)
self.assertEqual([set([0, 1]), set([2, 3])], inst_cell.siblings)
inst_cell = objects.InstanceNUMACell(
cpuset=set(), pcpuset=set([0, 1, 2, 3]), cpu_topology=topo)
self.assertEqual([set([0, 1]), set([2, 3])], inst_cell.siblings)
# 4 threads per virtual core, numa node has only one type CPU
topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4)
inst_cell = objects.InstanceNUMACell(
cpuset=set([0, 1, 2, 3]), cpu_topology=topo)
cpuset=set([0, 1, 2, 3]), pcpuset=set(), cpu_topology=topo)
self.assertEqual([set([0, 1, 2, 3])], inst_cell.siblings)
inst_cell = objects.InstanceNUMACell(
cpuset=set(), pcpuset=set([0, 1, 2, 3]), cpu_topology=topo)
self.assertEqual([set([0, 1, 2, 3])], inst_cell.siblings)
# 2 threads per virtual core, numa node with two type CPUs, the pinned
# and un-pinned
topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
inst_cell = objects.InstanceNUMACell(
cpuset=set([0, 1]), pcpuset=set([2, 3]), cpu_topology=topo)
self.assertEqual([set([0, 1]), set([2, 3])], inst_cell.siblings)
def test_pin(self):
inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
cpu_pinning=None)
inst_cell = objects.InstanceNUMACell(
cpuset=set([4, 5]), pcpuset=set([0, 1, 2, 3]), cpu_pinning=None)
# Only vCPU in the 'pcpuset' list is eligible for pinning
inst_cell.pin(0, 14)
self.assertEqual({0: 14}, inst_cell.cpu_pinning)
# vCPU 12 is not a CPU of this cell, drop silently
inst_cell.pin(12, 14)
self.assertEqual({0: 14}, inst_cell.cpu_pinning)
# vCPU in the 'cpuset' which is for floating CPUs, drop silently
inst_cell.pin(4, 15)
self.assertEqual({0: 14}, inst_cell.cpu_pinning)
# Another vCPU appeared in 'pcpuset', ready for pinning
inst_cell.pin(1, 16)
self.assertEqual({0: 14, 1: 16}, inst_cell.cpu_pinning)
def test_pin_vcpus(self):
inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
cpu_pinning=None)
inst_cell = objects.InstanceNUMACell(
cpuset=set([4, 5]), pcpuset=set([0, 1, 2, 3]), cpu_pinning=None)
# 'pcpuset' is the vCPU list for pinning
inst_cell.pin_vcpus((0, 14), (1, 15), (2, 16), (3, 17))
self.assertEqual({0: 14, 1: 15, 2: 16, 3: 17}, inst_cell.cpu_pinning)
# vCPU out of 'pcpuset' list will not be added to the CPU pinning list
inst_cell.pin_vcpus((0, 14), (1, 15), (2, 16), (3, 17), (4, 18))
self.assertEqual({0: 14, 1: 15, 2: 16, 3: 17}, inst_cell.cpu_pinning)
# vCPU not belonging to this cell will be dropped silently
inst_cell.pin_vcpus((0, 14), (1, 15), (2, 16), (3, 17), (10, 18))
self.assertEqual({0: 14, 1: 15, 2: 16, 3: 17}, inst_cell.cpu_pinning)
def test_cpu_pinning(self):
topo_obj = get_fake_obj_numa_topology(self.context)
self.assertEqual(set(), topo_obj.cpu_pinning)
@ -124,16 +175,26 @@ class _TestInstanceNUMACell(object):
fields.CPUEmulatorThreadsPolicy.ISOLATE)
self.assertTrue(topo_obj.emulator_threads_isolated)
def test_obj_make_compatible_numa_cell_pre_1_4(self):
def test_obj_make_compatible(self):
topo_obj = objects.InstanceNUMACell(
cpuset_reserved=set([1, 2]))
cpuset=set(), pcpuset=set([0, 1]),
cpuset_reserved=set([1, 2]),
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)
versions = ovo_base.obj_tree_get_versions('InstanceNUMACell')
data = lambda x: x['nova_object.data']
primitive = data(topo_obj.obj_to_primitive(target_version='1.4',
version_manifest=versions))
primitive = data(topo_obj.obj_to_primitive(
target_version='1.5', version_manifest=versions))
self.assertIn('pcpuset', primitive)
primitive = data(topo_obj.obj_to_primitive(
target_version='1.4', version_manifest=versions))
self.assertNotIn('pcpuset', primitive)
self.assertEqual(set([0, 1]), set(primitive['cpuset']))
self.assertIn('cpuset_reserved', primitive)
primitive = data(topo_obj.obj_to_primitive(target_version='1.3',
version_manifest=versions))
primitive = data(topo_obj.obj_to_primitive(
target_version='1.3', version_manifest=versions))
self.assertNotIn('cpuset_reserved', primitive)
@ -191,11 +252,13 @@ class _TestInstanceNUMATopology(object):
cells=[
objects.InstanceNUMACell(
cpuset=set([0, 1, 2, 3]),
pcpuset=set(),
cpu_pinning=None,
cpu_policy=cpu_policy,
),
objects.InstanceNUMACell(
cpuset=set([4, 5, 6, 7]),
pcpuset=set(),
cpu_pinning=None,
cpu_policy=cpu_policy,
),
@ -211,11 +274,13 @@ class _TestInstanceNUMATopology(object):
cells=[
objects.InstanceNUMACell(
cpuset=set([0, 1, 2, 3]),
pcpuset=set(),
cpu_pinning=None,
cpu_policy=None,
),
objects.InstanceNUMACell(
cpuset=set([4, 5, 6, 7]),
pcpuset=set(),
cpu_pinning=None,
cpu_policy=fields.CPUAllocationPolicy.SHARED
),
@ -230,16 +295,16 @@ class _TestInstanceNUMATopology(object):
instance_uuid=fake_instance_uuid,
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([1, 2]), memory=512, pagesize=2048,
cpuset_reserved=set([3, 7])),
id=0, cpuset=set([1, 2]), pcpuset=set(), memory=512,
pagesize=2048, cpuset_reserved=set([3, 7])),
objects.InstanceNUMACell(
id=1, cpuset=set([3, 4]), memory=512, pagesize=2048,
cpuset_reserved=set([9, 12]))
id=1, cpuset=set([3, 4]), pcpuset=set(), memory=512,
pagesize=2048, cpuset_reserved=set([9, 12]))
])
self.assertEqual(set([3, 7]), topology.cells[0].cpuset_reserved)
self.assertEqual(set([9, 12]), topology.cells[1].cpuset_reserved)
def test_obj_make_compatible_numa_pre_1_3(self):
def test_obj_make_compatible_numa(self):
topo_obj = objects.InstanceNUMATopology(
emulator_threads_policy=(
fields.CPUEmulatorThreadsPolicy.ISOLATE))
@ -252,6 +317,41 @@ class _TestInstanceNUMATopology(object):
topo_obj = objects.InstanceNUMATopology.obj_from_primitive(primitive)
self.assertFalse(topo_obj.emulator_threads_isolated)
def test_obj_from_db_obj(self):
"""Test of creating 'InstanceNUMATopology' OVO object from the
database primitives, which has an old version 'InstanceNUMACell'
primitives.
Prior to version 1.5, 'InstanceNUMACell' saves the instance CPUs in the
'cpuset' field, for both the pinned CPUs of a dedicated and the
un-pinned CPUs of a shared instances, after version 1.5, any pinned
CPUs of dedicated instance are moved to 'pcpuset'. this test verifies
the CPU movement for instance with a 'dedicated' allocation policy.
"""
fake_topo_obj_w_cell_v1_4 = objects.InstanceNUMATopology(
instance_uuid=fake_instance_uuid,
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([1, 2]), pcpuset=set(), memory=512,
pagesize=2048),
objects.InstanceNUMACell(
id=1, cpuset=set([3, 4]), pcpuset=set(), memory=512,
pagesize=2048),
])
fake_topo_obj = copy.deepcopy(fake_topo_obj_w_cell_v1_4)
for cell in fake_topo_obj.cells:
cell.cpu_policy = objects.fields.CPUAllocationPolicy.DEDICATED
numa_topology = objects.InstanceNUMATopology.obj_from_db_obj(
self.context, fake_instance_uuid, fake_topo_obj._to_json())
for obj_cell, topo_cell in zip(
numa_topology.cells,
fake_topo_obj_w_cell_v1_4['cells']):
self.assertEqual(set(), obj_cell.cpuset)
self.assertEqual(topo_cell.cpuset, obj_cell.pcpuset)
class TestInstanceNUMATopology(
test_objects._LocalTest, _TestInstanceNUMATopology,

View File

@ -1093,7 +1093,7 @@ object_data = {
'InstanceList': '2.6-238f125650c25d6d12722340d726f723',
'InstanceMapping': '1.2-3bd375e65c8eb9c45498d2f87b882e03',
'InstanceMappingList': '1.3-d34b6ebb076d542ae0f8b440534118da',
'InstanceNUMACell': '1.4-b68e13eacba363ae8f196abf0ffffb5b',
'InstanceNUMACell': '1.5-d6f884326eba8cae60930e06047fc7d9',
'InstanceNUMATopology': '1.3-ec0030cb0402a49c96da7051c037082a',
'InstancePCIRequest': '1.3-f6d324f1c337fad4f34892ed5f484c9a',
'InstancePCIRequests': '1.1-65e38083177726d806684cb1cc0136d2',

View File

@ -174,16 +174,26 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
set([1, 2]))
def test_support_requests_numa(self):
cells = [objects.InstanceNUMACell(id=0, cpuset=set(), memory=0),
objects.InstanceNUMACell(id=1, cpuset=set(), memory=0)]
cells = [
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
objects.InstanceNUMACell(
id=1, cpuset=set(), pcpuset=set(), memory=0),
]
self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
def test_support_requests_numa_failed(self):
cells = [objects.InstanceNUMACell(id=0, cpuset=set(), memory=0)]
cells = [
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
self.assertFalse(self.pci_stats.support_requests(pci_requests, cells))
def test_support_requests_no_numa_info(self):
cells = [objects.InstanceNUMACell(id=0, cpuset=set(), memory=0)]
cells = [
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
pci_requests = self._get_fake_requests(vendor_ids=['v3'])
self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
@ -197,7 +207,10 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
# numa node 1 has 1 device with vendor_id 'v2'
# we request two devices with vendor_id 'v1' and 'v2'.
# pci_numa_policy is 'preferred' so we can ignore numa affinity
cells = [objects.InstanceNUMACell(id=0, cpuset=set(), memory=0)]
cells = [
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
pci_requests = self._get_fake_requests(
numa_policy=fields.PCINUMAAffinityPolicy.PREFERRED)
@ -206,7 +219,10 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
def test_support_requests_no_numa_info_pci_numa_policy_required(self):
# pci device with vendor_id 'v3' has numa_node=None.
# pci_numa_policy is 'required' so we can't use this device
cells = [objects.InstanceNUMACell(id=0, cpuset=set(), memory=0)]
cells = [
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
pci_requests = self._get_fake_requests(vendor_ids=['v3'],
numa_policy=fields.PCINUMAAffinityPolicy.REQUIRED)
@ -227,21 +243,31 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests_multiple))
def test_consume_requests_numa(self):
cells = [objects.InstanceNUMACell(id=0, cpuset=set(), memory=0),
objects.InstanceNUMACell(id=1, cpuset=set(), memory=0)]
cells = [
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
objects.InstanceNUMACell(
id=1, cpuset=set(), pcpuset=set(), memory=0),
]
devs = self.pci_stats.consume_requests(pci_requests, cells)
self.assertEqual(2, len(devs))
self.assertEqual(set(['v1', 'v2']),
set([dev.vendor_id for dev in devs]))
def test_consume_requests_numa_failed(self):
cells = [objects.InstanceNUMACell(id=0, cpuset=set(), memory=0)]
cells = [
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
self.assertIsNone(self.pci_stats.consume_requests(pci_requests, cells))
def test_consume_requests_no_numa_info(self):
cells = [objects.InstanceNUMACell(id=0, cpuset=set(), memory=0)]
cells = [
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
pci_request = [objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v3'}])]
spec=[{'vendor_id': 'v3'}])]
devs = self.pci_stats.consume_requests(pci_request, cells)
self.assertEqual(1, len(devs))
self.assertEqual(set(['v3']),
@ -258,8 +284,10 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
``expected``.
"""
self._add_fake_devs_with_numa()
cells = [objects.InstanceNUMACell(id=id, cpuset=set(), memory=0)
for id in cell_ids]
cells = [
objects.InstanceNUMACell(
id=id, cpuset=set(), pcpuset=set(), memory=0)
for id in cell_ids]
pci_requests = self._get_fake_requests(vendor_ids=[vendor_id],
numa_policy=policy, count=count)

View File

@ -47,7 +47,7 @@ class ServerTopologyPolicyTest(base.BasePolicyTest):
node=0, memory=1024, pagesize=4, id=123,
cpu_topology=None,
cpu_pinning={},
cpuset=set([0, 1]))])
cpuset=set([0, 1]), pcpuset=set())])
# Check that system reader or and server owner is able to get
# the server topology.

View File

@ -42,10 +42,12 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
return spec_obj
def test_numa_topology_filter_pass(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1]), pcpuset=set(),
memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), pcpuset=set(),
memory=512),
])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
@ -55,10 +57,12 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1]), pcpuset=set(),
memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), pcpuset=set(),
memory=512),
])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1', {'pci_stats': None})
@ -71,11 +75,14 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_fit(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([2]), memory=512),
objects.InstanceNUMACell(id=2, cpuset=set([3]), memory=512)
])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1]), pcpuset=set(),
memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([2]), pcpuset=set(),
memory=512),
objects.InstanceNUMACell(id=2, cpuset=set([3]), pcpuset=set(),
memory=512),
])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
@ -85,11 +92,12 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_memory(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
memory=1024),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1]), pcpuset=set(),
memory=1024),
objects.InstanceNUMACell(id=1, cpuset=set([3]), pcpuset=set(),
memory=512),
])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
@ -99,10 +107,11 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_cpu(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4, 5]),
memory=512)])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1]), pcpuset=set(),
memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4, 5]),
pcpuset=set(), memory=512)])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
@ -112,10 +121,12 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_pass_set_limit(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1]), pcpuset=set(),
memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), pcpuset=set(),
memory=512),
])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
@ -132,14 +143,16 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0,
cpuset=set([1]),
cpuset=set(),
pcpuset=set([1]),
memory=512,
cpu_policy=cpu_policy,
cpu_thread_policy=cpu_thread_policy,
),
objects.InstanceNUMACell(
id=1,
cpuset=set([3]),
cpuset=set(),
pcpuset=set([3]),
memory=512,
cpu_policy=cpu_policy,
cpu_thread_policy=cpu_thread_policy,
@ -213,11 +226,11 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
numa_topology, cpu_policy, cpu_thread_policy, True)
def test_numa_topology_filter_pass_mempages(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([3]),
memory=128, pagesize=4),
objects.InstanceNUMACell(id=1, cpuset=set([1]),
memory=128, pagesize=16)
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([3]), pcpuset=set(), memory=128, pagesize=4),
objects.InstanceNUMACell(
id=1, cpuset=set([1]), pcpuset=set(), memory=128, pagesize=16),
])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1',
@ -228,12 +241,12 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_mempages(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([3]),
memory=128, pagesize=8),
objects.InstanceNUMACell(id=1, cpuset=set([1]),
memory=128, pagesize=16)
])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([3]), pcpuset=set(), memory=128, pagesize=8),
objects.InstanceNUMACell(
id=1, cpuset=set([1]), pcpuset=set(), memory=128, pagesize=16),
])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
@ -280,8 +293,11 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
host = self._get_fake_host_state_with_networks()
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)])
objects.InstanceNUMACell(id=0, cpuset=set([1]), pcpuset=set(),
memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), pcpuset=set(),
memory=512),
])
network_metadata = objects.NetworkMetadata(
physnets=set(['foo']), tunneled=False)
@ -301,7 +317,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
host = self._get_fake_host_state_with_networks()
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512)])
objects.InstanceNUMACell(id=0, cpuset=set([1]), pcpuset=set(),
memory=512),
])
# this should fail because the networks are affined to different host
# NUMA nodes but our guest only has a single NUMA node

View File

@ -684,23 +684,27 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
mock_instance, image_meta)
def test_get_instance_vnuma_config_bad_cpuset(self):
cell1 = objects.InstanceNUMACell(cpuset=set([0]), memory=1024)
cell2 = objects.InstanceNUMACell(cpuset=set([1, 2]), memory=1024)
cell1 = objects.InstanceNUMACell(
cpuset=set([0]), pcpuset=set(), memory=1024)
cell2 = objects.InstanceNUMACell(
cpuset=set([1, 2]), pcpuset=set(), memory=1024)
self._check_get_instance_vnuma_config_exception(
numa_cells=[cell1, cell2])
def test_get_instance_vnuma_config_bad_memory(self):
cell1 = objects.InstanceNUMACell(cpuset=set([0]), memory=1024)
cell2 = objects.InstanceNUMACell(cpuset=set([1]), memory=2048)
cell1 = objects.InstanceNUMACell(
cpuset=set([0]), pcpuset=set(), memory=1024)
cell2 = objects.InstanceNUMACell(
cpuset=set([1]), pcpuset=set(), memory=2048)
self._check_get_instance_vnuma_config_exception(
numa_cells=[cell1, cell2])
def test_get_instance_vnuma_config_cpu_pinning(self):
cell1 = objects.InstanceNUMACell(
cpuset=set([0]), memory=1024,
cpuset=set([0]), pcpuset=set(), memory=1024,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)
cell2 = objects.InstanceNUMACell(
cpuset=set([1]), memory=1024,
cpuset=set([1]), pcpuset=set(), memory=1024,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)
self._check_get_instance_vnuma_config_exception(
numa_cells=[cell1, cell2])
@ -720,8 +724,10 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
self.assertEqual(expected_mem_per_numa, result_memory_per_numa)
def test_get_instance_vnuma_config(self):
cell1 = objects.InstanceNUMACell(cpuset=set([0]), memory=2048)
cell2 = objects.InstanceNUMACell(cpuset=set([1]), memory=2048)
cell1 = objects.InstanceNUMACell(
cpuset=set([0]), pcpuset=set(), memory=2048)
cell2 = objects.InstanceNUMACell(
cpuset=set([1]), pcpuset=set(), memory=2048)
numa_topology = objects.InstanceNUMATopology(cells=[cell1, cell2])
self._check_get_instance_vnuma_config(numa_topology=numa_topology,
expected_cpus_per_numa=1,

View File

@ -2966,7 +2966,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
])])
inst_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)])
id=3, cpuset=set([0, 1]), pcpuset=set(), memory=1024,
pagesize=2048),
])
numa_tune = vconfig.LibvirtConfigGuestNUMATune()
numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
@ -2997,7 +2999,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
])])
inst_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=3, cpuset=set([0, 1]), memory=1024, pagesize=4)])
id=3, cpuset=set([0, 1]), pcpuset=set(), memory=1024,
pagesize=4),
])
numa_tune = vconfig.LibvirtConfigGuestNUMATune()
numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
@ -3300,7 +3304,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
])])
inst_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)])
id=3, cpuset=set([0, 1]), pcpuset=set(), memory=1024,
pagesize=2048),
])
numa_tune = vconfig.LibvirtConfigGuestNUMATune()
numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
@ -3426,10 +3432,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
pagesize, mock_host,
mock_caps, mock_lib_version,
mock_version, mock_type):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([0]),
memory=1024, pagesize=pagesize)])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0]), pcpuset=set(),
memory=1024, pagesize=pagesize),
])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
@ -3527,11 +3534,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_get_guest_config_non_numa_host_instance_topo(self, is_able):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([0]), memory=1024),
objects.InstanceNUMACell(
id=1, cpuset=set([2]), memory=1024)])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
objects.InstanceNUMACell(
id=1, cpuset=set([2]), pcpuset=set(), memory=1024),
])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
@ -3568,7 +3576,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
for instance_cell, numa_cfg_cell in zip(
instance_topology.cells, cfg.cpu.numa.cells):
self.assertEqual(instance_cell.id, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
@ -3578,12 +3586,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(cpu_shared_set='0-5', cpu_dedicated_set=None,
group='compute')
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]), memory=1024, pagesize=None),
objects.InstanceNUMACell(
id=2, cpuset=set([2, 3]), memory=1024,
pagesize=None)])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]), pcpuset=set(), memory=1024,
pagesize=None),
objects.InstanceNUMACell(
id=2, cpuset=set([2, 3]), pcpuset=set(), memory=1024,
pagesize=None),
])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
@ -3637,7 +3647,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg.cpu.numa.cells,
range(len(instance_topology.cells))):
self.assertEqual(index, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
@ -3654,11 +3664,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual("strict", memnode.mode)
def test_get_guest_config_numa_host_instance_topo_reordered(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=3, cpuset=set([0, 1]), memory=1024),
objects.InstanceNUMACell(
id=0, cpuset=set([2, 3]), memory=1024)])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=3, cpuset=set([0, 1]), pcpuset=set(), memory=1024),
objects.InstanceNUMACell(
id=0, cpuset=set([2, 3]), pcpuset=set(), memory=1024),
])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
@ -3711,7 +3722,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_topology.cells,
cfg.cpu.numa.cells)):
self.assertEqual(index, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
self.assertIsNone(numa_cfg_cell.memAccess)
@ -3728,13 +3739,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual("strict", memnode.mode)
def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]), memory=1024,
cpu_pinning={0: 24, 1: 25}),
objects.InstanceNUMACell(
id=0, cpuset=set([2, 3]), memory=1024,
cpu_pinning={2: 0, 3: 1})])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=1, cpuset=set(), pcpuset=set([0, 1]), memory=1024,
cpu_pinning={0: 24, 1: 25}),
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set([2, 3]), memory=1024,
cpu_pinning={2: 0, 3: 1}),
])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
@ -3789,7 +3801,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
for i, (instance_cell, numa_cfg_cell) in enumerate(zip(
instance_topology.cells, cfg.cpu.numa.cells)):
self.assertEqual(i, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
self.assertIsNone(numa_cfg_cell.memAccess)
@ -3808,14 +3820,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(cpu_shared_set='2-5', cpu_dedicated_set=None,
group='compute')
instance_topology = objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]),
memory=1024, pagesize=2048),
objects.InstanceNUMACell(
id=2, cpuset=set([2, 3]),
memory=1024, pagesize=2048)])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]), pcpuset=set(), memory=1024,
pagesize=2048),
objects.InstanceNUMACell(
id=2, cpuset=set([2, 3]), pcpuset=set(), memory=1024,
pagesize=2048),
])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
@ -3857,7 +3869,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg.cpu.numa.cells,
range(len(instance_topology.cells))):
self.assertEqual(index, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
self.assertEqual("shared", numa_cfg_cell.memAccess)
@ -3881,18 +3893,18 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(cpu_shared_set=None, cpu_dedicated_set='4-7',
group='compute')
instance_topology = objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=2, cpuset=set([0, 1]),
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={0: 4, 1: 5},
memory=1024, pagesize=2048),
objects.InstanceNUMACell(
id=3, cpuset=set([2, 3]),
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={2: 6, 3: 7},
memory=1024, pagesize=2048)])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=2, cpuset=set(), pcpuset=set([0, 1]),
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={0: 4, 1: 5},
memory=1024, pagesize=2048),
objects.InstanceNUMACell(
id=3, cpuset=set(), pcpuset=set([2, 3]),
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={2: 6, 3: 7},
memory=1024, pagesize=2048),
])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
@ -3939,7 +3951,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg.cpu.numa.cells,
range(len(instance_topology.cells))):
self.assertEqual(index, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.total_cpus, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
self.assertEqual("shared", numa_cfg_cell.memAccess)
@ -3981,16 +3993,17 @@ class LibvirtConnTestCase(test.NoDBTestCase,
fields.CPUEmulatorThreadsPolicy.ISOLATE),
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]),
id=0, cpuset=set(), pcpuset=set([0, 1]),
memory=1024, pagesize=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={0: 4, 1: 5},
cpuset_reserved=set([6])),
objects.InstanceNUMACell(
id=1, cpuset=set([2, 3]),
id=1, cpuset=set(), pcpuset=set([2, 3]),
memory=1024, pagesize=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={2: 7, 3: 8})])
cpu_pinning={2: 7, 3: 8}),
])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
@ -4036,16 +4049,17 @@ class LibvirtConnTestCase(test.NoDBTestCase,
fields.CPUEmulatorThreadsPolicy.SHARE),
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]),
id=0, cpuset=set(), pcpuset=set([0, 1]),
memory=1024, pagesize=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={0: 4, 1: 5},
cpuset_reserved=set([6])),
objects.InstanceNUMACell(
id=1, cpuset=set([2, 3]),
id=1, cpuset=set(), pcpuset=set([2, 3]),
memory=1024, pagesize=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={2: 7, 3: 8})])
cpu_pinning={2: 7, 3: 8}),
])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
@ -4085,16 +4099,17 @@ class LibvirtConnTestCase(test.NoDBTestCase,
fields.CPUEmulatorThreadsPolicy.SHARE),
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]),
id=0, cpuset=set(), pcpuset=set([0, 1]),
memory=1024, pagesize=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={0: 2, 1: 3},
cpuset_reserved=set([6])),
objects.InstanceNUMACell(
id=1, cpuset=set([2, 3]),
id=1, cpuset=set(), pcpuset=set([2, 3]),
memory=1024, pagesize=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={2: 4, 3: 5})])
cpu_pinning={2: 4, 3: 5}),
])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
@ -4133,8 +4148,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_get_cpu_numa_config_from_instance(self):
topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
objects.InstanceNUMACell(
id=0, cpuset=set([1, 2]), pcpuset=set(), memory=128),
objects.InstanceNUMACell(
id=1, cpuset=set([3, 4]), pcpuset=set(), memory=128),
])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conf = drvr._get_cpu_numa_config_from_instance(topology, True)
@ -4158,9 +4175,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value=True)
def test_get_memnode_numa_config_from_instance(self, mock_numa):
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
objects.InstanceNUMACell(id=16, cpuset=set([5, 6]), memory=128)
objects.InstanceNUMACell(
id=0, cpuset=set([1, 2]), pcpuset=set(), memory=128),
objects.InstanceNUMACell(
id=1, cpuset=set([3, 4]), pcpuset=set(), memory=128),
objects.InstanceNUMACell(
id=16, cpuset=set([5, 6]), pcpuset=set(), memory=128),
])
host_topology = objects.NUMATopology(cells=[
@ -4205,14 +4225,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(host.Host, "get_capabilities")
def test_does_not_want_hugepages(self, mock_caps, mock_numa):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_topology = objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]),
memory=1024, pagesize=4),
objects.InstanceNUMACell(
id=2, cpuset=set([2, 3]),
memory=1024, pagesize=4)])
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]), pcpuset=set(),
memory=1024, pagesize=4),
objects.InstanceNUMACell(
id=2, cpuset=set([2, 3]), pcpuset=set(),
memory=1024, pagesize=4),
])
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
@ -4250,11 +4270,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_topology = objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]),
id=1, cpuset=set([0, 1]), pcpuset=set(),
memory=1024, pagesize=2048),
objects.InstanceNUMACell(
id=2, cpuset=set([2, 3]),
memory=1024, pagesize=2048)])
id=2, cpuset=set([2, 3]), pcpuset=set(),
memory=1024, pagesize=2048),
])
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
@ -20947,19 +20968,17 @@ class TestUpdateProviderTree(test.NoDBTestCase):
id=1, uuid=uuids.instance_a, **base_instance)
instance_a.numa_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0,
cpuset=set([0, 1]),
memory=1024)])
id=0, cpuset=set([0, 1]), pcpuset=set(), memory=1024),
])
instance_b = objects.Instance(
id=2, uuid=uuids.instance_b, **base_instance)
instance_b.numa_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0,
cpuset=set([0, 1]),
id=0, cpuset=set(), pcpuset=set([0, 1]),
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={0: 2, 1: 3},
memory=1024)])
cpu_pinning={0: 2, 1: 3}, memory=1024),
])
instance_c = objects.Instance(
id=3, uuid=uuids.instance_c, **base_instance)
@ -20972,11 +20991,10 @@ class TestUpdateProviderTree(test.NoDBTestCase):
id=4, uuid=uuids.instance_d, **base_instance)
instance_d.numa_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0,
cpuset=set([0, 1]),
id=0, cpuset=set(), pcpuset=set([0, 1]),
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={0: 0, 1: 1},
memory=1024)])
cpu_pinning={0: 0, 1: 1}, memory=1024),
])
migration = objects.Migration(
id=42,

File diff suppressed because it is too large Load Diff

View File

@ -916,7 +916,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
pinning = _get_pinning(
1, # we only want to "use" one thread per core
sibling_sets[threads_per_core],
instance_cell.cpuset)
instance_cell.pcpuset)
cpuset_reserved = _get_reserved(
sibling_sets[1], pinning, num_cpu_reserved=num_cpu_reserved,
cpu_thread_isolate=True)
@ -952,7 +952,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
pinning = _get_pinning(
threads_no, sibling_set,
instance_cell.cpuset)
instance_cell.pcpuset)
cpuset_reserved = _get_reserved(
sibling_sets[1], pinning, num_cpu_reserved=num_cpu_reserved)
if not pinning or (num_cpu_reserved and not cpuset_reserved):
@ -978,7 +978,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
sibling_set = [set([x]) for x in itertools.chain(*sibling_sets[1])]
pinning = _get_pinning(
threads_no, sibling_set,
instance_cell.cpuset)
instance_cell.pcpuset)
cpuset_reserved = _get_reserved(
sibling_set, pinning, num_cpu_reserved=num_cpu_reserved)
@ -1079,12 +1079,12 @@ def _numa_fit_instance_cell(
# NOTE(stephenfin): As with memory, do not allow an instance to overcommit
# against itself on any NUMA cell
if instance_cell.cpu_policy == fields.CPUAllocationPolicy.DEDICATED:
required_cpus = len(instance_cell.cpuset) + cpuset_reserved
required_cpus = len(instance_cell.pcpuset) + cpuset_reserved
if required_cpus > len(host_cell.pcpuset):
LOG.debug('Not enough host cell CPUs to fit instance cell; '
'required: %(required)d + %(cpuset_reserved)d as '
'overhead, actual: %(actual)d', {
'required': len(instance_cell.cpuset),
'required': len(instance_cell.pcpuset),
'actual': len(host_cell.pcpuset),
'cpuset_reserved': cpuset_reserved
})
@ -1101,14 +1101,14 @@ def _numa_fit_instance_cell(
if instance_cell.cpu_policy == fields.CPUAllocationPolicy.DEDICATED:
LOG.debug('Pinning has been requested')
required_cpus = len(instance_cell.cpuset) + cpuset_reserved
required_cpus = len(instance_cell.pcpuset) + cpuset_reserved
if required_cpus > host_cell.avail_pcpus:
LOG.debug('Not enough available CPUs to schedule instance. '
'Oversubscription is not possible with pinned '
'instances. Required: %(required)d (%(vcpus)d + '
'%(num_cpu_reserved)d), actual: %(actual)d',
{'required': required_cpus,
'vcpus': len(instance_cell.cpuset),
'vcpus': len(instance_cell.pcpuset),
'actual': host_cell.avail_pcpus,
'num_cpu_reserved': cpuset_reserved})
return None
@ -1562,6 +1562,8 @@ def get_cpu_thread_policy_constraint(
def _get_numa_topology_auto(
nodes: int,
flavor: 'objects.Flavor',
vcpus: ty.Set[int],
pcpus: ty.Set[int],
) -> 'objects.InstanceNUMATopology':
"""Generate a NUMA topology automatically based on CPUs and memory.
@ -1571,6 +1573,8 @@ def _get_numa_topology_auto(
:param nodes: The number of nodes required in the generated topology.
:param flavor: The flavor used for the instance, from which to extract the
CPU and memory count.
:param vcpus: A set of IDs for CPUs that should be shared.
:param pcpus: A set of IDs for CPUs that should be dedicated.
"""
if (flavor.vcpus % nodes) > 0 or (flavor.memory_mb % nodes) > 0:
raise exception.ImageNUMATopologyAsymmetric()
@ -1580,10 +1584,10 @@ def _get_numa_topology_auto(
ncpus = int(flavor.vcpus / nodes)
mem = int(flavor.memory_mb / nodes)
start = node * ncpus
cpuset = set(range(start, start + ncpus))
cpus = set(range(start, start + ncpus))
cells.append(objects.InstanceNUMACell(
id=node, cpuset=cpuset, memory=mem))
id=node, cpuset=cpus & vcpus, pcpuset=cpus & pcpus, memory=mem))
return objects.InstanceNUMATopology(cells=cells)
@ -1591,6 +1595,8 @@ def _get_numa_topology_auto(
def _get_numa_topology_manual(
nodes: int,
flavor: 'objects.Flavor',
vcpus: ty.Set[int],
pcpus: ty.Set[int],
cpu_list: ty.List[ty.Set[int]],
mem_list: ty.List[int],
) -> 'objects.InstanceNUMATopology':
@ -1599,6 +1605,8 @@ def _get_numa_topology_manual(
:param nodes: The number of nodes required in the generated topology.
:param flavor: The flavor used for the instance, from which to extract the
CPU and memory count.
:param vcpus: A set of IDs for CPUs that should be shared.
:param pcpus: A set of IDs for CPUs that should be dedicated.
:param cpu_list: A list of sets of ints; each set in the list corresponds
to the set of guest cores to assign to NUMA node $index.
:param mem_list: A list of ints; each int corresponds to the amount of
@ -1612,9 +1620,9 @@ def _get_numa_topology_manual(
for node in range(nodes):
mem = mem_list[node]
cpuset = cpu_list[node]
cpus = cpu_list[node]
for cpu in cpuset:
for cpu in cpus:
if cpu > (flavor.vcpus - 1):
raise exception.ImageNUMATopologyCPUOutOfRange(
cpunum=cpu, cpumax=(flavor.vcpus - 1))
@ -1626,7 +1634,7 @@ def _get_numa_topology_manual(
availcpus.remove(cpu)
cells.append(objects.InstanceNUMACell(
id=node, cpuset=cpuset, memory=mem))
id=node, cpuset=cpus & vcpus, pcpuset=cpus & pcpus, memory=mem))
totalmem = totalmem + mem
if availcpus:
@ -1913,20 +1921,30 @@ def numa_get_constraints(flavor, image_meta):
if nodes or pagesize or vpmems or cpu_policy in (
fields.CPUAllocationPolicy.DEDICATED,
):
nodes = nodes or 1
# NOTE(huaqiang): Here we build the instance dedicated CPU set and the
# shared CPU set, through 'pcpus' and 'vcpus' respectively,
# which will be used later to calculate the per-NUMA-cell CPU set.
cpus = set(range(flavor.vcpus))
pcpus = set()
if cpu_policy == fields.CPUAllocationPolicy.DEDICATED:
pcpus = cpus
vcpus = cpus - pcpus
nodes = nodes or 1
cpu_list = _get_numa_cpu_constraint(flavor, image_meta)
mem_list = _get_numa_mem_constraint(flavor, image_meta)
if cpu_list is None and mem_list is None:
numa_topology = _get_numa_topology_auto(nodes, flavor)
numa_topology = _get_numa_topology_auto(
nodes, flavor, vcpus, pcpus,
)
elif cpu_list is not None and mem_list is not None:
# If any node has data set, all nodes must have data set
if len(cpu_list) != nodes or len(mem_list) != nodes:
raise exception.ImageNUMATopologyIncomplete()
numa_topology = _get_numa_topology_manual(
nodes, flavor, cpu_list, mem_list
nodes, flavor, vcpus, pcpus, cpu_list, mem_list
)
else:
# If one property list is specified both must be

View File

@ -4918,7 +4918,7 @@ class LibvirtDriver(driver.ComputeDriver):
for instance_cell in instance_numa_topology.cells:
guest_cell = vconfig.LibvirtConfigGuestCPUNUMACell()
guest_cell.id = instance_cell.id
guest_cell.cpus = instance_cell.cpuset
guest_cell.cpus = instance_cell.total_cpus
guest_cell.memory = instance_cell.memory * units.Ki
# The vhost-user network backend requires file backed