Fix some dedicated CPU assumptions

Set our dedicated-per-NUMA value to 3 in the job to leave some room
for excluding cpu0 (which is special) in the next patch. Also try to
determine the appropriate number of dedicated CPUs per test server
for CPU pinning by choosing half the minimum amount available on any
compute node.

Change-Id: If0bfd74f7ea9c3cfc7c2f21a939445ccb09e501d
commit 746a981d60
parent 9fd80a8b5a
Author: Dan Smith
Date:   2023-10-10 12:11:29 -07:00

2 changed files with 25 additions and 16 deletions
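
The sizing strategy is easiest to see with this job's numbers plugged in.
Below is a hypothetical standalone sketch of the math this patch adds to
setUp() (second file); cpu_topology and the dedicated count mirror the job
config, while shared_cpus_per_numa is an assumption inferred from the
two-CPU cpu_shared_set values:

    # Hypothetical standalone sketch -- not part of the patch itself.
    cpu_topology = {0: [0, 1, 2, 3, 4, 5, 6, 7]}  # one NUMA node (job config)
    dedicated_cpus_per_numa = 3  # WHITEBOX_DEDICATED_CPUS_PER_NUMA below
    shared_cpus_per_numa = 2     # assumed from the cpu_shared_set sizes

    # "Half the minimum available on any compute node": one node's total
    # (per-NUMA count times NUMA node count), halved with floor division.
    dedicated_vcpus = (dedicated_cpus_per_numa * len(cpu_topology)) // 2
    shared_vcpus = (shared_cpus_per_numa * len(cpu_topology)) // 2
    assert (dedicated_vcpus, shared_vcpus) == (1, 1)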

@@ -58,6 +58,7 @@
       WHITEBOX_CPU_MODEL: Nehalem
       WHITEBOX_CPU_MODEL_EXTRA_FLAGS: vme,+ssse3,-mmx
       WHITEBOX_CPU_TOPOLOGY: "0: [0,1,2,3,4,5,6,7]"
+      WHITEBOX_DEDICATED_CPUS_PER_NUMA: 3
       devstack_local_conf:
         test-config:
           $TEMPEST_CONFIG:
@@ -70,7 +71,7 @@
             key_manager:
               backend: barbican
             compute:
-              cpu_dedicated_set: '0-3'
+              cpu_dedicated_set: '1-3'
               cpu_shared_set: '4,5'
               max_disk_devices_to_attach: '7'
             libvirt:
@@ -94,7 +95,7 @@
             key_manager:
               backend: barbican
             compute:
-              cpu_dedicated_set: '4-7'
+              cpu_dedicated_set: '4-6'
               cpu_shared_set: '2,3'
               max_disk_devices_to_attach: '7'
             libvirt:
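
A quick standalone check of the new ranges (not part of the change): both
dedicated sets now hold exactly three host CPUs, matching the
WHITEBOX_DEDICATED_CPUS_PER_NUMA value added above, and cpu0 drops out of
the first node's set:

    # nova cpu_dedicated_set ranges are inclusive.
    node_a = set(range(1, 4))  # '1-3' -> {1, 2, 3}
    node_b = set(range(4, 7))  # '4-6' -> {4, 5, 6}
    assert len(node_a) == len(node_b) == 3  # three dedicated CPUs per node
    assert 0 not in node_a | node_b         # cpu0 is in neither set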

@@ -124,11 +124,19 @@ class BasePinningTest(base.BaseWhiteboxComputeTest,
 class CPUPolicyTest(BasePinningTest):
     """Validate CPU policy support."""
 
-    vcpus = 2
+    def setUp(self):
+        super().setUp()
+
+        self.dedicated_vcpus = (
+            CONF.whitebox_hardware.dedicated_cpus_per_numa *
+            len(CONF.whitebox_hardware.cpu_topology)) // 2
+        self.shared_vcpus = (
+            CONF.whitebox_hardware.shared_cpus_per_numa *
+            len(CONF.whitebox_hardware.cpu_topology)) // 2
 
     def test_cpu_shared(self):
         """Ensure an instance with an explicit 'shared' policy work."""
-        flavor = self.create_flavor(vcpus=self.vcpus,
+        flavor = self.create_flavor(vcpus=self.shared_vcpus,
                                     extra_specs=self.shared_cpu_policy)
 
         self.create_test_server(flavor=flavor['id'], wait_until='ACTIVE')
@@ -139,7 +147,7 @@ class CPUPolicyTest(BasePinningTest):
         default. However, we check specifics of that later and only assert that
         things aren't overlapping here.
         """
-        flavor = self.create_flavor(vcpus=self.vcpus,
+        flavor = self.create_flavor(vcpus=self.dedicated_vcpus,
                                     extra_specs=self.dedicated_cpu_policy)
         server_a = self.create_test_server(flavor=flavor['id'],
                                            wait_until='ACTIVE')
@@ -150,10 +158,10 @@ class CPUPolicyTest(BasePinningTest):
         cpu_pinnings_b = self.get_server_cpu_pinning(server_b['id'])
 
         self.assertEqual(
-            len(cpu_pinnings_a), self.vcpus,
+            len(cpu_pinnings_a), self.dedicated_vcpus,
             "Instance should be pinned but it is unpinned")
         self.assertEqual(
-            len(cpu_pinnings_b), self.vcpus,
+            len(cpu_pinnings_b), self.dedicated_vcpus,
             "Instance should be pinned but it is unpinned")
 
         self.assertTrue(
@@ -167,17 +175,17 @@ class CPUPolicyTest(BasePinningTest):
                           'Resize not available.')
     def test_resize_pinned_server_to_unpinned(self):
         """Ensure resizing an instance to unpinned actually drops pinning."""
-        flavor_a = self.create_flavor(vcpus=self.vcpus,
+        flavor_a = self.create_flavor(vcpus=self.dedicated_vcpus,
                                       extra_specs=self.dedicated_cpu_policy)
         server = self.create_test_server(flavor=flavor_a['id'],
                                          wait_until='ACTIVE')
         cpu_pinnings = self.get_server_cpu_pinning(server['id'])
 
         self.assertEqual(
-            len(cpu_pinnings), self.vcpus,
+            len(cpu_pinnings), self.dedicated_vcpus,
             "Instance should be pinned but is unpinned")
 
-        flavor_b = self.create_flavor(vcpus=self.vcpus,
+        flavor_b = self.create_flavor(vcpus=self.shared_vcpus,
                                       extra_specs=self.shared_cpu_policy)
         self.resize_server(server['id'], flavor_b['id'])
         cpu_pinnings = self.get_server_cpu_pinning(server['id'])
@@ -190,7 +198,7 @@ class CPUPolicyTest(BasePinningTest):
                           'Resize not available.')
     def test_resize_unpinned_server_to_pinned(self):
         """Ensure resizing an instance to pinned actually applies pinning."""
-        flavor_a = self.create_flavor(vcpus=self.vcpus,
+        flavor_a = self.create_flavor(vcpus=self.shared_vcpus,
                                       extra_specs=self.shared_cpu_policy)
         server = self.create_test_server(flavor=flavor_a['id'],
                                          wait_until='ACTIVE')
@@ -200,25 +208,25 @@ class CPUPolicyTest(BasePinningTest):
             len(cpu_pinnings), 0,
             "Instance should be unpinned but is pinned")
 
-        flavor_b = self.create_flavor(vcpus=self.vcpus,
+        flavor_b = self.create_flavor(vcpus=self.dedicated_vcpus,
                                       extra_specs=self.dedicated_cpu_policy)
         self.resize_server(server['id'], flavor_b['id'])
         cpu_pinnings = self.get_server_cpu_pinning(server['id'])
 
         self.assertEqual(
-            len(cpu_pinnings), self.vcpus,
+            len(cpu_pinnings), self.dedicated_vcpus,
             "Resized instance should be pinned but is still unpinned")
 
     def test_reboot_pinned_server(self):
         """Ensure pinning information is persisted after a reboot."""
-        flavor = self.create_flavor(vcpus=self.vcpus,
+        flavor = self.create_flavor(vcpus=self.dedicated_vcpus,
                                     extra_specs=self.dedicated_cpu_policy)
         server = self.create_test_server(flavor=flavor['id'],
                                          wait_until='ACTIVE')
         cpu_pinnings = self.get_server_cpu_pinning(server['id'])
 
         self.assertEqual(
-            len(cpu_pinnings), self.vcpus,
+            len(cpu_pinnings), self.dedicated_vcpus,
             "CPU pinning was not applied to new instance.")
 
         self.reboot_server(server['id'], 'HARD')
@@ -228,7 +236,7 @@ class CPUPolicyTest(BasePinningTest):
         # because that's not expected. We just care that _some_ pinning is in
         # effect
         self.assertEqual(
-            len(cpu_pinnings), self.vcpus,
+            len(cpu_pinnings), self.dedicated_vcpus,
             "Rebooted instance has lost its pinning information")