Allow CPU profiles with isolated cores

Update CPU profile rules to work with the constraints for isolated
cores. Use the same validation for CPU profiles and general CPU
modification.

Update the host_cpus_modify functionality for clarity. Validate and
apply all requested CPU changes in one shot.

Change-Id: I83b4771809fad9323ff8ea0aed1bc02e78ce356b
Story: 2006565
Task: 36899
Signed-off-by: David Sullivan <david.sullivan@windriver.com>
This commit is contained in:
David Sullivan 2019-10-02 11:41:02 -04:00
parent 0f07b5a426
commit f17cc885c2
8 changed files with 176 additions and 181 deletions

View File

@ -182,6 +182,7 @@ def get_cpuprofile_data(cc, iprofile):
iprofile.vswitch_cores = get_core_list_str(iprofile, icpu_utils.VSWITCH_CPU_TYPE)
iprofile.shared_cores = get_core_list_str(iprofile, icpu_utils.SHARED_CPU_TYPE)
iprofile.vms_cores = get_core_list_str(iprofile, icpu_utils.APPLICATION_CPU_TYPE)
iprofile.isolated_cores = get_core_list_str(iprofile, icpu_utils.ISOLATED_CPU_TYPE)
def get_core_list_str(iprofile, function):
@ -209,23 +210,29 @@ def do_cpuprofile_list(cc, args):
icpu_utils.SHARED_CPU_TYPE)
profile.vms_cores = get_core_list_str(profile,
icpu_utils.APPLICATION_CPU_TYPE)
profile.isolated_cores = get_core_list_str(profile,
icpu_utils.ISOLATED_CPU_TYPE)
field_labels = ['uuid', 'name',
'processors', 'phy cores per proc', 'hyperthreading',
'platform cores', 'vswitch cores', 'shared cores', 'vm cores']
'platform cores', 'vswitch cores', 'shared cores',
'vm cores', 'isolated_cores']
fields = ['uuid', 'profilename',
'sockets', 'physical_cores', 'hyperthreading',
'platform_cores', 'vswitch_cores', 'shared_cores', 'vms_cores']
'platform_cores', 'vswitch_cores', 'shared_cores', 'vms_cores',
'isolated_cores']
utils.print_list(profiles, fields, field_labels, sortby=0)
def _print_cpuprofile_show(cpuprofile):
labels = ['uuid', 'name',
'processors', 'phy cores per proc', 'hyperthreading',
'platform cores', 'vswitch cores', 'shared cores', 'vm cores', 'created_at', 'updated_at']
'platform cores', 'vswitch cores', 'shared cores', 'vm cores',
'isolated_cores', 'created_at', 'updated_at']
fields = ['uuid', 'profilename',
'sockets', 'physical_cores', 'hyperthreading',
'platform_cores', 'vswitch_cores', 'shared_cores', 'vms_cores', 'created_at', 'updated_at']
'platform_cores', 'vswitch_cores', 'shared_cores', 'vms_cores',
'isolated_cores', 'created_at', 'updated_at']
data = [(f, getattr(cpuprofile, f, '')) for f in fields]
utils.print_tuple_list(data, labels)

View File

@ -1,2 +1,2 @@
SRC_DIR="sysinv"
TIS_PATCH_VER=335
TIS_PATCH_VER=336

View File

@ -648,9 +648,7 @@ def _check_cpu(cpu, ihost):
cpu_counts = _update_isolated_cpu_counts(ihost, cpu, cpu_counts)
# Semantic check to ensure the minimum/maximum values are enforced
error_string = cpu_utils.check_core_allocations(ihost, cpu_counts, func)
if error_string:
raise wsme.exc.ClientSideError(_(error_string))
cpu_utils.check_core_allocations(ihost, cpu_counts, func)
# Update cpu assignments to new values
cpu_utils.update_core_allocations(ihost, cpu_counts)

View File

@ -5,8 +5,10 @@
import pecan
import wsme
from sysinv.common import constants
from sysinv.common import utils as cutils
from sysinv.openstack.common import log
LOG = log.getLogger(__name__)
@ -247,7 +249,7 @@ def restructure_host_cpu_data(host):
host.cpu_lists[cpu.numa_node].append(int(cpu.cpu))
def check_core_allocations(host, cpu_counts, func):
def check_core_allocations(host, cpu_counts):
"""Check that minimum and maximum core values are respected."""
total_platform_cores = 0
total_vswitch_cores = 0
@ -262,71 +264,87 @@ def check_core_allocations(host, cpu_counts, func):
requested_cores = \
platform_cores + vswitch_cores + shared_cores + isolated_cores
if requested_cores > available_cores:
return ("More total logical cores requested than present on "
"'Processor %s' (%s cores)." % (s, available_cores))
raise wsme.exc.ClientSideError(
"More total logical cores requested than present on Processor "
"%s (%s cores)." % (s, available_cores))
total_platform_cores += platform_cores
total_vswitch_cores += vswitch_cores
total_shared_cores += shared_cores
total_isolated_cores += isolated_cores
if func.lower() == constants.PLATFORM_FUNCTION.lower():
if ((constants.CONTROLLER in host.subfunctions) and
(constants.WORKER in host.subfunctions)):
if total_platform_cores < 2:
return "%s must have at least two cores." % \
constants.PLATFORM_FUNCTION
elif total_platform_cores == 0:
return "%s must have at least one core." % \
constants.PLATFORM_FUNCTION
# Validate Platform cores
if ((constants.CONTROLLER in host.subfunctions) and
(constants.WORKER in host.subfunctions)):
if total_platform_cores < 2:
raise wsme.exc.ClientSideError("%s must have at least two cores." %
constants.PLATFORM_FUNCTION)
elif total_platform_cores == 0:
raise wsme.exc.ClientSideError("%s must have at least one core." %
constants.PLATFORM_FUNCTION)
for s in range(1, len(host.nodes)):
if cpu_counts[s][constants.PLATFORM_FUNCTION] > 0:
raise wsme.exc.ClientSideError(
"%s cores can only be allocated on Processor 0" %
constants.PLATFORM_FUNCTION)
# Validate shared cores
for s in range(0, len(host.nodes)):
shared_cores = cpu_counts[s][constants.SHARED_FUNCTION]
if host.hyperthreading:
shared_cores /= 2
if shared_cores > 1:
raise wsme.exc.ClientSideError(
'%s cores are limited to 1 per processor.'
% constants.SHARED_FUNCTION)
# Validate vswitch cores
if total_vswitch_cores != 0:
vswitch_type = cutils.get_vswitch_type(pecan.request.dbapi)
if constants.VSWITCH_TYPE_NONE == vswitch_type:
raise wsme.exc.ClientSideError(
('vSwitch cpus can only be used with a vswitch_type '
'specified.'))
vswitch_physical_cores = total_vswitch_cores
if host.hyperthreading:
vswitch_physical_cores /= 2
if vswitch_physical_cores > VSWITCH_MAX_CORES:
raise wsme.exc.ClientSideError(
"The %s function can only be assigned up to %s cores." %
(constants.VSWITCH_FUNCTION.lower(), VSWITCH_MAX_CORES))
# Validate Isolated cores
# We can allocate platform cores on numa 0, otherwise all isolated
# cores must be in a contiguous block after the platform cores.
if total_isolated_cores > 0:
if total_vswitch_cores != 0 or total_shared_cores != 0:
raise wsme.exc.ClientSideError(
"%s cores can only be configured with %s and %s core types." %
(constants.ISOLATED_FUNCTION, constants.PLATFORM_FUNCTION,
constants.APPLICATION_FUNCTION))
has_application_cpus = False
for s in range(0, len(host.nodes)):
if s > 0 and cpu_counts[s][constants.PLATFORM_FUNCTION] > 0:
return "%s cores can only be allocated on Processor 0" % \
constants.PLATFORM_FUNCTION
if constants.WORKER in (host.subfunctions or host.personality):
if func.lower() == constants.VSWITCH_FUNCTION.lower():
if host.hyperthreading:
total_physical_cores = total_vswitch_cores / 2
else:
total_physical_cores = total_vswitch_cores
if total_physical_cores < VSWITCH_MIN_CORES:
return ("The %s function must have at least %s core(s)." %
(constants.VSWITCH_FUNCTION.lower(), VSWITCH_MIN_CORES))
elif total_physical_cores > VSWITCH_MAX_CORES:
return ("The %s function can only be assigned up to %s cores." %
(constants.VSWITCH_FUNCTION.lower(), VSWITCH_MAX_CORES))
numa_counts = cpu_counts[s]
isolated_cores_requested = \
numa_counts[constants.ISOLATED_FUNCTION]
if has_application_cpus and isolated_cores_requested:
raise wsme.exc.ClientSideError(
"%s and %s cpus must be contiguous" %
(constants.PLATFORM_FUNCTION, constants.ISOLATED_FUNCTION))
platform_cores_requested = \
numa_counts[constants.PLATFORM_FUNCTION]
available_cores = len(host.cpu_lists[s])
# Validate Isolated cores
# We can allocate platform cores on numa 0, otherwise all isolated
# cores must be in a contiguous block after the platform cores.
if total_isolated_cores > 0:
if total_vswitch_cores != 0 or total_shared_cores != 0:
return "%s cores can only be configured with %s and %s core " \
"types." % (constants.ISOLATED_FUNCTION,
constants.PLATFORM_FUNCTION,
constants.APPLICATION_FUNCTION)
has_application_cpus = False
for s in range(0, len(host.nodes)):
numa_counts = cpu_counts[s]
isolated_cores_requested = \
numa_counts[constants.ISOLATED_FUNCTION]
if has_application_cpus and isolated_cores_requested:
return "%s cpus must be contiguous" % \
constants.ISOLATED_FUNCTION
platform_cores_requested = \
numa_counts[constants.PLATFORM_FUNCTION]
available_cores = len(host.cpu_lists[s])
if platform_cores_requested + isolated_cores_requested \
!= available_cores:
has_application_cpus = True
if platform_cores_requested + isolated_cores_requested \
!= available_cores:
has_application_cpus = True
reserved_for_vms = len(host.cpus) - total_platform_cores - total_vswitch_cores
if reserved_for_vms <= 0:
return "There must be at least one unused core for %s." % \
constants.APPLICATION_FUNCTION
else:
if total_platform_cores != len(host.cpus):
return "All logical cores must be reserved for platform use"
return ""
reserved_for_applications = len(host.cpus) - total_platform_cores - \
total_vswitch_cores
if reserved_for_applications <= 0:
raise wsme.exc.ClientSideError(
"There must be at least one unused core for %s." %
constants.APPLICATION_FUNCTION)
def update_core_allocations(host, cpu_counts):

View File

@ -227,30 +227,6 @@ class HostStatesController(rest.RestController):
{'function': 'vswitch', 'sockets': [{'0': 2}]},
{'function': 'shared', 'sockets': [{'0': 1}, {'1': 1}]}]
"""
def cpu_function_sort_key(capability):
    """Return the allocation-order rank of a CPU function capability.

    Platform cores are allocated first, then shared, vswitch, isolated
    and application cores; anything unrecognized sorts last.
    """
    # Case-insensitive dispatch table mapping function name to its rank.
    ranks = {
        constants.PLATFORM_FUNCTION.lower(): 0,
        constants.SHARED_FUNCTION.lower(): 1,
        constants.VSWITCH_FUNCTION.lower(): 2,
        constants.ISOLATED_FUNCTION.lower(): 3,
        constants.APPLICATION_FUNCTION.lower(): 4,
    }
    requested = capability.get('function', '').lower()
    return ranks.get(requested, 5)
specified_function = None
# patch_obj = jsonpatch.JsonPatch(patch)
# for p in patch_obj:
# if p['path'] == '/capabilities':
# capabilities = p['value']
# break
LOG.info("host_cpus_modify host_uuid=%s capabilities=%s" %
(host_uuid, capabilities))
@ -260,9 +236,8 @@ class HostStatesController(rest.RestController):
ihost.nodes = pecan.request.dbapi.inode_get_by_ihost(ihost.uuid)
num_nodes = len(ihost.nodes)
# Perform allocation in platform, shared, vswitch order
sorted_capabilities = sorted(capabilities, key=cpu_function_sort_key)
for icap in sorted_capabilities:
# Perform basic sanity on the input
for icap in capabilities:
specified_function = icap.get('function', None)
specified_sockets = icap.get('sockets', None)
if not specified_function or not specified_sockets:
@ -271,64 +246,57 @@ class HostStatesController(rest.RestController):
'for host %s.') % (host_uuid,
specified_function,
specified_sockets))
capability = {}
for specified_socket in specified_sockets:
socket, value = specified_socket.items()[0]
if int(socket) >= num_nodes:
raise wsme.exc.ClientSideError(
_('There is no Processor (Socket) '
'%s on this host.') % socket)
capability.update({'num_cores_on_processor%s' % socket:
int(value)})
if int(value) < 0:
raise wsme.exc.ClientSideError(
_('Specified cpu values must be non-negative.'))
LOG.debug("host_cpus_modify capability=%s" % capability)
# Query the database to get the current set of CPUs and then
# organize the data by socket and function for convenience.
ihost.cpus = pecan.request.dbapi.icpu_get_by_ihost(ihost.uuid)
cpu_utils.restructure_host_cpu_data(ihost)
# Query the database to get the current set of CPUs and then
# organize the data by socket and function for convenience.
ihost.cpus = pecan.request.dbapi.icpu_get_by_ihost(ihost.uuid)
cpu_utils.restructure_host_cpu_data(ihost)
# Get the CPU counts for each socket and function for this host
cpu_counts = cpu_utils.get_cpu_counts(ihost)
# Get the CPU counts for each socket and function for this host
cpu_counts = cpu_utils.get_cpu_counts(ihost)
# Update the CPU counts for each socket and function for this host based
# on the incoming requested core counts
if (specified_function.lower() == constants.VSWITCH_FUNCTION.lower()):
cpu_counts = cpu_api._update_vswitch_cpu_counts(ihost, None,
cpu_counts,
capability)
elif (specified_function.lower() == constants.SHARED_FUNCTION.lower()):
cpu_counts = cpu_api._update_shared_cpu_counts(ihost, None,
cpu_counts,
capability)
elif (specified_function.lower() == constants.PLATFORM_FUNCTION.lower()):
cpu_counts = cpu_api._update_platform_cpu_counts(ihost, None,
cpu_counts,
capability)
elif (specified_function.lower() ==
constants.ISOLATED_FUNCTION.lower()):
cpu_counts = cpu_api._update_isolated_cpu_counts(
ihost, None, cpu_counts, capability)
# Update the CPU counts based on the provided values
for cap in capabilities:
function = cap.get('function', None)
# Normalize the function input
for const_function in constants.CPU_FUNCTIONS:
if const_function.lower() == function.lower():
function = const_function
sockets = cap.get('sockets', None)
for numa in sockets:
numa_node, value = numa.items()[0]
numa_node = int(numa_node)
value = int(value)
if ihost.hyperthreading:
value *= 2
cpu_counts[numa_node][function] = value
# Semantic check to ensure the minimum/maximum values are enforced
error_msg = cpu_utils.check_core_allocations(ihost, cpu_counts,
specified_function)
if error_msg:
raise wsme.exc.ClientSideError(_(error_msg))
# Semantic check to ensure the minimum/maximum values are enforced
cpu_utils.check_core_allocations(ihost, cpu_counts)
# Update cpu assignments to new values
cpu_utils.update_core_allocations(ihost, cpu_counts)
# Update cpu assignments to new values
cpu_utils.update_core_allocations(ihost, cpu_counts)
for cpu in ihost.cpus:
function = cpu_utils.get_cpu_function(ihost, cpu)
if function == constants.NO_FUNCTION:
raise wsme.exc.ClientSideError(_('Could not determine '
'assigned function for CPU %d' % cpu.cpu))
if (not cpu.allocated_function or
cpu.allocated_function.lower() != function.lower()):
values = {'allocated_function': function}
LOG.info("icpu_update uuid=%s value=%s" %
(cpu.uuid, values))
pecan.request.dbapi.icpu_update(cpu.uuid, values)
for cpu in ihost.cpus:
function = cpu_utils.get_cpu_function(ihost, cpu)
if function == constants.NO_FUNCTION:
raise wsme.exc.ClientSideError(_('Could not determine '
'assigned function for CPU %d' % cpu.cpu))
if (not cpu.allocated_function or
cpu.allocated_function.lower() != function.lower()):
values = {'allocated_function': function}
LOG.info("icpu_update uuid=%s value=%s" %
(cpu.uuid, values))
pecan.request.dbapi.icpu_update(cpu.uuid, values)
# perform inservice apply
pecan.request.rpcapi.update_grub_config(pecan.request.context,

View File

@ -2426,58 +2426,48 @@ def apply_profile(host_id, profile_id):
@cutils.synchronized(cpu_api.LOCK_NAME)
def cpuprofile_apply_to_host(host, profile):
host.cpus = pecan.request.dbapi.icpu_get_by_ihost(host.uuid, sort_key=['forinodeid', 'core', 'thread'])
host.nodes = pecan.request.dbapi.inode_get_by_ihost(host.uuid, sort_key='numa_node')
cpu_api._check_host(host)
# Populate the host and profile CPU data, order by logical core
host.cpus = pecan.request.dbapi.icpu_get_by_ihost(
host.uuid, sort_key='cpu')
host.nodes = pecan.request.dbapi.inode_get_by_ihost(host.uuid)
if not host.cpus or not host.nodes:
raise wsme.exc.ClientSideError("Host (%s) has no processors "
"or cores." % host.hostname)
profile.cpus = pecan.request.dbapi.icpu_get_by_ihost(profile.uuid, sort_key=['forinodeid', 'core', 'thread'])
profile.nodes = pecan.request.dbapi.inode_get_by_ihost(profile.uuid, sort_key='numa_node')
profile.cpus = pecan.request.dbapi.icpu_get_by_ihost(
profile.uuid, sort_key='cpu')
profile.nodes = pecan.request.dbapi.inode_get_by_ihost(profile.uuid)
if not profile.cpus or not profile.nodes:
raise wsme.exc.ClientSideError("Profile (%s) has no processors "
"or cores." % profile.hostname)
h_struct = cpu_utils.HostCpuProfile(host.subfunctions, host.cpus, host.nodes)
cpu_profile = cpu_utils.CpuProfile(profile.cpus, profile.nodes)
if len(profile.nodes) != len(host.nodes) or len(profile.cpus) != \
len(host.cpus):
raise wsme.exc.ClientSideError(
"Profile (%s) does not match CPU structure of host (%s)" %
(profile.hostname, host.hostname))
errorstring = h_struct.profile_applicable(cpu_profile)
# Reorganize the profile cpu data for convenience
cpu_utils.restructure_host_cpu_data(profile)
if errorstring:
raise wsme.exc.ClientSideError(errorstring)
# Get the CPU counts for each socket and function for this host
cpu_counts = cpu_utils.get_cpu_counts(profile)
numa_node_idx = -1
core_idx = 0
cur_numa_node = None
cur_core = None
for hcpu in host.cpus:
if hcpu.numa_node != cur_numa_node:
cur_numa_node = hcpu.numa_node
numa_node_idx += 1
core_idx = 0
cur_core = hcpu.core
p_processor = cpu_profile.processors[numa_node_idx]
vswitch_core_start = p_processor.platform
shared_core_start = p_processor.vswitch + vswitch_core_start
vm_core_start = p_processor.shared + shared_core_start
vm_core_end = p_processor.vms + vm_core_start
else:
if hcpu.core != cur_core:
core_idx += 1
cur_core = hcpu.core
# Semantic check to ensure the minimum/maximum values are enforced
cpu_utils.check_core_allocations(profile, cpu_counts)
if core_idx < vswitch_core_start:
new_func = constants.PLATFORM_FUNCTION
elif core_idx < shared_core_start:
new_func = constants.VSWITCH_FUNCTION
elif core_idx < vm_core_start:
new_func = constants.SHARED_FUNCTION
elif core_idx < vm_core_end:
new_func = constants.APPLICATION_FUNCTION
if new_func != hcpu.allocated_function:
values = {'allocated_function': new_func}
cpu_api._update(hcpu.uuid, values, from_profile=True)
# Update the host cpu allocations as required
for index in range(len(profile.cpus)):
host_cpu = host.cpus[index]
profile_cpu = profile.cpus[index]
if (not host_cpu.allocated_function or
host_cpu.allocated_function.lower() !=
profile_cpu.allocated_function.lower()):
values = {'allocated_function': profile_cpu.allocated_function}
pecan.request.dbapi.icpu_update(host_cpu.uuid, values)
def ifprofile_applicable(host, profile):

View File

@ -118,6 +118,15 @@ APPLICATION_FUNCTION = "Applications"
ISOLATED_FUNCTION = "Isolated"
NO_FUNCTION = "None"
# All recognized CPU allocation functions. Used to normalize
# user-supplied function names against the canonical constants.
CPU_FUNCTIONS = [
    PLATFORM_FUNCTION,
    VSWITCH_FUNCTION,
    SHARED_FUNCTION,
    APPLICATION_FUNCTION,
    ISOLATED_FUNCTION,
    NO_FUNCTION
]
# Host Personality Sub-Types
HOST_ADD = 'host_add' # for personality sub-type validation
HOST_DELETE = 'host_delete' # for personality sub-type validation

View File

@ -83,6 +83,10 @@ class ProfileTestCase(base.FunctionalTest):
dbutils.get_test_icpu(id=5, cpu=3,
forinodeid=self.compnode.id,
forihostid=self.worker.id))
self.compcpuapp = self.dbapi.icpu_create(
self.worker.id,
dbutils.get_test_icpu(id=6, cpu=4, forinodeid=self.compnode.id, forihostid=self.worker.id,
allocated_function=constants.APPLICATION_FUNCTION))
self.compmemory = self.dbapi.imemory_create(
self.worker.id,
dbutils.get_test_imemory(id=2, Hugepagesize=constants.MIB_1G,
@ -285,11 +289,12 @@ class ProfileApplyTestCase(ProfileTestCase):
def test_apply_cpu_success(self):
self.profile["profiletype"] = constants.PROFILE_TYPE_CPU
self.profile["ihost_uuid"] = self.worker.uuid
response = self.post_json('%s' % self._get_path(), self.profile)
self.assertEqual(http_client.OK, response.status_int)
list_data = self.get_json('%s' % self._get_path())
profile_uuid = list_data['iprofiles'][0]['uuid']
result = self.patch_dict_json('/ihosts/%s' % self.controller.id,
result = self.patch_dict_json('/ihosts/%s' % self.worker.id,
headers=HEADER,
action=constants.APPLY_PROFILE_ACTION,
iprofile_uuid=profile_uuid)