Change compute node to worker node personality
This update replaces the compute personality and subfunction with worker, and updates internal and customer-visible references. In addition, the compute-huge package has been renamed to worker-utils, as it contains various scripts/services that are used to affine running tasks or interface IRQs to specific CPUs. The worker_reserved.conf file is now installed to /etc/platform. The CPU function 'VM' has also been renamed to 'Application'.

Tests Performed:

Non-containerized deployment:
  AIO-SX: Sanity and Nightly automated test suite
  AIO-DX: Sanity and Nightly automated test suite
  2+2 System: Sanity and Nightly automated test suite
  2+2 System: Horizon Patch Orchestration

Kubernetes deployment:
  AIO-SX: Create, delete, reboot and rebuild instances
  2+2+2 System: worker nodes are unlocked/enabled with no alarms

Story: 2004022
Task: 27013

Change-Id: I0e0be6b3a6f25f7fb8edf64ea4326854513aa396
Signed-off-by: Tao Liu <tao.liu@windriver.com>
parent b10b557afe
commit 6256b0d106
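For reference, the renamed worker_reserved.conf keeps the shell-style KEY=("value" ...) format of the old compute_reserved.conf; only the path and the COMPUTE_/WORKER_ key prefixes change. Below is a minimal sketch of the parsing pattern the sysinv agent applies to the renamed file; the helper name is hypothetical, while the path, key name, and string handling follow the agent hunks in this diff. For example, WORKER_PLATFORM_CORES=("node0:0") yields [('node0', '0')], which the agent uses when affining interface IRQs to platform cores.

# Minimal sketch, not part of this commit: mirrors how sysinv-agent reads
# /etc/platform/worker_reserved.conf in the hunks below.
def read_worker_platform_cores(path='/etc/platform/worker_reserved.conf'):
    """Return (numa_node, cores) pairs from WORKER_PLATFORM_CORES."""
    reserved = []
    with open(path, 'r') as infile:
        for line in infile:
            if "WORKER_PLATFORM_CORES" in line:
                val = line.split("=")
                # drop the trailing newline and the surrounding parentheses
                entries = val[1].strip('\n')[1:-1]
                for entry in entries.split():
                    # each entry looks like "node0:0"
                    node, cores = entry.strip('"').split(':')
                    reserved.append((node, cores))
    return reserved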
@@ -2,13 +2,13 @@
 # If these have dependencies, they will be pulled in automatically
 #
 
-# compute-huge
-compute-huge
+# worker-utils
+worker-utils
 
-# computeconfig
-computeconfig
-computeconfig-standalone
-computeconfig-subfunction
+# workerconfig
+workerconfig
+workerconfig-standalone
+workerconfig-subfunction
 
 # configutilities
 configutilities
@@ -30,7 +30,7 @@ sysinv
 
 # config-gate
 config-gate
-config-gate-compute
+config-gate-worker
 
 # puppet-manifests
 puppet-manifests
@@ -1,5 +1,5 @@
-compute-huge
-computeconfig
+worker-utils
+workerconfig
 configutilities
 controllerconfig
 storageconfig
@@ -1,13 +0,0 @@
-Metadata-Version: 1.1
-Name: compute-huge
-Version: 1.0
-Summary: Initial compute node hugepages and reserved cpus configuration
-Home-page:
-Author: Windriver
-Author-email: info@windriver.com
-License: Apache-2.0
-
-Description: Initial compute node hugepages and reserved cpus configuration
-
-
-Platform: UNKNOWN
@@ -1,3 +0,0 @@
-SRC_DIR="compute-huge"
-COPY_LIST="$SRC_DIR/LICENSE"
-TIS_PATCH_VER=10
@@ -1,24 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2014,2016 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# compute-huge.sh "goenabled" check.
-#
-# If a problem was detected during configuration of huge pages and compute
-# resources then the board is not allowed to enable.
-#
-COMPUTE_HUGE_GOENABLED="/var/run/compute_huge_goenabled"
-
-source "/etc/init.d/log_functions.sh"
-source "/usr/bin/tsconfig"
-
-if [ -e ${VOLATILE_COMPUTE_CONFIG_COMPLETE} -a ! -f ${COMPUTE_HUGE_GOENABLED} ]; then
-    log_error "Compute manifest CPU configuration check failed. Failing goenabled check."
-    exit 1
-fi
-
-exit 0
@@ -1,13 +0,0 @@
-Metadata-Version: 1.1
-Name: computeconfig
-Version: 1.0
-Summary: Initial compute node configuration
-Home-page:
-Author: Windriver
-Author-email: info@windriver.com
-License: Apache-2.0
-
-Description: Initial compute node configuration
-
-
-Platform: UNKNOWN
@@ -1,2 +0,0 @@
-SRC_DIR="computeconfig"
-TIS_PATCH_VER=11
@@ -1,85 +0,0 @@
-Summary: computeconfig
-Name: computeconfig
-Version: 1.0
-Release: %{tis_patch_ver}%{?_tis_dist}
-License: Apache-2.0
-Group: base
-Packager: Wind River <info@windriver.com>
-URL: unknown
-Source0: %{name}-%{version}.tar.gz
-
-%define debug_package %{nil}
-
-Requires: systemd
-
-%description
-Initial compute node configuration
-
-%package -n computeconfig-standalone
-Summary: computeconfig
-Group: base
-
-%description -n computeconfig-standalone
-Initial compute node configuration
-
-%package -n computeconfig-subfunction
-Summary: computeconfig
-Group: base
-
-%description -n computeconfig-subfunction
-Initial compute node configuration
-
-%define initddir /etc/init.d/
-%define goenableddir /etc/goenabled.d/
-%define systemddir /etc/systemd/system/
-
-%prep
-%setup
-
-%build
-
-%install
-make install INITDDIR=%{buildroot}%{initddir} GOENABLEDDIR=%{buildroot}%{goenableddir} SYSTEMDDIR=%{buildroot}%{systemddir}
-
-%post -n computeconfig-standalone
-if [ ! -e $D%{systemddir}/computeconfig.service ]; then
-    cp $D%{systemddir}/config/computeconfig-standalone.service $D%{systemddir}/computeconfig.service
-else
-    cmp -s $D%{systemddir}/config/computeconfig-standalone.service $D%{systemddir}/computeconfig.service
-    if [ $? -ne 0 ]; then
-        rm -f $D%{systemddir}/computeconfig.service
-        cp $D%{systemddir}/config/computeconfig-standalone.service $D%{systemddir}/computeconfig.service
-    fi
-fi
-systemctl enable computeconfig.service
-
-
-%post -n computeconfig-subfunction
-if [ ! -e $D%{systemddir}/computeconfig.service ]; then
-    cp $D%{systemddir}/config/computeconfig-combined.service $D%{systemddir}/computeconfig.service
-else
-    cmp -s $D%{systemddir}/config/computeconfig-combined.service $D%{systemddir}/computeconfig.service
-    if [ $? -ne 0 ]; then
-        rm -f $D%{systemddir}/computeconfig.service
-        cp $D%{systemddir}/config/computeconfig-combined.service $D%{systemddir}/computeconfig.service
-    fi
-fi
-systemctl enable computeconfig.service
-
-%clean
-
-%files
-%defattr(-,root,root,-)
-%doc LICENSE
-%{initddir}/*
-
-%files -n computeconfig-standalone
-%defattr(-,root,root,-)
-%dir %{systemddir}/config
-%{systemddir}/config/computeconfig-standalone.service
-%{goenableddir}/*
-
-%files -n computeconfig-subfunction
-%defattr(-,root,root,-)
-%dir %{systemddir}/config
-%{systemddir}/config/computeconfig-combined.service
@@ -15,12 +15,12 @@ Requires: systemd
 %description
 Startup configuration gate
 
-%package -n %{name}-compute
-Summary: config-gate-compute
+%package -n %{name}-worker
+Summary: config-gate-worker
 Group: base
 
-%description -n %{name}-compute
-Startup compute configuration gate
+%description -n %{name}-worker
+Startup worker configuration gate
 
 %define local_etc_systemd /etc/systemd/system/
 
@@ -35,8 +35,8 @@ make install SBINDIR=%{buildroot}%{_sbindir} SYSTEMDDIR=%{buildroot}%{local_etc_
 %post
 systemctl enable config.service
 
-%post -n %{name}-compute
-systemctl enable compute-config-gate.service
+%post -n %{name}-worker
+systemctl enable worker-config-gate.service
 
 %clean
 
@@ -46,7 +46,7 @@ systemctl enable compute-config-gate.service
 %{_sbindir}/wait_for_config_init.sh
 %{local_etc_systemd}/config.service
 
-%files -n %{name}-compute
+%files -n %{name}-worker
 %defattr(-,root,root,-)
-%{_sbindir}/wait_for_compute_config_init.sh
-%{local_etc_systemd}/compute-config-gate.service
+%{_sbindir}/wait_for_worker_config_init.sh
+%{local_etc_systemd}/worker-config-gate.service
@@ -9,6 +9,6 @@ install:
 	install -d -m 755 $(SBINDIR)
 	install -d -m 755 $(SYSTEMDDIR)
 	install -p -D -m 555 wait_for_config_init.sh $(SBINDIR)/wait_for_config_init.sh
-	install -p -D -m 555 wait_for_compute_config_init.sh $(SBINDIR)/wait_for_compute_config_init.sh
+	install -p -D -m 555 wait_for_worker_config_init.sh $(SBINDIR)/wait_for_worker_config_init.sh
 	install -p -D -m 444 config.service $(SYSTEMDDIR)/config.service
-	install -p -D -m 444 compute-config-gate.service $(SYSTEMDDIR)/compute-config-gate.service
+	install -p -D -m 444 worker-config-gate.service $(SYSTEMDDIR)/worker-config-gate.service
@@ -14,8 +14,8 @@ case $nodetype in
     controller)
         SERVICE=controllerconfig.service
        ;;
-    compute)
-        SERVICE=computeconfig.service
+    worker)
+        SERVICE=workerconfig.service
        ;;
    storage)
        SERVICE=storageconfig.service
@@ -1,13 +1,13 @@
 #!/bin/bash
 #
-# Copyright (c) 2016 Wind River Systems, Inc.
+# Copyright (c) 2016-2018 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
 
-# Wait for compute config service
+# Wait for worker config service
 
-SERVICE=computeconfig.service
+SERVICE=workerconfig.service
 
 while :; do
     systemctl status $SERVICE |grep -q running
@@ -1,11 +1,11 @@
 [Unit]
-Description=TIS compute config gate
-After=sw-patch.service computeconfig.service
+Description=STX worker config gate
+After=sw-patch.service workerconfig.service
 Before=serial-getty@ttyS0.service getty@tty1.service
 
 [Service]
 Type=oneshot
-ExecStart=/usr/sbin/wait_for_compute_config_init.sh
+ExecStart=/usr/sbin/wait_for_worker_config_init.sh
 ExecStop=
 ExecReload=
 RemainAfterExit=yes
@@ -1202,8 +1202,8 @@ def overwrite_iscsi_target_config():
 def restore_complete():
     """
     Restore proper ISCSI configuration file after cinder restore.
-    Enable compute functionality for AIO system.
-    :return: True if compute-config-complete is executed
+    Enable worker functionality for AIO system.
+    :return: True if worker-config-complete is executed
     """
     if utils.get_system_type() == sysinv_constants.TIS_AIO_BUILD:
         if not os.path.isfile(restore_system_ready):
@@ -1223,21 +1223,21 @@ def restore_complete():
         # we use use that.
         overwrite_iscsi_target_config()
 
-        print("\nApplying compute manifests for %s. " %
+        print("\nApplying worker manifests for %s. " %
               (utils.get_controller_hostname()))
         print("Node will reboot on completion.")
 
-        sysinv.do_compute_config_complete(utils.get_controller_hostname())
+        sysinv.do_worker_config_complete(utils.get_controller_hostname())
 
         # show in-progress log on console every 30 seconds
         # until self reboot or timeout
         os.remove(restore_system_ready)
         time.sleep(30)
         for i in range(1, 10):
-            print("compute manifest apply in progress ... ")
+            print("worker manifest apply in progress ... ")
             time.sleep(30)
 
-        raise RestoreFail("Timeout running compute manifests, "
+        raise RestoreFail("Timeout running worker manifests, "
                           "reboot did not occur")
 
     else:
@@ -1655,7 +1655,7 @@ def restore_system(backup_file, include_storage_reinstall=False, clone=False):
         print(textwrap.fill(
             "Failed to lock at least one node. " +
             "Please lock the unlocked controller-1 or " +
-            "compute nodes manually.", 80
+            "worker nodes manually.", 80
         ))
 
     if not clone:
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2014-2017 Wind River Systems, Inc.
+# Copyright (c) 2014-2018 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -23,7 +23,7 @@ API_VERSION = 1
 HOST_PERSONALITY_NOT_SET = ""
 HOST_PERSONALITY_UNKNOWN = "unknown"
 HOST_PERSONALITY_CONTROLLER = "controller"
-HOST_PERSONALITY_COMPUTE = "compute"
+HOST_PERSONALITY_WORKER = "worker"
 HOST_PERSONALITY_STORAGE = "storage"
 
 # Host Administrative State Constants
@@ -87,8 +87,8 @@ class Host(object):
         # Set personality
         if host_data['personality'] == "controller":
             self.personality = HOST_PERSONALITY_CONTROLLER
-        elif host_data['personality'] == "compute":
-            self.personality = HOST_PERSONALITY_COMPUTE
+        elif host_data['personality'] == "worker":
+            self.personality = HOST_PERSONALITY_WORKER
         elif host_data['personality'] == "storage":
             self.personality = HOST_PERSONALITY_STORAGE
         else:
@@ -334,8 +334,8 @@ def get_hosts(admin_token, region_name, personality=None,
                 personality == HOST_PERSONALITY_CONTROLLER):
             host_list.append(Host(host['hostname'], host))
 
-        elif (host['personality'] == "compute" and
-              personality == HOST_PERSONALITY_COMPUTE):
+        elif (host['personality'] == "worker" and
+              personality == HOST_PERSONALITY_WORKER):
             host_list.append(Host(host['hostname'], host))
 
         elif (host['personality'] == "storage" and
@@ -537,24 +537,24 @@ def get_host_data(hostname):
     return None
 
 
-def do_compute_config_complete(hostname):
-    """ enable compute functionality """
+def do_worker_config_complete(hostname):
+    """ enable worker functionality """
     try:
         with openstack.OpenStack() as client:
             hosts = get_hosts(client.admin_token,
                               client.conf['region_name'])
             for host in hosts:
                 if hostname == host.name:
-                    # Create/apply compute manifests
+                    # Create/apply worker manifests
                     values = {
                         'action': "subfunction_config"
                     }
                     patch = dict_to_patch(values)
-                    LOG.info("Applying compute manifests: {} [{}]"
+                    LOG.info("Applying worker manifests: {} [{}]"
                              .format(host, patch))
                     client.sysinv.ihost.update(host.uuid, patch)
     except Exception as e:
-        LOG.exception("compute_config_complete failed")
+        LOG.exception("worker_config_complete failed")
         raise e
 
 
@@ -325,7 +325,7 @@ def lag_mode_to_str(lag_mode):
 
 
 def is_combined_load():
-    return 'compute' in tsconfig.subfunctions
+    return 'worker' in tsconfig.subfunctions
 
 
 def get_system_type():
@@ -184,27 +184,27 @@ def update_db(archive_dir, backup_name):
     shutil.rmtree(tmpdir, ignore_errors=True)
 
 
-def config_compute():
+def config_worker():
     """
-    Enable compute functionality for AIO system.
-    :return: True if compute-config-complete is executed
+    Enable worker functionality for AIO system.
+    :return: True if worker-config-complete is executed
     """
     if utils.get_system_type() == si_const.TIS_AIO_BUILD:
-        console_log("Applying compute manifests for {}. "
+        console_log("Applying worker manifests for {}. "
                     "Node will reboot on completion."
                     .format(utils.get_controller_hostname()))
-        sysinv.do_compute_config_complete(utils.get_controller_hostname())
+        sysinv.do_worker_config_complete(utils.get_controller_hostname())
         time.sleep(30)
-        # compute-config-complete has no logs to console. So, wait
+        # worker-config-complete has no logs to console. So, wait
         # for some time before showing the login prompt.
         for i in range(1, 10):
-            console_log("compute-config in progress..")
+            console_log("worker-config in progress..")
             time.sleep(30)
-        console_log("Timed out on do_compute_config_complete")
-        raise CloneFail("Timed out on do_compute_config_complete")
+        console_log("Timed out on do_worker_config_complete")
+        raise CloneFail("Timed out on do_worker_config_complete")
         return True
     else:
-        # compute_config_complete is not needed.
+        # worker_config_complete is not needed.
         return False
 
 
@@ -302,8 +302,8 @@ if os.path.exists(INI_FILE):
             console_log("Images archive installed from [%s]" % clone_name)
             finalize_install()
             set_result(clone.OK)
-            if not config_compute():
-                # do cleanup if compute_config_complete is not required
+            if not config_worker():
+                # do cleanup if worker_config_complete is not required
                 cleanup()
     elif last_result == clone.OK:
         # Installation completed successfully before last reboot
@@ -53,11 +53,11 @@ mkdir -p ${PUPPET_TMP}/hieradata
 cp /etc/puppet/hieradata/global.yaml ${PUPPET_TMP}/hieradata/global.yaml
 cp /etc/puppet/hieradata/${PERSONALITY}.yaml ${PUPPET_TMP}/hieradata/personality.yaml
 
-# When the compute node is first booted and goes online, sysinv-agent reports
+# When the worker node is first booted and goes online, sysinv-agent reports
 # host CPU inventory which triggers the first runtime manifest apply that updates
 # the grub. At this time, copying the host file failed due to a timing issue that
-# has not yet been fully understood. Subsequent retries worked.
-if [ "${PERSONALITY}" = "compute" ]; then
+# has not yet been fully understood. Subsequent retries worked.
+if [ "${PERSONALITY}" = "worker" ]; then
     n=0
     until [ $n -ge 3 ]; do
         cp -f ${HIERADATA}/${HOST}.yaml ${PUPPET_TMP}/hieradata/host.yaml && break
@@ -46,7 +46,7 @@ CONFIG_ADMIN_PROJECT_DOMAIN_NAME: Default
 
 
 # mtce
-platform::mtce::agent::params::compute_boot_timeout: 720
+platform::mtce::agent::params::worker_boot_timeout: 720
 platform::mtce::agent::params::controller_boot_timeout: 1200
 platform::mtce::agent::params::heartbeat_period: 100
 platform::mtce::agent::params::heartbeat_failure_action: 'fail'
@ -1,4 +1,4 @@
|
||||
# compute specific configuration data
|
||||
# worker specific configuration data
|
||||
---
|
||||
|
||||
# vswitch
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# puppet manifest for compute hosts
|
||||
# puppet manifest for worker nodes
|
||||
#
|
||||
|
||||
Exec {
|
||||
@ -48,7 +48,7 @@ include ::openstack::nova::placement
|
||||
include ::openstack::ceilometer
|
||||
include ::openstack::ceilometer::polling
|
||||
|
||||
class { '::platform::config::compute::post':
|
||||
class { '::platform::config::worker::post':
|
||||
stage => post,
|
||||
}
|
||||
|
@@ -227,7 +227,7 @@ class openstack::ceilometer::polling (
     $central_namespace = false
   }
 
-  if (str2bool($::disable_compute_services) or
+  if (str2bool($::disable_worker_services) or
       $::platform::kubernetes::params::enabled) {
     $agent_enable = false
     $compute_namespace = false
@@ -238,7 +238,7 @@ class openstack::ceilometer::polling (
   } else {
     $agent_enable = true
 
-    if str2bool($::is_compute_subfunction) {
+    if str2bool($::is_worker_subfunction) {
       $pmon_target = "/etc/ceilometer/ceilometer-polling-compute.conf.pmon"
       $compute_namespace = true
     } else {
@@ -195,7 +195,7 @@ class openstack::neutron::agents
 
   include ::platform::kubernetes::params
 
-  if (str2bool($::disable_compute_services) or
+  if (str2bool($::disable_worker_services) or
       $::platform::kubernetes::params::enabled) {
     $pmon_ensure = absent
 
@ -1,7 +0,0 @@
|
||||
# Returns true if compute services should be disabled
|
||||
|
||||
Facter.add("disable_compute_services") do
|
||||
setcode do
|
||||
File.exist?('/var/run/.disable_compute_services')
|
||||
end
|
||||
end
|
@ -0,0 +1,7 @@
|
||||
# Returns true if worker services should be disabled
|
||||
|
||||
Facter.add("disable_worker_services") do
|
||||
setcode do
|
||||
File.exist?('/var/run/.disable_worker_services')
|
||||
end
|
||||
end
|
@ -1,19 +1,19 @@
|
||||
class platform::compute::params (
|
||||
$compute_cpu_list = '',
|
||||
$worker_cpu_list = '',
|
||||
$platform_cpu_list = '',
|
||||
$reserved_vswitch_cores = '',
|
||||
$reserved_platform_cores = '',
|
||||
$compute_base_reserved = '',
|
||||
$worker_base_reserved = '',
|
||||
$compute_vswitch_reserved = '',
|
||||
) { }
|
||||
|
||||
class platform::compute::config
|
||||
inherits ::platform::compute::params {
|
||||
|
||||
file { "/etc/nova/compute_reserved.conf":
|
||||
file { "/etc/platform/worker_reserved.conf":
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/compute_reserved.conf.erb')
|
||||
content => template('platform/worker_reserved.conf.erb')
|
||||
}
|
||||
}
|
||||
|
||||
@ -88,7 +88,7 @@ class platform::compute::grub::audit
|
||||
}
|
||||
}
|
||||
|
||||
file { "/var/run/compute_huge_goenabled":
|
||||
file { "/var/run/worker_goenabled":
|
||||
ensure => $ensure,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
@ -276,7 +276,7 @@ class platform::compute::pmqos (
|
||||
$hight_wakeup_cpus = '',
|
||||
) {
|
||||
|
||||
if str2bool($::is_compute_subfunction) and str2bool($::is_lowlatency_subfunction) {
|
||||
if str2bool($::is_worker_subfunction) and str2bool($::is_lowlatency_subfunction) {
|
||||
|
||||
$script = "/usr/bin/set-cpu-wakeup-latency.sh"
|
||||
|
||||
|
@ -289,13 +289,13 @@ class platform::config::controller::post
|
||||
}
|
||||
}
|
||||
|
||||
class platform::config::compute::post
|
||||
class platform::config::worker::post
|
||||
{
|
||||
file { "/etc/platform/.initial_compute_config_complete":
|
||||
file { "/etc/platform/.initial_worker_config_complete":
|
||||
ensure => present,
|
||||
}
|
||||
|
||||
file { "/var/run/.compute_config_complete":
|
||||
file { "/var/run/.worker_config_complete":
|
||||
ensure => present,
|
||||
}
|
||||
}
|
||||
|
@@ -285,7 +285,7 @@ class platform::kubernetes::worker
   }
 
   if $enabled {
-    file { "/var/run/.disable_compute_services":
+    file { "/var/run/.disable_worker_services":
       ensure  => file,
       replace => no,
     }
@@ -8,7 +8,7 @@ class platform::mtce::params (
   $auth_user_domain = undef,
   $auth_project_domain = undef,
   $auth_region = undef,
-  $compute_boot_timeout = undef,
+  $worker_boot_timeout = undef,
   $controller_boot_timeout = undef,
   $heartbeat_degrade_threshold = undef,
   $heartbeat_failure_threshold = undef,
@@ -67,7 +67,7 @@ class platform::postgresql::server (
   # work_mem 512 MB since some ceilometer queries entail extensive
   # sorting as well as hash joins and hash based aggregation.
   # checkpoint_segments increased to reduce frequency of checkpoints
-  if str2bool($::is_compute_subfunction) or str2bool($::is_virtual) {
+  if str2bool($::is_worker_subfunction) or str2bool($::is_virtual) {
     # AIO or virtual box
     # 700 connections needs about 80MB shared buffer
     # Leave work_mem as the default for vbox and AIO
@@ -5,7 +5,7 @@
 #
 # - This file is managed by Puppet. DO NOT EDIT.
 ################################################################################
-# COMPUTE Node configuration parameters for reserved memory and physical cores
+# WORKER Node configuration parameters for reserved memory and physical cores
 # used by Base software and VSWITCH. These are resources that libvirt cannot use.
 #
 
@@ -16,7 +16,7 @@
 # validity against the actual number of logical CPU instances in the system.
 #
 ################################################################################
-COMPUTE_CPU_LIST=<%= @compute_cpu_list %>
+WORKER_CPU_LIST=<%= @worker_cpu_list %>
 
 ################################################################################
 #
@@ -32,10 +32,10 @@ PLATFORM_CPU_LIST=<%= @platform_cpu_list %>
 #
 # Example: To reserve 1500MB and 1 core on NUMA node0, and 1500MB and 1 core
 # on NUMA node1, the variable must be specified as follows.
-#   COMPUTE_BASE_MEMORY=("node0:1500MB:1" "node1:1500MB:1")
+#   WORKER_BASE_MEMORY=("node0:1500MB:1" "node1:1500MB:1")
 #
 ################################################################################
-COMPUTE_BASE_RESERVED=<%= @compute_base_reserved %>
+WORKER_BASE_RESERVED=<%= @worker_base_reserved %>
 
 ################################################################################
 #
@@ -68,7 +68,7 @@ COMPUTE_VSWITCH_CORES=<%= @reserved_vswitch_cores %>
 #
 # Example: To reserve 1 core on NUMA node0, the variable must be specified
 # as follows.
-#   COMPUTE_PLATFORM_CORES=("node0:0")
+#   WORKER_PLATFORM_CORES=("node0:0")
 #
 ################################################################################
-COMPUTE_PLATFORM_CORES=<%= @reserved_platform_cores %>
+WORKER_PLATFORM_CORES=<%= @reserved_platform_cores %>
@@ -45,7 +45,7 @@ heartbeat_failure_action = <%= @heartbeat_failure_action %>
 mnfa_threshold = <%= @mnfa_threshold %>
 
 [timeouts]
-compute_boot_timeout = <%= @compute_boot_timeout %> ; The max time (seconds) that Mtce waits for the mtcAlive
+worker_boot_timeout = <%= @worker_boot_timeout %> ; The max time (seconds) that Mtce waits for the mtcAlive
 controller_boot_timeout = <%= @controller_boot_timeout %> ; message after which it will time out and fail the host.
 
 ; Multi-Node Failure Avoidance (MNFA) Lifecycle Timer.
@@ -45,7 +45,7 @@ SB_STATE_CONFIGURING = 'configuring'
 SB_TASK_NONE = None
 SB_TASK_RECONFIG_CONTROLLER = 'reconfig-controller'
 SB_TASK_PROVISION_STORAGE = 'provision-storage'
-SB_TASK_RECONFIG_COMPUTE = 'reconfig-compute'
+SB_TASK_RECONFIG_WORKER = 'reconfig-worker'
 SB_TASK_RESIZE_CEPH_MON_LV = 'resize-ceph-mon-lv'
 SB_TASK_ADD_OBJECT_GATEWAY = 'add-object-gateway'
 
@@ -92,7 +92,7 @@ def do_host_upgrade_list(cc, args):
            help='Hostname of the host')
 @utils.arg('-p', '--personality',
            metavar='<personality>',
-           choices=['controller', 'compute', 'storage', 'network', 'profile'],
+           choices=['controller', 'worker', 'storage', 'network', 'profile'],
            help='Personality or type of host [REQUIRED]')
 @utils.arg('-s', '--subfunctions',
            metavar='<subfunctions>',
@@ -20,24 +20,24 @@ CREATION_ATTRIBUTES = ['ihost_uuid', 'inode_uuid', 'cpu', 'core', 'thread',
 PLATFORM_CPU_TYPE = "Platform"
 VSWITCH_CPU_TYPE = "Vswitch"
 SHARED_CPU_TYPE = "Shared"
-VMS_CPU_TYPE = "VMs"
+APPLICATION_CPU_TYPE = "Applications"
 NONE_CPU_TYPE = "None"
 
 CPU_TYPE_LIST = [PLATFORM_CPU_TYPE, VSWITCH_CPU_TYPE,
-                 SHARED_CPU_TYPE, VMS_CPU_TYPE,
+                 SHARED_CPU_TYPE, APPLICATION_CPU_TYPE,
                  NONE_CPU_TYPE]
 
 
 PLATFORM_CPU_TYPE_FORMAT = _("Platform")
 VSWITCH_CPU_TYPE_FORMAT = _("vSwitch")
 SHARED_CPU_TYPE_FORMAT = _("Shared")
-VMS_CPU_TYPE_FORMAT = _("VMs")
+APPLICATION_CPU_TYPE_FORMAT = _("Applications")
 NONE_CPU_TYPE_FORMAT = _("None")
 
 CPU_TYPE_FORMATS = {PLATFORM_CPU_TYPE: PLATFORM_CPU_TYPE_FORMAT,
                     VSWITCH_CPU_TYPE: VSWITCH_CPU_TYPE_FORMAT,
                     SHARED_CPU_TYPE: SHARED_CPU_TYPE_FORMAT,
-                    VMS_CPU_TYPE: VMS_CPU_TYPE_FORMAT,
+                    APPLICATION_CPU_TYPE: APPLICATION_CPU_TYPE_FORMAT,
                     NONE_CPU_TYPE: NONE_CPU_TYPE_FORMAT}
 
 
@@ -106,19 +106,19 @@ def check_core_functions(personality, icpus):
             platform_cores += 1
         elif allocated_function == VSWITCH_CPU_TYPE:
             vswitch_cores += 1
-        elif allocated_function == VMS_CPU_TYPE:
+        elif allocated_function == APPLICATION_CPU_TYPE:
             vm_cores += 1
 
     error_string = ""
     if platform_cores == 0:
         error_string = ("There must be at least one core for %s." %
                         PLATFORM_CPU_TYPE_FORMAT)
-    elif personality == 'compute' and vswitch_cores == 0:
+    elif personality == 'worker' and vswitch_cores == 0:
         error_string = ("There must be at least one core for %s." %
                         VSWITCH_CPU_TYPE_FORMAT)
-    elif personality == 'compute' and vm_cores == 0:
+    elif personality == 'worker' and vm_cores == 0:
         error_string = ("There must be at least one core for %s." %
-                        VMS_CPU_TYPE_FORMAT)
+                        APPLICATION_CPU_TYPE_FORMAT)
     return error_string
 
 
@@ -191,7 +191,7 @@ def restructure_host_cpu_data(host):
             cpufunction.socket_cores_number[s] = number_of_cores[f][s]
         else:
             if (f == PLATFORM_CPU_TYPE or (hasattr(host, 'subfunctions')
-                                           and 'compute' in host.subfunctions)):
+                                           and 'worker' in host.subfunctions)):
                 if f != NONE_CPU_TYPE:
                     host.core_assignment.append(cpufunction)
                     for s in range(0, len(host.nodes)):
@@ -39,11 +39,11 @@ def _print_imemory_show(imemory):
            'vSwitch Huge Pages: Size (MiB)',
            ' Total',
            ' Available',
-           'VM Pages (4K): Total',
-           'VM Huge Pages (2M): Total',
+           'Application Pages (4K): Total',
+           'Application Huge Pages (2M): Total',
            ' Total Pending',
            ' Available',
-           'VM Huge Pages (1G): Total',
+           'Application Huge Pages (1G): Total',
            ' Total Pending',
            ' Available',
            'uuid', 'ihost_uuid', 'inode_uuid',
@@ -157,7 +157,7 @@ def do_host_memory_list(cc, args):
            metavar='<1G hugepages number>',
            help='The number of 1G vm huge pages for the numa node')
 def do_host_memory_modify(cc, args):
-    """Modify platform reserved and/or libvirt vm huge page memory attributes for compute nodes."""
+    """Modify platform reserved and/or application huge page memory attributes for worker nodes."""
 
     rwfields = ['platform_reserved_mib',
                 'vm_hugepages_nr_2M_pending',
@@ -177,7 +177,7 @@ def get_cpuprofile_data(cc, iprofile):
     iprofile.platform_cores = get_core_list_str(iprofile, icpu_utils.PLATFORM_CPU_TYPE)
     iprofile.vswitch_cores = get_core_list_str(iprofile, icpu_utils.VSWITCH_CPU_TYPE)
     iprofile.shared_cores = get_core_list_str(iprofile, icpu_utils.SHARED_CPU_TYPE)
-    iprofile.vms_cores = get_core_list_str(iprofile, icpu_utils.VMS_CPU_TYPE)
+    iprofile.vms_cores = get_core_list_str(iprofile, icpu_utils.APPLICATION_CPU_TYPE)
 
 
 def get_core_list_str(iprofile, function):
@@ -204,7 +204,7 @@ def do_cpuprofile_list(cc, args):
         profile.shared_cores = get_core_list_str(profile,
                                                  icpu_utils.SHARED_CPU_TYPE)
         profile.vms_cores = get_core_list_str(profile,
-                                              icpu_utils.VMS_CPU_TYPE)
+                                              icpu_utils.APPLICATION_CPU_TYPE)
 
     field_labels = ['uuid', 'name',
                     'processors', 'phy cores per proc', 'hyperthreading',
@@ -94,7 +94,7 @@ def do_host_device_list(cc, args):
            metavar='<enabled status>',
            help='The enabled status of the device')
 def do_host_device_modify(cc, args):
-    """Modify device availability for compute nodes."""
+    """Modify device availability for worker nodes."""
 
     rwfields = ['enabled',
                 'name']
@@ -181,9 +181,9 @@ class AgentManager(service.PeriodicService):
     def _update_interface_irq_affinity(self, interface_list):
         cpus = {}
         platform_cpulist = '0'
-        with open('/etc/nova/compute_reserved.conf', 'r') as infile:
+        with open('/etc/platform/worker_reserved.conf', 'r') as infile:
             for line in infile:
-                if "COMPUTE_PLATFORM_CORES" in line:
+                if "WORKER_PLATFORM_CORES" in line:
                     val = line.split("=")
                     cores = val[1].strip('\n')[1:-1]
                     for n in cores.split():
@@ -863,7 +863,7 @@ class AgentManager(service.PeriodicService):
             LOG.exception("Sysinv Agent exception updating ilvg conductor.")
             pass
 
-        if constants.COMPUTE in self.subfunctions_list_get():
+        if constants.WORKER in self.subfunctions_list_get():
             platform_interfaces = []
             # retrieve the mgmt and infra interfaces and associated numa nodes
             try:
@@ -932,8 +932,8 @@ class AgentManager(service.PeriodicService):
         return: Bool whether subfunctions configuration is completed.
         """
         if (constants.CONTROLLER in subfunctions_list and
-                constants.COMPUTE in subfunctions_list):
-            if not os.path.exists(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE):
+                constants.WORKER in subfunctions_list):
+            if not os.path.exists(tsc.INITIAL_WORKER_CONFIG_COMPLETE):
                 self._subfunctions_configured = False
                 return False
 
@@ -1011,8 +1011,8 @@ class AgentManager(service.PeriodicService):
         if constants.CONTROLLER in subfunctions:
             if not os.path.isfile(tsc.INITIAL_CONTROLLER_CONFIG_COMPLETE):
                 return False
-        if constants.COMPUTE in subfunctions:
-            if not os.path.isfile(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE):
+        if constants.WORKER in subfunctions:
+            if not os.path.isfile(tsc.INITIAL_WORKER_CONFIG_COMPLETE):
                 return False
         if constants.STORAGE in subfunctions:
             if not os.path.isfile(tsc.INITIAL_STORAGE_CONFIG_COMPLETE):
@@ -1131,7 +1131,7 @@ class AgentManager(service.PeriodicService):
 
         subfunctions_list = self.subfunctions_list_get()
         if ((constants.CONTROLLER in subfunctions_list) and
-                (constants.COMPUTE in subfunctions_list)):
+                (constants.WORKER in subfunctions_list)):
             if self.subfunctions_configured(subfunctions_list) and \
                     not self._wait_for_nova_lvg(icontext, rpcapi, self._ihost_uuid):
 
@@ -1499,7 +1499,7 @@ class AgentManager(service.PeriodicService):
 
         for subfunction in self.subfunctions_list_get():
             # We need to find the subfunction that matches the personality
-            # being requested. e.g. in AIO systems if we request a compute
+            # being requested. e.g. in AIO systems if we request a worker
             # personality we should apply the manifest with that
             # personality
             if subfunction in personalities:
@@ -43,10 +43,10 @@ SIZE_1G_MB = int(SIZE_1G_KB / SIZE_KB)
 # Defines the minimum size of memory for a controller node in megabyte units
 CONTROLLER_MIN_MB = 6000
 
-# Defines the minimum size of memory for a compute node in megabyte units
+# Defines the minimum size of memory for a worker node in megabyte units
 COMPUTE_MIN_MB = 1600
 
-# Defines the minimum size of memory for a secondary compute node in megabyte
+# Defines the minimum size of memory for a secondary worker node in megabyte
 # units
 COMPUTE_MIN_NON_0_MB = 500
 
@@ -300,19 +300,19 @@ class NodeOperator(object):
 
         imemory = []
 
-        initial_compute_config_completed = \
-            os.path.exists(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE)
+        initial_worker_config_completed = \
+            os.path.exists(tsc.INITIAL_WORKER_CONFIG_COMPLETE)
 
         # check if it is initial report before the huge pages are allocated
-        initial_report = not initial_compute_config_completed
+        initial_report = not initial_worker_config_completed
 
-        # do not send report if the initial compute config is completed and
-        # compute config has not finished, i.e.during subsequent
+        # do not send report if the initial worker config is completed and
+        # worker config has not finished, i.e.during subsequent
         # reboot before the manifest allocates the huge pages
-        compute_config_completed = \
-            os.path.exists(tsc.VOLATILE_COMPUTE_CONFIG_COMPLETE)
-        if (initial_compute_config_completed and
-                not compute_config_completed):
+        worker_config_completed = \
+            os.path.exists(tsc.VOLATILE_WORKER_CONFIG_COMPLETE)
+        if (initial_worker_config_completed and
+                not worker_config_completed):
             return imemory
 
         for node in range(self.num_nodes):
@@ -461,14 +461,14 @@ class NodeOperator(object):
                 LOG.error("Failed to execute (%s) OS error (%d)", cmd,
                           e.errno)
 
-        # need to multiply total_mb by 1024 to match compute_huge
+        # need to multiply total_mb by 1024
         node_total_kb = total_hp_mb * SIZE_KB + free_kb + pss_mb * SIZE_KB
 
-        # Read base memory from compute_reserved.conf
+        # Read base memory from worker_reserved.conf
         base_mem_mb = 0
-        with open('/etc/nova/compute_reserved.conf', 'r') as infile:
+        with open('/etc/platform/worker_reserved.conf', 'r') as infile:
             for line in infile:
-                if "COMPUTE_BASE_RESERVED" in line:
+                if "WORKER_BASE_RESERVED" in line:
                     val = line.split("=")
                     base_reserves = val[1].strip('\n')[1:-1]
                     for reserve in base_reserves.split():
@@ -585,19 +585,13 @@ class NodeOperator(object):
         return imemory
 
     def inodes_get_imemory(self):
-        '''Enumerate logical memory topology based on:
-              if CONF.compute_hugepages:
-                  self._inode_get_memory_hugepages()
-              else:
-                  self._inode_get_memory_nonhugepages()
-
+        '''Collect logical memory topology
         :param self
         :returns list of memory nodes and attributes
         '''
         imemory = []
 
-        # if CONF.compute_hugepages:
-        if os.path.isfile("/etc/nova/compute_reserved.conf"):
+        if os.path.isfile("/etc/platform/worker_reserved.conf"):
             imemory = self._inode_get_memory_hugepages()
         else:
             imemory = self._inode_get_memory_nonhugepages()
 
@@ -75,7 +75,7 @@ class AgentAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
         :returns: none ... uses asynchronous cast().
         """
         # fanout / broadcast message to all inventory agents
-        # to change systemname on all nodes ... standby controller and compute nodes
+        # to change systemname on all nodes ... standby controller and worker nodes
         LOG.debug("AgentApi.configure_isystemname: fanout_cast: sending systemname to agent")
         retval = self.fanout_cast(context, self.make_msg('configure_isystemname',
                                                          systemname=systemname))
@@ -481,8 +481,8 @@ def _check_host(ihost):
     elif ihost.administrative != constants.ADMIN_LOCKED and not \
             utils.is_host_simplex_controller(ihost):
         raise wsme.exc.ClientSideError(_('Host must be locked.'))
-    if constants.COMPUTE not in ihost.subfunctions:
-        raise wsme.exc.ClientSideError(_('Can only modify compute node cores.'))
+    if constants.WORKER not in ihost.subfunctions:
+        raise wsme.exc.ClientSideError(_('Can only modify worker node cores.'))
 
 
 def _update_vswitch_cpu_counts(host, cpu, counts, capabilities=None):
@@ -511,7 +511,7 @@ def _update_vswitch_cpu_counts(host, cpu, counts, capabilities=None):
         count *= 2
         counts[s][constants.VSWITCH_FUNCTION] = count
         # let the remaining values grow/shrink dynamically
-        counts[s][constants.VM_FUNCTION] = 0
+        counts[s][constants.APPLICATION_FUNCTION] = 0
         counts[s][constants.NO_FUNCTION] = 0
     return counts
 
@@ -543,7 +543,7 @@ def _update_shared_cpu_counts(host, cpu, counts, capabilities=None):
         count *= 2
         counts[s][constants.SHARED_FUNCTION] = count
         # let the remaining values grow/shrink dynamically
-        counts[s][constants.VM_FUNCTION] = 0
+        counts[s][constants.APPLICATION_FUNCTION] = 0
         counts[s][constants.NO_FUNCTION] = 0
     return counts
 
@@ -573,7 +573,7 @@ def _update_platform_cpu_counts(host, cpu, counts, capabilities=None):
         count *= 2
         counts[s][constants.PLATFORM_FUNCTION] = count
         # let the remaining values grow/shrink dynamically
-        counts[s][constants.VM_FUNCTION] = 0
+        counts[s][constants.APPLICATION_FUNCTION] = 0
         counts[s][constants.NO_FUNCTION] = 0
     return counts
 
@@ -15,7 +15,7 @@ CORE_FUNCTIONS = [
     constants.PLATFORM_FUNCTION,
     constants.VSWITCH_FUNCTION,
     constants.SHARED_FUNCTION,
-    constants.VM_FUNCTION,
+    constants.APPLICATION_FUNCTION,
     constants.NO_FUNCTION
 ]
 
@@ -64,7 +64,7 @@ class CpuProfile(object):
             cur_processor.vswitch += 1
         elif cpu.allocated_function == constants.SHARED_FUNCTION:
             cur_processor.shared += 1
-        elif cpu.allocated_function == constants.VM_FUNCTION:
+        elif cpu.allocated_function == constants.APPLICATION_FUNCTION:
             cur_processor.vms += 1
 
         self.number_of_cpu = len(self.processors)
@@ -108,12 +108,12 @@ class HostCpuProfile(CpuProfile):
         if platform_cores == 0:
             error_string = "There must be at least one core for %s." % \
                            constants.PLATFORM_FUNCTION
-        elif constants.COMPUTE in self.subfunctions and vswitch_cores == 0:
+        elif constants.WORKER in self.subfunctions and vswitch_cores == 0:
             error_string = "There must be at least one core for %s." % \
                            constants.VSWITCH_FUNCTION
-        elif constants.COMPUTE in self.subfunctions and vm_cores == 0:
+        elif constants.WORKER in self.subfunctions and vm_cores == 0:
             error_string = "There must be at least one core for %s." % \
-                           constants.VM_FUNCTION
+                           constants.APPLICATION_FUNCTION
         return error_string
 
 
@@ -140,12 +140,12 @@ def check_profile_core_functions(personality, profile):
     if platform_cores == 0:
         error_string = "There must be at least one core for %s." % \
                        constants.PLATFORM_FUNCTION
-    elif constants.COMPUTE in personality and vswitch_cores == 0:
+    elif constants.WORKER in personality and vswitch_cores == 0:
         error_string = "There must be at least one core for %s." % \
                        constants.VSWITCH_FUNCTION
-    elif constants.COMPUTE in personality and vm_cores == 0:
+    elif constants.WORKER in personality and vm_cores == 0:
         error_string = "There must be at least one core for %s." % \
-                       constants.VM_FUNCTION
+                       constants.APPLICATION_FUNCTION
     return error_string
 
 
@@ -162,26 +162,26 @@ def check_core_functions(personality, icpus):
             vswitch_cores += 1
         elif allocated_function == constants.SHARED_FUNCTION:
             shared_cores += 1
-        elif allocated_function == constants.VM_FUNCTION:
+        elif allocated_function == constants.APPLICATION_FUNCTION:
             vm_cores += 1
 
     error_string = ""
     if platform_cores == 0:
         error_string = "There must be at least one core for %s." % \
                        constants.PLATFORM_FUNCTION
-    elif constants.COMPUTE in personality and vswitch_cores == 0:
+    elif constants.WORKER in personality and vswitch_cores == 0:
         error_string = "There must be at least one core for %s." % \
                        constants.VSWITCH_FUNCTION
-    elif constants.COMPUTE in personality and vm_cores == 0:
+    elif constants.WORKER in personality and vm_cores == 0:
         error_string = "There must be at least one core for %s." % \
-                       constants.VM_FUNCTION
+                       constants.APPLICATION_FUNCTION
     return error_string
 
 
 def get_default_function(host):
     """Return the default function to be assigned to cpus on this host"""
-    if constants.COMPUTE in host.subfunctions:
-        return constants.VM_FUNCTION
+    if constants.WORKER in host.subfunctions:
+        return constants.APPLICATION_FUNCTION
     return constants.PLATFORM_FUNCTION
 
 
@@ -265,14 +265,14 @@ def check_core_allocations(host, cpu_counts, func):
         total_shared_cores += shared_cores
     if func.lower() == constants.PLATFORM_FUNCTION.lower():
         if ((constants.CONTROLLER in host.subfunctions) and
-                (constants.COMPUTE in host.subfunctions)):
+                (constants.WORKER in host.subfunctions)):
             if total_platform_cores < 2:
                 return "%s must have at least two cores." % \
                        constants.PLATFORM_FUNCTION
         elif total_platform_cores == 0: