Change compute node to worker node personality
This update renames the compute personality and subfunction to worker, and updates internal and customer-visible references accordingly. In addition, the compute-huge package has been renamed to worker-utils, as it contains various scripts/services used to affine running tasks or interface IRQs to specific CPUs. The worker_reserved.conf file is now installed to /etc/platform. The CPU function 'VM' has also been renamed to 'Application'.

Tests Performed:
Non-containerized deployment:
  AIO-SX: Sanity and Nightly automated test suite
  AIO-DX: Sanity and Nightly automated test suite
  2+2 System: Sanity and Nightly automated test suite
  2+2 System: Horizon Patch Orchestration
Kubernetes deployment:
  AIO-SX: Create, delete, reboot and rebuild instances
  2+2+2 System: worker nodes are unlocked and enabled with no alarms

Story: 2004022
Task: 27013
Change-Id: I0e0be6b3a6f25f7fb8edf64ea4326854513aa396
Signed-off-by: Tao Liu <tao.liu@windriver.com>
This commit is contained in:
parent b10b557afe
commit 6256b0d106
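The change relocates the reserved-CPU configuration from /etc/nova/compute_reserved.conf to /etc/platform/worker_reserved.conf and renames its COMPUTE_* keys to WORKER_*. As a minimal sketch of how a consumer reads the renamed file (the path and key names such as WORKER_PLATFORM_CORES and WORKER_BASE_RESERVED are taken from the diff below; the helper function itself is illustrative and not part of this change):

# Hypothetical helper: parse WORKER_* entries from the relocated config file,
# mirroring the simple line splitting done by the sysinv agent in this diff.
WORKER_RESERVED_CONF = '/etc/platform/worker_reserved.conf'

def read_worker_reserved(path=WORKER_RESERVED_CONF):
    """Return a dict of WORKER_* settings, e.g. WORKER_PLATFORM_CORES."""
    settings = {}
    with open(path, 'r') as infile:
        for line in infile:
            line = line.strip()
            if not line or line.startswith('#') or '=' not in line:
                continue
            key, val = line.split('=', 1)
            if key.startswith('WORKER_'):
                # Values keep their shell-style quoting, e.g. ("node0:0").
                settings[key] = val.strip()
    return settings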
@ -2,13 +2,13 @@
|
|||||||
# If these have dependencies, they will be pulled in automatically
|
# If these have dependencies, they will be pulled in automatically
|
||||||
#
|
#
|
||||||
|
|
||||||
# compute-huge
|
# worker-utils
|
||||||
compute-huge
|
worker-utils
|
||||||
|
|
||||||
# computeconfig
|
# workerconfig
|
||||||
computeconfig
|
workerconfig
|
||||||
computeconfig-standalone
|
workerconfig-standalone
|
||||||
computeconfig-subfunction
|
workerconfig-subfunction
|
||||||
|
|
||||||
# configutilities
|
# configutilities
|
||||||
configutilities
|
configutilities
|
||||||
@ -30,7 +30,7 @@ sysinv
|
|||||||
|
|
||||||
# config-gate
|
# config-gate
|
||||||
config-gate
|
config-gate
|
||||||
config-gate-compute
|
config-gate-worker
|
||||||
|
|
||||||
# puppet-manifests
|
# puppet-manifests
|
||||||
puppet-manifests
|
puppet-manifests
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
compute-huge
|
worker-utils
|
||||||
computeconfig
|
workerconfig
|
||||||
configutilities
|
configutilities
|
||||||
controllerconfig
|
controllerconfig
|
||||||
storageconfig
|
storageconfig
|
||||||
|
@ -1,13 +0,0 @@
|
|||||||
Metadata-Version: 1.1
|
|
||||||
Name: compute-huge
|
|
||||||
Version: 1.0
|
|
||||||
Summary: Initial compute node hugepages and reserved cpus configuration
|
|
||||||
Home-page:
|
|
||||||
Author: Windriver
|
|
||||||
Author-email: info@windriver.com
|
|
||||||
License: Apache-2.0
|
|
||||||
|
|
||||||
Description: Initial compute node hugepages and reserved cpus configuration
|
|
||||||
|
|
||||||
|
|
||||||
Platform: UNKNOWN
|
|
@ -1,3 +0,0 @@
|
|||||||
SRC_DIR="compute-huge"
|
|
||||||
COPY_LIST="$SRC_DIR/LICENSE"
|
|
||||||
TIS_PATCH_VER=10
|
|
@ -1,24 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# Copyright (c) 2014,2016 Wind River Systems, Inc.
|
|
||||||
#
|
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
|
||||||
#
|
|
||||||
|
|
||||||
#
|
|
||||||
# compute-huge.sh "goenabled" check.
|
|
||||||
#
|
|
||||||
# If a problem was detected during configuration of huge pages and compute
|
|
||||||
# resources then the board is not allowed to enable.
|
|
||||||
#
|
|
||||||
COMPUTE_HUGE_GOENABLED="/var/run/compute_huge_goenabled"
|
|
||||||
|
|
||||||
source "/etc/init.d/log_functions.sh"
|
|
||||||
source "/usr/bin/tsconfig"
|
|
||||||
|
|
||||||
if [ -e ${VOLATILE_COMPUTE_CONFIG_COMPLETE} -a ! -f ${COMPUTE_HUGE_GOENABLED} ]; then
|
|
||||||
log_error "Compute manifest CPU configuration check failed. Failing goenabled check."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
exit 0
|
|
@ -1,13 +0,0 @@
|
|||||||
Metadata-Version: 1.1
|
|
||||||
Name: computeconfig
|
|
||||||
Version: 1.0
|
|
||||||
Summary: Initial compute node configuration
|
|
||||||
Home-page:
|
|
||||||
Author: Windriver
|
|
||||||
Author-email: info@windriver.com
|
|
||||||
License: Apache-2.0
|
|
||||||
|
|
||||||
Description: Initial compute node configuration
|
|
||||||
|
|
||||||
|
|
||||||
Platform: UNKNOWN
|
|
@ -1,2 +0,0 @@
|
|||||||
SRC_DIR="computeconfig"
|
|
||||||
TIS_PATCH_VER=11
|
|
@ -1,85 +0,0 @@
|
|||||||
Summary: computeconfig
|
|
||||||
Name: computeconfig
|
|
||||||
Version: 1.0
|
|
||||||
Release: %{tis_patch_ver}%{?_tis_dist}
|
|
||||||
License: Apache-2.0
|
|
||||||
Group: base
|
|
||||||
Packager: Wind River <info@windriver.com>
|
|
||||||
URL: unknown
|
|
||||||
Source0: %{name}-%{version}.tar.gz
|
|
||||||
|
|
||||||
%define debug_package %{nil}
|
|
||||||
|
|
||||||
Requires: systemd
|
|
||||||
|
|
||||||
%description
|
|
||||||
Initial compute node configuration
|
|
||||||
|
|
||||||
%package -n computeconfig-standalone
|
|
||||||
Summary: computeconfig
|
|
||||||
Group: base
|
|
||||||
|
|
||||||
%description -n computeconfig-standalone
|
|
||||||
Initial compute node configuration
|
|
||||||
|
|
||||||
%package -n computeconfig-subfunction
|
|
||||||
Summary: computeconfig
|
|
||||||
Group: base
|
|
||||||
|
|
||||||
%description -n computeconfig-subfunction
|
|
||||||
Initial compute node configuration
|
|
||||||
|
|
||||||
%define initddir /etc/init.d/
|
|
||||||
%define goenableddir /etc/goenabled.d/
|
|
||||||
%define systemddir /etc/systemd/system/
|
|
||||||
|
|
||||||
%prep
|
|
||||||
%setup
|
|
||||||
|
|
||||||
%build
|
|
||||||
|
|
||||||
%install
|
|
||||||
make install INITDDIR=%{buildroot}%{initddir} GOENABLEDDIR=%{buildroot}%{goenableddir} SYSTEMDDIR=%{buildroot}%{systemddir}
|
|
||||||
|
|
||||||
%post -n computeconfig-standalone
|
|
||||||
if [ ! -e $D%{systemddir}/computeconfig.service ]; then
|
|
||||||
cp $D%{systemddir}/config/computeconfig-standalone.service $D%{systemddir}/computeconfig.service
|
|
||||||
else
|
|
||||||
cmp -s $D%{systemddir}/config/computeconfig-standalone.service $D%{systemddir}/computeconfig.service
|
|
||||||
if [ $? -ne 0 ]; then
|
|
||||||
rm -f $D%{systemddir}/computeconfig.service
|
|
||||||
cp $D%{systemddir}/config/computeconfig-standalone.service $D%{systemddir}/computeconfig.service
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
systemctl enable computeconfig.service
|
|
||||||
|
|
||||||
|
|
||||||
%post -n computeconfig-subfunction
|
|
||||||
if [ ! -e $D%{systemddir}/computeconfig.service ]; then
|
|
||||||
cp $D%{systemddir}/config/computeconfig-combined.service $D%{systemddir}/computeconfig.service
|
|
||||||
else
|
|
||||||
cmp -s $D%{systemddir}/config/computeconfig-combined.service $D%{systemddir}/computeconfig.service
|
|
||||||
if [ $? -ne 0 ]; then
|
|
||||||
rm -f $D%{systemddir}/computeconfig.service
|
|
||||||
cp $D%{systemddir}/config/computeconfig-combined.service $D%{systemddir}/computeconfig.service
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
systemctl enable computeconfig.service
|
|
||||||
|
|
||||||
%clean
|
|
||||||
|
|
||||||
%files
|
|
||||||
%defattr(-,root,root,-)
|
|
||||||
%doc LICENSE
|
|
||||||
%{initddir}/*
|
|
||||||
|
|
||||||
%files -n computeconfig-standalone
|
|
||||||
%defattr(-,root,root,-)
|
|
||||||
%dir %{systemddir}/config
|
|
||||||
%{systemddir}/config/computeconfig-standalone.service
|
|
||||||
%{goenableddir}/*
|
|
||||||
|
|
||||||
%files -n computeconfig-subfunction
|
|
||||||
%defattr(-,root,root,-)
|
|
||||||
%dir %{systemddir}/config
|
|
||||||
%{systemddir}/config/computeconfig-combined.service
|
|
@ -15,12 +15,12 @@ Requires: systemd
|
|||||||
%description
|
%description
|
||||||
Startup configuration gate
|
Startup configuration gate
|
||||||
|
|
||||||
%package -n %{name}-compute
|
%package -n %{name}-worker
|
||||||
Summary: config-gate-compute
|
Summary: config-gate-worker
|
||||||
Group: base
|
Group: base
|
||||||
|
|
||||||
%description -n %{name}-compute
|
%description -n %{name}-worker
|
||||||
Startup compute configuration gate
|
Startup worker configuration gate
|
||||||
|
|
||||||
%define local_etc_systemd /etc/systemd/system/
|
%define local_etc_systemd /etc/systemd/system/
|
||||||
|
|
||||||
@ -35,8 +35,8 @@ make install SBINDIR=%{buildroot}%{_sbindir} SYSTEMDDIR=%{buildroot}%{local_etc_
|
|||||||
%post
|
%post
|
||||||
systemctl enable config.service
|
systemctl enable config.service
|
||||||
|
|
||||||
%post -n %{name}-compute
|
%post -n %{name}-worker
|
||||||
systemctl enable compute-config-gate.service
|
systemctl enable worker-config-gate.service
|
||||||
|
|
||||||
%clean
|
%clean
|
||||||
|
|
||||||
@ -46,7 +46,7 @@ systemctl enable compute-config-gate.service
|
|||||||
%{_sbindir}/wait_for_config_init.sh
|
%{_sbindir}/wait_for_config_init.sh
|
||||||
%{local_etc_systemd}/config.service
|
%{local_etc_systemd}/config.service
|
||||||
|
|
||||||
%files -n %{name}-compute
|
%files -n %{name}-worker
|
||||||
%defattr(-,root,root,-)
|
%defattr(-,root,root,-)
|
||||||
%{_sbindir}/wait_for_compute_config_init.sh
|
%{_sbindir}/wait_for_worker_config_init.sh
|
||||||
%{local_etc_systemd}/compute-config-gate.service
|
%{local_etc_systemd}/worker-config-gate.service
|
||||||
|
@ -9,6 +9,6 @@ install:
|
|||||||
install -d -m 755 $(SBINDIR)
|
install -d -m 755 $(SBINDIR)
|
||||||
install -d -m 755 $(SYSTEMDDIR)
|
install -d -m 755 $(SYSTEMDDIR)
|
||||||
install -p -D -m 555 wait_for_config_init.sh $(SBINDIR)/wait_for_config_init.sh
|
install -p -D -m 555 wait_for_config_init.sh $(SBINDIR)/wait_for_config_init.sh
|
||||||
install -p -D -m 555 wait_for_compute_config_init.sh $(SBINDIR)/wait_for_compute_config_init.sh
|
install -p -D -m 555 wait_for_worker_config_init.sh $(SBINDIR)/wait_for_worker_config_init.sh
|
||||||
install -p -D -m 444 config.service $(SYSTEMDDIR)/config.service
|
install -p -D -m 444 config.service $(SYSTEMDDIR)/config.service
|
||||||
install -p -D -m 444 compute-config-gate.service $(SYSTEMDDIR)/compute-config-gate.service
|
install -p -D -m 444 worker-config-gate.service $(SYSTEMDDIR)/worker-config-gate.service
|
||||||
|
@ -14,8 +14,8 @@ case $nodetype in
|
|||||||
controller)
|
controller)
|
||||||
SERVICE=controllerconfig.service
|
SERVICE=controllerconfig.service
|
||||||
;;
|
;;
|
||||||
compute)
|
worker)
|
||||||
SERVICE=computeconfig.service
|
SERVICE=workerconfig.service
|
||||||
;;
|
;;
|
||||||
storage)
|
storage)
|
||||||
SERVICE=storageconfig.service
|
SERVICE=storageconfig.service
|
||||||
|
@ -1,13 +1,13 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
#
|
#
|
||||||
# Copyright (c) 2016 Wind River Systems, Inc.
|
# Copyright (c) 2016-2018 Wind River Systems, Inc.
|
||||||
#
|
#
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
#
|
#
|
||||||
|
|
||||||
# Wait for compute config service
|
# Wait for worker config service
|
||||||
|
|
||||||
SERVICE=computeconfig.service
|
SERVICE=workerconfig.service
|
||||||
|
|
||||||
while :; do
|
while :; do
|
||||||
systemctl status $SERVICE |grep -q running
|
systemctl status $SERVICE |grep -q running
|
@ -1,11 +1,11 @@
|
|||||||
[Unit]
|
[Unit]
|
||||||
Description=TIS compute config gate
|
Description=STX worker config gate
|
||||||
After=sw-patch.service computeconfig.service
|
After=sw-patch.service workerconfig.service
|
||||||
Before=serial-getty@ttyS0.service getty@tty1.service
|
Before=serial-getty@ttyS0.service getty@tty1.service
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=oneshot
|
Type=oneshot
|
||||||
ExecStart=/usr/sbin/wait_for_compute_config_init.sh
|
ExecStart=/usr/sbin/wait_for_worker_config_init.sh
|
||||||
ExecStop=
|
ExecStop=
|
||||||
ExecReload=
|
ExecReload=
|
||||||
RemainAfterExit=yes
|
RemainAfterExit=yes
|
@ -1202,8 +1202,8 @@ def overwrite_iscsi_target_config():
|
|||||||
def restore_complete():
|
def restore_complete():
|
||||||
"""
|
"""
|
||||||
Restore proper ISCSI configuration file after cinder restore.
|
Restore proper ISCSI configuration file after cinder restore.
|
||||||
Enable compute functionality for AIO system.
|
Enable worker functionality for AIO system.
|
||||||
:return: True if compute-config-complete is executed
|
:return: True if worker-config-complete is executed
|
||||||
"""
|
"""
|
||||||
if utils.get_system_type() == sysinv_constants.TIS_AIO_BUILD:
|
if utils.get_system_type() == sysinv_constants.TIS_AIO_BUILD:
|
||||||
if not os.path.isfile(restore_system_ready):
|
if not os.path.isfile(restore_system_ready):
|
||||||
@ -1223,21 +1223,21 @@ def restore_complete():
|
|||||||
# we use use that.
|
# we use use that.
|
||||||
overwrite_iscsi_target_config()
|
overwrite_iscsi_target_config()
|
||||||
|
|
||||||
print("\nApplying compute manifests for %s. " %
|
print("\nApplying worker manifests for %s. " %
|
||||||
(utils.get_controller_hostname()))
|
(utils.get_controller_hostname()))
|
||||||
print("Node will reboot on completion.")
|
print("Node will reboot on completion.")
|
||||||
|
|
||||||
sysinv.do_compute_config_complete(utils.get_controller_hostname())
|
sysinv.do_worker_config_complete(utils.get_controller_hostname())
|
||||||
|
|
||||||
# show in-progress log on console every 30 seconds
|
# show in-progress log on console every 30 seconds
|
||||||
# until self reboot or timeout
|
# until self reboot or timeout
|
||||||
os.remove(restore_system_ready)
|
os.remove(restore_system_ready)
|
||||||
time.sleep(30)
|
time.sleep(30)
|
||||||
for i in range(1, 10):
|
for i in range(1, 10):
|
||||||
print("compute manifest apply in progress ... ")
|
print("worker manifest apply in progress ... ")
|
||||||
time.sleep(30)
|
time.sleep(30)
|
||||||
|
|
||||||
raise RestoreFail("Timeout running compute manifests, "
|
raise RestoreFail("Timeout running worker manifests, "
|
||||||
"reboot did not occur")
|
"reboot did not occur")
|
||||||
|
|
||||||
else:
|
else:
|
||||||
@ -1655,7 +1655,7 @@ def restore_system(backup_file, include_storage_reinstall=False, clone=False):
|
|||||||
print(textwrap.fill(
|
print(textwrap.fill(
|
||||||
"Failed to lock at least one node. " +
|
"Failed to lock at least one node. " +
|
||||||
"Please lock the unlocked controller-1 or " +
|
"Please lock the unlocked controller-1 or " +
|
||||||
"compute nodes manually.", 80
|
"worker nodes manually.", 80
|
||||||
))
|
))
|
||||||
|
|
||||||
if not clone:
|
if not clone:
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#
|
#
|
||||||
# Copyright (c) 2014-2017 Wind River Systems, Inc.
|
# Copyright (c) 2014-2018 Wind River Systems, Inc.
|
||||||
#
|
#
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
#
|
#
|
||||||
@ -23,7 +23,7 @@ API_VERSION = 1
|
|||||||
HOST_PERSONALITY_NOT_SET = ""
|
HOST_PERSONALITY_NOT_SET = ""
|
||||||
HOST_PERSONALITY_UNKNOWN = "unknown"
|
HOST_PERSONALITY_UNKNOWN = "unknown"
|
||||||
HOST_PERSONALITY_CONTROLLER = "controller"
|
HOST_PERSONALITY_CONTROLLER = "controller"
|
||||||
HOST_PERSONALITY_COMPUTE = "compute"
|
HOST_PERSONALITY_WORKER = "worker"
|
||||||
HOST_PERSONALITY_STORAGE = "storage"
|
HOST_PERSONALITY_STORAGE = "storage"
|
||||||
|
|
||||||
# Host Administrative State Constants
|
# Host Administrative State Constants
|
||||||
@ -87,8 +87,8 @@ class Host(object):
|
|||||||
# Set personality
|
# Set personality
|
||||||
if host_data['personality'] == "controller":
|
if host_data['personality'] == "controller":
|
||||||
self.personality = HOST_PERSONALITY_CONTROLLER
|
self.personality = HOST_PERSONALITY_CONTROLLER
|
||||||
elif host_data['personality'] == "compute":
|
elif host_data['personality'] == "worker":
|
||||||
self.personality = HOST_PERSONALITY_COMPUTE
|
self.personality = HOST_PERSONALITY_WORKER
|
||||||
elif host_data['personality'] == "storage":
|
elif host_data['personality'] == "storage":
|
||||||
self.personality = HOST_PERSONALITY_STORAGE
|
self.personality = HOST_PERSONALITY_STORAGE
|
||||||
else:
|
else:
|
||||||
@ -334,8 +334,8 @@ def get_hosts(admin_token, region_name, personality=None,
|
|||||||
personality == HOST_PERSONALITY_CONTROLLER):
|
personality == HOST_PERSONALITY_CONTROLLER):
|
||||||
host_list.append(Host(host['hostname'], host))
|
host_list.append(Host(host['hostname'], host))
|
||||||
|
|
||||||
elif (host['personality'] == "compute" and
|
elif (host['personality'] == "worker" and
|
||||||
personality == HOST_PERSONALITY_COMPUTE):
|
personality == HOST_PERSONALITY_WORKER):
|
||||||
host_list.append(Host(host['hostname'], host))
|
host_list.append(Host(host['hostname'], host))
|
||||||
|
|
||||||
elif (host['personality'] == "storage" and
|
elif (host['personality'] == "storage" and
|
||||||
@ -537,24 +537,24 @@ def get_host_data(hostname):
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
def do_compute_config_complete(hostname):
|
def do_worker_config_complete(hostname):
|
||||||
""" enable compute functionality """
|
""" enable worker functionality """
|
||||||
try:
|
try:
|
||||||
with openstack.OpenStack() as client:
|
with openstack.OpenStack() as client:
|
||||||
hosts = get_hosts(client.admin_token,
|
hosts = get_hosts(client.admin_token,
|
||||||
client.conf['region_name'])
|
client.conf['region_name'])
|
||||||
for host in hosts:
|
for host in hosts:
|
||||||
if hostname == host.name:
|
if hostname == host.name:
|
||||||
# Create/apply compute manifests
|
# Create/apply worker manifests
|
||||||
values = {
|
values = {
|
||||||
'action': "subfunction_config"
|
'action': "subfunction_config"
|
||||||
}
|
}
|
||||||
patch = dict_to_patch(values)
|
patch = dict_to_patch(values)
|
||||||
LOG.info("Applying compute manifests: {} [{}]"
|
LOG.info("Applying worker manifests: {} [{}]"
|
||||||
.format(host, patch))
|
.format(host, patch))
|
||||||
client.sysinv.ihost.update(host.uuid, patch)
|
client.sysinv.ihost.update(host.uuid, patch)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.exception("compute_config_complete failed")
|
LOG.exception("worker_config_complete failed")
|
||||||
raise e
|
raise e
|
||||||
|
|
||||||
|
|
||||||
|
@ -325,7 +325,7 @@ def lag_mode_to_str(lag_mode):
|
|||||||
|
|
||||||
|
|
||||||
def is_combined_load():
|
def is_combined_load():
|
||||||
return 'compute' in tsconfig.subfunctions
|
return 'worker' in tsconfig.subfunctions
|
||||||
|
|
||||||
|
|
||||||
def get_system_type():
|
def get_system_type():
|
||||||
|
@ -184,27 +184,27 @@ def update_db(archive_dir, backup_name):
|
|||||||
shutil.rmtree(tmpdir, ignore_errors=True)
|
shutil.rmtree(tmpdir, ignore_errors=True)
|
||||||
|
|
||||||
|
|
||||||
def config_compute():
|
def config_worker():
|
||||||
"""
|
"""
|
||||||
Enable compute functionality for AIO system.
|
Enable worker functionality for AIO system.
|
||||||
:return: True if compute-config-complete is executed
|
:return: True if worker-config-complete is executed
|
||||||
"""
|
"""
|
||||||
if utils.get_system_type() == si_const.TIS_AIO_BUILD:
|
if utils.get_system_type() == si_const.TIS_AIO_BUILD:
|
||||||
console_log("Applying compute manifests for {}. "
|
console_log("Applying worker manifests for {}. "
|
||||||
"Node will reboot on completion."
|
"Node will reboot on completion."
|
||||||
.format(utils.get_controller_hostname()))
|
.format(utils.get_controller_hostname()))
|
||||||
sysinv.do_compute_config_complete(utils.get_controller_hostname())
|
sysinv.do_worker_config_complete(utils.get_controller_hostname())
|
||||||
time.sleep(30)
|
time.sleep(30)
|
||||||
# compute-config-complete has no logs to console. So, wait
|
# worker-config-complete has no logs to console. So, wait
|
||||||
# for some time before showing the login prompt.
|
# for some time before showing the login prompt.
|
||||||
for i in range(1, 10):
|
for i in range(1, 10):
|
||||||
console_log("compute-config in progress..")
|
console_log("worker-config in progress..")
|
||||||
time.sleep(30)
|
time.sleep(30)
|
||||||
console_log("Timed out on do_compute_config_complete")
|
console_log("Timed out on do_worker_config_complete")
|
||||||
raise CloneFail("Timed out on do_compute_config_complete")
|
raise CloneFail("Timed out on do_worker_config_complete")
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
# compute_config_complete is not needed.
|
# worker_config_complete is not needed.
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
@ -302,8 +302,8 @@ if os.path.exists(INI_FILE):
|
|||||||
console_log("Images archive installed from [%s]" % clone_name)
|
console_log("Images archive installed from [%s]" % clone_name)
|
||||||
finalize_install()
|
finalize_install()
|
||||||
set_result(clone.OK)
|
set_result(clone.OK)
|
||||||
if not config_compute():
|
if not config_worker():
|
||||||
# do cleanup if compute_config_complete is not required
|
# do cleanup if worker_config_complete is not required
|
||||||
cleanup()
|
cleanup()
|
||||||
elif last_result == clone.OK:
|
elif last_result == clone.OK:
|
||||||
# Installation completed successfully before last reboot
|
# Installation completed successfully before last reboot
|
||||||
|
@ -53,11 +53,11 @@ mkdir -p ${PUPPET_TMP}/hieradata
|
|||||||
cp /etc/puppet/hieradata/global.yaml ${PUPPET_TMP}/hieradata/global.yaml
|
cp /etc/puppet/hieradata/global.yaml ${PUPPET_TMP}/hieradata/global.yaml
|
||||||
cp /etc/puppet/hieradata/${PERSONALITY}.yaml ${PUPPET_TMP}/hieradata/personality.yaml
|
cp /etc/puppet/hieradata/${PERSONALITY}.yaml ${PUPPET_TMP}/hieradata/personality.yaml
|
||||||
|
|
||||||
# When the compute node is first booted and goes online, sysinv-agent reports
|
# When the worker node is first booted and goes online, sysinv-agent reports
|
||||||
# host CPU inventory which triggers the first runtime manifest apply that updates
|
# host CPU inventory which triggers the first runtime manifest apply that updates
|
||||||
# the grub. At this time, copying the host file failed due to a timing issue that
|
# the grub. At this time, copying the host file failed due to a timing issue that
|
||||||
# has not yet been fully understood. Subsequent retries worked.
|
# has not yet been fully understood. Subsequent retries worked.
|
||||||
if [ "${PERSONALITY}" = "compute" ]; then
|
if [ "${PERSONALITY}" = "worker" ]; then
|
||||||
n=0
|
n=0
|
||||||
until [ $n -ge 3 ]; do
|
until [ $n -ge 3 ]; do
|
||||||
cp -f ${HIERADATA}/${HOST}.yaml ${PUPPET_TMP}/hieradata/host.yaml && break
|
cp -f ${HIERADATA}/${HOST}.yaml ${PUPPET_TMP}/hieradata/host.yaml && break
|
||||||
|
@ -46,7 +46,7 @@ CONFIG_ADMIN_PROJECT_DOMAIN_NAME: Default
|
|||||||
|
|
||||||
|
|
||||||
# mtce
|
# mtce
|
||||||
platform::mtce::agent::params::compute_boot_timeout: 720
|
platform::mtce::agent::params::worker_boot_timeout: 720
|
||||||
platform::mtce::agent::params::controller_boot_timeout: 1200
|
platform::mtce::agent::params::controller_boot_timeout: 1200
|
||||||
platform::mtce::agent::params::heartbeat_period: 100
|
platform::mtce::agent::params::heartbeat_period: 100
|
||||||
platform::mtce::agent::params::heartbeat_failure_action: 'fail'
|
platform::mtce::agent::params::heartbeat_failure_action: 'fail'
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# compute specific configuration data
|
# worker specific configuration data
|
||||||
---
|
---
|
||||||
|
|
||||||
# vswitch
|
# vswitch
|
@ -1,5 +1,5 @@
|
|||||||
#
|
#
|
||||||
# puppet manifest for compute hosts
|
# puppet manifest for worker nodes
|
||||||
#
|
#
|
||||||
|
|
||||||
Exec {
|
Exec {
|
||||||
@ -48,7 +48,7 @@ include ::openstack::nova::placement
|
|||||||
include ::openstack::ceilometer
|
include ::openstack::ceilometer
|
||||||
include ::openstack::ceilometer::polling
|
include ::openstack::ceilometer::polling
|
||||||
|
|
||||||
class { '::platform::config::compute::post':
|
class { '::platform::config::worker::post':
|
||||||
stage => post,
|
stage => post,
|
||||||
}
|
}
|
||||||
|
|
@ -227,7 +227,7 @@ class openstack::ceilometer::polling (
|
|||||||
$central_namespace = false
|
$central_namespace = false
|
||||||
}
|
}
|
||||||
|
|
||||||
if (str2bool($::disable_compute_services) or
|
if (str2bool($::disable_worker_services) or
|
||||||
$::platform::kubernetes::params::enabled) {
|
$::platform::kubernetes::params::enabled) {
|
||||||
$agent_enable = false
|
$agent_enable = false
|
||||||
$compute_namespace = false
|
$compute_namespace = false
|
||||||
@ -238,7 +238,7 @@ class openstack::ceilometer::polling (
|
|||||||
} else {
|
} else {
|
||||||
$agent_enable = true
|
$agent_enable = true
|
||||||
|
|
||||||
if str2bool($::is_compute_subfunction) {
|
if str2bool($::is_worker_subfunction) {
|
||||||
$pmon_target = "/etc/ceilometer/ceilometer-polling-compute.conf.pmon"
|
$pmon_target = "/etc/ceilometer/ceilometer-polling-compute.conf.pmon"
|
||||||
$compute_namespace = true
|
$compute_namespace = true
|
||||||
} else {
|
} else {
|
||||||
|
@ -195,7 +195,7 @@ class openstack::neutron::agents
|
|||||||
|
|
||||||
include ::platform::kubernetes::params
|
include ::platform::kubernetes::params
|
||||||
|
|
||||||
if (str2bool($::disable_compute_services) or
|
if (str2bool($::disable_worker_services) or
|
||||||
$::platform::kubernetes::params::enabled) {
|
$::platform::kubernetes::params::enabled) {
|
||||||
$pmon_ensure = absent
|
$pmon_ensure = absent
|
||||||
|
|
||||||
|
@ -1,7 +0,0 @@
|
|||||||
# Returns true if compute services should be disabled
|
|
||||||
|
|
||||||
Facter.add("disable_compute_services") do
|
|
||||||
setcode do
|
|
||||||
File.exist?('/var/run/.disable_compute_services')
|
|
||||||
end
|
|
||||||
end
|
|
@ -0,0 +1,7 @@
|
|||||||
|
# Returns true if worker services should be disabled
|
||||||
|
|
||||||
|
Facter.add("disable_worker_services") do
|
||||||
|
setcode do
|
||||||
|
File.exist?('/var/run/.disable_worker_services')
|
||||||
|
end
|
||||||
|
end
|
@ -1,19 +1,19 @@
|
|||||||
class platform::compute::params (
|
class platform::compute::params (
|
||||||
$compute_cpu_list = '',
|
$worker_cpu_list = '',
|
||||||
$platform_cpu_list = '',
|
$platform_cpu_list = '',
|
||||||
$reserved_vswitch_cores = '',
|
$reserved_vswitch_cores = '',
|
||||||
$reserved_platform_cores = '',
|
$reserved_platform_cores = '',
|
||||||
$compute_base_reserved = '',
|
$worker_base_reserved = '',
|
||||||
$compute_vswitch_reserved = '',
|
$compute_vswitch_reserved = '',
|
||||||
) { }
|
) { }
|
||||||
|
|
||||||
class platform::compute::config
|
class platform::compute::config
|
||||||
inherits ::platform::compute::params {
|
inherits ::platform::compute::params {
|
||||||
|
|
||||||
file { "/etc/nova/compute_reserved.conf":
|
file { "/etc/platform/worker_reserved.conf":
|
||||||
ensure => 'present',
|
ensure => 'present',
|
||||||
replace => true,
|
replace => true,
|
||||||
content => template('platform/compute_reserved.conf.erb')
|
content => template('platform/worker_reserved.conf.erb')
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -88,7 +88,7 @@ class platform::compute::grub::audit
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
file { "/var/run/compute_huge_goenabled":
|
file { "/var/run/worker_goenabled":
|
||||||
ensure => $ensure,
|
ensure => $ensure,
|
||||||
owner => 'root',
|
owner => 'root',
|
||||||
group => 'root',
|
group => 'root',
|
||||||
@ -276,7 +276,7 @@ class platform::compute::pmqos (
|
|||||||
$hight_wakeup_cpus = '',
|
$hight_wakeup_cpus = '',
|
||||||
) {
|
) {
|
||||||
|
|
||||||
if str2bool($::is_compute_subfunction) and str2bool($::is_lowlatency_subfunction) {
|
if str2bool($::is_worker_subfunction) and str2bool($::is_lowlatency_subfunction) {
|
||||||
|
|
||||||
$script = "/usr/bin/set-cpu-wakeup-latency.sh"
|
$script = "/usr/bin/set-cpu-wakeup-latency.sh"
|
||||||
|
|
||||||
|
@ -289,13 +289,13 @@ class platform::config::controller::post
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
class platform::config::compute::post
|
class platform::config::worker::post
|
||||||
{
|
{
|
||||||
file { "/etc/platform/.initial_compute_config_complete":
|
file { "/etc/platform/.initial_worker_config_complete":
|
||||||
ensure => present,
|
ensure => present,
|
||||||
}
|
}
|
||||||
|
|
||||||
file { "/var/run/.compute_config_complete":
|
file { "/var/run/.worker_config_complete":
|
||||||
ensure => present,
|
ensure => present,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -285,7 +285,7 @@ class platform::kubernetes::worker
|
|||||||
}
|
}
|
||||||
|
|
||||||
if $enabled {
|
if $enabled {
|
||||||
file { "/var/run/.disable_compute_services":
|
file { "/var/run/.disable_worker_services":
|
||||||
ensure => file,
|
ensure => file,
|
||||||
replace => no,
|
replace => no,
|
||||||
}
|
}
|
||||||
|
@ -8,7 +8,7 @@ class platform::mtce::params (
|
|||||||
$auth_user_domain = undef,
|
$auth_user_domain = undef,
|
||||||
$auth_project_domain = undef,
|
$auth_project_domain = undef,
|
||||||
$auth_region = undef,
|
$auth_region = undef,
|
||||||
$compute_boot_timeout = undef,
|
$worker_boot_timeout = undef,
|
||||||
$controller_boot_timeout = undef,
|
$controller_boot_timeout = undef,
|
||||||
$heartbeat_degrade_threshold = undef,
|
$heartbeat_degrade_threshold = undef,
|
||||||
$heartbeat_failure_threshold = undef,
|
$heartbeat_failure_threshold = undef,
|
||||||
|
@ -67,7 +67,7 @@ class platform::postgresql::server (
|
|||||||
# work_mem 512 MB since some ceilometer queries entail extensive
|
# work_mem 512 MB since some ceilometer queries entail extensive
|
||||||
# sorting as well as hash joins and hash based aggregation.
|
# sorting as well as hash joins and hash based aggregation.
|
||||||
# checkpoint_segments increased to reduce frequency of checkpoints
|
# checkpoint_segments increased to reduce frequency of checkpoints
|
||||||
if str2bool($::is_compute_subfunction) or str2bool($::is_virtual) {
|
if str2bool($::is_worker_subfunction) or str2bool($::is_virtual) {
|
||||||
# AIO or virtual box
|
# AIO or virtual box
|
||||||
# 700 connections needs about 80MB shared buffer
|
# 700 connections needs about 80MB shared buffer
|
||||||
# Leave work_mem as the default for vbox and AIO
|
# Leave work_mem as the default for vbox and AIO
|
||||||
|
@ -5,7 +5,7 @@
|
|||||||
#
|
#
|
||||||
# - This file is managed by Puppet. DO NOT EDIT.
|
# - This file is managed by Puppet. DO NOT EDIT.
|
||||||
################################################################################
|
################################################################################
|
||||||
# COMPUTE Node configuration parameters for reserved memory and physical cores
|
# WORKER Node configuration parameters for reserved memory and physical cores
|
||||||
# used by Base software and VSWITCH. These are resources that libvirt cannot use.
|
# used by Base software and VSWITCH. These are resources that libvirt cannot use.
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -16,7 +16,7 @@
|
|||||||
# validity against the actual number of logical CPU instances in the system.
|
# validity against the actual number of logical CPU instances in the system.
|
||||||
#
|
#
|
||||||
################################################################################
|
################################################################################
|
||||||
COMPUTE_CPU_LIST=<%= @compute_cpu_list %>
|
WORKER_CPU_LIST=<%= @worker_cpu_list %>
|
||||||
|
|
||||||
################################################################################
|
################################################################################
|
||||||
#
|
#
|
||||||
@ -32,10 +32,10 @@ PLATFORM_CPU_LIST=<%= @platform_cpu_list %>
|
|||||||
#
|
#
|
||||||
# Example: To reserve 1500MB and 1 core on NUMA node0, and 1500MB and 1 core
|
# Example: To reserve 1500MB and 1 core on NUMA node0, and 1500MB and 1 core
|
||||||
# on NUMA node1, the variable must be specified as follows.
|
# on NUMA node1, the variable must be specified as follows.
|
||||||
# COMPUTE_BASE_MEMORY=("node0:1500MB:1" "node1:1500MB:1")
|
# WORKER_BASE_MEMORY=("node0:1500MB:1" "node1:1500MB:1")
|
||||||
#
|
#
|
||||||
################################################################################
|
################################################################################
|
||||||
COMPUTE_BASE_RESERVED=<%= @compute_base_reserved %>
|
WORKER_BASE_RESERVED=<%= @worker_base_reserved %>
|
||||||
|
|
||||||
################################################################################
|
################################################################################
|
||||||
#
|
#
|
||||||
@ -68,7 +68,7 @@ COMPUTE_VSWITCH_CORES=<%= @reserved_vswitch_cores %>
|
|||||||
#
|
#
|
||||||
# Example: To reserve 1 core on NUMA node0, the variable must be specified
|
# Example: To reserve 1 core on NUMA node0, the variable must be specified
|
||||||
# as follows.
|
# as follows.
|
||||||
# COMPUTE_PLATFORM_CORES=("node0:0")
|
# WORKER_PLATFORM_CORES=("node0:0")
|
||||||
#
|
#
|
||||||
################################################################################
|
################################################################################
|
||||||
COMPUTE_PLATFORM_CORES=<%= @reserved_platform_cores %>
|
WORKER_PLATFORM_CORES=<%= @reserved_platform_cores %>
|
@ -45,7 +45,7 @@ heartbeat_failure_action = <%= @heartbeat_failure_action %>
|
|||||||
mnfa_threshold = <%= @mnfa_threshold %>
|
mnfa_threshold = <%= @mnfa_threshold %>
|
||||||
|
|
||||||
[timeouts]
|
[timeouts]
|
||||||
compute_boot_timeout = <%= @compute_boot_timeout %> ; The max time (seconds) that Mtce waits for the mtcAlive
|
worker_boot_timeout = <%= @worker_boot_timeout %> ; The max time (seconds) that Mtce waits for the mtcAlive
|
||||||
controller_boot_timeout = <%= @controller_boot_timeout %> ; message after which it will time out and fail the host.
|
controller_boot_timeout = <%= @controller_boot_timeout %> ; message after which it will time out and fail the host.
|
||||||
|
|
||||||
; Multi-Node Failure Avoidance (MNFA) Lifecycle Timer.
|
; Multi-Node Failure Avoidance (MNFA) Lifecycle Timer.
|
||||||
|
@ -45,7 +45,7 @@ SB_STATE_CONFIGURING = 'configuring'
|
|||||||
SB_TASK_NONE = None
|
SB_TASK_NONE = None
|
||||||
SB_TASK_RECONFIG_CONTROLLER = 'reconfig-controller'
|
SB_TASK_RECONFIG_CONTROLLER = 'reconfig-controller'
|
||||||
SB_TASK_PROVISION_STORAGE = 'provision-storage'
|
SB_TASK_PROVISION_STORAGE = 'provision-storage'
|
||||||
SB_TASK_RECONFIG_COMPUTE = 'reconfig-compute'
|
SB_TASK_RECONFIG_WORKER = 'reconfig-worker'
|
||||||
SB_TASK_RESIZE_CEPH_MON_LV = 'resize-ceph-mon-lv'
|
SB_TASK_RESIZE_CEPH_MON_LV = 'resize-ceph-mon-lv'
|
||||||
SB_TASK_ADD_OBJECT_GATEWAY = 'add-object-gateway'
|
SB_TASK_ADD_OBJECT_GATEWAY = 'add-object-gateway'
|
||||||
|
|
||||||
|
@ -92,7 +92,7 @@ def do_host_upgrade_list(cc, args):
|
|||||||
help='Hostname of the host')
|
help='Hostname of the host')
|
||||||
@utils.arg('-p', '--personality',
|
@utils.arg('-p', '--personality',
|
||||||
metavar='<personality>',
|
metavar='<personality>',
|
||||||
choices=['controller', 'compute', 'storage', 'network', 'profile'],
|
choices=['controller', 'worker', 'storage', 'network', 'profile'],
|
||||||
help='Personality or type of host [REQUIRED]')
|
help='Personality or type of host [REQUIRED]')
|
||||||
@utils.arg('-s', '--subfunctions',
|
@utils.arg('-s', '--subfunctions',
|
||||||
metavar='<subfunctions>',
|
metavar='<subfunctions>',
|
||||||
|
@ -20,24 +20,24 @@ CREATION_ATTRIBUTES = ['ihost_uuid', 'inode_uuid', 'cpu', 'core', 'thread',
|
|||||||
PLATFORM_CPU_TYPE = "Platform"
|
PLATFORM_CPU_TYPE = "Platform"
|
||||||
VSWITCH_CPU_TYPE = "Vswitch"
|
VSWITCH_CPU_TYPE = "Vswitch"
|
||||||
SHARED_CPU_TYPE = "Shared"
|
SHARED_CPU_TYPE = "Shared"
|
||||||
VMS_CPU_TYPE = "VMs"
|
APPLICATION_CPU_TYPE = "Applications"
|
||||||
NONE_CPU_TYPE = "None"
|
NONE_CPU_TYPE = "None"
|
||||||
|
|
||||||
CPU_TYPE_LIST = [PLATFORM_CPU_TYPE, VSWITCH_CPU_TYPE,
|
CPU_TYPE_LIST = [PLATFORM_CPU_TYPE, VSWITCH_CPU_TYPE,
|
||||||
SHARED_CPU_TYPE, VMS_CPU_TYPE,
|
SHARED_CPU_TYPE, APPLICATION_CPU_TYPE,
|
||||||
NONE_CPU_TYPE]
|
NONE_CPU_TYPE]
|
||||||
|
|
||||||
|
|
||||||
PLATFORM_CPU_TYPE_FORMAT = _("Platform")
|
PLATFORM_CPU_TYPE_FORMAT = _("Platform")
|
||||||
VSWITCH_CPU_TYPE_FORMAT = _("vSwitch")
|
VSWITCH_CPU_TYPE_FORMAT = _("vSwitch")
|
||||||
SHARED_CPU_TYPE_FORMAT = _("Shared")
|
SHARED_CPU_TYPE_FORMAT = _("Shared")
|
||||||
VMS_CPU_TYPE_FORMAT = _("VMs")
|
APPLICATION_CPU_TYPE_FORMAT = _("Applications")
|
||||||
NONE_CPU_TYPE_FORMAT = _("None")
|
NONE_CPU_TYPE_FORMAT = _("None")
|
||||||
|
|
||||||
CPU_TYPE_FORMATS = {PLATFORM_CPU_TYPE: PLATFORM_CPU_TYPE_FORMAT,
|
CPU_TYPE_FORMATS = {PLATFORM_CPU_TYPE: PLATFORM_CPU_TYPE_FORMAT,
|
||||||
VSWITCH_CPU_TYPE: VSWITCH_CPU_TYPE_FORMAT,
|
VSWITCH_CPU_TYPE: VSWITCH_CPU_TYPE_FORMAT,
|
||||||
SHARED_CPU_TYPE: SHARED_CPU_TYPE_FORMAT,
|
SHARED_CPU_TYPE: SHARED_CPU_TYPE_FORMAT,
|
||||||
VMS_CPU_TYPE: VMS_CPU_TYPE_FORMAT,
|
APPLICATION_CPU_TYPE: APPLICATION_CPU_TYPE_FORMAT,
|
||||||
NONE_CPU_TYPE: NONE_CPU_TYPE_FORMAT}
|
NONE_CPU_TYPE: NONE_CPU_TYPE_FORMAT}
|
||||||
|
|
||||||
|
|
||||||
@ -106,19 +106,19 @@ def check_core_functions(personality, icpus):
|
|||||||
platform_cores += 1
|
platform_cores += 1
|
||||||
elif allocated_function == VSWITCH_CPU_TYPE:
|
elif allocated_function == VSWITCH_CPU_TYPE:
|
||||||
vswitch_cores += 1
|
vswitch_cores += 1
|
||||||
elif allocated_function == VMS_CPU_TYPE:
|
elif allocated_function == APPLICATION_CPU_TYPE:
|
||||||
vm_cores += 1
|
vm_cores += 1
|
||||||
|
|
||||||
error_string = ""
|
error_string = ""
|
||||||
if platform_cores == 0:
|
if platform_cores == 0:
|
||||||
error_string = ("There must be at least one core for %s." %
|
error_string = ("There must be at least one core for %s." %
|
||||||
PLATFORM_CPU_TYPE_FORMAT)
|
PLATFORM_CPU_TYPE_FORMAT)
|
||||||
elif personality == 'compute' and vswitch_cores == 0:
|
elif personality == 'worker' and vswitch_cores == 0:
|
||||||
error_string = ("There must be at least one core for %s." %
|
error_string = ("There must be at least one core for %s." %
|
||||||
VSWITCH_CPU_TYPE_FORMAT)
|
VSWITCH_CPU_TYPE_FORMAT)
|
||||||
elif personality == 'compute' and vm_cores == 0:
|
elif personality == 'worker' and vm_cores == 0:
|
||||||
error_string = ("There must be at least one core for %s." %
|
error_string = ("There must be at least one core for %s." %
|
||||||
VMS_CPU_TYPE_FORMAT)
|
APPLICATION_CPU_TYPE_FORMAT)
|
||||||
return error_string
|
return error_string
|
||||||
|
|
||||||
|
|
||||||
@ -191,7 +191,7 @@ def restructure_host_cpu_data(host):
|
|||||||
cpufunction.socket_cores_number[s] = number_of_cores[f][s]
|
cpufunction.socket_cores_number[s] = number_of_cores[f][s]
|
||||||
else:
|
else:
|
||||||
if (f == PLATFORM_CPU_TYPE or (hasattr(host, 'subfunctions')
|
if (f == PLATFORM_CPU_TYPE or (hasattr(host, 'subfunctions')
|
||||||
and 'compute' in host.subfunctions)):
|
and 'worker' in host.subfunctions)):
|
||||||
if f != NONE_CPU_TYPE:
|
if f != NONE_CPU_TYPE:
|
||||||
host.core_assignment.append(cpufunction)
|
host.core_assignment.append(cpufunction)
|
||||||
for s in range(0, len(host.nodes)):
|
for s in range(0, len(host.nodes)):
|
||||||
|
@ -39,11 +39,11 @@ def _print_imemory_show(imemory):
|
|||||||
'vSwitch Huge Pages: Size (MiB)',
|
'vSwitch Huge Pages: Size (MiB)',
|
||||||
' Total',
|
' Total',
|
||||||
' Available',
|
' Available',
|
||||||
'VM Pages (4K): Total',
|
'Application Pages (4K): Total',
|
||||||
'VM Huge Pages (2M): Total',
|
'Application Huge Pages (2M): Total',
|
||||||
' Total Pending',
|
' Total Pending',
|
||||||
' Available',
|
' Available',
|
||||||
'VM Huge Pages (1G): Total',
|
'Application Huge Pages (1G): Total',
|
||||||
' Total Pending',
|
' Total Pending',
|
||||||
' Available',
|
' Available',
|
||||||
'uuid', 'ihost_uuid', 'inode_uuid',
|
'uuid', 'ihost_uuid', 'inode_uuid',
|
||||||
@ -157,7 +157,7 @@ def do_host_memory_list(cc, args):
|
|||||||
metavar='<1G hugepages number>',
|
metavar='<1G hugepages number>',
|
||||||
help='The number of 1G vm huge pages for the numa node')
|
help='The number of 1G vm huge pages for the numa node')
|
||||||
def do_host_memory_modify(cc, args):
|
def do_host_memory_modify(cc, args):
|
||||||
"""Modify platform reserved and/or libvirt vm huge page memory attributes for compute nodes."""
|
"""Modify platform reserved and/or application huge page memory attributes for worker nodes."""
|
||||||
|
|
||||||
rwfields = ['platform_reserved_mib',
|
rwfields = ['platform_reserved_mib',
|
||||||
'vm_hugepages_nr_2M_pending',
|
'vm_hugepages_nr_2M_pending',
|
||||||
|
@ -177,7 +177,7 @@ def get_cpuprofile_data(cc, iprofile):
|
|||||||
iprofile.platform_cores = get_core_list_str(iprofile, icpu_utils.PLATFORM_CPU_TYPE)
|
iprofile.platform_cores = get_core_list_str(iprofile, icpu_utils.PLATFORM_CPU_TYPE)
|
||||||
iprofile.vswitch_cores = get_core_list_str(iprofile, icpu_utils.VSWITCH_CPU_TYPE)
|
iprofile.vswitch_cores = get_core_list_str(iprofile, icpu_utils.VSWITCH_CPU_TYPE)
|
||||||
iprofile.shared_cores = get_core_list_str(iprofile, icpu_utils.SHARED_CPU_TYPE)
|
iprofile.shared_cores = get_core_list_str(iprofile, icpu_utils.SHARED_CPU_TYPE)
|
||||||
iprofile.vms_cores = get_core_list_str(iprofile, icpu_utils.VMS_CPU_TYPE)
|
iprofile.vms_cores = get_core_list_str(iprofile, icpu_utils.APPLICATION_CPU_TYPE)
|
||||||
|
|
||||||
|
|
||||||
def get_core_list_str(iprofile, function):
|
def get_core_list_str(iprofile, function):
|
||||||
@ -204,7 +204,7 @@ def do_cpuprofile_list(cc, args):
|
|||||||
profile.shared_cores = get_core_list_str(profile,
|
profile.shared_cores = get_core_list_str(profile,
|
||||||
icpu_utils.SHARED_CPU_TYPE)
|
icpu_utils.SHARED_CPU_TYPE)
|
||||||
profile.vms_cores = get_core_list_str(profile,
|
profile.vms_cores = get_core_list_str(profile,
|
||||||
icpu_utils.VMS_CPU_TYPE)
|
icpu_utils.APPLICATION_CPU_TYPE)
|
||||||
|
|
||||||
field_labels = ['uuid', 'name',
|
field_labels = ['uuid', 'name',
|
||||||
'processors', 'phy cores per proc', 'hyperthreading',
|
'processors', 'phy cores per proc', 'hyperthreading',
|
||||||
|
@ -94,7 +94,7 @@ def do_host_device_list(cc, args):
|
|||||||
metavar='<enabled status>',
|
metavar='<enabled status>',
|
||||||
help='The enabled status of the device')
|
help='The enabled status of the device')
|
||||||
def do_host_device_modify(cc, args):
|
def do_host_device_modify(cc, args):
|
||||||
"""Modify device availability for compute nodes."""
|
"""Modify device availability for worker nodes."""
|
||||||
|
|
||||||
rwfields = ['enabled',
|
rwfields = ['enabled',
|
||||||
'name']
|
'name']
|
||||||
|
@ -181,9 +181,9 @@ class AgentManager(service.PeriodicService):
|
|||||||
def _update_interface_irq_affinity(self, interface_list):
|
def _update_interface_irq_affinity(self, interface_list):
|
||||||
cpus = {}
|
cpus = {}
|
||||||
platform_cpulist = '0'
|
platform_cpulist = '0'
|
||||||
with open('/etc/nova/compute_reserved.conf', 'r') as infile:
|
with open('/etc/platform/worker_reserved.conf', 'r') as infile:
|
||||||
for line in infile:
|
for line in infile:
|
||||||
if "COMPUTE_PLATFORM_CORES" in line:
|
if "WORKER_PLATFORM_CORES" in line:
|
||||||
val = line.split("=")
|
val = line.split("=")
|
||||||
cores = val[1].strip('\n')[1:-1]
|
cores = val[1].strip('\n')[1:-1]
|
||||||
for n in cores.split():
|
for n in cores.split():
|
||||||
@ -863,7 +863,7 @@ class AgentManager(service.PeriodicService):
|
|||||||
LOG.exception("Sysinv Agent exception updating ilvg conductor.")
|
LOG.exception("Sysinv Agent exception updating ilvg conductor.")
|
||||||
pass
|
pass
|
||||||
|
|
||||||
if constants.COMPUTE in self.subfunctions_list_get():
|
if constants.WORKER in self.subfunctions_list_get():
|
||||||
platform_interfaces = []
|
platform_interfaces = []
|
||||||
# retrieve the mgmt and infra interfaces and associated numa nodes
|
# retrieve the mgmt and infra interfaces and associated numa nodes
|
||||||
try:
|
try:
|
||||||
@ -932,8 +932,8 @@ class AgentManager(service.PeriodicService):
|
|||||||
return: Bool whether subfunctions configuration is completed.
|
return: Bool whether subfunctions configuration is completed.
|
||||||
"""
|
"""
|
||||||
if (constants.CONTROLLER in subfunctions_list and
|
if (constants.CONTROLLER in subfunctions_list and
|
||||||
constants.COMPUTE in subfunctions_list):
|
constants.WORKER in subfunctions_list):
|
||||||
if not os.path.exists(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE):
|
if not os.path.exists(tsc.INITIAL_WORKER_CONFIG_COMPLETE):
|
||||||
self._subfunctions_configured = False
|
self._subfunctions_configured = False
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@ -1011,8 +1011,8 @@ class AgentManager(service.PeriodicService):
|
|||||||
if constants.CONTROLLER in subfunctions:
|
if constants.CONTROLLER in subfunctions:
|
||||||
if not os.path.isfile(tsc.INITIAL_CONTROLLER_CONFIG_COMPLETE):
|
if not os.path.isfile(tsc.INITIAL_CONTROLLER_CONFIG_COMPLETE):
|
||||||
return False
|
return False
|
||||||
if constants.COMPUTE in subfunctions:
|
if constants.WORKER in subfunctions:
|
||||||
if not os.path.isfile(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE):
|
if not os.path.isfile(tsc.INITIAL_WORKER_CONFIG_COMPLETE):
|
||||||
return False
|
return False
|
||||||
if constants.STORAGE in subfunctions:
|
if constants.STORAGE in subfunctions:
|
||||||
if not os.path.isfile(tsc.INITIAL_STORAGE_CONFIG_COMPLETE):
|
if not os.path.isfile(tsc.INITIAL_STORAGE_CONFIG_COMPLETE):
|
||||||
@ -1131,7 +1131,7 @@ class AgentManager(service.PeriodicService):
|
|||||||
|
|
||||||
subfunctions_list = self.subfunctions_list_get()
|
subfunctions_list = self.subfunctions_list_get()
|
||||||
if ((constants.CONTROLLER in subfunctions_list) and
|
if ((constants.CONTROLLER in subfunctions_list) and
|
||||||
(constants.COMPUTE in subfunctions_list)):
|
(constants.WORKER in subfunctions_list)):
|
||||||
if self.subfunctions_configured(subfunctions_list) and \
|
if self.subfunctions_configured(subfunctions_list) and \
|
||||||
not self._wait_for_nova_lvg(icontext, rpcapi, self._ihost_uuid):
|
not self._wait_for_nova_lvg(icontext, rpcapi, self._ihost_uuid):
|
||||||
|
|
||||||
@ -1499,7 +1499,7 @@ class AgentManager(service.PeriodicService):
|
|||||||
|
|
||||||
for subfunction in self.subfunctions_list_get():
|
for subfunction in self.subfunctions_list_get():
|
||||||
# We need to find the subfunction that matches the personality
|
# We need to find the subfunction that matches the personality
|
||||||
# being requested. e.g. in AIO systems if we request a compute
|
# being requested. e.g. in AIO systems if we request a worker
|
||||||
# personality we should apply the manifest with that
|
# personality we should apply the manifest with that
|
||||||
# personality
|
# personality
|
||||||
if subfunction in personalities:
|
if subfunction in personalities:
|
||||||
|
@ -43,10 +43,10 @@ SIZE_1G_MB = int(SIZE_1G_KB / SIZE_KB)
|
|||||||
# Defines the minimum size of memory for a controller node in megabyte units
|
# Defines the minimum size of memory for a controller node in megabyte units
|
||||||
CONTROLLER_MIN_MB = 6000
|
CONTROLLER_MIN_MB = 6000
|
||||||
|
|
||||||
# Defines the minimum size of memory for a compute node in megabyte units
|
# Defines the minimum size of memory for a worker node in megabyte units
|
||||||
COMPUTE_MIN_MB = 1600
|
COMPUTE_MIN_MB = 1600
|
||||||
|
|
||||||
# Defines the minimum size of memory for a secondary compute node in megabyte
|
# Defines the minimum size of memory for a secondary worker node in megabyte
|
||||||
# units
|
# units
|
||||||
COMPUTE_MIN_NON_0_MB = 500
|
COMPUTE_MIN_NON_0_MB = 500
|
||||||
|
|
||||||
@ -300,19 +300,19 @@ class NodeOperator(object):
|
|||||||
|
|
||||||
imemory = []
|
imemory = []
|
||||||
|
|
||||||
initial_compute_config_completed = \
|
initial_worker_config_completed = \
|
||||||
os.path.exists(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE)
|
os.path.exists(tsc.INITIAL_WORKER_CONFIG_COMPLETE)
|
||||||
|
|
||||||
# check if it is initial report before the huge pages are allocated
|
# check if it is initial report before the huge pages are allocated
|
||||||
initial_report = not initial_compute_config_completed
|
initial_report = not initial_worker_config_completed
|
||||||
|
|
||||||
# do not send report if the initial compute config is completed and
|
# do not send report if the initial worker config is completed and
|
||||||
# compute config has not finished, i.e.during subsequent
|
# worker config has not finished, i.e.during subsequent
|
||||||
# reboot before the manifest allocates the huge pages
|
# reboot before the manifest allocates the huge pages
|
||||||
compute_config_completed = \
|
worker_config_completed = \
|
||||||
os.path.exists(tsc.VOLATILE_COMPUTE_CONFIG_COMPLETE)
|
os.path.exists(tsc.VOLATILE_WORKER_CONFIG_COMPLETE)
|
||||||
if (initial_compute_config_completed and
|
if (initial_worker_config_completed and
|
||||||
not compute_config_completed):
|
not worker_config_completed):
|
||||||
return imemory
|
return imemory
|
||||||
|
|
||||||
for node in range(self.num_nodes):
|
for node in range(self.num_nodes):
|
||||||
@ -461,14 +461,14 @@ class NodeOperator(object):
|
|||||||
LOG.error("Failed to execute (%s) OS error (%d)", cmd,
|
LOG.error("Failed to execute (%s) OS error (%d)", cmd,
|
||||||
e.errno)
|
e.errno)
|
||||||
|
|
||||||
# need to multiply total_mb by 1024 to match compute_huge
|
# need to multiply total_mb by 1024
|
||||||
node_total_kb = total_hp_mb * SIZE_KB + free_kb + pss_mb * SIZE_KB
|
node_total_kb = total_hp_mb * SIZE_KB + free_kb + pss_mb * SIZE_KB
|
||||||
|
|
||||||
# Read base memory from compute_reserved.conf
|
# Read base memory from worker_reserved.conf
|
||||||
base_mem_mb = 0
|
base_mem_mb = 0
|
||||||
with open('/etc/nova/compute_reserved.conf', 'r') as infile:
|
with open('/etc/platform/worker_reserved.conf', 'r') as infile:
|
||||||
for line in infile:
|
for line in infile:
|
||||||
if "COMPUTE_BASE_RESERVED" in line:
|
if "WORKER_BASE_RESERVED" in line:
|
||||||
val = line.split("=")
|
val = line.split("=")
|
||||||
base_reserves = val[1].strip('\n')[1:-1]
|
base_reserves = val[1].strip('\n')[1:-1]
|
||||||
for reserve in base_reserves.split():
|
for reserve in base_reserves.split():
|
||||||
@ -585,19 +585,13 @@ class NodeOperator(object):
|
|||||||
return imemory
|
return imemory
|
||||||
|
|
||||||
def inodes_get_imemory(self):
|
def inodes_get_imemory(self):
|
||||||
'''Enumerate logical memory topology based on:
|
'''Collect logical memory topology
|
||||||
if CONF.compute_hugepages:
|
|
||||||
self._inode_get_memory_hugepages()
|
|
||||||
else:
|
|
||||||
self._inode_get_memory_nonhugepages()
|
|
||||||
|
|
||||||
:param self
|
:param self
|
||||||
:returns list of memory nodes and attributes
|
:returns list of memory nodes and attributes
|
||||||
'''
|
'''
|
||||||
imemory = []
|
imemory = []
|
||||||
|
|
||||||
# if CONF.compute_hugepages:
|
if os.path.isfile("/etc/platform/worker_reserved.conf"):
|
||||||
if os.path.isfile("/etc/nova/compute_reserved.conf"):
|
|
||||||
imemory = self._inode_get_memory_hugepages()
|
imemory = self._inode_get_memory_hugepages()
|
||||||
else:
|
else:
|
||||||
imemory = self._inode_get_memory_nonhugepages()
|
imemory = self._inode_get_memory_nonhugepages()
|
||||||
|
@ -75,7 +75,7 @@ class AgentAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
|
|||||||
:returns: none ... uses asynchronous cast().
|
:returns: none ... uses asynchronous cast().
|
||||||
"""
|
"""
|
||||||
# fanout / broadcast message to all inventory agents
|
# fanout / broadcast message to all inventory agents
|
||||||
# to change systemname on all nodes ... standby controller and compute nodes
|
# to change systemname on all nodes ... standby controller and worker nodes
|
||||||
LOG.debug("AgentApi.configure_isystemname: fanout_cast: sending systemname to agent")
|
LOG.debug("AgentApi.configure_isystemname: fanout_cast: sending systemname to agent")
|
||||||
retval = self.fanout_cast(context, self.make_msg('configure_isystemname',
|
retval = self.fanout_cast(context, self.make_msg('configure_isystemname',
|
||||||
systemname=systemname))
|
systemname=systemname))
|
||||||
|
@ -481,8 +481,8 @@ def _check_host(ihost):
|
|||||||
elif ihost.administrative != constants.ADMIN_LOCKED and not \
|
elif ihost.administrative != constants.ADMIN_LOCKED and not \
|
||||||
utils.is_host_simplex_controller(ihost):
|
utils.is_host_simplex_controller(ihost):
|
||||||
raise wsme.exc.ClientSideError(_('Host must be locked.'))
|
raise wsme.exc.ClientSideError(_('Host must be locked.'))
|
||||||
if constants.COMPUTE not in ihost.subfunctions:
|
if constants.WORKER not in ihost.subfunctions:
|
||||||
raise wsme.exc.ClientSideError(_('Can only modify compute node cores.'))
|
raise wsme.exc.ClientSideError(_('Can only modify worker node cores.'))
|
||||||
|
|
||||||
|
|
||||||
def _update_vswitch_cpu_counts(host, cpu, counts, capabilities=None):
|
def _update_vswitch_cpu_counts(host, cpu, counts, capabilities=None):
|
||||||
@ -511,7 +511,7 @@ def _update_vswitch_cpu_counts(host, cpu, counts, capabilities=None):
|
|||||||
count *= 2
|
count *= 2
|
||||||
counts[s][constants.VSWITCH_FUNCTION] = count
|
counts[s][constants.VSWITCH_FUNCTION] = count
|
||||||
# let the remaining values grow/shrink dynamically
|
# let the remaining values grow/shrink dynamically
|
||||||
counts[s][constants.VM_FUNCTION] = 0
|
counts[s][constants.APPLICATION_FUNCTION] = 0
|
||||||
counts[s][constants.NO_FUNCTION] = 0
|
counts[s][constants.NO_FUNCTION] = 0
|
||||||
return counts
|
return counts
|
||||||
|
|
||||||
@ -543,7 +543,7 @@ def _update_shared_cpu_counts(host, cpu, counts, capabilities=None):
|
|||||||
count *= 2
|
count *= 2
|
||||||
counts[s][constants.SHARED_FUNCTION] = count
|
counts[s][constants.SHARED_FUNCTION] = count
|
||||||
# let the remaining values grow/shrink dynamically
|
# let the remaining values grow/shrink dynamically
|
||||||
counts[s][constants.VM_FUNCTION] = 0
|
counts[s][constants.APPLICATION_FUNCTION] = 0
|
||||||
counts[s][constants.NO_FUNCTION] = 0
|
counts[s][constants.NO_FUNCTION] = 0
|
||||||
return counts
|
return counts
|
||||||
|
|
||||||
@ -573,7 +573,7 @@ def _update_platform_cpu_counts(host, cpu, counts, capabilities=None):
|
|||||||
count *= 2
|
count *= 2
|
||||||
counts[s][constants.PLATFORM_FUNCTION] = count
|
counts[s][constants.PLATFORM_FUNCTION] = count
|
||||||
# let the remaining values grow/shrink dynamically
|
# let the remaining values grow/shrink dynamically
|
||||||
counts[s][constants.VM_FUNCTION] = 0
|
counts[s][constants.APPLICATION_FUNCTION] = 0
|
||||||
counts[s][constants.NO_FUNCTION] = 0
|
counts[s][constants.NO_FUNCTION] = 0
|
||||||
return counts
|
return counts
|
||||||
|
|
||||||
|
@ -15,7 +15,7 @@ CORE_FUNCTIONS = [
|
|||||||
constants.PLATFORM_FUNCTION,
|
constants.PLATFORM_FUNCTION,
|
||||||
constants.VSWITCH_FUNCTION,
|
constants.VSWITCH_FUNCTION,
|
||||||
constants.SHARED_FUNCTION,
|
constants.SHARED_FUNCTION,
|
||||||
constants.VM_FUNCTION,
|
constants.APPLICATION_FUNCTION,
|
||||||
constants.NO_FUNCTION
|
constants.NO_FUNCTION
|
||||||
]
|
]
|
||||||
|
|
||||||
@ -64,7 +64,7 @@ class CpuProfile(object):
|
|||||||
cur_processor.vswitch += 1
|
cur_processor.vswitch += 1
|
||||||
elif cpu.allocated_function == constants.SHARED_FUNCTION:
|
elif cpu.allocated_function == constants.SHARED_FUNCTION:
|
||||||
cur_processor.shared += 1
|
cur_processor.shared += 1
|
||||||
elif cpu.allocated_function == constants.VM_FUNCTION:
|
elif cpu.allocated_function == constants.APPLICATION_FUNCTION:
|
||||||
cur_processor.vms += 1
|
cur_processor.vms += 1
|
||||||
|
|
||||||
self.number_of_cpu = len(self.processors)
|
self.number_of_cpu = len(self.processors)
|
||||||
@ -108,12 +108,12 @@ class HostCpuProfile(CpuProfile):
|
|||||||
if platform_cores == 0:
|
if platform_cores == 0:
|
||||||
error_string = "There must be at least one core for %s." % \
|
error_string = "There must be at least one core for %s." % \
|
||||||
constants.PLATFORM_FUNCTION
|
constants.PLATFORM_FUNCTION
|
||||||
elif constants.COMPUTE in self.subfunctions and vswitch_cores == 0:
|
elif constants.WORKER in self.subfunctions and vswitch_cores == 0:
|
||||||
error_string = "There must be at least one core for %s." % \
|
error_string = "There must be at least one core for %s." % \
|
||||||
constants.VSWITCH_FUNCTION
|
constants.VSWITCH_FUNCTION
|
||||||
elif constants.COMPUTE in self.subfunctions and vm_cores == 0:
|
elif constants.WORKER in self.subfunctions and vm_cores == 0:
|
||||||
error_string = "There must be at least one core for %s." % \
|
error_string = "There must be at least one core for %s." % \
|
||||||
constants.VM_FUNCTION
|
constants.APPLICATION_FUNCTION
|
||||||
return error_string
|
return error_string
|
||||||
|
|
||||||
|
|
||||||
@ -140,12 +140,12 @@ def check_profile_core_functions(personality, profile):
|
|||||||
if platform_cores == 0:
|
if platform_cores == 0:
|
||||||
error_string = "There must be at least one core for %s." % \
|
error_string = "There must be at least one core for %s." % \
|
||||||
constants.PLATFORM_FUNCTION
|
constants.PLATFORM_FUNCTION
|
||||||
elif constants.COMPUTE in personality and vswitch_cores == 0:
|
elif constants.WORKER in personality and vswitch_cores == 0:
|
||||||
error_string = "There must be at least one core for %s." % \
|
error_string = "There must be at least one core for %s." % \
|
||||||
constants.VSWITCH_FUNCTION
|
constants.VSWITCH_FUNCTION
|
||||||
elif constants.COMPUTE in personality and vm_cores == 0:
|
elif constants.WORKER in personality and vm_cores == 0:
|
||||||
error_string = "There must be at least one core for %s." % \
|
error_string = "There must be at least one core for %s." % \
|
||||||
constants.VM_FUNCTION
|
constants.APPLICATION_FUNCTION
|
||||||
return error_string
|
return error_string
|
||||||
|
|
||||||
|
|
||||||
@ -162,26 +162,26 @@ def check_core_functions(personality, icpus):
|
|||||||
vswitch_cores += 1
|
vswitch_cores += 1
|
||||||
elif allocated_function == constants.SHARED_FUNCTION:
|
elif allocated_function == constants.SHARED_FUNCTION:
|
||||||
shared_cores += 1
|
shared_cores += 1
|
||||||
elif allocated_function == constants.VM_FUNCTION:
|
elif allocated_function == constants.APPLICATION_FUNCTION:
|
||||||
vm_cores += 1
|
vm_cores += 1
|
||||||
|
|
||||||
error_string = ""
|
error_string = ""
|
||||||
if platform_cores == 0:
|
if platform_cores == 0:
|
||||||
error_string = "There must be at least one core for %s." % \
|
error_string = "There must be at least one core for %s." % \
|
||||||
constants.PLATFORM_FUNCTION
|
constants.PLATFORM_FUNCTION
|
||||||
elif constants.COMPUTE in personality and vswitch_cores == 0:
|
elif constants.WORKER in personality and vswitch_cores == 0:
|
||||||
error_string = "There must be at least one core for %s." % \
|
error_string = "There must be at least one core for %s." % \
|
||||||
constants.VSWITCH_FUNCTION
|
constants.VSWITCH_FUNCTION
|
||||||
elif constants.COMPUTE in personality and vm_cores == 0:
|
elif constants.WORKER in personality and vm_cores == 0:
|
||||||
error_string = "There must be at least one core for %s." % \
|
error_string = "There must be at least one core for %s." % \
|
||||||
constants.VM_FUNCTION
|
constants.APPLICATION_FUNCTION
|
||||||
return error_string
|
return error_string
|
||||||
|
|
||||||
|
|
||||||
def get_default_function(host):
|
def get_default_function(host):
|
||||||
"""Return the default function to be assigned to cpus on this host"""
|
"""Return the default function to be assigned to cpus on this host"""
|
||||||
if constants.COMPUTE in host.subfunctions:
|
if constants.WORKER in host.subfunctions:
|
||||||
return constants.VM_FUNCTION
|
return constants.APPLICATION_FUNCTION
|
||||||
return constants.PLATFORM_FUNCTION
|
return constants.PLATFORM_FUNCTION
|
||||||
|
|
||||||
|
|
||||||
@ -265,14 +265,14 @@ def check_core_allocations(host, cpu_counts, func):
|
|||||||
total_shared_cores += shared_cores
|
total_shared_cores += shared_cores
|
||||||
if func.lower() == constants.PLATFORM_FUNCTION.lower():
|
if func.lower() == constants.PLATFORM_FUNCTION.lower():
|
||||||
if ((constants.CONTROLLER in host.subfunctions) and
|
if ((constants.CONTROLLER in host.subfunctions) and
|
||||||
(constants.COMPUTE in host.subfunctions)):
|
(constants.WORKER in host.subfunctions)):
|
||||||
if total_platform_cores < 2:
|
if total_platform_cores < 2:
|
||||||
return "%s must have at least two cores." % \
|
return "%s must have at least two cores." % \
|
||||||
constants.PLATFORM_FUNCTION
|
constants.PLATFORM_FUNCTION
|
||||||
elif total_platform_cores == 0:
|
elif total_platform_cores == 0:
|
||||||
return "%s must have at least one core." % \
|
return "%s must have at least one core." % \
|
||||||
constants.PLATFORM_FUNCTION
|
constants.PLATFORM_FUNCTION
|
||||||
if constants.COMPUTE in (host.subfunctions or host.personality):
|
if constants.WORKER in (host.subfunctions or host.personality):
|
||||||
if func.lower() == constants.VSWITCH_FUNCTION.lower():
|
if func.lower() == constants.VSWITCH_FUNCTION.lower():
|
||||||
if host.hyperthreading:
|
if host.hyperthreading:
|
||||||
total_physical_cores = total_vswitch_cores / 2
|
total_physical_cores = total_vswitch_cores / 2
|
||||||
@ -287,7 +287,7 @@ def check_core_allocations(host, cpu_counts, func):
|
|||||||
reserved_for_vms = len(host.cpus) - total_platform_cores - total_vswitch_cores
|
reserved_for_vms = len(host.cpus) - total_platform_cores - total_vswitch_cores
|
||||||
if reserved_for_vms <= 0:
|
if reserved_for_vms <= 0:
|
||||||
return "There must be at least one unused core for %s." % \
|
return "There must be at least one unused core for %s." % \
|
||||||
constants. VM_FUNCTION
|
constants.APPLICATION_FUNCTION
|
||||||
else:
|
else:
|
||||||
if total_platform_cores != len(host.cpus):
|
if total_platform_cores != len(host.cpus):
|
||||||
return "All logical cores must be reserved for platform use"
|
return "All logical cores must be reserved for platform use"
|
||||||
|
@ -403,12 +403,12 @@ class DiskController(rest.RestController):
|
|||||||
|
|
||||||
def _semantic_checks_format(idisk):
|
def _semantic_checks_format(idisk):
|
||||||
ihost_uuid = idisk.get('ihost_uuid')
|
ihost_uuid = idisk.get('ihost_uuid')
|
||||||
# Check the disk belongs to a controller or compute host.
|
# Check the disk belongs to a controller or worker host.
|
||||||
ihost = pecan.request.dbapi.ihost_get(ihost_uuid)
|
ihost = pecan.request.dbapi.ihost_get(ihost_uuid)
|
||||||
if ihost.personality not in [constants.CONTROLLER, constants.COMPUTE]:
|
if ihost.personality not in [constants.CONTROLLER, constants.WORKER]:
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("ERROR: Host personality must be a one of %s, %s]") %
|
_("ERROR: Host personality must be a one of %s, %s]") %
|
||||||
(constants.CONTROLLER, constants.COMPUTE))
|
(constants.CONTROLLER, constants.WORKER))
|
||||||
|
|
||||||
# Check disk is not the rootfs disk.
|
# Check disk is not the rootfs disk.
|
||||||
capabilities = idisk['capabilities']
|
capabilities = idisk['capabilities']
|
||||||
|
@ -243,7 +243,7 @@ class HostStatesController(rest.RestController):
|
|||||||
rank = 1
|
rank = 1
|
||||||
elif function.lower() == constants.VSWITCH_FUNCTION.lower():
|
elif function.lower() == constants.VSWITCH_FUNCTION.lower():
|
||||||
rank = 2
|
rank = 2
|
||||||
elif function.lower() == constants.VM_FUNCTION.lower():
|
elif function.lower() == constants.APPLICATION_FUNCTION.lower():
|
||||||
rank = 3
|
rank = 3
|
||||||
else:
|
else:
|
||||||
rank = 4
|
rank = 4
|
||||||
@ -535,7 +535,7 @@ class Host(base.APIBase):
|
|||||||
"Represent install state extra information if there is any"
|
"Represent install state extra information if there is any"
|
||||||
|
|
||||||
iscsi_initiator_name = wtypes.text
|
iscsi_initiator_name = wtypes.text
|
||||||
"The iscsi initiator name (only used for compute hosts)"
|
"The iscsi initiator name (only used for worker hosts)"
|
||||||
|
|
||||||
def __init__(self, **kwargs):
|
def __init__(self, **kwargs):
|
||||||
self.fields = objects.host.fields.keys()
|
self.fields = objects.host.fields.keys()
|
||||||
@ -786,10 +786,10 @@ class Host(base.APIBase):
|
|||||||
bookmark=True)
|
bookmark=True)
|
||||||
]
|
]
|
||||||
# Don't expose the vsc_controllers field if we are not configured with
|
# Don't expose the vsc_controllers field if we are not configured with
|
||||||
# the nuage_vrs vswitch or we are not a compute node.
|
# the nuage_vrs vswitch or we are not a worker node.
|
||||||
vswitch_type = utils.get_vswitch_type()
|
vswitch_type = utils.get_vswitch_type()
|
||||||
if (vswitch_type != constants.VSWITCH_TYPE_NUAGE_VRS or
|
if (vswitch_type != constants.VSWITCH_TYPE_NUAGE_VRS or
|
||||||
uhost.personality != constants.COMPUTE):
|
uhost.personality != constants.WORKER):
|
||||||
uhost.vsc_controllers = wtypes.Unset
|
uhost.vsc_controllers = wtypes.Unset
|
||||||
|
|
||||||
uhost.peers = None
|
uhost.peers = None
|
||||||
@ -1273,7 +1273,7 @@ class HostController(rest.RestController):
|
|||||||
ihost_dict.get('personality') not in
|
ihost_dict.get('personality') not in
|
||||||
[constants.STORAGE, constants.CONTROLLER]):
|
[constants.STORAGE, constants.CONTROLLER]):
|
||||||
raise wsme.exc.ClientSideError(_(
|
raise wsme.exc.ClientSideError(_(
|
||||||
"Host-add Rejected: Cannot add a compute host without "
|
"Host-add Rejected: Cannot add a worker host without "
|
||||||
"specifying a mgmt_ip when static address allocation is "
|
"specifying a mgmt_ip when static address allocation is "
|
||||||
"configured."))
|
"configured."))
|
||||||
|
|
||||||
@ -1672,7 +1672,7 @@ class HostController(rest.RestController):
|
|||||||
rank = 0
|
rank = 0
|
||||||
elif host.personality == constants.STORAGE:
|
elif host.personality == constants.STORAGE:
|
||||||
rank = 1
|
rank = 1
|
||||||
elif host.personality == constants.COMPUTE:
|
elif host.personality == constants.WORKER:
|
||||||
rank = 2
|
rank = 2
|
||||||
else:
|
else:
|
||||||
rank = 3
|
rank = 3
|
||||||
@ -2334,7 +2334,7 @@ class HostController(rest.RestController):
|
|||||||
if (ihost.hostname and ihost.personality and
|
if (ihost.hostname and ihost.personality and
|
||||||
ihost.invprovision and
|
ihost.invprovision and
|
||||||
ihost.invprovision == constants.PROVISIONED and
|
ihost.invprovision == constants.PROVISIONED and
|
||||||
(constants.COMPUTE in ihost.subfunctions)):
|
(constants.WORKER in ihost.subfunctions)):
|
||||||
# wait for VIM signal
|
# wait for VIM signal
|
||||||
return
|
return
|
||||||
|
|
||||||
@ -2511,7 +2511,7 @@ class HostController(rest.RestController):
|
|||||||
# If this is a simplex system skip this check, there's no other nodes
|
# If this is a simplex system skip this check, there's no other nodes
|
||||||
if simplex:
|
if simplex:
|
||||||
pass
|
pass
|
||||||
elif rpc_ihost.personality == constants.COMPUTE:
|
elif rpc_ihost.personality == constants.WORKER:
|
||||||
self._check_personality_load(constants.CONTROLLER, new_target_load)
|
self._check_personality_load(constants.CONTROLLER, new_target_load)
|
||||||
self._check_personality_load(constants.STORAGE, new_target_load)
|
self._check_personality_load(constants.STORAGE, new_target_load)
|
||||||
elif rpc_ihost.personality == constants.STORAGE:
|
elif rpc_ihost.personality == constants.STORAGE:
|
||||||
@ -2601,7 +2601,7 @@ class HostController(rest.RestController):
|
|||||||
elif upgrade.state == constants.UPGRADE_ABORTING_ROLLBACK:
|
elif upgrade.state == constants.UPGRADE_ABORTING_ROLLBACK:
|
||||||
if rpc_ihost.hostname == constants.CONTROLLER_0_HOSTNAME:
|
if rpc_ihost.hostname == constants.CONTROLLER_0_HOSTNAME:
|
||||||
# Before we downgrade controller-0 during a rollback/reinstall
|
# Before we downgrade controller-0 during a rollback/reinstall
|
||||||
# we check that all other compute/storage nodes are locked and
|
# we check that all other worker/storage nodes are locked and
|
||||||
# offline. We also disable the storage monitor on controller-1
|
# offline. We also disable the storage monitor on controller-1
|
||||||
# and set a flag on controller-1 to indicate we are in a
|
# and set a flag on controller-1 to indicate we are in a
|
||||||
# rollback. When controller-0 comes up it will check for this
|
# rollback. When controller-0 comes up it will check for this
|
||||||
@ -2620,7 +2620,7 @@ class HostController(rest.RestController):
|
|||||||
else:
|
else:
|
||||||
# Enforce downgrade order
|
# Enforce downgrade order
|
||||||
if rpc_ihost.personality == constants.CONTROLLER:
|
if rpc_ihost.personality == constants.CONTROLLER:
|
||||||
self._check_personality_load(constants.COMPUTE,
|
self._check_personality_load(constants.WORKER,
|
||||||
new_target_load)
|
new_target_load)
|
||||||
self._check_personality_load(constants.STORAGE,
|
self._check_personality_load(constants.STORAGE,
|
||||||
new_target_load)
|
new_target_load)
|
||||||
@ -2628,11 +2628,11 @@ class HostController(rest.RestController):
|
|||||||
self._check_host_load(constants.CONTROLLER_0_HOSTNAME,
|
self._check_host_load(constants.CONTROLLER_0_HOSTNAME,
|
||||||
new_target_load)
|
new_target_load)
|
||||||
elif rpc_ihost.personality == constants.STORAGE:
|
elif rpc_ihost.personality == constants.STORAGE:
|
||||||
self._check_personality_load(constants.COMPUTE,
|
self._check_personality_load(constants.WORKER,
|
||||||
new_target_load)
|
new_target_load)
|
||||||
if rpc_ihost.hostname == constants.STORAGE_0_HOSTNAME:
|
if rpc_ihost.hostname == constants.STORAGE_0_HOSTNAME:
|
||||||
self._check_storage_downgrade(new_target_load)
|
self._check_storage_downgrade(new_target_load)
|
||||||
# else we should be a compute node, no need to check other nodes
|
# else we should be a worker node, no need to check other nodes
|
||||||
|
|
||||||
# Check upgrade state
|
# Check upgrade state
|
||||||
if rpc_ihost.hostname in [constants.CONTROLLER_0_HOSTNAME,
|
if rpc_ihost.hostname in [constants.CONTROLLER_0_HOSTNAME,
|
||||||
@ -2684,12 +2684,12 @@ class HostController(rest.RestController):
|
|||||||
def _semantic_check_rollback(self):
|
def _semantic_check_rollback(self):
|
||||||
hosts = pecan.request.dbapi.ihost_get_list()
|
hosts = pecan.request.dbapi.ihost_get_list()
|
||||||
for host in hosts:
|
for host in hosts:
|
||||||
if host.personality not in [constants.COMPUTE, constants.STORAGE]:
|
if host.personality not in [constants.WORKER, constants.STORAGE]:
|
||||||
continue
|
continue
|
||||||
if host.administrative != constants.ADMIN_LOCKED or \
|
if host.administrative != constants.ADMIN_LOCKED or \
|
||||||
host.availability != constants.AVAILABILITY_OFFLINE:
|
host.availability != constants.AVAILABILITY_OFFLINE:
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("All compute and storage hosts must be locked and "
|
_("All worker and storage hosts must be locked and "
|
||||||
"offline before this operation can proceed"))
|
"offline before this operation can proceed"))
|
||||||
|
|
||||||
def _check_personality_load(self, personality, load):
|
def _check_personality_load(self, personality, load):
|
||||||
@ -2910,7 +2910,7 @@ class HostController(rest.RestController):
|
|||||||
|
|
||||||
def _validate_hostname(self, hostname, personality):
|
def _validate_hostname(self, hostname, personality):
|
||||||
|
|
||||||
if personality and personality == constants.COMPUTE:
|
if personality and personality == constants.WORKER:
|
||||||
# Fix of invalid hostnames
|
# Fix of invalid hostnames
|
||||||
err_tl = 'Name restricted to at most 255 characters.'
|
err_tl = 'Name restricted to at most 255 characters.'
|
||||||
err_ic = 'Name may only contain letters, ' \
|
err_ic = 'Name may only contain letters, ' \
|
||||||
@ -2920,9 +2920,9 @@ class HostController(rest.RestController):
|
|||||||
raise wsme.exc.ClientSideError(_(err_ic))
|
raise wsme.exc.ClientSideError(_(err_ic))
|
||||||
if len(hostname) > 255:
|
if len(hostname) > 255:
|
||||||
raise wsme.exc.ClientSideError(_(err_tl))
|
raise wsme.exc.ClientSideError(_(err_tl))
|
||||||
non_compute_hosts = ([constants.CONTROLLER_0_HOSTNAME,
|
non_worker_hosts = ([constants.CONTROLLER_0_HOSTNAME,
|
||||||
constants.CONTROLLER_1_HOSTNAME])
|
constants.CONTROLLER_1_HOSTNAME])
|
||||||
if (hostname and (hostname in non_compute_hosts) or
|
if (hostname and (hostname in non_worker_hosts) or
|
||||||
hostname.startswith(constants.STORAGE_HOSTNAME)):
|
hostname.startswith(constants.STORAGE_HOSTNAME)):
|
||||||
|
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
@ -2951,8 +2951,8 @@ class HostController(rest.RestController):
|
|||||||
(hostname, personality)))
|
(hostname, personality)))
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _check_compute(patched_ihost, hostupdate=None):
|
def _check_worker(patched_ihost, hostupdate=None):
|
||||||
# Check for valid compute node setup
|
# Check for valid worker node setup
|
||||||
hostname = patched_ihost.get('hostname') or ""
|
hostname = patched_ihost.get('hostname') or ""
|
||||||
|
|
||||||
if not hostname:
|
if not hostname:
|
||||||
@ -2960,12 +2960,12 @@ class HostController(rest.RestController):
|
|||||||
_("Host %s of personality %s, must be provisioned with a hostname."
|
_("Host %s of personality %s, must be provisioned with a hostname."
|
||||||
% (patched_ihost.get('uuid'), patched_ihost.get('personality'))))
|
% (patched_ihost.get('uuid'), patched_ihost.get('personality'))))
|
||||||
|
|
||||||
non_compute_hosts = ([constants.CONTROLLER_0_HOSTNAME,
|
non_worker_hosts = ([constants.CONTROLLER_0_HOSTNAME,
|
||||||
constants.CONTROLLER_1_HOSTNAME])
|
constants.CONTROLLER_1_HOSTNAME])
|
||||||
if (hostname in non_compute_hosts or
|
if (hostname in non_worker_hosts or
|
||||||
hostname.startswith(constants.STORAGE_HOSTNAME)):
|
hostname.startswith(constants.STORAGE_HOSTNAME)):
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("Hostname %s is not allowed for personality 'compute'. "
|
_("Hostname %s is not allowed for personality 'worker'. "
|
||||||
"Please check hostname and personality." % hostname))
|
"Please check hostname and personality." % hostname))
|
||||||
|
|
||||||
def _controller_storage_node_setup(self, patched_ihost, hostupdate=None):
|
def _controller_storage_node_setup(self, patched_ihost, hostupdate=None):
|
||||||
@ -3248,7 +3248,7 @@ class HostController(rest.RestController):
|
|||||||
data_interface_configured = True
|
data_interface_configured = True
|
||||||
|
|
||||||
if not data_interface_configured:
|
if not data_interface_configured:
|
||||||
msg = _("Can not unlock a compute host without data interfaces. "
|
msg = _("Can not unlock a worker host without data interfaces. "
|
||||||
"Add at least one data interface before re-attempting "
|
"Add at least one data interface before re-attempting "
|
||||||
"this command.")
|
"this command.")
|
||||||
raise wsme.exc.ClientSideError(msg)
|
raise wsme.exc.ClientSideError(msg)
|
||||||
@ -3275,7 +3275,7 @@ class HostController(rest.RestController):
|
|||||||
address_count += len(addresses)
|
address_count += len(addresses)
|
||||||
|
|
||||||
if address_count > 1:
|
if address_count > 1:
|
||||||
msg = _("Can not unlock a compute host with multiple data "
|
msg = _("Can not unlock a worker host with multiple data "
|
||||||
"addresses while in SDN mode.")
|
"addresses while in SDN mode.")
|
||||||
raise wsme.exc.ClientSideError(msg)
|
raise wsme.exc.ClientSideError(msg)
|
||||||
|
|
||||||
@ -3292,7 +3292,7 @@ class HostController(rest.RestController):
|
|||||||
# Check whether the vsc_controllers have been configured
|
# Check whether the vsc_controllers have been configured
|
||||||
if not ihost['vsc_controllers']:
|
if not ihost['vsc_controllers']:
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("Can not unlock compute host %s without "
|
_("Can not unlock worker host %s without "
|
||||||
"vsc_controllers. Action: Configure "
|
"vsc_controllers. Action: Configure "
|
||||||
"vsc_controllers for this host prior to unlock."
|
"vsc_controllers for this host prior to unlock."
|
||||||
% ihost['hostname']))
|
% ihost['hostname']))
|
||||||
@ -3316,7 +3316,7 @@ class HostController(rest.RestController):
|
|||||||
self.routes._check_reachable_gateway(
|
self.routes._check_reachable_gateway(
|
||||||
route['interface_id'], route)
|
route['interface_id'], route)
|
||||||
except exception.RouteGatewayNotReachable:
|
except exception.RouteGatewayNotReachable:
|
||||||
msg = _("Can not unlock a compute host with routes that are "
|
msg = _("Can not unlock a worker host with routes that are "
|
||||||
"not reachable via a local IP address. Add an IP "
|
"not reachable via a local IP address. Add an IP "
|
||||||
"address in the same subnet as each route gateway "
|
"address in the same subnet as each route gateway "
|
||||||
"address before re-attempting this command.")
|
"address before re-attempting this command.")
|
||||||
@ -3364,7 +3364,7 @@ class HostController(rest.RestController):
|
|||||||
section=section)
|
section=section)
|
||||||
neutron_parameters = neutron_parameters + parm_list
|
neutron_parameters = neutron_parameters + parm_list
|
||||||
except NoResultFound:
|
except NoResultFound:
|
||||||
msg = _("Cannot unock a compute host without %s->%s "
|
msg = _("Cannot unock a worker host without %s->%s "
|
||||||
",SDN service parameters being configured. "
|
",SDN service parameters being configured. "
|
||||||
"Add appropriate service parameters before "
|
"Add appropriate service parameters before "
|
||||||
"re-attempting this command." %
|
"re-attempting this command." %
|
||||||
@ -3389,7 +3389,7 @@ class HostController(rest.RestController):
|
|||||||
found = True
|
found = True
|
||||||
break
|
break
|
||||||
if not found:
|
if not found:
|
||||||
msg = _("Cannot unlock a compute host without "
|
msg = _("Cannot unlock a worker host without "
|
||||||
"\"%s\" SDN service parameter configured. "
|
"\"%s\" SDN service parameter configured. "
|
||||||
"Add service parameter before re-attempting "
|
"Add service parameter before re-attempting "
|
||||||
"this command." % sdn_param)
|
"this command." % sdn_param)
|
||||||
@ -3598,7 +3598,7 @@ class HostController(rest.RestController):
|
|||||||
(hostupdate.displayid, action))
|
(hostupdate.displayid, action))
|
||||||
|
|
||||||
# Semantic Check: Auto-Provision: Reset, Reboot or Power-On case
|
# Semantic Check: Auto-Provision: Reset, Reboot or Power-On case
|
||||||
if ((cutils.host_has_function(ihost, constants.COMPUTE)) and
|
if ((cutils.host_has_function(ihost, constants.WORKER)) and
|
||||||
(ihost['administrative'] == constants.ADMIN_LOCKED) and
|
(ihost['administrative'] == constants.ADMIN_LOCKED) and
|
||||||
((patched_ihost['action'] == constants.RESET_ACTION) or
|
((patched_ihost['action'] == constants.RESET_ACTION) or
|
||||||
(patched_ihost['action'] == constants.REBOOT_ACTION) or
|
(patched_ihost['action'] == constants.REBOOT_ACTION) or
|
||||||
@ -3756,10 +3756,10 @@ class HostController(rest.RestController):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
# Don't expose the vsc_controllers field if we are not configured with
|
# Don't expose the vsc_controllers field if we are not configured with
|
||||||
# the nuage_vrs vswitch or we are not a compute node.
|
# the nuage_vrs vswitch or we are not a worker node.
|
||||||
vswitch_type = utils.get_vswitch_type()
|
vswitch_type = utils.get_vswitch_type()
|
||||||
if (vswitch_type != constants.VSWITCH_TYPE_NUAGE_VRS or
|
if (vswitch_type != constants.VSWITCH_TYPE_NUAGE_VRS or
|
||||||
ihost['personality'] != constants.COMPUTE):
|
ihost['personality'] != constants.WORKER):
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("The vsc_controllers property is not applicable to this "
|
_("The vsc_controllers property is not applicable to this "
|
||||||
"host."))
|
"host."))
|
||||||
@ -4037,8 +4037,8 @@ class HostController(rest.RestController):
|
|||||||
def _semantic_check_nova_local_storage(ihost_uuid, personality):
|
def _semantic_check_nova_local_storage(ihost_uuid, personality):
|
||||||
"""
|
"""
|
||||||
Perform semantic checking for nova local storage
|
Perform semantic checking for nova local storage
|
||||||
:param ihost_uuid: uuid of host with compute functionality
|
:param ihost_uuid: uuid of host with worker functionality
|
||||||
:param personality: personality of host with compute functionality
|
:param personality: personality of host with worker functionality
|
||||||
"""
|
"""
|
||||||
|
|
||||||
# query volume groups
|
# query volume groups
|
||||||
@ -4055,7 +4055,7 @@ class HostController(rest.RestController):
|
|||||||
if nova_local_storage_lvg:
|
if nova_local_storage_lvg:
|
||||||
if nova_local_storage_lvg.vg_state == constants.LVG_DEL:
|
if nova_local_storage_lvg.vg_state == constants.LVG_DEL:
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("A host with compute functionality requires a "
|
_("A host with worker functionality requires a "
|
||||||
"nova-local volume group prior to being enabled. It is "
|
"nova-local volume group prior to being enabled. It is "
|
||||||
"currently set to be removed on unlock. Please update "
|
"currently set to be removed on unlock. Please update "
|
||||||
"the storage settings for the host."))
|
"the storage settings for the host."))
|
||||||
@ -4073,7 +4073,7 @@ class HostController(rest.RestController):
|
|||||||
|
|
||||||
if not lvg_has_pvs:
|
if not lvg_has_pvs:
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("A host with compute functionality requires a "
|
_("A host with worker functionality requires a "
|
||||||
"nova-local volume group prior to being enabled."
|
"nova-local volume group prior to being enabled."
|
||||||
"The nova-local volume group does not contain any "
|
"The nova-local volume group does not contain any "
|
||||||
"physical volumes in the adding or provisioned "
|
"physical volumes in the adding or provisioned "
|
||||||
@ -4087,18 +4087,18 @@ class HostController(rest.RestController):
|
|||||||
constants.LVG_NOVA_BACKING_IMAGE,
|
constants.LVG_NOVA_BACKING_IMAGE,
|
||||||
constants.LVG_NOVA_BACKING_REMOTE]:
|
constants.LVG_NOVA_BACKING_REMOTE]:
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("A host with compute functionality and a "
|
_("A host with worker functionality and a "
|
||||||
"nova-local volume group requires that a valid "
|
"nova-local volume group requires that a valid "
|
||||||
"instance backing is configured. "))
|
"instance backing is configured. "))
|
||||||
else:
|
else:
|
||||||
# This method is only called with hosts that have a compute
|
# This method is only called with hosts that have a worker
|
||||||
# subfunction and is locked or if subfunction_config action is
|
# subfunction and is locked or if subfunction_config action is
|
||||||
# being called. Without a nova-local volume group, prevent
|
# being called. Without a nova-local volume group, prevent
|
||||||
# unlocking.
|
# unlocking.
|
||||||
if personality == constants.CONTROLLER:
|
if personality == constants.CONTROLLER:
|
||||||
host_description = 'controller with compute functionality'
|
host_description = 'controller with worker functionality'
|
||||||
else:
|
else:
|
||||||
host_description = 'compute'
|
host_description = 'worker'
|
||||||
|
|
||||||
msg = _('A %s requires a nova-local volume group prior to being '
|
msg = _('A %s requires a nova-local volume group prior to being '
|
||||||
'enabled. Please update the storage settings for the '
|
'enabled. Please update the storage settings for the '
|
||||||
@ -4109,7 +4109,7 @@ class HostController(rest.RestController):
|
|||||||
@staticmethod
|
@staticmethod
|
||||||
def _semantic_check_restore_complete(ihost):
|
def _semantic_check_restore_complete(ihost):
|
||||||
"""
|
"""
|
||||||
During a restore procedure, checks compute nodes can be unlocked
|
During a restore procedure, checks worker nodes can be unlocked
|
||||||
only after running "config_controller --restore-complete"
|
only after running "config_controller --restore-complete"
|
||||||
"""
|
"""
|
||||||
if os.path.isfile(tsc.RESTORE_SYSTEM_FLAG):
|
if os.path.isfile(tsc.RESTORE_SYSTEM_FLAG):
|
||||||
@ -4123,13 +4123,13 @@ class HostController(rest.RestController):
|
|||||||
@staticmethod
|
@staticmethod
|
||||||
def _semantic_check_cgts_storage(ihost_uuid, personality):
|
def _semantic_check_cgts_storage(ihost_uuid, personality):
|
||||||
"""
|
"""
|
||||||
Perform semantic checking for cgts storage on compute hosts.
|
Perform semantic checking for cgts storage on worker hosts.
|
||||||
CGTS VG on computes used for kubernetes docker lv only at this time.
|
CGTS VG on workers used for kubernetes docker lv only at this time.
|
||||||
:param ihost_uuid: uuid of host with compute functionality
|
:param ihost_uuid: uuid of host with worker functionality
|
||||||
:param personality: personality of host with compute functionality
|
:param personality: personality of host with worker functionality
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if personality != constants.COMPUTE:
|
if personality != constants.WORKER:
|
||||||
return
|
return
|
||||||
|
|
||||||
# query volume groups
|
# query volume groups
|
||||||
@ -4145,7 +4145,7 @@ class HostController(rest.RestController):
|
|||||||
if cgts_local_storage_lvg.vg_state == constants.LVG_DEL:
|
if cgts_local_storage_lvg.vg_state == constants.LVG_DEL:
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("With kubernetes configured, "
|
_("With kubernetes configured, "
|
||||||
"a compute host requires a "
|
"a worker host requires a "
|
||||||
"cgts volume group prior to being enabled. It is "
|
"cgts volume group prior to being enabled. It is "
|
||||||
"currently set to be removed on unlock. Please update "
|
"currently set to be removed on unlock. Please update "
|
||||||
"the storage settings for the host."))
|
"the storage settings for the host."))
|
||||||
@ -4165,19 +4165,19 @@ class HostController(rest.RestController):
|
|||||||
if not lvg_has_pvs:
|
if not lvg_has_pvs:
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("With kubernetes configured, "
|
_("With kubernetes configured, "
|
||||||
"a compute host requires a "
|
"a worker host requires a "
|
||||||
"cgts volume group prior to being enabled."
|
"cgts volume group prior to being enabled."
|
||||||
"The cgts volume group does not contain any "
|
"The cgts volume group does not contain any "
|
||||||
"physical volumes in the adding or provisioned "
|
"physical volumes in the adding or provisioned "
|
||||||
"state."))
|
"state."))
|
||||||
else:
|
else:
|
||||||
# This method is only called with hosts that have a compute
|
# This method is only called with hosts that have a worker
|
||||||
# subfunction and is locked or if subfunction_config action is
|
# subfunction and is locked or if subfunction_config action is
|
||||||
# being called. Without a cgts volume group, prevent
|
# being called. Without a cgts volume group, prevent
|
||||||
# unlocking.
|
# unlocking.
|
||||||
|
|
||||||
msg = _('With kubernetes configured, '
|
msg = _('With kubernetes configured, '
|
||||||
'a compute host requires a cgts volume group prior to being '
|
'a worker host requires a cgts volume group prior to being '
|
||||||
'enabled. Please update the storage settings for the '
|
'enabled. Please update the storage settings for the '
|
||||||
'host.')
|
'host.')
|
||||||
|
|
||||||
@ -4494,19 +4494,19 @@ class HostController(rest.RestController):
|
|||||||
if backend.task == constants.SB_TASK_PROVISION_STORAGE:
|
if backend.task == constants.SB_TASK_PROVISION_STORAGE:
|
||||||
if HostController._check_provisioned_storage_hosts():
|
if HostController._check_provisioned_storage_hosts():
|
||||||
api.storage_backend_update(backend.uuid, {
|
api.storage_backend_update(backend.uuid, {
|
||||||
'task': constants.SB_TASK_RECONFIG_COMPUTE
|
'task': constants.SB_TASK_RECONFIG_WORKER
|
||||||
})
|
})
|
||||||
# update manifest for all online/enabled compute nodes
|
# update manifest for all online/enabled worker nodes
|
||||||
# live apply new ceph manifest for all compute nodes that
|
# live apply new ceph manifest for all worker nodes that
|
||||||
# are online/enabled. The rest will pickup when unlock
|
# are online/enabled. The rest will pickup when unlock
|
||||||
LOG.info(
|
LOG.info(
|
||||||
'Apply new Ceph manifest to provisioned compute nodes.'
|
'Apply new Ceph manifest to provisioned worker nodes.'
|
||||||
)
|
)
|
||||||
pecan.request.rpcapi.config_compute_for_ceph(
|
pecan.request.rpcapi.config_worker_for_ceph(
|
||||||
pecan.request.context
|
pecan.request.context
|
||||||
)
|
)
|
||||||
# mark all tasks completed after updating the manifests for
|
# mark all tasks completed after updating the manifests for
|
||||||
# all compute nodes.
|
# all worker nodes.
|
||||||
api.storage_backend_update(backend.uuid, {'task': None})
|
api.storage_backend_update(backend.uuid, {'task': None})
|
||||||
|
|
||||||
elif backend.task == constants.SB_TASK_RESIZE_CEPH_MON_LV:
|
elif backend.task == constants.SB_TASK_RESIZE_CEPH_MON_LV:
|
||||||
@ -4633,8 +4633,8 @@ class HostController(rest.RestController):
|
|||||||
# check the subfunctions are updated properly
|
# check the subfunctions are updated properly
|
||||||
LOG.info("hostupdate.ihost_patch.subfunctions %s" %
|
LOG.info("hostupdate.ihost_patch.subfunctions %s" %
|
||||||
hostupdate.ihost_patch['subfunctions'])
|
hostupdate.ihost_patch['subfunctions'])
|
||||||
elif hostupdate.ihost_patch['personality'] == constants.COMPUTE:
|
elif hostupdate.ihost_patch['personality'] == constants.WORKER:
|
||||||
self._check_compute(hostupdate.ihost_patch, hostupdate)
|
self._check_worker(hostupdate.ihost_patch, hostupdate)
|
||||||
else:
|
else:
|
||||||
LOG.error("Unexpected personality: %s" %
|
LOG.error("Unexpected personality: %s" %
|
||||||
hostupdate.ihost_patch['personality'])
|
hostupdate.ihost_patch['personality'])
|
||||||
@ -4660,12 +4660,12 @@ class HostController(rest.RestController):
|
|||||||
"Host %s must be deleted and re-added in order to change "
|
"Host %s must be deleted and re-added in order to change "
|
||||||
"the subfunctions." % hostupdate.ihost_orig['hostname']))
|
"the subfunctions." % hostupdate.ihost_orig['hostname']))
|
||||||
|
|
||||||
if hostupdate.ihost_patch['personality'] == constants.COMPUTE:
|
if hostupdate.ihost_patch['personality'] == constants.WORKER:
|
||||||
valid_subfunctions = (constants.COMPUTE,
|
valid_subfunctions = (constants.WORKER,
|
||||||
constants.LOWLATENCY)
|
constants.LOWLATENCY)
|
||||||
elif hostupdate.ihost_patch['personality'] == constants.CONTROLLER:
|
elif hostupdate.ihost_patch['personality'] == constants.CONTROLLER:
|
||||||
valid_subfunctions = (constants.CONTROLLER,
|
valid_subfunctions = (constants.CONTROLLER,
|
||||||
constants.COMPUTE,
|
constants.WORKER,
|
||||||
constants.LOWLATENCY)
|
constants.LOWLATENCY)
|
||||||
elif hostupdate.ihost_patch['personality'] == constants.STORAGE:
|
elif hostupdate.ihost_patch['personality'] == constants.STORAGE:
|
||||||
# Comparison is expecting a list
|
# Comparison is expecting a list
|
||||||
@ -4679,11 +4679,11 @@ class HostController(rest.RestController):
|
|||||||
("%s subfunctions %s contains unsupported values. Allowable: %s." %
|
("%s subfunctions %s contains unsupported values. Allowable: %s." %
|
||||||
(hostupdate.displayid, subfunctions_set, valid_subfunctions)))
|
(hostupdate.displayid, subfunctions_set, valid_subfunctions)))
|
||||||
|
|
||||||
if hostupdate.ihost_patch['personality'] == constants.COMPUTE:
|
if hostupdate.ihost_patch['personality'] == constants.WORKER:
|
||||||
if constants.COMPUTE not in subfunctions_set:
|
if constants.WORKER not in subfunctions_set:
|
||||||
# Automatically add it
|
# Automatically add it
|
||||||
subfunctions_list = list(subfunctions_set)
|
subfunctions_list = list(subfunctions_set)
|
||||||
subfunctions_list.insert(0, constants.COMPUTE)
|
subfunctions_list.insert(0, constants.WORKER)
|
||||||
subfunctions = ','.join(subfunctions_list)
|
subfunctions = ','.join(subfunctions_list)
|
||||||
|
|
||||||
LOG.info("%s update subfunctions=%s" %
|
LOG.info("%s update subfunctions=%s" %
|
||||||
@ -4732,10 +4732,10 @@ class HostController(rest.RestController):
|
|||||||
if not personality:
|
if not personality:
|
||||||
return
|
return
|
||||||
|
|
||||||
if personality == constants.COMPUTE and utils.is_aio_duplex_system():
|
if personality == constants.WORKER and utils.is_aio_duplex_system():
|
||||||
if utils.get_compute_count() >= constants.AIO_DUPLEX_MAX_COMPUTES:
|
if utils.get_worker_count() >= constants.AIO_DUPLEX_MAX_WORKERS:
|
||||||
msg = _("All-in-one Duplex is restricted to "
|
msg = _("All-in-one Duplex is restricted to "
|
||||||
"%s computes.") % constants.AIO_DUPLEX_MAX_COMPUTES
|
"%s workers.") % constants.AIO_DUPLEX_MAX_WORKERS
|
||||||
raise wsme.exc.ClientSideError(msg)
|
raise wsme.exc.ClientSideError(msg)
|
||||||
else:
|
else:
|
||||||
return
|
return
|
||||||
@ -4883,8 +4883,8 @@ class HostController(rest.RestController):
|
|||||||
if personality == constants.CONTROLLER:
|
if personality == constants.CONTROLLER:
|
||||||
self.check_unlock_controller(hostupdate, force_unlock)
|
self.check_unlock_controller(hostupdate, force_unlock)
|
||||||
|
|
||||||
if cutils.host_has_function(hostupdate.ihost_patch, constants.COMPUTE):
|
if cutils.host_has_function(hostupdate.ihost_patch, constants.WORKER):
|
||||||
self.check_unlock_compute(hostupdate)
|
self.check_unlock_worker(hostupdate)
|
||||||
elif personality == constants.STORAGE:
|
elif personality == constants.STORAGE:
|
||||||
self.check_unlock_storage(hostupdate)
|
self.check_unlock_storage(hostupdate)
|
||||||
|
|
||||||
@ -4956,8 +4956,8 @@ class HostController(rest.RestController):
|
|||||||
|
|
||||||
subfunctions_set = \
|
subfunctions_set = \
|
||||||
set(hostupdate.ihost_patch[constants.SUBFUNCTIONS].split(','))
|
set(hostupdate.ihost_patch[constants.SUBFUNCTIONS].split(','))
|
||||||
if constants.COMPUTE in subfunctions_set:
|
if constants.WORKER in subfunctions_set:
|
||||||
self.check_lock_compute(hostupdate)
|
self.check_lock_worker(hostupdate)
|
||||||
|
|
||||||
hostupdate.notify_vim = True
|
hostupdate.notify_vim = True
|
||||||
hostupdate.notify_mtce = True
|
hostupdate.notify_mtce = True
|
||||||
@ -5081,9 +5081,9 @@ class HostController(rest.RestController):
|
|||||||
if utils.get_https_enabled():
|
if utils.get_https_enabled():
|
||||||
self._semantic_check_tpm_config(hostupdate.ihost_orig)
|
self._semantic_check_tpm_config(hostupdate.ihost_orig)
|
||||||
|
|
||||||
def check_unlock_compute(self, hostupdate):
|
def check_unlock_worker(self, hostupdate):
|
||||||
"""Check semantics on host-unlock of a compute."""
|
"""Check semantics on host-unlock of a worker."""
|
||||||
LOG.info("%s ihost check_unlock_compute" % hostupdate.displayid)
|
LOG.info("%s ihost check_unlock_worker" % hostupdate.displayid)
|
||||||
ihost = hostupdate.ihost_orig
|
ihost = hostupdate.ihost_orig
|
||||||
if ihost['invprovision'] is None:
|
if ihost['invprovision'] is None:
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
@ -5093,7 +5093,7 @@ class HostController(rest.RestController):
|
|||||||
|
|
||||||
# Check whether a restore was properly completed
|
# Check whether a restore was properly completed
|
||||||
self._semantic_check_restore_complete(ihost)
|
self._semantic_check_restore_complete(ihost)
|
||||||
# Disable compute unlock checks in a kubernetes config
|
# Disable worker unlock checks in a kubernetes config
|
||||||
if not utils.is_kubernetes_config():
|
if not utils.is_kubernetes_config():
|
||||||
# sdn configuration check
|
# sdn configuration check
|
||||||
self._semantic_check_sdn_attributes(ihost)
|
self._semantic_check_sdn_attributes(ihost)
|
||||||
@ -5142,7 +5142,7 @@ class HostController(rest.RestController):
|
|||||||
# calculate the VM 4K huge pages for nova
|
# calculate the VM 4K huge pages for nova
|
||||||
self._update_vm_4k_pages(ihost)
|
self._update_vm_4k_pages(ihost)
|
||||||
|
|
||||||
if cutils.is_virtual() or cutils.is_virtual_compute(ihost):
|
if cutils.is_virtual() or cutils.is_virtual_worker(ihost):
|
||||||
mib_platform_reserved_no_io = mib_reserved
|
mib_platform_reserved_no_io = mib_reserved
|
||||||
required_platform = \
|
required_platform = \
|
||||||
constants.PLATFORM_CORE_MEMORY_RESERVED_MIB_VBOX
|
constants.PLATFORM_CORE_MEMORY_RESERVED_MIB_VBOX
|
||||||
@ -5236,7 +5236,7 @@ class HostController(rest.RestController):
|
|||||||
personality=constants.STORAGE)
|
personality=constants.STORAGE)
|
||||||
except Exception:
|
except Exception:
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("Can not unlock a compute node until at "
|
_("Can not unlock a worker node until at "
|
||||||
"least one storage node is unlocked and enabled."))
|
"least one storage node is unlocked and enabled."))
|
||||||
is_storage_host_unlocked = False
|
is_storage_host_unlocked = False
|
||||||
if storage_nodes:
|
if storage_nodes:
|
||||||
@ -5250,7 +5250,7 @@ class HostController(rest.RestController):
|
|||||||
|
|
||||||
if not is_storage_host_unlocked:
|
if not is_storage_host_unlocked:
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("Can not unlock a compute node until at "
|
_("Can not unlock a worker node until at "
|
||||||
"least one storage node is unlocked and enabled."))
|
"least one storage node is unlocked and enabled."))
|
||||||
|
|
||||||
# Local Storage checks
|
# Local Storage checks
|
||||||
@ -5435,7 +5435,7 @@ class HostController(rest.RestController):
|
|||||||
elif to_host_load_id == upgrade.from_load:
|
elif to_host_load_id == upgrade.from_load:
|
||||||
# On CPE loads we must abort before we swact back to the old load
|
# On CPE loads we must abort before we swact back to the old load
|
||||||
# Any VMs on the active controller will be lost during the swact
|
# Any VMs on the active controller will be lost during the swact
|
||||||
if constants.COMPUTE in to_host.subfunctions:
|
if constants.WORKER in to_host.subfunctions:
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("Upgrading: %s must be using load %s before this "
|
_("Upgrading: %s must be using load %s before this "
|
||||||
"operation can proceed. Currently using load %s.") %
|
"operation can proceed. Currently using load %s.") %
|
||||||
@ -5493,7 +5493,7 @@ class HostController(rest.RestController):
|
|||||||
"Standby controller must be in available status.") %
|
"Standby controller must be in available status.") %
|
||||||
(ihost_ctr.hostname))
|
(ihost_ctr.hostname))
|
||||||
|
|
||||||
if constants.COMPUTE in ihost_ctr.subfunctions:
|
if constants.WORKER in ihost_ctr.subfunctions:
|
||||||
if (ihost_ctr.subfunction_oper !=
|
if (ihost_ctr.subfunction_oper !=
|
||||||
constants.OPERATIONAL_ENABLED):
|
constants.OPERATIONAL_ENABLED):
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
@ -5659,10 +5659,10 @@ class HostController(rest.RestController):
|
|||||||
"and replication is lost. This may result in data loss. ")
|
"and replication is lost. This may result in data loss. ")
|
||||||
raise wsme.exc.ClientSideError(msg)
|
raise wsme.exc.ClientSideError(msg)
|
||||||
|
|
||||||
def check_lock_compute(self, hostupdate, force=False):
|
def check_lock_worker(self, hostupdate, force=False):
|
||||||
"""Pre lock semantic checks for compute"""
|
"""Pre lock semantic checks for worker"""
|
||||||
|
|
||||||
LOG.info("%s host check_lock_compute" % hostupdate.displayid)
|
LOG.info("%s host check_lock_worker" % hostupdate.displayid)
|
||||||
if force:
|
if force:
|
||||||
return
|
return
|
||||||
|
|
||||||
@ -5692,7 +5692,7 @@ class HostController(rest.RestController):
|
|||||||
# Allow AIO-DX lock of controller-1
|
# Allow AIO-DX lock of controller-1
|
||||||
return
|
return
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("Rejected: Can not lock %s with compute function "
|
_("Rejected: Can not lock %s with worker function "
|
||||||
"at this upgrade stage '%s'.") %
|
"at this upgrade stage '%s'.") %
|
||||||
(hostupdate.displayid, upgrade_state))
|
(hostupdate.displayid, upgrade_state))
|
||||||
|
|
||||||
@ -5703,17 +5703,17 @@ class HostController(rest.RestController):
|
|||||||
if hostname == constants.CONTROLLER_0_HOSTNAME:
|
if hostname == constants.CONTROLLER_0_HOSTNAME:
|
||||||
return
|
return
|
||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("Rejected: Can not lock %s with compute function "
|
_("Rejected: Can not lock %s with worker function "
|
||||||
"at this upgrade stage '%s'.") %
|
"at this upgrade stage '%s'.") %
|
||||||
(hostupdate.displayid, upgrade_state))
|
(hostupdate.displayid, upgrade_state))
|
||||||
|
|
||||||
def check_unlock_interfaces(self, hostupdate):
|
def check_unlock_interfaces(self, hostupdate):
|
||||||
"""Semantic check for interfaces on host-unlock."""
|
"""Semantic check for interfaces on host-unlock."""
|
||||||
ihost = hostupdate.ihost_patch
|
ihost = hostupdate.ihost_patch
|
||||||
if ihost['personality'] in [constants.CONTROLLER, constants.COMPUTE,
|
if ihost['personality'] in [constants.CONTROLLER, constants.WORKER,
|
||||||
constants.STORAGE]:
|
constants.STORAGE]:
|
||||||
# Check if there is an infra interface on
|
# Check if there is an infra interface on
|
||||||
# controller/compute/storage
|
# controller/worker/storage
|
||||||
ihost_iinterfaces = \
|
ihost_iinterfaces = \
|
||||||
pecan.request.dbapi.iinterface_get_by_ihost(ihost['uuid'])
|
pecan.request.dbapi.iinterface_get_by_ihost(ihost['uuid'])
|
||||||
|
|
||||||
@ -5754,7 +5754,7 @@ class HostController(rest.RestController):
|
|||||||
raise wsme.exc.ClientSideError(msg)
|
raise wsme.exc.ClientSideError(msg)
|
||||||
|
|
||||||
# Check if there is an management interface on
|
# Check if there is an management interface on
|
||||||
# controller/compute/storage
|
# controller/worker/storage
|
||||||
ihost_iinterfaces = pecan.request.dbapi.iinterface_get_by_ihost(
|
ihost_iinterfaces = pecan.request.dbapi.iinterface_get_by_ihost(
|
||||||
ihost['uuid'])
|
ihost['uuid'])
|
||||||
network = pecan.request.dbapi.network_get_by_type(
|
network = pecan.request.dbapi.network_get_by_type(
|
||||||
@ -5796,7 +5796,7 @@ class HostController(rest.RestController):
|
|||||||
# management and infrastrucutre interfaces via DHCP. This
|
# management and infrastrucutre interfaces via DHCP. This
|
||||||
# 'check' updates the 'imtu' value based on what will be served
|
# 'check' updates the 'imtu' value based on what will be served
|
||||||
# via DHCP.
|
# via DHCP.
|
||||||
if ihost['personality'] in [constants.COMPUTE, constants.STORAGE]:
|
if ihost['personality'] in [constants.WORKER, constants.STORAGE]:
|
||||||
host_list = pecan.request.dbapi.ihost_get_by_personality(
|
host_list = pecan.request.dbapi.ihost_get_by_personality(
|
||||||
personality=constants.CONTROLLER)
|
personality=constants.CONTROLLER)
|
||||||
interface_list_active = []
|
interface_list_active = []
|
||||||
@ -5938,7 +5938,7 @@ class HostController(rest.RestController):
|
|||||||
ihost_obj['hostname'])
|
ihost_obj['hostname'])
|
||||||
pecan.request.rpcapi.configure_ihost(pecan.request.context,
|
pecan.request.rpcapi.configure_ihost(pecan.request.context,
|
||||||
ihost_obj,
|
ihost_obj,
|
||||||
do_compute_apply=True)
|
do_worker_apply=True)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _stage_reboot(hostupdate):
|
def _stage_reboot(hostupdate):
|
||||||
@ -6186,7 +6186,7 @@ class HostController(rest.RestController):
|
|||||||
def _create_node(host, xml_node, personality, is_dynamic_ip):
|
def _create_node(host, xml_node, personality, is_dynamic_ip):
|
||||||
host_node = et.SubElement(xml_node, 'host')
|
host_node = et.SubElement(xml_node, 'host')
|
||||||
et.SubElement(host_node, 'personality').text = personality
|
et.SubElement(host_node, 'personality').text = personality
|
||||||
if personality == constants.COMPUTE:
|
if personality == constants.WORKER:
|
||||||
et.SubElement(host_node, 'hostname').text = host.hostname
|
et.SubElement(host_node, 'hostname').text = host.hostname
|
||||||
et.SubElement(host_node, 'subfunctions').text = host.subfunctions
|
et.SubElement(host_node, 'subfunctions').text = host.subfunctions
|
||||||
|
|
||||||
|
@ -979,9 +979,9 @@ def _check_network_type_validity(networktypelist):
|
|||||||
def _check_network_type_and_host_type(ihost, networktypelist):
|
def _check_network_type_and_host_type(ihost, networktypelist):
|
||||||
for nt in DATA_NETWORK_TYPES:
|
for nt in DATA_NETWORK_TYPES:
|
||||||
if (nt in networktypelist and
|
if (nt in networktypelist and
|
||||||
constants.COMPUTE not in ihost['subfunctions']):
|
constants.WORKER not in ihost['subfunctions']):
|
||||||
msg = _("The '%s' network type is only supported on nodes "
|
msg = _("The '%s' network type is only supported on nodes "
|
||||||
"supporting compute functions" % nt)
|
"supporting worker functions" % nt)
|
||||||
raise wsme.exc.ClientSideError(msg)
|
raise wsme.exc.ClientSideError(msg)
|
||||||
|
|
||||||
if (constants.NETWORK_TYPE_OAM in networktypelist and
|
if (constants.NETWORK_TYPE_OAM in networktypelist and
|
||||||
|
@ -513,32 +513,32 @@ def _check_host(lvg):
|
|||||||
raise wsme.exc.ClientSideError(_("Volume group operations not allowed "
|
raise wsme.exc.ClientSideError(_("Volume group operations not allowed "
|
||||||
"on hosts with personality: %s") %
|
"on hosts with personality: %s") %
|
||||||
constants.STORAGE)
|
constants.STORAGE)
|
||||||
elif (constants.COMPUTE not in ihost.subfunctions and
|
elif (constants.WORKER not in ihost.subfunctions and
|
||||||
lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL):
|
lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL):
|
||||||
raise wsme.exc.ClientSideError(_("%s can only be added to a host which "
|
raise wsme.exc.ClientSideError(_("%s can only be added to a host which "
|
||||||
"has a %s subfunction.") %
|
"has a %s subfunction.") %
|
||||||
(constants.LVG_NOVA_LOCAL,
|
(constants.LVG_NOVA_LOCAL,
|
||||||
constants.COMPUTE))
|
constants.WORKER))
|
||||||
elif (ihost.personality == constants.COMPUTE and
|
elif (ihost.personality == constants.WORKER and
|
||||||
lvg['lvm_vg_name'] == constants.LVG_CGTS_VG and
|
lvg['lvm_vg_name'] == constants.LVG_CGTS_VG and
|
||||||
not utils.is_kubernetes_config()):
|
not utils.is_kubernetes_config()):
|
||||||
raise wsme.exc.ClientSideError(_("%s can not be provisioned for %s "
|
raise wsme.exc.ClientSideError(_("%s can not be provisioned for %s "
|
||||||
"hosts.") % (constants.LVG_CGTS_VG,
|
"hosts.") % (constants.LVG_CGTS_VG,
|
||||||
constants.COMPUTE))
|
constants.WORKER))
|
||||||
elif (ihost.personality in [constants.COMPUTE, constants.STORAGE] and
|
elif (ihost.personality in [constants.WORKER, constants.STORAGE] and
|
||||||
lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES):
|
lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES):
|
||||||
raise wsme.exc.ClientSideError(_("%s can only be provisioned for %s "
|
raise wsme.exc.ClientSideError(_("%s can only be provisioned for %s "
|
||||||
"hosts.") % (constants.LVG_CINDER_VOLUMES,
|
"hosts.") % (constants.LVG_CINDER_VOLUMES,
|
||||||
constants.CONTROLLER))
|
constants.CONTROLLER))
|
||||||
|
|
||||||
if (constants.COMPUTE in ihost['subfunctions'] and
|
if (constants.WORKER in ihost['subfunctions'] and
|
||||||
lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL and
|
lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL and
|
||||||
(ihost['administrative'] != constants.ADMIN_LOCKED or
|
(ihost['administrative'] != constants.ADMIN_LOCKED or
|
||||||
ihost['ihost_action'] == constants.UNLOCK_ACTION)):
|
ihost['ihost_action'] == constants.UNLOCK_ACTION)):
|
||||||
raise wsme.exc.ClientSideError(_("Host must be locked"))
|
raise wsme.exc.ClientSideError(_("Host must be locked"))
|
||||||
|
|
||||||
if utils.is_kubernetes_config():
|
if utils.is_kubernetes_config():
|
||||||
if (ihost.personality == constants.COMPUTE and
|
if (ihost.personality == constants.WORKER and
|
||||||
lvg['lvm_vg_name'] == constants.LVG_CGTS_VG and
|
lvg['lvm_vg_name'] == constants.LVG_CGTS_VG and
|
||||||
(ihost['administrative'] != constants.ADMIN_LOCKED or
|
(ihost['administrative'] != constants.ADMIN_LOCKED or
|
||||||
ihost['ihost_action'] == constants.UNLOCK_ACTION)):
|
ihost['ihost_action'] == constants.UNLOCK_ACTION)):
|
||||||
@ -662,7 +662,7 @@ def _check(op, lvg):
|
|||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("Can't modify the volume group: %s. There are currently "
|
_("Can't modify the volume group: %s. There are currently "
|
||||||
"%d instance volumes present in the volume group. "
|
"%d instance volumes present in the volume group. "
|
||||||
"Terminate or migrate all instances from the compute to "
|
"Terminate or migrate all instances from the worker to "
|
||||||
"allow volume group madifications." %
|
"allow volume group madifications." %
|
||||||
(lvg['lvm_vg_name'], lvg['lvm_cur_lv'] - 1)))
|
(lvg['lvm_vg_name'], lvg['lvm_cur_lv'] - 1)))
|
||||||
|
|
||||||
@ -683,7 +683,7 @@ def _check(op, lvg):
|
|||||||
raise wsme.exc.ClientSideError(
|
raise wsme.exc.ClientSideError(
|
||||||
_("Can't delete volume group: %s. There are currently %d "
|
_("Can't delete volume group: %s. There are currently %d "
|
||||||
"instance volumes present in the volume group. Terminate"
|
"instance volumes present in the volume group. Terminate"
|
||||||
" or migrate all instances from the compute to allow "
|
" or migrate all instances from the worker to allow "
|
||||||
"volume group deletion." % (lvg['lvm_vg_name'],
|
"volume group deletion." % (lvg['lvm_vg_name'],
|
||||||
lvg['lvm_cur_lv'] - 1)))
|
lvg['lvm_cur_lv'] - 1)))
|
||||||
else:
|
else:
|
||||||
|
@ -559,7 +559,7 @@ def _check_memory(rpc_port, ihost, platform_reserved_mib=None,
|
|||||||
required_platform_reserved,
|
required_platform_reserved,
|
||||||
max_platform_reserved))
|
max_platform_reserved))
|
||||||
|
|
||||||
if cutils.is_virtual() or cutils.is_virtual_compute(ihost):
|
if cutils.is_virtual() or cutils.is_virtual_worker(ihost):
|
||||||
LOG.warn(msg_platform_over)
|
LOG.warn(msg_platform_over)
|
||||||
else:
|
else:
|
||||||
raise wsme.exc.ClientSideError(msg_platform_over)
|
raise wsme.exc.ClientSideError(msg_platform_over)
|
||||||
|
@ -336,16 +336,16 @@ class PartitionController(rest.RestController):
|
|||||||
|
|
||||||
def _check_host(partition, ihost, idisk):
|
def _check_host(partition, ihost, idisk):
|
||||||
"""Semantic checks for valid host"""
|
"""Semantic checks for valid host"""
|
||||||
# Partitions should only be created on computes/controllers.
|
# Partitions should only be created on workers/controllers.
|
||||||
if not ihost.personality:
|
if not ihost.personality:
|
||||||
raise wsme.exc.ClientSideError(_("Host %s has uninitialized "
|
raise wsme.exc.ClientSideError(_("Host %s has uninitialized "
|
||||||
"personality.") %
|
"personality.") %
|
||||||
ihost.hostname)
|
ihost.hostname)
|
||||||
elif ihost.personality not in [constants.CONTROLLER, constants.COMPUTE]:
|
elif ihost.personality not in [constants.CONTROLLER, constants.WORKER]:
|
||||||
raise wsme.exc.ClientSideError(_("Host personality must be a one of "
|
raise wsme.exc.ClientSideError(_("Host personality must be a one of "
|
||||||
"[%s, %s]") %
|
"[%s, %s]") %
|
||||||
(constants.CONTROLLER,
|
(constants.CONTROLLER,
|
||||||
constants.COMPUTE))
|
constants.WORKER))
|
||||||
|
|
||||||
# The disk must be present on the specified host.
|
# The disk must be present on the specified host.
|
||||||
if ihost['id'] != idisk['forihostid']:
|
if ihost['id'] != idisk['forihostid']:
|
||||||
@@ -656,8 +656,8 @@ def _create(partition, iprofile=None, applyprofile=None):
 # Check if this host has been provisioned. If so, attempt an in-service
 # action. If not, we'll just stage the DB changes to and let the unlock
 # apply the manifest changes
-# - PROVISIONED: standard controller/compute (after config_controller)
-# - PROVISIONING: AIO (after config_controller) and before compute
+# - PROVISIONED: standard controller/worker (after config_controller)
+# - PROVISIONING: AIO (after config_controller) and before worker
 # configuration
 if (ihost.invprovision in [constants.PROVISIONED,
 constants.PROVISIONING] and
@@ -294,8 +294,8 @@ def _check_host(host):
 elif host.administrative != constants.ADMIN_LOCKED and not \
 utils.is_host_simplex_controller(host):
 raise wsme.exc.ClientSideError(_('Host must be locked.'))
-if constants.COMPUTE not in host.subfunctions:
-raise wsme.exc.ClientSideError(_('Can only modify compute node cores.'))
+if constants.WORKER not in host.subfunctions:
+raise wsme.exc.ClientSideError(_('Can only modify worker node cores.'))


 def _check_field(field):
@@ -898,7 +898,7 @@ class ProfileController(rest.RestController):
 if 'profiletype' in profile_dict and profile_dict['profiletype']:
 profiletype = profile_dict['profiletype']
 if profiletype == constants.PROFILE_TYPE_STORAGE:
-if constants.COMPUTE in from_ihost.subfunctions:
+if constants.WORKER in from_ihost.subfunctions:
 # combo has no ceph
 profiletype = constants.PROFILE_TYPE_LOCAL_STORAGE
 LOG.info("No ceph backend for stor profile, assuming "
@@ -1136,7 +1136,7 @@ def _create_cpu_profile(profile_name, profile_node):
 self.processor_index = p_index
 self.core_index = c_index
 self.thread_index = t_index
-self.core_function = constants.VM_FUNCTION
+self.core_function = constants.APPLICATION_FUNCTION

 # The xml is validated against schema.
 # Validations that are covered by the schema are not checked below.
@@ -1750,7 +1750,7 @@ def _create_localstorage_profile(profile_name, profile_node):
 """
 values = dict(recordtype="profile",
 hostname=profile_name,
-subfunctions=constants.COMPUTE)
+subfunctions=constants.WORKER)

 disks = profile_node.findall('disk')
 all_ilvg_nodes = profile_node.findall('lvg') # should only be ONE ?
@@ -2179,7 +2179,7 @@ def _create_device_profile(device, pv_type, iprofile_id):
 def localstorageprofile_copy_data(host, profile):
 """Create nova-local storage profile from host data

-All computes will have nova local storage and is independent of
+All workers will have nova local storage and is independent of
 the Cinder backend.

 Controller nodes in the small footprint scenario will always be
@@ -2189,7 +2189,7 @@ def localstorageprofile_copy_data(host, profile):
 A storage node should be the only host with a stor profile
 (idisks + istors).

-A compute will only have a local stor profile
+A worker will only have a local stor profile
 (idisks + ipvs + ilvgs).

 A combo controller should have a local stor profile
@@ -2467,7 +2467,7 @@ def cpuprofile_apply_to_host(host, profile):
 elif core_idx < vm_core_start:
 new_func = constants.SHARED_FUNCTION
 elif core_idx < vm_core_end:
-new_func = constants.VM_FUNCTION
+new_func = constants.APPLICATION_FUNCTION

 if new_func != hcpu.allocated_function:
 values = {'allocated_function': new_func}
@@ -2949,10 +2949,10 @@ def check_localstorageprofile_applicable(host, profile):
 """

 subfunctions = host.subfunctions
-if constants.COMPUTE not in subfunctions:
+if constants.WORKER not in subfunctions:
 raise wsme.exc.ClientSideError(_("%s with subfunctions: %s "
 "profile %s: Local storage profiles are applicable only to "
-"hosts with 'compute' subfunction." %
+"hosts with 'worker' subfunction." %
 (host.hostname, host.subfunctions, profile.hostname)))

 if not profile.disks:
@@ -3143,8 +3143,8 @@ def memoryprofile_applicable(host, profile):
 LOG.warn("Host nodes %s not same as profile nodes=%s" %
 (len(host.nodes), len(profile.nodes)))
 return False
-if constants.COMPUTE not in host.subfunctions:
-LOG.warn("Profile cannot be applied to non-compute host")
+if constants.WORKER not in host.subfunctions:
+LOG.warn("Profile cannot be applied to non-worker host")
 return False
 return True

@@ -477,12 +477,12 @@ def _check_host(pv, ihost, op):
 if utils.is_kubernetes_config():
 if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG):
 if (ihost['personality'] != constants.CONTROLLER and
-ihost['personality'] != constants.COMPUTE):
+ihost['personality'] != constants.WORKER):
 raise wsme.exc.ClientSideError(
 _("Physical volume operations for %s are only "
 "supported on %s and %s hosts" %
 (constants.LVG_CGTS_VG,
-constants.COMPUTE,
+constants.WORKER,
 constants.CONTROLLER)))
 elif (ilvg.lvm_vg_name == constants.LVG_CGTS_VG):
 if ihost['personality'] != constants.CONTROLLER:
@@ -492,17 +492,17 @@ def _check_host(pv, ihost, op):
 constants.CONTROLLER))

 # semantic check: host must be locked for a nova-local change on
-# a host with a compute subfunction (compute or AIO)
-if (constants.COMPUTE in ihost['subfunctions'] and
+# a host with a worker subfunction (worker or AIO)
+if (constants.WORKER in ihost['subfunctions'] and
 ilvg.lvm_vg_name == constants.LVG_NOVA_LOCAL and
 (ihost['administrative'] != constants.ADMIN_LOCKED or
 ihost['ihost_action'] == constants.UNLOCK_ACTION)):
 raise wsme.exc.ClientSideError(_("Host must be locked"))

 # semantic check: host must be locked for a CGTS change on
-# a compute host.
+# a worker host.
 if utils.is_kubernetes_config():
-if (ihost['personality'] == constants.COMPUTE and
+if (ihost['personality'] == constants.WORKER and
 ilvg.lvm_vg_name == constants.LVG_CGTS_VG and
 (ihost['administrative'] != constants.ADMIN_LOCKED or
 ihost['ihost_action'] == constants.UNLOCK_ACTION)):
@@ -599,7 +599,7 @@ def _check_lvg(op, pv):
 raise wsme.exc.ClientSideError(msg)

 elif op == "delete":
-# Possible Kubernetes issue, do we want to allow this on compute nodes?
+# Possible Kubernetes issue, do we want to allow this on worker nodes?
 if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG):
 raise wsme.exc.ClientSideError(
 _("Physical volumes cannot be removed from the cgts-vg volume "
@@ -205,7 +205,7 @@ class SDNControllerController(rest.RestController):
 # Clear any existing OVSDB manager alarm, corresponding
 # to this SDN controller. We need to clear this alarm
 # for all hosts on which it is set, i.e. all unlocked
-# compute nodes.
+# worker nodes.
 key = "sdn-controller=%s" % uuid
 obj = fm_api.FaultAPIs()

@@ -220,7 +220,7 @@ class SDNControllerController(rest.RestController):

 # Clear any existing Openflow Controller alarm, corresponding
 # to this SDN controller. We need need to clear this alarm
-# for all hosts on which it is set, i.e. all unlocked computes.
+# for all hosts on which it is set, i.e. all unlocked workers.
 sdn_controller = objects.sdn_controller.get_by_uuid(
 pecan.request.context, uuid)
 uri = "%s://%s" % (sdn_controller.transport,
@@ -733,7 +733,7 @@ def _apply_backend_changes(op, sb_obj):

 def _apply_nova_specific_changes(sb_obj, old_sb_obj=None):
 """If the backend's services have been modified and nova has been either
-added or (re)moved, set the hosts with compute functionality and a
+added or (re)moved, set the hosts with worker functionality and a
 certain nova-local instance backing to Config out-of-date.
 """
 services = api_helper.getListFromServices(sb_obj.as_dict())
@@ -282,7 +282,7 @@ class SystemController(rest.RestController):
 raise wsme.exc.ClientSideError(
 _("Host {} must be locked.".format(h['hostname'])))
 elif (h['administrative'] != constants.ADMIN_LOCKED and
-constants.COMPUTE in h['subfunctions'] and
+constants.WORKER in h['subfunctions'] and
 not api_utils.is_host_active_controller(h) and
 not api_utils.is_host_simplex_controller(h)):
 raise wsme.exc.ClientSideError(
@@ -317,7 +317,7 @@ class SystemHelper(object):
 @staticmethod
 def get_product_build():
 active_controller = HostHelper.get_active_controller()
-if constants.COMPUTE in active_controller.subfunctions:
+if constants.WORKER in active_controller.subfunctions:
 return constants.TIS_AIO_BUILD
 return constants.TIS_STD_BUILD

@@ -413,10 +413,10 @@ def is_aio_kubernetes(dbapi=None):
 is_kubernetes_config(dbapi)


-def get_compute_count(dbapi=None):
+def get_worker_count(dbapi=None):
 if not dbapi:
 dbapi = pecan.request.dbapi
-return len(dbapi.ihost_get_by_personality(constants.COMPUTE))
+return len(dbapi.ihost_get_by_personality(constants.WORKER))


 class SBApiHelper(object):
@@ -649,7 +649,7 @@ class CephApiOperator(object):
 inventory_monitor_names = []
 ihosts = db_api.ihost_get_list()
 for ihost in ihosts:
-if ihost['personality'] == constants.COMPUTE:
+if ihost['personality'] == constants.WORKER:
 continue
 capabilities = ihost['capabilities']
 if 'stor_function' in capabilities:
@@ -102,9 +102,9 @@ CONFIG_ACTIONS = [SUBFUNCTION_CONFIG_ACTION,
 # Personalities
 CONTROLLER = 'controller'
 STORAGE = 'storage'
-COMPUTE = 'compute'
+WORKER = 'worker'

-PERSONALITIES = [CONTROLLER, STORAGE, COMPUTE]
+PERSONALITIES = [CONTROLLER, STORAGE, WORKER]

 # SUBFUNCTION FEATURES
 SUBFUNCTIONS = 'subfunctions'
@@ -114,7 +114,7 @@ LOWLATENCY = 'lowlatency'
 PLATFORM_FUNCTION = "Platform"
 VSWITCH_FUNCTION = "Vswitch"
 SHARED_FUNCTION = "Shared"
-VM_FUNCTION = "VMs"
+APPLICATION_FUNCTION = "Applications"
 NO_FUNCTION = "None"

 # Host Personality Sub-Types
@@ -223,8 +223,8 @@ COMBINED_NODE_CONTROLLER_MEMORY_RESERVED_MIB_XEOND = 7000
 # Max number of physical cores in a xeon-d cpu
 NUMBER_CORES_XEOND = 8

-# Max number of computes that can be added to an AIO duplex system
-AIO_DUPLEX_MAX_COMPUTES = 4
+# Max number of workers that can be added to an AIO duplex system
+AIO_DUPLEX_MAX_WORKERS = 4

 # Network overhead for DHCP or vrouter, assume 100 networks * 40 MB each
 NETWORK_METADATA_OVERHEAD_MIB = 4000
@@ -244,7 +244,7 @@ NEUTRON_PROVIDERNET_FLAT = "flat"
 NEUTRON_PROVIDERNET_VXLAN = "vxlan"
 NEUTRON_PROVIDERNET_VLAN = "vlan"

-# Supported compute node vswitch types
+# Supported worker node vswitch types
 VSWITCH_TYPE_OVS_DPDK = "ovs-dpdk"
 VSWITCH_TYPE_NUAGE_VRS = "nuage_vrs"

@@ -420,7 +420,7 @@ SB_TASK_APPLY_CONFIG_FILE = 'applying-config-file'
 SB_TASK_RECONFIG_CONTROLLER = 'reconfig-controller'
 SB_TASK_PROVISION_STORAGE = 'provision-storage'
 SB_TASK_PROVISION_SERVICES = 'provision-services'
-SB_TASK_RECONFIG_COMPUTE = 'reconfig-compute'
+SB_TASK_RECONFIG_WORKER = 'reconfig-worker'
 SB_TASK_RESIZE_CEPH_MON_LV = 'resize-ceph-mon-lv'
 SB_TASK_ADD_OBJECT_GATEWAY = 'add-object-gateway'
 SB_TASK_RESTORE = 'restore'
@@ -1003,7 +1003,7 @@ SERVICE_PARAM_SECTION_PLATFORM_MAINTENANCE = 'maintenance'
 SERVICE_PARAM_SECTION_PLATFORM_SYSINV = 'sysinv'
 SERVICE_PARAM_NAME_SYSINV_FIREWALL_RULES_ID = 'firewall_rules_id'

-SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT = 'compute_boot_timeout'
+SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT = 'worker_boot_timeout'
 SERVICE_PARAM_PLAT_MTCE_CONTROLLER_BOOT_TIMEOUT = 'controller_boot_timeout'
 SERVICE_PARAM_PLAT_MTCE_HBS_PERIOD = 'heartbeat_period'
 SERVICE_PARAM_PLAT_MTCE_HBS_FAILURE_ACTION = 'heartbeat_failure_action'
@@ -1012,7 +1012,7 @@ SERVICE_PARAM_PLAT_MTCE_HBS_DEGRADE_THRESHOLD = 'heartbeat_degrade_threshold'
 SERVICE_PARAM_PLAT_MTCE_MNFA_THRESHOLD = 'mnfa_threshold'
 SERVICE_PARAM_PLAT_MTCE_MNFA_TIMEOUT = 'mnfa_timeout'

-SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT_DEFAULT = 720
+SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT_DEFAULT = 720
 SERVICE_PARAM_PLAT_MTCE_CONTROLLER_BOOT_TIMEOUT_DEFAULT = 1200
 SERVICE_PARAM_PLAT_MTCE_HBS_PERIOD_DEFAULT = 100
 SERVICE_PARAM_PLAT_MTCE_HBS_FAILURE_ACTION_DEFAULT = 'fail'
@@ -1286,11 +1286,11 @@ WARN_CINDER_ON_ROOT_WITH_CEPH = 2
 WARNING_ROOT_PV_CINDER_LVM_MSG = (
 "Warning: All deployed VMs must be booted from Cinder volumes and "
 "not use ephemeral or swap disks. See Titanium Cloud System Engineering "
-"Guidelines for more details on supported compute configurations.")
+"Guidelines for more details on supported worker configurations.")
 WARNING_ROOT_PV_CINDER_CEPH_MSG = (
-"Warning: This compute must have instance_backing set to 'remote' "
+"Warning: This worker must have instance_backing set to 'remote' "
 "or use a secondary disk for local storage. See Titanium Cloud System "
-"Engineering Guidelines for more details on supported compute configurations.")
+"Engineering Guidelines for more details on supported worker configurations.")
 PV_WARNINGS = {WARN_CINDER_ON_ROOT_WITH_LVM: WARNING_ROOT_PV_CINDER_LVM_MSG,
 WARN_CINDER_ON_ROOT_WITH_CEPH: WARNING_ROOT_PV_CINDER_CEPH_MSG}

@@ -336,7 +336,7 @@ class Health(object):
 # If we are running on CPE we don't want any instances running
 # on controller-1 before we start the upgrade, otherwise the
 # databases will be out of sync after we lock controller-1
-if constants.COMPUTE in controller_1.subfunctions:
+if constants.WORKER in controller_1.subfunctions:
 success, running_instances = self._check_running_instances(
 controller_1)
 output += \
@@ -244,7 +244,7 @@ def _validate_ip_address(name, value):
 def _validate_emc_vnx_iscsi_initiators(name, value):
 """Check if iscsi_initiators value is valid. An example of valid
 iscsi_initiators string:
-{"compute-0": ["10.0.0.1", "10.0.0.2"], "compute-1": ["10.0.0.3"]}
+{"worker-0": ["10.0.0.1", "10.0.0.2"], "worker-1": ["10.0.0.3"]}
 """
 try:
 iscsi_initiators = json.loads(value)
@@ -527,10 +527,10 @@ def _emc_vnx_destroy_data_san_address(data_san_addr_param, data_san_db):
 raise wsme.exc.ClientSideError(msg)


-def _validate_compute_boot_timeout(name, value):
+def _validate_worker_boot_timeout(name, value):
 _validate_range(name, value,
-SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT_MIN,
-SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT_MAX)
+SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT_MIN,
+SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT_MAX)


 def _validate_controller_boot_timeout(name, value):
@@ -1353,7 +1353,7 @@ CINDER_HPELEFTHAND_PARAMETER_RESOURCE = {

 # Maintenance Service Parameters
 PLATFORM_MTCE_PARAMETER_MANDATORY = [
-constants.SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT,
+constants.SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT,
 constants.SERVICE_PARAM_PLAT_MTCE_CONTROLLER_BOOT_TIMEOUT,
 constants.SERVICE_PARAM_PLAT_MTCE_HBS_PERIOD,
 constants.SERVICE_PARAM_PLAT_MTCE_HBS_FAILURE_ACTION,
@@ -1365,8 +1365,8 @@ PLATFORM_MTCE_PARAMETER_MANDATORY = [

 PLATFORM_SYSINV_PARAMETER_PROTECTED = ['firewall_rules_id']

-SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT_MIN = 720
-SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT_MAX = 1800
+SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT_MIN = 720
+SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT_MAX = 1800
 SERVICE_PARAM_PLAT_MTCE_CONTROLLER_BOOT_TIMEOUT_MIN = 1200
 SERVICE_PARAM_PLAT_MTCE_CONTROLLER_BOOT_TIMEOUT_MAX = 1800
 SERVICE_PARAM_PLAT_MTCE_HBS_PERIOD_MIN = 100
@@ -1385,8 +1385,8 @@ SERVICE_PARAM_PLAT_MTCE_MNFA_TIMEOUT_MIN = 100
 SERVICE_PARAM_PLAT_MTCE_MNFA_TIMEOUT_MAX = 86400

 PLATFORM_MTCE_PARAMETER_VALIDATOR = {
-constants.SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT:
-_validate_compute_boot_timeout,
+constants.SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT:
+_validate_worker_boot_timeout,
 constants.SERVICE_PARAM_PLAT_MTCE_CONTROLLER_BOOT_TIMEOUT:
 _validate_controller_boot_timeout,
 constants.SERVICE_PARAM_PLAT_MTCE_HBS_PERIOD:
@@ -1404,7 +1404,7 @@ PLATFORM_MTCE_PARAMETER_VALIDATOR = {
 }

 PLATFORM_MTCE_PARAMETER_RESOURCE = {
-constants.SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT: 'platform::mtce::params::compute_boot_timeout',
+constants.SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT: 'platform::mtce::params::worker_boot_timeout',
 constants.SERVICE_PARAM_PLAT_MTCE_CONTROLLER_BOOT_TIMEOUT: 'platform::mtce::params::controller_boot_timeout',
 constants.SERVICE_PARAM_PLAT_MTCE_HBS_PERIOD: 'platform::mtce::params::heartbeat_period',
 constants.SERVICE_PARAM_PLAT_MTCE_HBS_FAILURE_ACTION: 'platform::mtce::params::heartbeat_failure_action',
@@ -262,7 +262,7 @@ class StorageBackendConfig(object):
 return False

 # if both controllers are reconfigured and 1st pair storage nodes
-# are provisioned, the task will be either reconfig_compute or none
+# are provisioned, the task will be either reconfig_worker or none
 return True

 @staticmethod
@@ -879,12 +879,12 @@ def is_virtual():
 return bool(result == 'true')


-def is_virtual_compute(ihost):
-if not(os.path.isdir("/etc/sysinv/.virtual_compute_nodes")):
+def is_virtual_worker(ihost):
+if not(os.path.isdir("/etc/sysinv/.virtual_worker_nodes")):
 return False
 try:
 ip = ihost['mgmt_ip']
-return os.path.isfile("/etc/sysinv/.virtual_compute_nodes/%s" % ip)
+return os.path.isfile("/etc/sysinv/.virtual_worker_nodes/%s" % ip)
 except AttributeError:
 return False

@@ -913,9 +913,9 @@ def get_minimum_platform_reserved_memory(ihost, numa_node):
 reserved = 0
 if numa_node is None:
 return reserved
-if is_virtual() or is_virtual_compute(ihost):
+if is_virtual() or is_virtual_worker(ihost):
 # minimal memory requirements for VirtualBox
-if host_has_function(ihost, constants.COMPUTE):
+if host_has_function(ihost, constants.WORKER):
 if numa_node == 0:
 reserved += 1200
 if host_has_function(ihost, constants.CONTROLLER):
@@ -923,7 +923,7 @@ def get_minimum_platform_reserved_memory(ihost, numa_node):
 else:
 reserved += 500
 else:
-if host_has_function(ihost, constants.COMPUTE):
+if host_has_function(ihost, constants.WORKER):
 # Engineer 2G per numa node for disk IO RSS overhead
 reserved += constants.DISK_IO_RESIDENT_SET_SIZE_MIB
 return reserved
@@ -939,10 +939,10 @@ def get_required_platform_reserved_memory(ihost, numa_node, low_core=False):
 required_reserved = 0
 if numa_node is None:
 return required_reserved
-if is_virtual() or is_virtual_compute(ihost):
+if is_virtual() or is_virtual_worker(ihost):
 # minimal memory requirements for VirtualBox
 required_reserved += constants.DISK_IO_RESIDENT_SET_SIZE_MIB_VBOX
-if host_has_function(ihost, constants.COMPUTE):
+if host_has_function(ihost, constants.WORKER):
 if numa_node == 0:
 required_reserved += \
 constants.PLATFORM_CORE_MEMORY_RESERVED_MIB_VBOX
@@ -957,11 +957,11 @@ def get_required_platform_reserved_memory(ihost, numa_node, low_core=False):
 required_reserved += \
 constants.DISK_IO_RESIDENT_SET_SIZE_MIB_VBOX
 else:
-if host_has_function(ihost, constants.COMPUTE):
+if host_has_function(ihost, constants.WORKER):
 # Engineer 2G per numa node for disk IO RSS overhead
 required_reserved += constants.DISK_IO_RESIDENT_SET_SIZE_MIB
 if numa_node == 0:
-# Engineer 2G for compute to give some headroom;
+# Engineer 2G for worker to give some headroom;
 # typically requires 650 MB PSS
 required_reserved += \
 constants.PLATFORM_CORE_MEMORY_RESERVED_MIB
@@ -1005,7 +1005,7 @@ def get_primary_network_type(interface):
 have 1 primary network type. The additional network type can only be
 'data' and is used as a placeholder to indicate that there is at least one
 VLAN based neutron provider network associated to the interface. This
-information is used to determine whether the vswitch on the compute needs
+information is used to determine whether the vswitch on the worker needs
 to control the interface or not. This function examines the list of
 network types, discards the secondary type (if any) and returns the primary
 network type.
@@ -1215,7 +1215,7 @@ def get_personalities(host_obj):

 def is_cpe(host_obj):
 return (host_has_function(host_obj, constants.CONTROLLER) and
-host_has_function(host_obj, constants.COMPUTE))
+host_has_function(host_obj, constants.WORKER))


 def output_to_dict(output):
@@ -584,7 +584,7 @@ class AppOperator(object):
 # Get controller host(s)
 controller_hosts =\
 self._dbapi.ihost_get_by_personality(constants.CONTROLLER)
-if constants.COMPUTE in controller_hosts[0].subfunctions:
+if constants.WORKER in controller_hosts[0].subfunctions:
 # AIO system
 labels = controller_labels_set.union(compute_labels_set)
 if op == constants.LABEL_ASSIGN_OP:
@@ -594,7 +594,7 @@ class AppOperator(object):
 else:
 # Standard system
 compute_hosts =\
-self._dbapi.ihost_get_by_personality(constants.COMPUTE)
+self._dbapi.ihost_get_by_personality(constants.WORKER)
 if op == constants.LABEL_ASSIGN_OP:
 self._assign_host_labels(controller_hosts, controller_labels_set)
 self._assign_host_labels(compute_hosts, compute_labels_set)
@@ -347,13 +347,13 @@ class ConductorManager(service.PeriodicService):

 # At this point we are swacting to controller-0 which has just been
 # downgraded.
-# Before downgrading controller-0 all storage/compute nodes were locked
+# Before downgrading controller-0 all storage/worker nodes were locked
 # The database of the from_load is not aware of this, so we set the
 # state in the database to match the state of the system. This does not
 # actually lock the nodes.
 hosts = self.dbapi.ihost_get_list()
 for host in hosts:
-if host.personality not in [constants.COMPUTE, constants.STORAGE]:
+if host.personality not in [constants.WORKER, constants.STORAGE]:
 continue
 self.dbapi.ihost_update(host.uuid, {
 'administrative': constants.ADMIN_LOCKED})
@@ -455,8 +455,8 @@ class ConductorManager(service.PeriodicService):
 },
 {'service': constants.SERVICE_TYPE_PLATFORM,
 'section': constants.SERVICE_PARAM_SECTION_PLATFORM_MAINTENANCE,
-'name': constants.SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT,
-'value': constants.SERVICE_PARAM_PLAT_MTCE_COMPUTE_BOOT_TIMEOUT_DEFAULT,
+'name': constants.SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT,
+'value': constants.SERVICE_PARAM_PLAT_MTCE_WORKER_BOOT_TIMEOUT_DEFAULT,
 },
 {'service': constants.SERVICE_TYPE_PLATFORM,
 'section': constants.SERVICE_PARAM_SECTION_PLATFORM_MAINTENANCE,
@@ -968,18 +968,18 @@ class ConductorManager(service.PeriodicService):
 sw_version = target_load.software_version

 if (host.personality == constants.CONTROLLER and
-constants.COMPUTE in tsc.subfunctions):
+constants.WORKER in tsc.subfunctions):
 if constants.LOWLATENCY in host.subfunctions:
 pxe_config = "pxe-smallsystem_lowlatency-install-%s" % sw_version
 else:
 pxe_config = "pxe-smallsystem-install-%s" % sw_version
 elif host.personality == constants.CONTROLLER:
 pxe_config = "pxe-controller-install-%s" % sw_version
-elif host.personality == constants.COMPUTE:
+elif host.personality == constants.WORKER:
 if constants.LOWLATENCY in host.subfunctions:
-pxe_config = "pxe-compute_lowlatency-install-%s" % sw_version
+pxe_config = "pxe-worker_lowlatency-install-%s" % sw_version
 else:
-pxe_config = "pxe-compute-install-%s" % sw_version
+pxe_config = "pxe-worker-install-%s" % sw_version
 elif host.personality == constants.STORAGE:
 pxe_config = "pxe-storage-install-%s" % sw_version

@@ -1419,13 +1419,13 @@ class ConductorManager(service.PeriodicService):
 % (host.hostname, ceph_mon_gib))
 self.dbapi.ceph_mon_create(values)

-def config_compute_for_ceph(self, context):
+def config_worker_for_ceph(self, context):
 """
-configure compute nodes for adding ceph
+configure worker nodes for adding ceph
 :param context:
 :return: none
 """
-personalities = [constants.COMPUTE]
+personalities = [constants.WORKER]
 config_uuid = self._config_update_hosts(context, personalities)
 config_dict = {
 "personalities": personalities,
@@ -1437,7 +1437,7 @@ class ConductorManager(service.PeriodicService):
 """Update the remotelogging configuration"""

 personalities = [constants.CONTROLLER,
-constants.COMPUTE,
+constants.WORKER,
 constants.STORAGE]
 config_uuid = self._config_update_hosts(context, personalities)

@@ -1449,7 +1449,7 @@ class ConductorManager(service.PeriodicService):
 self._config_apply_runtime_manifest(context, config_uuid, config_dict)

 config_dict = {
-"personalities": [constants.COMPUTE, constants.STORAGE],
+"personalities": [constants.WORKER, constants.STORAGE],
 "classes": ['platform::remotelogging::runtime'],
 }
 self._config_apply_runtime_manifest(context, config_uuid, config_dict)
@@ -1457,8 +1457,8 @@ class ConductorManager(service.PeriodicService):
 def get_magnum_cluster_count(self, context):
 return self._openstack.get_magnum_cluster_count()

-def _configure_compute_host(self, context, host):
-"""Configure a compute host with the supplied data.
+def _configure_worker_host(self, context, host):
+"""Configure a worker host with the supplied data.

 Does the following tasks:
 - Create or update entries in address table
@@ -1472,7 +1472,7 @@ class ConductorManager(service.PeriodicService):
 # Only update the config if the host is running the same version as
 # the active controller.
 if self.host_load_matches_sw_version(host):
-# Only generate the config files if the compute host is unlocked.
+# Only generate the config files if the worker host is unlocked.
 if (host.administrative == constants.ADMIN_UNLOCKED or
 host.action == constants.FORCE_UNLOCK_ACTION or
 host.action == constants.UNLOCK_ACTION):
@@ -1574,8 +1574,8 @@ class ConductorManager(service.PeriodicService):
 elif host.hostname == constants.CONTROLLER_1_HOSTNAME:
 self.controller_1_posted = False

-def _unconfigure_compute_host(self, host, is_cpe=False):
-"""Unconfigure a compute host.
+def _unconfigure_worker_host(self, host, is_cpe=False):
+"""Unconfigure a worker host.

 Does the following tasks:
 - Remove the puppet hiera data configuration for host
@@ -1605,12 +1605,12 @@ class ConductorManager(service.PeriodicService):
 self._remove_pxe_config(host)

 def configure_ihost(self, context, host,
-do_compute_apply=False):
+do_worker_apply=False):
 """Configure a host.

 :param context: an admin context.
 :param host: a host object.
-:param do_compute_apply: configure the compute subfunctions of the host.
+:param do_worker_apply: configure the worker subfunctions of the host.
 """

 LOG.debug("configure_ihost %s" % host.hostname)
@@ -1623,8 +1623,8 @@ class ConductorManager(service.PeriodicService):

 if host.personality == constants.CONTROLLER:
 self._configure_controller_host(context, host)
-elif host.personality == constants.COMPUTE:
-self._configure_compute_host(context, host)
+elif host.personality == constants.WORKER:
+self._configure_worker_host(context, host)
 elif host.personality == constants.STORAGE:
 self._configure_storage_host(context, host)
 else:
@@ -1632,10 +1632,10 @@ class ConductorManager(service.PeriodicService):
 "Invalid method call: unsupported personality: %s") %
 host.personality)

-if do_compute_apply:
+if do_worker_apply:
 # Apply the manifests immediately
 puppet_common.puppet_apply_manifest(host.mgmt_ip,
-constants.COMPUTE,
+constants.WORKER,
 do_reboot=True)

 return host
@@ -1659,8 +1659,8 @@ class ConductorManager(service.PeriodicService):
 for personality in personalities:
 if personality == constants.CONTROLLER:
 self._unconfigure_controller_host(ihost_obj)
-elif personality == constants.COMPUTE:
-self._unconfigure_compute_host(ihost_obj, is_cpe)
+elif personality == constants.WORKER:
+self._unconfigure_worker_host(ihost_obj, is_cpe)
 elif personality == constants.STORAGE:
 self._unconfigure_storage_host(ihost_obj)
 else:
@@ -2493,7 +2493,7 @@ class ConductorManager(service.PeriodicService):
 """Return the initial number of reserved logical cores for platform
 use. This can be overridden later by the end user."""
 cpus = 0
-if cutils.host_has_function(ihost, constants.COMPUTE) and node == 0:
+if cutils.host_has_function(ihost, constants.WORKER) and node == 0:
 cpus += 1 if not hyperthreading else 2
 if cutils.host_has_function(ihost, constants.CONTROLLER):
 cpus += 1 if not hyperthreading else 2
@@ -2503,7 +2503,7 @@ class ConductorManager(service.PeriodicService):
 cpu_count, hyperthreading):
 """Return the initial number of reserved logical cores for vswitch
 use. This can be overridden later by the end user."""
-if cutils.host_has_function(ihost, constants.COMPUTE) and node == 0:
+if cutils.host_has_function(ihost, constants.WORKER) and node == 0:
 physical_cores = (cpu_count / 2) if hyperthreading else cpu_count
 system_mode = self.dbapi.isystem_get_one().system_mode
 if system_mode == constants.SYSTEM_MODE_SIMPLEX:
@@ -2999,7 +2999,7 @@ class ConductorManager(service.PeriodicService):
 # a physical volume in the nova-local volume group
 cinder_device = None
 if (cutils.host_has_function(ihost, constants.CONTROLLER) and
-cutils.host_has_function(ihost, constants.COMPUTE)):
+cutils.host_has_function(ihost, constants.WORKER)):

 if lvm_config:
 cinder_device = cutils._get_cinder_device(self.dbapi,
@@ -4228,11 +4228,11 @@ class ConductorManager(service.PeriodicService):

 kubernetes_config = utils.is_kubernetes_config(self.dbapi)

-if (cutils.host_has_function(ihost, constants.COMPUTE) and not
+if (cutils.host_has_function(ihost, constants.WORKER) and not
 kubernetes_config):
 if availability == constants.VIM_SERVICES_ENABLED:
 # report to nova the host aggregate groupings now that
-# the compute node is available
+# the worker node is available
 LOG.info("AGG iplatform available for ihost= %s imsg= %s" %
 (ihost_uuid, imsg_dict))
 # AGG10 noted 13secs in vbox between nova manifests applied and
@@ -4361,7 +4361,7 @@ class ConductorManager(service.PeriodicService):
 # Create the host entry in neutron to allow for data interfaces to
 # be configured on a combined node
 if (constants.CONTROLLER in subfunctions and
-constants.COMPUTE in subfunctions):
+constants.WORKER in subfunctions):
 try:
 ihost = self.dbapi.ihost_get(ihost_uuid)
 except exception.ServerNotFound:
@@ -4642,7 +4642,7 @@ class ConductorManager(service.PeriodicService):
 return

 if upgrade.state == constants.UPGRADE_ACTIVATING:
-personalities = [constants.CONTROLLER, constants.COMPUTE]
+personalities = [constants.CONTROLLER, constants.WORKER]

 all_manifests_applied = True
 hosts = self.dbapi.ihost_get_list()
@@ -4671,7 +4671,7 @@ class ConductorManager(service.PeriodicService):
 # In CPE upgrades, after swacting to controller-1, we need to clear
 # the VIM upgrade flag on Controller-0 to allow VMs to be migrated
 # to controller-1.
-if constants.COMPUTE in tsc.subfunctions:
+if constants.WORKER in tsc.subfunctions:
 try:
 controller_0 = self.dbapi.ihost_get_by_hostname(
 constants.CONTROLLER_0_HOSTNAME)
@@ -5285,7 +5285,7 @@ class ConductorManager(service.PeriodicService):
 """Update the NTP configuration"""
 if service_change:
 personalities = [constants.CONTROLLER,
-constants.COMPUTE,
+constants.WORKER,
 constants.STORAGE]
 else:
 personalities = [constants.CONTROLLER]
@@ -5294,7 +5294,7 @@ class ConductorManager(service.PeriodicService):
 def update_ptp_config(self, context):
 """Update the PTP configuration"""
 personalities = [constants.CONTROLLER,
-constants.COMPUTE,
+constants.WORKER,
 constants.STORAGE]
 self._config_update_hosts(context, personalities)

@@ -5310,7 +5310,7 @@ class ConductorManager(service.PeriodicService):
 """

 # update manifest files and notify agents to apply timezone files
-personalities = [constants.COMPUTE,
+personalities = [constants.WORKER,
 constants.STORAGE]
 config_uuid = self._config_update_hosts(context, personalities)

@@ -5338,7 +5338,7 @@ class ConductorManager(service.PeriodicService):

 # update manifest files and notifiy agents to apply them
 personalities = [constants.CONTROLLER,
-constants.COMPUTE,
+constants.WORKER,
 constants.STORAGE]
 config_uuid = self._config_update_hosts(context, personalities)

@@ -5381,7 +5381,7 @@ class ConductorManager(service.PeriodicService):

 self._config_update_hosts(context, [constants.CONTROLLER], reboot=True)

-config_uuid = self._config_update_hosts(context, [constants.COMPUTE],
+config_uuid = self._config_update_hosts(context, [constants.WORKER],
 reboot=False)

 extoam = self.dbapi.iextoam_get_one()
@@ -5389,9 +5389,9 @@ class ConductorManager(service.PeriodicService):
 self._update_hosts_file('oamcontroller', extoam.oam_floating_ip,
 active=False)

-# make changes to the computes
+# make changes to the workers
 config_dict = {
-"personalities": [constants.COMPUTE],
+"personalities": [constants.WORKER],
 "classes": ['openstack::nova::compute::runtime']
 }
 self._config_apply_runtime_manifest(context, config_uuid, config_dict)
@@ -5401,7 +5401,7 @@ class ConductorManager(service.PeriodicService):
 LOG.info("update_user_config")

 personalities = [constants.CONTROLLER,
-constants.COMPUTE,
+constants.WORKER,
 constants.STORAGE]
 config_uuid = self._config_update_hosts(context, personalities)

@@ -5723,7 +5723,7 @@ class ConductorManager(service.PeriodicService):
 def config_update_nova_local_backed_hosts(self, context, instance_backing):
 hosts_uuid = self.hosts_with_nova_local(instance_backing)
 if hosts_uuid:
-personalities = [constants.CONTROLLER, constants.COMPUTE]
+personalities = [constants.CONTROLLER, constants.WORKER]
 self._config_update_hosts(context,
 personalities,
 host_uuids=hosts_uuid,
@@ -5734,8 +5734,8 @@ class ConductorManager(service.PeriodicService):
 hosts_uuid = []
 hosts = self.dbapi.ihost_get_list()
 for host in hosts:
-if ((host.personality and host.personality == constants.COMPUTE) or
-(host.subfunctions and constants.COMPUTE in host.subfunctions)):
+if ((host.personality and host.personality == constants.WORKER) or
+(host.subfunctions and constants.WORKER in host.subfunctions)):
 ilvgs = self.dbapi.ilvg_get_by_ihost(host['uuid'])
 for lvg in ilvgs:
 if (lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL and
@@ -6256,8 +6256,8 @@ class ConductorManager(service.PeriodicService):
 self.dbapi, target=constants.SB_TYPE_CEPH_EXTERNAL)

 if ceph_conf:
-# For NOVA, if nova.conf needs to be updated on compute nodes, the
-# task should be set to what? constants.SB_TASK_RECONFIG_COMPUTE?
+# For NOVA, if nova.conf needs to be updated on worker nodes, the
+# task should be set to what? constants.SB_TASK_RECONFIG_WORKER?

 config_done = True
 active_controller = utils.HostHelper.get_active_controller(self.dbapi)
@@ -6846,7 +6846,7 @@ class ConductorManager(service.PeriodicService):
 LOG.info("update_infra_config")

 personalities = [constants.CONTROLLER,
-constants.COMPUTE,
+constants.WORKER,
 constants.STORAGE]

 config_uuid = self._config_update_hosts(context, personalities,
@@ -6885,9 +6885,9 @@ class ConductorManager(service.PeriodicService):

 self._config_apply_runtime_manifest(context, config_uuid, config_dict)

-if constants.COMPUTE in host.subfunctions:
+if constants.WORKER in host.subfunctions:
 config_dict = {
-'personalities': [constants.COMPUTE],
+'personalities': [constants.WORKER],
 'host_uuids': host.uuid,
 'classes': ['openstack::nova::compute::runtime']
 }
@@ -6911,8 +6911,8 @@ class ConductorManager(service.PeriodicService):
 config_uuid = self._config_update_hosts(context, personalities,
 reboot=True)
 else:
-# compute hosts must be rebooted following service reconfig
-self._config_update_hosts(context, [constants.COMPUTE],
+# worker hosts must be rebooted following service reconfig
+self._config_update_hosts(context, [constants.WORKER],
 reboot=True)
 # controller hosts will actively apply the manifests
 config_uuid = self._config_update_hosts(context,
@@ -6933,7 +6933,7 @@ class ConductorManager(service.PeriodicService):
 elif service == constants.SERVICE_TYPE_NOVA:
 config_uuid = self._config_update_hosts(context,
 [constants.CONTROLLER,
-constants.COMPUTE])
+constants.WORKER])
 else:
 # All other services
 personalities = [constants.CONTROLLER]
@ -6990,7 +6990,7 @@ class ConductorManager(service.PeriodicService):
|
|||||||
multipath_state_changed = self._multipath_update_state()
|
multipath_state_changed = self._multipath_update_state()
|
||||||
if multipath_state_changed:
|
if multipath_state_changed:
|
||||||
self._config_update_hosts(context,
|
self._config_update_hosts(context,
|
||||||
[constants.CONTROLLER, constants.COMPUTE],
|
[constants.CONTROLLER, constants.WORKER],
|
||||||
reboot=True)
|
reboot=True)
|
||||||
|
|
||||||
elif service == constants.SERVICE_TYPE_PLATFORM:
|
elif service == constants.SERVICE_TYPE_PLATFORM:
|
||||||
@ -7009,7 +7009,7 @@ class ConductorManager(service.PeriodicService):
|
|||||||
}
|
}
|
||||||
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
|
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
|
||||||
|
|
||||||
personalities = [constants.COMPUTE]
|
personalities = [constants.WORKER]
|
||||||
config_uuid = self._config_update_hosts(context, personalities)
|
config_uuid = self._config_update_hosts(context, personalities)
|
||||||
config_dict = {
|
config_dict = {
|
||||||
"personalities": personalities,
|
"personalities": personalities,
|
||||||
@ -7192,7 +7192,7 @@ class ConductorManager(service.PeriodicService):
|
|||||||
|
|
||||||
# Apply Neutron manifest on Controller(this
|
# Apply Neutron manifest on Controller(this
|
||||||
# will update the SNAT rules for the SDN controllers)
|
# will update the SNAT rules for the SDN controllers)
|
||||||
self._config_update_hosts(context, [constants.COMPUTE], reboot=True)
|
self._config_update_hosts(context, [constants.WORKER], reboot=True)
|
||||||
|
|
||||||
config_uuid = self._config_update_hosts(context,
|
config_uuid = self._config_update_hosts(context,
|
||||||
[constants.CONTROLLER])
|
[constants.CONTROLLER])
|
||||||
@ -7218,7 +7218,7 @@ class ConductorManager(service.PeriodicService):
|
|||||||
config_uuid = self._config_update_hosts(context, personalities)
|
config_uuid = self._config_update_hosts(context, personalities)
|
||||||
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
|
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
|
||||||
|
|
||||||
personalities = [constants.COMPUTE]
|
personalities = [constants.WORKER]
|
||||||
self._config_update_hosts(context, personalities, reboot=True)
|
self._config_update_hosts(context, personalities, reboot=True)
|
||||||
|
|
||||||
def update_vswitch_type(self, context):
|
def update_vswitch_type(self, context):
|
||||||
@ -7241,7 +7241,7 @@ class ConductorManager(service.PeriodicService):
|
|||||||
if tsc.system_type == constants.TIS_AIO_BUILD:
|
if tsc.system_type == constants.TIS_AIO_BUILD:
|
||||||
personalities = [constants.CONTROLLER]
|
personalities = [constants.CONTROLLER]
|
||||||
else:
|
else:
|
||||||
personalities = [constants.COMPUTE]
|
personalities = [constants.WORKER]
|
||||||
|
|
||||||
self._config_update_hosts(context, personalities, reboot=True)
|
self._config_update_hosts(context, personalities, reboot=True)
|
||||||
|
|
||||||
@ -7276,13 +7276,13 @@ class ConductorManager(service.PeriodicService):
|
|||||||
def update_cpu_config(self, context, host_uuid):
|
def update_cpu_config(self, context, host_uuid):
|
||||||
"""Update the cpu assignment configuration on a host"""
|
"""Update the cpu assignment configuration on a host"""
|
||||||
|
|
||||||
# only apply the manifest on the host that has compute sub function
|
# only apply the manifest on the host that has worker sub function
|
||||||
host = self.dbapi.ihost_get(host_uuid)
|
host = self.dbapi.ihost_get(host_uuid)
|
||||||
if constants.COMPUTE in host.subfunctions:
|
if constants.WORKER in host.subfunctions:
|
||||||
force = (not utils.is_host_simplex_controller(host))
|
force = (not utils.is_host_simplex_controller(host))
|
||||||
LOG.info("update_cpu_config, host uuid: (%s), force: (%s)",
|
LOG.info("update_cpu_config, host uuid: (%s), force: (%s)",
|
||||||
host_uuid, str(force))
|
host_uuid, str(force))
|
||||||
personalities = [constants.CONTROLLER, constants.COMPUTE]
|
personalities = [constants.CONTROLLER, constants.WORKER]
|
||||||
config_uuid = self._config_update_hosts(context,
|
config_uuid = self._config_update_hosts(context,
|
||||||
personalities,
|
personalities,
|
||||||
host_uuids=[host_uuid])
|
host_uuids=[host_uuid])
|
||||||
@ -7992,7 +7992,7 @@ class ConductorManager(service.PeriodicService):
|
|||||||
# We will allow controller nodes to re-generate manifests
|
# We will allow controller nodes to re-generate manifests
|
||||||
# when in an "provisioning" state. This will allow for
|
# when in an "provisioning" state. This will allow for
|
||||||
# example the ntp configuration to be changed on an CPE
|
# example the ntp configuration to be changed on an CPE
|
||||||
# node before the "compute_config_complete" has been
|
# node before the "worker_config_complete" has been
|
||||||
# executed.
|
# executed.
|
||||||
if (force or
|
if (force or
|
||||||
host.invprovision == constants.PROVISIONED or
|
host.invprovision == constants.PROVISIONED or
|
||||||
@ -8872,7 +8872,7 @@ class ConductorManager(service.PeriodicService):
|
|||||||
to_load = self.dbapi.load_get(upgrade.to_load)
|
to_load = self.dbapi.load_get(upgrade.to_load)
|
||||||
to_version = to_load.software_version
|
to_version = to_load.software_version
|
||||||
|
|
||||||
personalities = [constants.CONTROLLER, constants.COMPUTE]
|
personalities = [constants.CONTROLLER, constants.WORKER]
|
||||||
config_uuid = self._config_update_hosts(context, personalities)
|
config_uuid = self._config_update_hosts(context, personalities)
|
||||||
|
|
||||||
self.dbapi.software_upgrade_update(
|
self.dbapi.software_upgrade_update(
|
||||||
@ -8902,7 +8902,7 @@ class ConductorManager(service.PeriodicService):
|
|||||||
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
|
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
|
||||||
|
|
||||||
config_dict = {
|
config_dict = {
|
||||||
"personalities": [constants.COMPUTE],
|
"personalities": [constants.WORKER],
|
||||||
"classes": ['openstack::nova::compute::runtime']
|
"classes": ['openstack::nova::compute::runtime']
|
||||||
}
|
}
|
||||||
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
|
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
|
||||||
@ -9752,7 +9752,7 @@ class ConductorManager(service.PeriodicService):
|
|||||||
ceph_conf_file = os.path.join(constants.CEPH_CONF_PATH,
|
ceph_conf_file = os.path.join(constants.CEPH_CONF_PATH,
|
||||||
ceph_conf_filename)
|
ceph_conf_filename)
|
||||||
|
|
||||||
personalities = [constants.CONTROLLER, constants.COMPUTE]
|
personalities = [constants.CONTROLLER, constants.WORKER]
|
||||||
config_uuid = self._config_update_hosts(context, personalities)
|
config_uuid = self._config_update_hosts(context, personalities)
|
||||||
config_dict = {
|
config_dict = {
|
||||||
'personalities': personalities,
|
'personalities': personalities,
|
||||||
@ -9900,7 +9900,7 @@ class ConductorManager(service.PeriodicService):
|
|||||||
# Should only be applicable to the single controller that is up
|
# Should only be applicable to the single controller that is up
|
||||||
# when the dc role is configured, but add personalities anyway.
|
# when the dc role is configured, but add personalities anyway.
|
||||||
personalities = [constants.CONTROLLER,
|
personalities = [constants.CONTROLLER,
|
||||||
constants.COMPUTE,
|
constants.WORKER,
|
||||||
constants.STORAGE]
|
constants.STORAGE]
|
||||||
config_uuid = self._config_update_hosts(context, personalities)
|
config_uuid = self._config_update_hosts(context, personalities)
|
||||||
|
|
||||||
|
@@ -464,7 +464,7 @@ class OpenStackOperator(object):
#
# can query it from do_aggregate_list
# ('Name', 'Availability Zone'); anyways it doesnt
- # allow duplicates on Name. can be done prior to compute nodes?
+ # allow duplicates on Name. can be done prior to worker nodes?
#
# # On unlock, check whether exists: metadata is a key/value pair
# 2. nova aggregate-set-metadata provider_physnet0 \

@@ -100,7 +100,7 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
ihost_obj=ihost_obj))

def configure_ihost(self, context, host,
- do_compute_apply=False):
+ do_worker_apply=False):
"""Synchronously, have a conductor configure an ihost.

Does the following tasks:

@@ -110,12 +110,12 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):

:param context: request context.
:param host: an ihost object.
- :param do_compute_apply: apply the newly created compute manifests.
+ :param do_worker_apply: apply the newly created worker manifests.
"""
return self.call(context,
self.make_msg('configure_ihost',
host=host,
- do_compute_apply=do_compute_apply))
+ do_worker_apply=do_worker_apply))

# TODO(CephPoolsDecouple): remove
def configure_osd_pools(self, context, ceph_backend=None, new_pool_size=None, new_pool_min_size=None):

@@ -788,13 +788,13 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
"""
return self.call(context, self.make_msg('update_lvm_config'))

- def config_compute_for_ceph(self, context):
- """Synchronously, have the conductor update the compute configuration
+ def config_worker_for_ceph(self, context):
+ """Synchronously, have the conductor update the worker configuration
for adding ceph.

:param context: request context.
"""
- return self.call(context, self.make_msg('config_compute_for_ceph'))
+ return self.call(context, self.make_msg('config_worker_for_ceph'))

def update_drbd_config(self, context):
"""Synchronously, have the conductor update the drbd configuration.

@@ -876,7 +876,7 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
services=services))

def config_update_nova_local_backed_hosts(self, context, instance_backing):
- """Synchronously, have the conductor set the hosts with compute
+ """Synchronously, have the conductor set the hosts with worker
functionality and with a certain nova-local instance backing to
config out-of-date.
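The ConductorAPI hunks above only rename an RPC keyword argument (do_compute_apply to do_worker_apply) and one proxy method (config_compute_for_ceph to config_worker_for_ceph); the call pattern itself is unchanged: each public method packs its arguments with make_msg() and sends them to the conductor with call(). The following is an illustrative, self-contained sketch of that proxy pattern, not the real sysinv class; the toy call() simply echoes the message instead of dispatching it over RPC.

class ToyConductorProxy(object):
    """Illustrative stand-in for the renamed RPC proxy methods."""

    def make_msg(self, method, **kwargs):
        # The real proxy serializes this for the RPC transport.
        return {'method': method, 'args': kwargs}

    def call(self, context, msg):
        # The real proxy blocks until the conductor answers; here we just echo.
        return msg

    def configure_ihost(self, context, host, do_worker_apply=False):
        return self.call(context,
                         self.make_msg('configure_ihost',
                                       host=host,
                                       do_worker_apply=do_worker_apply))

    def config_worker_for_ceph(self, context):
        return self.call(context, self.make_msg('config_worker_for_ceph'))


if __name__ == '__main__':
    proxy = ToyConductorProxy()
    print(proxy.configure_ihost(context=None, host='worker-0',
                                do_worker_apply=True))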
@@ -188,7 +188,7 @@ class Connection(object):
sort_key=None, sort_dir=None):
"""Return a list of servers by personality.
:param personality: The personality of the server
- e.g. controller or compute
+ e.g. controller or worker
returns: A server
"""

@@ -47,7 +47,7 @@ def upgrade(migrate_engine):
name='recordtypeEnum')

personalityEnum = Enum('controller',
- 'compute',
+ 'worker',
'network',
'storage',
'profile',

@@ -13,7 +13,7 @@ from sysinv.common import constants

def _populate_system_type(system_table):

- if constants.COMPUTE in tsconfig.subfunctions:
+ if constants.WORKER in tsconfig.subfunctions:
s_type = constants.TIS_AIO_BUILD
else:
s_type = constants.TIS_STD_BUILD

@@ -126,7 +126,7 @@ class ihost(Base):
name='invprovisionStateEnum')

invPersonalityEnum = Enum('controller',
- 'compute',
+ 'worker',
'network',
'storage',
'profile',

@@ -153,7 +153,7 @@ class NeutronHelm(openstack.OpenstackBaseHelm):

for host in hosts:
if (host.invprovision == constants.PROVISIONED):
- if constants.COMPUTE in utils.get_personalities(host):
+ if constants.WORKER in utils.get_personalities(host):

hostname = str(host.hostname)
host_neutron = {

@@ -269,7 +269,7 @@ class NovaHelm(openstack.OpenstackBaseHelm):
host_cpus = self._get_host_cpu_list(host, threads=True)
if host_cpus:
vm_cpus = self._get_host_cpu_list(
- host, function=constants.VM_FUNCTION, threads=True)
+ host, function=constants.APPLICATION_FUNCTION, threads=True)
vm_cpu_list = [c.cpu for c in vm_cpus]
vm_cpu_fmt = "\"%s\"" % utils.format_range_set(vm_cpu_list)
default_config.update({'vcpu_pin_set': vm_cpu_fmt})

@@ -399,7 +399,7 @@ class NovaHelm(openstack.OpenstackBaseHelm):

for host in hosts:
if (host.invprovision == constants.PROVISIONED):
- if constants.COMPUTE in utils.get_personalities(host):
+ if constants.WORKER in utils.get_personalities(host):

hostname = str(host.hostname)
default_config = {}

@@ -170,9 +170,9 @@ class CephPuppet(openstack.OpenstackBasePuppet):
config.update(self._get_ceph_mon_config(host))
config.update(self._get_ceph_osd_config(host))

- # if it is a compute node and on an secondary region,
+ # if it is a worker node and on an secondary region,
# check if ceph mon configuration is required
- if constants.COMPUTE in host.subfunctions and self._region_config():
+ if constants.WORKER in host.subfunctions and self._region_config():
from sysinv.conductor import openstack
op = openstack.OpenStackOperator(self.dbapi)
if self._is_ceph_mon_required(host, op):

@@ -57,7 +57,7 @@ class DevicePuppet(base.BasePuppet):
}

def get_host_config(self, host):
- if constants.COMPUTE not in host.subfunctions:
+ if constants.WORKER not in host.subfunctions:
# configuration only required for compute hosts
return {}

@@ -270,7 +270,7 @@ class InterfacePuppet(base.BasePuppet):
# deal with this in a later commit.
pnets = {}
if (self.openstack and
- constants.COMPUTE in utils.get_personalities(host)):
+ constants.WORKER in utils.get_personalities(host)):
pnets = self.openstack.get_providernetworksdict(quiet=True)
return pnets
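The two database hunks above swap 'compute' for 'worker' inside SQLAlchemy Enum definitions (personalityEnum in the migration, invPersonalityEnum in the ihost model). The sketch below shows how such an Enum column is declared with SQLAlchemy; the table and column names are illustrative only, not the actual sysinv schema.

# Hedged sketch: declares a personality Enum column similar to the ones in the
# migration and model hunks; 'example_hosts' is a made-up table name.
from sqlalchemy import Column, Enum, Integer, MetaData, String, Table

metadata = MetaData()

personality_enum = Enum('controller', 'worker', 'network', 'storage',
                        'profile', name='invPersonalityEnum')

example_hosts = Table(
    'example_hosts', metadata,
    Column('id', Integer, primary_key=True),
    Column('hostname', String(255)),
    Column('personality', personality_enum),
)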
@@ -286,19 +286,19 @@ def is_data_network_type(iface):
def is_controller(context):
"""
Determine we are creating a manifest for a controller node; regardless of
- whether it has a compute subfunction or not.
+ whether it has a worker subfunction or not.
"""
return bool(context['personality'] == constants.CONTROLLER)


- def is_compute_subfunction(context):
+ def is_worker_subfunction(context):
"""
- Determine if we are creating a manifest for a compute node or a compute
+ Determine if we are creating a manifest for a worker node or a worker
subfunction.
"""
- if context['personality'] == constants.COMPUTE:
+ if context['personality'] == constants.WORKER:
return True
- if constants.COMPUTE in context['subfunctions']:
+ if constants.WORKER in context['subfunctions']:
return True
return False

@@ -662,7 +662,7 @@ def needs_interface_config(context, iface):
"""
if is_platform_interface(context, iface):
return True
- elif not is_compute_subfunction(context):
+ elif not is_worker_subfunction(context):
return False
elif is_data_interface(context, iface):
if not is_dpdk_compatible(context, iface):

@@ -1141,7 +1141,7 @@ def generate_driver_config(context, config):
"""
Generate custom configuration for driver specific parameters.
"""
- if is_compute_subfunction(context):
+ if is_worker_subfunction(context):
generate_mlx4_core_options(context, config)
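The renamed helper is_worker_subfunction() is a plain predicate over the manifest-generation context: a host qualifies if its personality is worker, or if worker appears in its subfunctions (the AIO controller case). A standalone sketch, with the constant values assumed here rather than imported from sysinv.common.constants:

# Assumed literal values for the constants; the real ones live in
# sysinv.common.constants.
WORKER = 'worker'
CONTROLLER = 'controller'


def is_worker_subfunction(context):
    """Return True for a worker host, or a host carrying the worker subfunction."""
    if context['personality'] == WORKER:
        return True
    if WORKER in context['subfunctions']:
        return True
    return False


print(is_worker_subfunction({'personality': WORKER,
                             'subfunctions': [WORKER]}))                 # True
print(is_worker_subfunction({'personality': CONTROLLER,
                             'subfunctions': [CONTROLLER, WORKER]}))     # True (AIO)
print(is_worker_subfunction({'personality': CONTROLLER,
                             'subfunctions': [CONTROLLER]}))             # False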
@@ -65,7 +65,7 @@ class KubernetesPuppet(base.BasePuppet):

def get_host_config(self, host):
config = {}
- if host.personality != constants.COMPUTE:
+ if host.personality != constants.WORKER:
return config

if self._kubernetes_enabled():

@@ -64,7 +64,7 @@ class LdapPuppet(base.BasePuppet):
bind_anonymous = True

if host.personality != constants.CONTROLLER:
- # if storage/compute, use bind anonymously
+ # if storage/worker, use bind anonymously
bind_anonymous = True
return {
'platform::ldap::params::ldapserver_remote': ldapserver_remote,

@@ -157,7 +157,7 @@ class NeutronPuppet(openstack.OpenstackBasePuppet):

def get_host_config(self, host):
if (constants.CONTROLLER not in utils.get_personalities(host) and
- constants.COMPUTE not in utils.get_personalities(host)):
+ constants.WORKER not in utils.get_personalities(host)):
return {}

device_mappings = []

@@ -114,7 +114,7 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
raise exception.SysinvException('Failed to generate nova rsa key')

# Generate an ecdsa key for the system, which will be used on all
- # controller/compute nodes. When external ssh connections to the
+ # controller/worker nodes. When external ssh connections to the
# controllers are made, this key will be stored in the known_hosts file
# and allow connections after the controller swacts. The ecdsa key
# has precedence over the rsa key, which is why we use ecdsa.

@@ -340,7 +340,7 @@ class NovaPuppet(openstack.OpenstackBasePuppet):

def get_host_config(self, host):
config = {}
- if constants.COMPUTE in host.subfunctions:
+ if constants.WORKER in host.subfunctions:
# nova storage and compute configuration is required for hosts
# with a compute function only
config.update(self._get_compute_config(host))

@@ -569,7 +569,7 @@ class NovaPuppet(openstack.OpenstackBasePuppet):

def _get_vcpu_pin_set(self, host):
vm_cpus = self._get_host_cpu_list(
- host, function=constants.VM_FUNCTION, threads=True)
+ host, function=constants.APPLICATION_FUNCTION, threads=True)
cpu_list = [c.cpu for c in vm_cpus]
return "\"%s\"" % utils.format_range_set(cpu_list)

@@ -19,7 +19,7 @@ class OVSPuppet(base.BasePuppet):

def get_host_config(self, host):
config = {}
- if (constants.COMPUTE in utils.get_personalities(host) and
+ if (constants.WORKER in utils.get_personalities(host) and
self._vswitch_type() == constants.VSWITCH_TYPE_OVS_DPDK):
config.update(self._get_cpu_config(host))
config.update(self._get_memory_config(host))

@@ -346,7 +346,7 @@ class OVSPuppet(base.BasePuppet):

def _get_virtual_config(self, host):
config = {}
- if utils.is_virtual() or utils.is_virtual_compute(host):
+ if utils.is_virtual() or utils.is_virtual_worker(host):
config.update({
'platform::vswitch::params::iommu_enabled': False,
'platform::vswitch::params::hugepage_dir': '/mnt/huge-2048kB',

@@ -526,14 +526,14 @@ class PlatformPuppet(base.BasePuppet):

def _get_host_cpu_config(self, host):
config = {}
- if constants.COMPUTE in utils.get_personalities(host):
+ if constants.WORKER in utils.get_personalities(host):
host_cpus = self._get_host_cpu_list(host, threads=True)
if not host_cpus:
return config

# Define the full range of CPUs for the compute host
max_cpu = max(host_cpus, key=operator.attrgetter('cpu'))
- compute_cpu_list = "\"0-%d\"" % max_cpu.cpu
+ worker_cpu_list = "\"0-%d\"" % max_cpu.cpu

platform_cpus_no_threads = self._get_platform_cpu_list(host)
vswitch_cpus_no_threads = self._get_vswitch_cpu_list(host)

@@ -620,8 +620,8 @@ class PlatformPuppet(base.BasePuppet):
platform_cpu_list,
platform_cpu_list)
config.update({
- 'platform::compute::params::compute_cpu_list':
- compute_cpu_list,
+ 'platform::compute::params::worker_cpu_list':
+ worker_cpu_list,
'platform::compute::params::platform_cpu_list':
platform_cpu_list_with_quotes,
'platform::compute::params::reserved_vswitch_cores':

@@ -635,7 +635,7 @@ class PlatformPuppet(base.BasePuppet):

def _get_host_memory_config(self, host):
config = {}
- if constants.COMPUTE in utils.get_personalities(host):
+ if constants.WORKER in utils.get_personalities(host):
host_memory = self.dbapi.imemory_get_by_ihost(host.id)
memory_numa_list = utils.get_numa_index_list(host_memory)

@@ -716,7 +716,7 @@ class PlatformPuppet(base.BasePuppet):
vm_1G = "\"%s\"" % ','.join([str(i) for i in vm_1G_pages])

config.update({
- 'platform::compute::params::compute_base_reserved':
+ 'platform::compute::params::worker_base_reserved':
platform_reserved_memory,
'platform::compute::params::compute_vswitch_reserved':
vswitch_reserved_memory,
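The NovaPuppet and PlatformPuppet hunks above keep producing the same kind of quoted CPU-range strings (worker_cpu_list as "0-N", vcpu_pin_set via utils.format_range_set); only the hiera keys and local names change. The sketch below uses a simplified stand-in for format_range_set, purely to illustrate the strings involved; it is not the sysinv implementation, and the CPU numbers are made up.

def format_range_set(cpus):
    """Collapse a set of ints into a sorted 'a-b,c' range string (simplified)."""
    ranges = []
    for cpu in sorted(set(cpus)):
        if ranges and cpu == ranges[-1][1] + 1:
            ranges[-1][1] = cpu          # extend the current run
        else:
            ranges.append([cpu, cpu])    # start a new run
    return ','.join('%d' % lo if lo == hi else '%d-%d' % (lo, hi)
                    for lo, hi in ranges)


application_cpus = [4, 5, 6, 7, 12, 13, 14, 15]   # hypothetical Application-function CPUs
print('vcpu_pin_set = "%s"' % format_range_set(application_cpus))   # "4-7,12-15"
print('worker_cpu_list = "0-%d"' % max(application_cpus))           # "0-15"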
@@ -205,7 +205,7 @@ class StoragePuppet(base.BasePuppet):
# LVM Global Filter is driven by:
# - cgts-vg PVs : controllers and all storage
# - cinder-volumes PVs: controllers
- # - nova-local PVs : controllers and all computes
+ # - nova-local PVs : controllers and all workers

# Go through the PVs and
pvs = self.dbapi.ipv_get_by_ihost(host.id)
@@ -258,7 +258,7 @@ class InterfaceTestCase(base.FunctionalTest):
if personality == constants.CONTROLLER:
self.controller = host
else:
- self.compute = host
+ self.worker = host
return

def _create_ethernet(self, ifname=None, networktype=None, ifclass=None,

@@ -377,10 +377,10 @@ class InterfaceTestCase(base.FunctionalTest):
self.profile['interfaces'].append(interface)
return interface

- def _create_compute_bond(self, ifname, networktype=None, ifclass=None,
+ def _create_worker_bond(self, ifname, networktype=None, ifclass=None,
providernetworks=None, expect_errors=False):
return self._create_bond(ifname, networktype, ifclass, providernetworks,
- self.compute, expect_errors)
+ self.worker, expect_errors)

def _create_vlan(self, ifname, networktype, ifclass, vlan_id,
lower_iface=None, providernetworks=None, host=None,

@@ -424,12 +424,12 @@ class InterfaceTestCase(base.FunctionalTest):
self.profile['interfaces'].append(interface)
return interface

- def _create_compute_vlan(self, ifname, networktype, ifclass, vlan_id,
+ def _create_worker_vlan(self, ifname, networktype, ifclass, vlan_id,
lower_iface=None, providernetworks=None,
host=None, expect_errors=False):
return self._create_vlan(ifname, networktype, ifclass, vlan_id,
lower_iface,
- providernetworks, self.compute, expect_errors)
+ providernetworks, self.worker, expect_errors)

def _post_and_check_success(self, ndict):
response = self.post_json('%s' % self._get_path(), ndict)

@@ -491,7 +491,7 @@ class InterfaceTestCase(base.FunctionalTest):
'interface_networks': []}
self.system = None
self.controller = None
- self.compute = None
+ self.worker = None
self._setup_configuration()

def test_interface(self):

@@ -583,56 +583,56 @@ class InterfaceComputeEthernet(InterfaceTestCase):

def _setup_configuration(self):
# Setup a sample configuration where the personality is set to a
- # compute and all interfaces are ethernet interfaces.
+ # worker and all interfaces are ethernet interfaces.
self._create_host(constants.CONTROLLER, admin=constants.ADMIN_UNLOCKED)
self._create_ethernet('oam', constants.NETWORK_TYPE_OAM)
self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT)
self._create_ethernet('infra', constants.NETWORK_TYPE_INFRA)

- self._create_host(constants.COMPUTE, constants.COMPUTE,
+ self._create_host(constants.WORKER, constants.WORKER,
mgmt_mac='01:02.03.04.05.C0',
mgmt_ip='192.168.24.12',
admin=constants.ADMIN_LOCKED)
self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT,
- host=self.compute)
+ host=self.worker)
self._create_ethernet('infra', constants.NETWORK_TYPE_INFRA,
- host=self.compute)
+ host=self.worker)
self._create_ethernet('data',
constants.NETWORK_TYPE_DATA,
constants.INTERFACE_CLASS_DATA,
- 'group0-data0', host=self.compute)
+ 'group0-data0', host=self.worker)
self._create_ethernet('sriov',
constants.NETWORK_TYPE_PCI_SRIOV,
constants.INTERFACE_CLASS_PCI_SRIOV,
- 'group0-data1', host=self.compute)
+ 'group0-data1', host=self.worker)
self._create_ethernet('pthru',
constants.NETWORK_TYPE_PCI_PASSTHROUGH,
constants.INTERFACE_CLASS_PCI_PASSTHROUGH,
- 'group0-ext0', host=self.compute)
+ 'group0-ext0', host=self.worker)
port, iface = (
self._create_ethernet('slow',
constants.NETWORK_TYPE_DATA,
constants.INTERFACE_CLASS_DATA,
- 'group0-ext1', host=self.compute))
+ 'group0-ext1', host=self.worker))
port['dpdksupport'] = False
port, iface = (
self._create_ethernet('mlx4',
constants.NETWORK_TYPE_DATA,
constants.INTERFACE_CLASS_DATA,
- 'group0-ext2', host=self.compute))
+ 'group0-ext2', host=self.worker))
port['driver'] = 'mlx4_core'
port, iface = (
self._create_ethernet('mlx5',
constants.NETWORK_TYPE_DATA,
constants.INTERFACE_CLASS_DATA,
- 'group0-ext3', host=self.compute))
+ 'group0-ext3', host=self.worker))
port['driver'] = 'mlx5_core'

def setUp(self):
super(InterfaceComputeEthernet, self).setUp()

- def test_compute_ethernet_profile(self):
- self._create_and_apply_profile(self.compute)
+ def test_worker_ethernet_profile(self):
+ self._create_and_apply_profile(self.worker)


class InterfaceComputeVlanOverEthernet(InterfaceTestCase):

@@ -652,32 +652,32 @@ class InterfaceComputeVlanOverEthernet(InterfaceTestCase):
constants.INTERFACE_CLASS_PLATFORM, 3, iface)

# Setup a sample configuration where the personality is set to a
- # compute and all interfaces are vlan interfaces over ethernet
+ # worker and all interfaces are vlan interfaces over ethernet
# interfaces.
- self._create_host(constants.COMPUTE, admin=constants.ADMIN_LOCKED)
+ self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED)
port, iface = self._create_ethernet(
- 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, host=self.compute)
- self._create_compute_vlan('mgmt', constants.NETWORK_TYPE_MGMT,
+ 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, host=self.worker)
+ self._create_worker_vlan('mgmt', constants.NETWORK_TYPE_MGMT,
constants.INTERFACE_CLASS_PLATFORM, 2, iface)
- self._create_compute_vlan('infra', constants.NETWORK_TYPE_INFRA,
+ self._create_worker_vlan('infra', constants.NETWORK_TYPE_INFRA,
constants.INTERFACE_CLASS_PLATFORM, 3)
- self._create_compute_vlan('data', constants.INTERFACE_CLASS_DATA,
+ self._create_worker_vlan('data', constants.INTERFACE_CLASS_DATA,
constants.NETWORK_TYPE_DATA, 5,
providernetworks='group0-ext0')
self._create_ethernet('sriov',
constants.NETWORK_TYPE_PCI_SRIOV,
constants.INTERFACE_CLASS_PCI_SRIOV,
- 'group0-data0', host=self.compute)
+ 'group0-data0', host=self.worker)
self._create_ethernet('pthru',
constants.NETWORK_TYPE_PCI_PASSTHROUGH,
constants.INTERFACE_CLASS_PCI_PASSTHROUGH,
- 'group0-data1', host=self.compute)
+ 'group0-data1', host=self.worker)

def setUp(self):
super(InterfaceComputeVlanOverEthernet, self).setUp()

- def test_compute_vlan_over_ethernet_profile(self):
- self._create_and_apply_profile(self.compute)
+ def test_worker_vlan_over_ethernet_profile(self):
+ self._create_and_apply_profile(self.worker)


class InterfaceComputeBond(InterfaceTestCase):

@@ -691,28 +691,28 @@ class InterfaceComputeBond(InterfaceTestCase):
self._create_bond('infra', constants.NETWORK_TYPE_INFRA)

# Setup a sample configuration where the personality is set to a
- # compute and all interfaces are aggregated ethernet interfaces.
- self._create_host(constants.COMPUTE, admin=constants.ADMIN_LOCKED)
- self._create_compute_bond('mgmt', constants.NETWORK_TYPE_MGMT)
- self._create_compute_bond('infra', constants.NETWORK_TYPE_INFRA)
- self._create_compute_bond('data',
+ # worker and all interfaces are aggregated ethernet interfaces.
+ self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED)
+ self._create_worker_bond('mgmt', constants.NETWORK_TYPE_MGMT)
+ self._create_worker_bond('infra', constants.NETWORK_TYPE_INFRA)
+ self._create_worker_bond('data',
constants.NETWORK_TYPE_DATA,
constants.INTERFACE_CLASS_DATA,
providernetworks='group0-data0')
self._create_ethernet('sriov',
constants.NETWORK_TYPE_PCI_SRIOV,
constants.INTERFACE_CLASS_PCI_SRIOV,
- 'group0-ext0', host=self.compute)
+ 'group0-ext0', host=self.worker)
self._create_ethernet('pthru',
constants.NETWORK_TYPE_PCI_PASSTHROUGH,
constants.INTERFACE_CLASS_PCI_PASSTHROUGH,
- 'group0-ext1', host=self.compute)
+ 'group0-ext1', host=self.worker)

def setUp(self):
super(InterfaceComputeBond, self).setUp()

- def test_compute_bond_profile(self):
- self._create_and_apply_profile(self.compute)
+ def test_worker_bond_profile(self):
+ self._create_and_apply_profile(self.worker)


class InterfaceComputeVlanOverBond(InterfaceTestCase):

@@ -729,40 +729,40 @@ class InterfaceComputeVlanOverBond(InterfaceTestCase):
constants.INTERFACE_CLASS_PLATFORM, 3, bond)

# Setup a sample configuration where the personality is set to a
- # compute and all interfaces are vlan interfaces over aggregated
+ # worker and all interfaces are vlan interfaces over aggregated
# ethernet interfaces.
- self._create_host(constants.COMPUTE, admin=constants.ADMIN_LOCKED)
- bond = self._create_compute_bond('pxeboot',
+ self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED)
+ bond = self._create_worker_bond('pxeboot',
constants.NETWORK_TYPE_PXEBOOT,
constants.INTERFACE_CLASS_PLATFORM)
- self._create_compute_vlan('mgmt', constants.NETWORK_TYPE_MGMT,
+ self._create_worker_vlan('mgmt', constants.NETWORK_TYPE_MGMT,
constants.INTERFACE_CLASS_PLATFORM, 2, bond)
- self._create_compute_vlan('infra', constants.NETWORK_TYPE_INFRA,
+ self._create_worker_vlan('infra', constants.NETWORK_TYPE_INFRA,
constants.INTERFACE_CLASS_PLATFORM, 3,
bond)
- bond2 = self._create_compute_bond('bond2', constants.NETWORK_TYPE_NONE)
- self._create_compute_vlan('data',
+ bond2 = self._create_worker_bond('bond2', constants.NETWORK_TYPE_NONE)
+ self._create_worker_vlan('data',
constants.NETWORK_TYPE_DATA,
constants.INTERFACE_CLASS_DATA,
5, bond2,
providernetworks='group0-ext0')

- self._create_compute_bond('bond3', constants.NETWORK_TYPE_NONE)
+ self._create_worker_bond('bond3', constants.NETWORK_TYPE_NONE)

self._create_ethernet('sriov',
constants.NETWORK_TYPE_PCI_SRIOV,
constants.INTERFACE_CLASS_PCI_SRIOV,
- 'group0-data0', host=self.compute)
+ 'group0-data0', host=self.worker)
self._create_ethernet('pthru',
constants.NETWORK_TYPE_PCI_PASSTHROUGH,
constants.INTERFACE_CLASS_PCI_PASSTHROUGH,
- 'group0-data1', host=self.compute)
+ 'group0-data1', host=self.worker)

def setUp(self):
super(InterfaceComputeVlanOverBond, self).setUp()

- def test_compute_vlan_over_bond_profile(self):
- self._create_and_apply_profile(self.compute)
+ def test_worker_vlan_over_bond_profile(self):
+ self._create_and_apply_profile(self.worker)


class InterfaceComputeVlanOverDataEthernet(InterfaceTestCase):

@@ -776,44 +776,44 @@ class InterfaceComputeVlanOverDataEthernet(InterfaceTestCase):
self._create_ethernet('infra', constants.NETWORK_TYPE_INFRA)

# Setup a sample configuration where the personality is set to a
- # compute and all interfaces are vlan interfaces over data ethernet
+ # worker and all interfaces are vlan interfaces over data ethernet
# interfaces.
- self._create_host(constants.COMPUTE, admin=constants.ADMIN_LOCKED)
+ self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED)
port, iface = (
self._create_ethernet('data',
constants.NETWORK_TYPE_DATA,
constants.INTERFACE_CLASS_DATA,
- 'group0-data0', host=self.compute))
+ 'group0-data0', host=self.worker))
self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT,
- host=self.compute)
+ host=self.worker)
self._create_ethernet('infra', constants.NETWORK_TYPE_INFRA,
- host=self.compute)
- self._create_compute_vlan('data2', constants.NETWORK_TYPE_DATA,
+ host=self.worker)
+ self._create_worker_vlan('data2', constants.NETWORK_TYPE_DATA,
constants.INTERFACE_CLASS_DATA, 5,
iface, providernetworks='group0-ext0')
self._create_ethernet('sriov',
constants.NETWORK_TYPE_PCI_SRIOV,
constants.INTERFACE_CLASS_PCI_SRIOV,
- 'group0-ext1', host=self.compute)
+ 'group0-ext1', host=self.worker)
self._create_ethernet('pthru',
constants.NETWORK_TYPE_PCI_PASSTHROUGH,
constants.INTERFACE_CLASS_PCI_PASSTHROUGH,
- 'group0-ext2', host=self.compute)
+ 'group0-ext2', host=self.worker)

def setUp(self):
super(InterfaceComputeVlanOverDataEthernet, self).setUp()

- def test_compute_vlan_over_data_ethernet_profile(self):
- self._create_and_apply_profile(self.compute)
+ def test_worker_vlan_over_data_ethernet_profile(self):
+ self._create_and_apply_profile(self.worker)


class InterfaceCpeEthernet(InterfaceTestCase):

def _setup_configuration(self):
# Setup a sample configuration where the personality is set to a
- # controller with a compute subfunction and all interfaces are
+ # controller with a worker subfunction and all interfaces are
# ethernet interfaces.
- self._create_host(constants.CONTROLLER, constants.COMPUTE,
+ self._create_host(constants.CONTROLLER, constants.WORKER,
admin=constants.ADMIN_LOCKED)
self._create_ethernet('oam', constants.NETWORK_TYPE_OAM)
self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT)

@@ -853,9 +853,9 @@ class InterfaceCpeVlanOverEthernet(InterfaceTestCase):

def _setup_configuration(self):
# Setup a sample configuration where the personality is set to a
- # controller with a compute subfunction and all interfaces are
+ # controller with a worker subfunction and all interfaces are
# vlan interfaces over ethernet interfaces.
- self._create_host(constants.CONTROLLER, constants.COMPUTE,
+ self._create_host(constants.CONTROLLER, constants.WORKER,
admin=constants.ADMIN_LOCKED)
port, iface = self._create_ethernet(
'pxeboot', constants.NETWORK_TYPE_PXEBOOT)

@@ -886,10 +886,10 @@ class InterfaceCpeBond(InterfaceTestCase):

def _setup_configuration(self):
# Setup a sample configuration where the personality is set to a
- # controller with a compute subfunction and all interfaces are
+ # controller with a worker subfunction and all interfaces are
# aggregated ethernet interfaces.
self._create_host(constants.CONTROLLER,
- subfunction=constants.COMPUTE,
+ subfunction=constants.WORKER,
admin=constants.ADMIN_LOCKED)
self._create_bond('oam', constants.NETWORK_TYPE_OAM)
self._create_bond('mgmt', constants.NETWORK_TYPE_MGMT)

@@ -915,9 +915,9 @@ class InterfaceCpeVlanOverBond(InterfaceTestCase):

def _setup_configuration(self):
# Setup a sample configuration where the personality is set to a
- # controller with a compute subfunction and all interfaces are
+ # controller with a worker subfunction and all interfaces are
# vlan interfaces over aggregated ethernet interfaces.
- self._create_host(constants.CONTROLLER, constants.COMPUTE,
+ self._create_host(constants.CONTROLLER, constants.WORKER,
admin=constants.ADMIN_LOCKED)
bond = self._create_bond('pxeboot', constants.NETWORK_TYPE_PXEBOOT)
self._create_vlan('oam', constants.NETWORK_TYPE_OAM,

@@ -950,9 +950,9 @@ class InterfaceCpeVlanOverDataEthernet(InterfaceTestCase):

def _setup_configuration(self):
# Setup a sample configuration where the personality is set to a
- # controller with a compute subfunction and all interfaces are
+ # controller with a worker subfunction and all interfaces are
# vlan interfaces over data ethernet interfaces.
- self._create_host(constants.CONTROLLER, constants.COMPUTE,
+ self._create_host(constants.CONTROLLER, constants.WORKER,
admin=constants.ADMIN_LOCKED)
port, iface = (
self._create_ethernet('data',

@@ -1007,7 +1007,7 @@ class TestPatch(InterfaceTestCase):
def setUp(self):
super(TestPatch, self).setUp()
self._create_host(constants.CONTROLLER)
- self._create_host(constants.COMPUTE, admin=constants.ADMIN_LOCKED)
+ self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED)

def test_modify_ifname(self):
interface = dbutils.create_test_interface(forihostid='1')

@@ -1031,10 +1031,10 @@ class TestPatch(InterfaceTestCase):
data_bond = self._create_bond('data', constants.NETWORK_TYPE_DATA,
constants.INTERFACE_CLASS_DATA,
providernetworks='group0-data0',
- host=self.compute)
+ host=self.worker)

port, new_ethernet = self._create_ethernet(
- 'new', constants.NETWORK_TYPE_NONE, host=self.compute)
+ 'new', constants.NETWORK_TYPE_NONE, host=self.worker)
# Modify AE interface to add another port
uses = ','.join(data_bond['uses'])
patch_result = self.patch_dict_json(

@@ -1047,10 +1047,10 @@ class TestPatch(InterfaceTestCase):
# MTU (%s) using this interface
def test_mtu_smaller_than_users(self):
port, lower_interface = self._create_ethernet(
- 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, host=self.compute)
+ 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, host=self.worker)
dbutils.create_test_interface(
forihostid='2',
- ihost_uuid=self.compute.uuid,
+ ihost_uuid=self.worker.uuid,
ifname='data0',
networktype=constants.NETWORK_TYPE_DATA,
ifclass=constants.INTERFACE_CLASS_DATA,

@@ -1071,10 +1071,10 @@ class TestPatch(InterfaceTestCase):
# interface ___
def test_vlan_mtu_smaller_than_users(self):
port, lower_interface = self._create_ethernet(
- 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, host=self.compute)
+ 'pxeboot', constants.NETWORK_TYPE_PXEBOOT, host=self.worker)
upper = dbutils.create_test_interface(
forihostid='2',
- ihost_uuid=self.compute.uuid,
+ ihost_uuid=self.worker.uuid,
ifname='data0',
networktype=constants.NETWORK_TYPE_DATA,
ifclass=constants.INTERFACE_CLASS_DATA,

@@ -1110,24 +1110,24 @@ class TestPost(InterfaceTestCase):
def setUp(self):
super(TestPost, self).setUp()
self._create_host(constants.CONTROLLER)
- self._create_host(constants.COMPUTE, admin=constants.ADMIN_LOCKED)
+ self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED)

# Expected error: The oam network type is only supported on controller nodes
- def test_invalid_oam_on_compute(self):
+ def test_invalid_oam_on_worker(self):
self._create_ethernet('oam', constants.NETWORK_TYPE_OAM,
constants.INTERFACE_CLASS_PLATFORM,
- host=self.compute, expect_errors=True)
+ host=self.worker, expect_errors=True)

# Expected error: The pci-passthrough, pci-sriov network types are only
# valid on Ethernet interfaces
def test_invalid_iftype_for_pci_network_type(self):
self._create_bond('pthru', constants.NETWORK_TYPE_PCI_PASSTHROUGH,
ifclass=constants.INTERFACE_CLASS_PCI_PASSTHROUGH,
- host=self.compute, expect_errors=True)
+ host=self.worker, expect_errors=True)

# Expected error: The ___ network type is only supported on nodes supporting
- # compute functions
- def test_invalid_network_type_on_noncompute(self):
+ # worker functions
+ def test_invalid_network_type_on_nonworker(self):
self._create_ethernet('data0', constants.NETWORK_TYPE_DATA,
ifclass=constants.INTERFACE_CLASS_DATA,
providernetworks='group0-ext0',

@@ -1164,11 +1164,11 @@ class TestPost(InterfaceTestCase):
self._create_ethernet('data0', constants.NETWORK_TYPE_DATA,
ifclass=constants.INTERFACE_CLASS_DATA,
providernetworks='group0-data0',
- host=self.compute)
+ host=self.worker)
self._create_ethernet('data0', constants.NETWORK_TYPE_DATA,
ifclass=constants.INTERFACE_CLASS_DATA,
providernetworks='group0-ext0',
- host=self.compute,
+ host=self.worker,
expect_errors=True)

def test_ipv4_mode_valid(self):

@@ -1187,7 +1187,7 @@ class TestPost(InterfaceTestCase):
# mgmt, infra, data, data-vrs interfaces
def test_ipv4_mode_networktype_invalid(self):
ndict = dbutils.post_get_test_interface(
- ihost_uuid=self.compute.uuid,
+ ihost_uuid=self.worker.uuid,
ifname='name',
networktype=constants.NETWORK_TYPE_PCI_PASSTHROUGH,
ifclass=constants.INTERFACE_CLASS_PCI_PASSTHROUGH,

@@ -1244,7 +1244,7 @@ class TestPost(InterfaceTestCase):
# Expected error: IPv4 address pool name not specified
def test_ipv4_mode_no_pool_invalid(self):
ndict = dbutils.post_get_test_interface(
- ihost_uuid=self.compute.uuid,
+ ihost_uuid=self.worker.uuid,
ifname='name',
networktype=constants.NETWORK_TYPE_MGMT,
networks=['1'],

@@ -1257,7 +1257,7 @@ class TestPost(InterfaceTestCase):
# Expected error: IPv6 address pool name not specified
def test_ipv6_mode_no_pool_invalid(self):
ndict = dbutils.post_get_test_interface(
- ihost_uuid=self.compute.uuid,
+ ihost_uuid=self.worker.uuid,
ifname='name',
networktype=constants.NETWORK_TYPE_MGMT,
networks=['1'],

@@ -1271,7 +1271,7 @@ class TestPost(InterfaceTestCase):
# Expected error: Address pool IP family does not match requested family
def test_ipv4_pool_family_mismatch_invalid(self):
ndict = dbutils.post_get_test_interface(
|
ndict = dbutils.post_get_test_interface(
|
||||||
ihost_uuid=self.compute.uuid,
|
ihost_uuid=self.worker.uuid,
|
||||||
ifname='name',
|
ifname='name',
|
||||||
networktype=constants.NETWORK_TYPE_MGMT,
|
networktype=constants.NETWORK_TYPE_MGMT,
|
||||||
networks=['1'],
|
networks=['1'],
|
||||||
@ -1286,7 +1286,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
# Expected error: Address pool IP family does not match requested family
|
# Expected error: Address pool IP family does not match requested family
|
||||||
def test_ipv6_pool_family_mismatch_invalid(self):
|
def test_ipv6_pool_family_mismatch_invalid(self):
|
||||||
ndict = dbutils.post_get_test_interface(
|
ndict = dbutils.post_get_test_interface(
|
||||||
ihost_uuid=self.compute.uuid,
|
ihost_uuid=self.worker.uuid,
|
||||||
ifname='name',
|
ifname='name',
|
||||||
networktype=constants.NETWORK_TYPE_MGMT,
|
networktype=constants.NETWORK_TYPE_MGMT,
|
||||||
networks=['1'],
|
networks=['1'],
|
||||||
@ -1302,7 +1302,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
# 'vlan' or 'ethernet'.
|
# 'vlan' or 'ethernet'.
|
||||||
def test_aemode_invalid_iftype(self):
|
def test_aemode_invalid_iftype(self):
|
||||||
ndict = dbutils.post_get_test_interface(
|
ndict = dbutils.post_get_test_interface(
|
||||||
ihost_uuid=self.compute.uuid,
|
ihost_uuid=self.worker.uuid,
|
||||||
providernetworks='group0-data0',
|
providernetworks='group0-data0',
|
||||||
ifname='name',
|
ifname='name',
|
||||||
networktype=constants.NETWORK_TYPE_DATA,
|
networktype=constants.NETWORK_TYPE_DATA,
|
||||||
@ -1316,7 +1316,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
# in ___ mode should not specify a Tx Hash Policy.
|
# in ___ mode should not specify a Tx Hash Policy.
|
||||||
def test_aemode_no_txhash(self):
|
def test_aemode_no_txhash(self):
|
||||||
ndict = dbutils.post_get_test_interface(
|
ndict = dbutils.post_get_test_interface(
|
||||||
ihost_uuid=self.compute.uuid,
|
ihost_uuid=self.worker.uuid,
|
||||||
providernetworks='group0-data0',
|
providernetworks='group0-data0',
|
||||||
ifname='name',
|
ifname='name',
|
||||||
networktype=constants.NETWORK_TYPE_DATA,
|
networktype=constants.NETWORK_TYPE_DATA,
|
||||||
@ -1330,7 +1330,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
# 'aggregated ethernet' must have a Tx Hash Policy of 'layer2'.
|
# 'aggregated ethernet' must have a Tx Hash Policy of 'layer2'.
|
||||||
def test_aemode_invalid_txhash(self):
|
def test_aemode_invalid_txhash(self):
|
||||||
ndict = dbutils.post_get_test_interface(
|
ndict = dbutils.post_get_test_interface(
|
||||||
ihost_uuid=self.compute.uuid,
|
ihost_uuid=self.worker.uuid,
|
||||||
ifname='name',
|
ifname='name',
|
||||||
networktype=constants.NETWORK_TYPE_DATA,
|
networktype=constants.NETWORK_TYPE_DATA,
|
||||||
ifclass=constants.INTERFACE_CLASS_DATA,
|
ifclass=constants.INTERFACE_CLASS_DATA,
|
||||||
@ -1343,7 +1343,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
# in 'balanced' or '802.3ad' mode require a valid Tx Hash Policy
|
# in 'balanced' or '802.3ad' mode require a valid Tx Hash Policy
|
||||||
def test_aemode_invalid_txhash_none(self):
|
def test_aemode_invalid_txhash_none(self):
|
||||||
ndict = dbutils.post_get_test_interface(
|
ndict = dbutils.post_get_test_interface(
|
||||||
ihost_uuid=self.compute.uuid,
|
ihost_uuid=self.worker.uuid,
|
||||||
providernetworks='group0-data0',
|
providernetworks='group0-data0',
|
||||||
ifname='name',
|
ifname='name',
|
||||||
networktype=constants.NETWORK_TYPE_DATA,
|
networktype=constants.NETWORK_TYPE_DATA,
|
||||||
@ -1354,7 +1354,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
self._post_and_check_failure(ndict)
|
self._post_and_check_failure(ndict)
|
||||||
|
|
||||||
ndict = dbutils.post_get_test_interface(
|
ndict = dbutils.post_get_test_interface(
|
||||||
ihost_uuid=self.compute.uuid,
|
ihost_uuid=self.worker.uuid,
|
||||||
providernetworks='group0-data0',
|
providernetworks='group0-data0',
|
||||||
ifname='name',
|
ifname='name',
|
||||||
networktype=constants.NETWORK_TYPE_DATA,
|
networktype=constants.NETWORK_TYPE_DATA,
|
||||||
@ -1368,7 +1368,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
# 'aggregated ethernet' must be in mode '802.3ad'
|
# 'aggregated ethernet' must be in mode '802.3ad'
|
||||||
def test_aemode_invalid_mgmt(self):
|
def test_aemode_invalid_mgmt(self):
|
||||||
ndict = dbutils.post_get_test_interface(
|
ndict = dbutils.post_get_test_interface(
|
||||||
ihost_uuid=self.compute.uuid,
|
ihost_uuid=self.worker.uuid,
|
||||||
providernetworks='group0-data0',
|
providernetworks='group0-data0',
|
||||||
ifname='name',
|
ifname='name',
|
||||||
networktype=constants.NETWORK_TYPE_MGMT,
|
networktype=constants.NETWORK_TYPE_MGMT,
|
||||||
@ -1384,7 +1384,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
# '802.3ad'.
|
# '802.3ad'.
|
||||||
def test_aemode_invalid_data(self):
|
def test_aemode_invalid_data(self):
|
||||||
ndict = dbutils.post_get_test_interface(
|
ndict = dbutils.post_get_test_interface(
|
||||||
ihost_uuid=self.compute.uuid,
|
ihost_uuid=self.worker.uuid,
|
||||||
providernetworks='group0-data0',
|
providernetworks='group0-data0',
|
||||||
ifname='name',
|
ifname='name',
|
||||||
networktype=constants.NETWORK_TYPE_DATA,
|
networktype=constants.NETWORK_TYPE_DATA,
|
||||||
@ -1408,7 +1408,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
|
|
||||||
def test_aemode_invalid_infra(self):
|
def test_aemode_invalid_infra(self):
|
||||||
ndict = dbutils.post_get_test_interface(
|
ndict = dbutils.post_get_test_interface(
|
||||||
ihost_uuid=self.compute.uuid,
|
ihost_uuid=self.worker.uuid,
|
||||||
ifname='name',
|
ifname='name',
|
||||||
networktype=constants.NETWORK_TYPE_INFRA,
|
networktype=constants.NETWORK_TYPE_INFRA,
|
||||||
networks=['2'],
|
networks=['2'],
|
||||||
@ -1422,7 +1422,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
# on controller.
|
# on controller.
|
||||||
def test_no_infra_on_controller(self):
|
def test_no_infra_on_controller(self):
|
||||||
ndict = dbutils.post_get_test_interface(
|
ndict = dbutils.post_get_test_interface(
|
||||||
ihost_uuid=self.compute.uuid,
|
ihost_uuid=self.worker.uuid,
|
||||||
ifname='name',
|
ifname='name',
|
||||||
networktype=constants.NETWORK_TYPE_INFRA,
|
networktype=constants.NETWORK_TYPE_INFRA,
|
||||||
networks=['2'],
|
networks=['2'],
|
||||||
@ -1457,14 +1457,14 @@ class TestPost(InterfaceTestCase):
|
|||||||
# Expected message: Interface eth0 is already used by another AE interface
|
# Expected message: Interface eth0 is already used by another AE interface
|
||||||
# bond0
|
# bond0
|
||||||
def test_create_bond_invalid_overlap_ae(self):
|
def test_create_bond_invalid_overlap_ae(self):
|
||||||
bond_iface = self._create_compute_bond('bond0',
|
bond_iface = self._create_worker_bond('bond0',
|
||||||
constants.NETWORK_TYPE_DATA,
|
constants.NETWORK_TYPE_DATA,
|
||||||
constants.INTERFACE_CLASS_DATA,
|
constants.INTERFACE_CLASS_DATA,
|
||||||
providernetworks='group0-data0')
|
providernetworks='group0-data0')
|
||||||
port, iface1 = self._create_ethernet()
|
port, iface1 = self._create_ethernet()
|
||||||
|
|
||||||
ndict = dbutils.post_get_test_interface(
|
ndict = dbutils.post_get_test_interface(
|
||||||
ihost_uuid=self.compute.uuid,
|
ihost_uuid=self.worker.uuid,
|
||||||
providernetworks='group0-ext1',
|
providernetworks='group0-ext1',
|
||||||
ifname='bond1',
|
ifname='bond1',
|
||||||
networktype=constants.NETWORK_TYPE_DATA,
|
networktype=constants.NETWORK_TYPE_DATA,
|
||||||
@ -1477,7 +1477,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
|
|
||||||
# Expected message: VLAN id must be between 1 and 4094.
|
# Expected message: VLAN id must be between 1 and 4094.
|
||||||
def test_create_invalid_vlan_id(self):
|
def test_create_invalid_vlan_id(self):
|
||||||
self._create_compute_vlan('vlan0', constants.NETWORK_TYPE_DATA,
|
self._create_worker_vlan('vlan0', constants.NETWORK_TYPE_DATA,
|
||||||
ifclass=constants.INTERFACE_CLASS_DATA,
|
ifclass=constants.INTERFACE_CLASS_DATA,
|
||||||
vlan_id=4095,
|
vlan_id=4095,
|
||||||
providernetworks='group0-ext0',
|
providernetworks='group0-ext0',
|
||||||
@ -1486,7 +1486,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
# Expected message: Interface eth0 is already used by another VLAN
|
# Expected message: Interface eth0 is already used by another VLAN
|
||||||
# interface vlan0
|
# interface vlan0
|
||||||
def test_create_bond_invalid_overlap_vlan(self):
|
def test_create_bond_invalid_overlap_vlan(self):
|
||||||
vlan_iface = self._create_compute_vlan(
|
vlan_iface = self._create_worker_vlan(
|
||||||
'vlan0',
|
'vlan0',
|
||||||
constants.NETWORK_TYPE_DATA,
|
constants.NETWORK_TYPE_DATA,
|
||||||
ifclass=constants.INTERFACE_CLASS_DATA,
|
ifclass=constants.INTERFACE_CLASS_DATA,
|
||||||
@ -1494,7 +1494,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
port, iface1 = self._create_ethernet()
|
port, iface1 = self._create_ethernet()
|
||||||
|
|
||||||
ndict = dbutils.post_get_test_interface(
|
ndict = dbutils.post_get_test_interface(
|
||||||
ihost_uuid=self.compute.uuid,
|
ihost_uuid=self.worker.uuid,
|
||||||
providernetworks='group0-ext1',
|
providernetworks='group0-ext1',
|
||||||
ifname='bond0',
|
ifname='bond0',
|
||||||
networktype=constants.NETWORK_TYPE_DATA,
|
networktype=constants.NETWORK_TYPE_DATA,
|
||||||
@ -1507,14 +1507,14 @@ class TestPost(InterfaceTestCase):
|
|||||||
|
|
||||||
# Expected message: Can only have one interface for vlan type.
|
# Expected message: Can only have one interface for vlan type.
|
||||||
def test_create_vlan_invalid_uses(self):
|
def test_create_vlan_invalid_uses(self):
|
||||||
bond_iface = self._create_compute_bond('bond0',
|
bond_iface = self._create_worker_bond('bond0',
|
||||||
constants.NETWORK_TYPE_DATA,
|
constants.NETWORK_TYPE_DATA,
|
||||||
constants.INTERFACE_CLASS_DATA,
|
constants.INTERFACE_CLASS_DATA,
|
||||||
providernetworks='group0-data0')
|
providernetworks='group0-data0')
|
||||||
port, iface1 = self._create_ethernet()
|
port, iface1 = self._create_ethernet()
|
||||||
|
|
||||||
ndict = dbutils.post_get_test_interface(
|
ndict = dbutils.post_get_test_interface(
|
||||||
ihost_uuid=self.compute.uuid,
|
ihost_uuid=self.worker.uuid,
|
||||||
providernetworks='group0-ext1',
|
providernetworks='group0-ext1',
|
||||||
ifname='bond1',
|
ifname='bond1',
|
||||||
networktype=constants.NETWORK_TYPE_DATA,
|
networktype=constants.NETWORK_TYPE_DATA,
|
||||||
@ -1528,11 +1528,11 @@ class TestPost(InterfaceTestCase):
|
|||||||
# Expected message: VLAN interfaces cannot be created over existing VLAN
|
# Expected message: VLAN interfaces cannot be created over existing VLAN
|
||||||
# interfaces
|
# interfaces
|
||||||
def test_create_invalid_vlan_over_vlan(self):
|
def test_create_invalid_vlan_over_vlan(self):
|
||||||
vlan_iface = self._create_compute_vlan(
|
vlan_iface = self._create_worker_vlan(
|
||||||
'vlan1', constants.NETWORK_TYPE_DATA,
|
'vlan1', constants.NETWORK_TYPE_DATA,
|
||||||
constants.INTERFACE_CLASS_DATA, 1,
|
constants.INTERFACE_CLASS_DATA, 1,
|
||||||
providernetworks='group0-ext0')
|
providernetworks='group0-ext0')
|
||||||
self._create_compute_vlan('vlan2',
|
self._create_worker_vlan('vlan2',
|
||||||
constants.NETWORK_TYPE_DATA,
|
constants.NETWORK_TYPE_DATA,
|
||||||
constants.INTERFACE_CLASS_DATA,
|
constants.INTERFACE_CLASS_DATA,
|
||||||
vlan_id=2,
|
vlan_id=2,
|
||||||
@ -1543,10 +1543,10 @@ class TestPost(InterfaceTestCase):
|
|||||||
# Expected message: data VLAN cannot be created over a LAG interface with
|
# Expected message: data VLAN cannot be created over a LAG interface with
|
||||||
# network type pxeboot
|
# network type pxeboot
|
||||||
def test_create_data_vlan_over_pxeboot_lag(self):
|
def test_create_data_vlan_over_pxeboot_lag(self):
|
||||||
bond_iface = self._create_compute_bond(
|
bond_iface = self._create_worker_bond(
|
||||||
'pxeboot', constants.NETWORK_TYPE_PXEBOOT,
|
'pxeboot', constants.NETWORK_TYPE_PXEBOOT,
|
||||||
constants.INTERFACE_CLASS_PLATFORM)
|
constants.INTERFACE_CLASS_PLATFORM)
|
||||||
self._create_compute_vlan(
|
self._create_worker_vlan(
|
||||||
'vlan2',
|
'vlan2',
|
||||||
constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, 2,
|
constants.NETWORK_TYPE_DATA, constants.INTERFACE_CLASS_DATA, 2,
|
||||||
lower_iface=bond_iface, providernetworks='group0-ext1',
|
lower_iface=bond_iface, providernetworks='group0-ext1',
|
||||||
@ -1555,10 +1555,10 @@ class TestPost(InterfaceTestCase):
|
|||||||
# Expected message: data VLAN cannot be created over a LAG interface with
|
# Expected message: data VLAN cannot be created over a LAG interface with
|
||||||
# network type mgmt
|
# network type mgmt
|
||||||
def test_create_data_vlan_over_mgmt_lag(self):
|
def test_create_data_vlan_over_mgmt_lag(self):
|
||||||
bond_iface = self._create_compute_bond(
|
bond_iface = self._create_worker_bond(
|
||||||
'mgmt', constants.NETWORK_TYPE_MGMT,
|
'mgmt', constants.NETWORK_TYPE_MGMT,
|
||||||
constants.INTERFACE_CLASS_PLATFORM)
|
constants.INTERFACE_CLASS_PLATFORM)
|
||||||
self._create_compute_vlan(
|
self._create_worker_vlan(
|
||||||
'vlan2', constants.NETWORK_TYPE_DATA,
|
'vlan2', constants.NETWORK_TYPE_DATA,
|
||||||
constants.INTERFACE_CLASS_DATA, 2,
|
constants.INTERFACE_CLASS_DATA, 2,
|
||||||
lower_iface=bond_iface, providernetworks='group0-ext1',
|
lower_iface=bond_iface, providernetworks='group0-ext1',
|
||||||
@ -1567,10 +1567,10 @@ class TestPost(InterfaceTestCase):
|
|||||||
# Expected message: mgmt VLAN cannot be created over a LAG interface with
|
# Expected message: mgmt VLAN cannot be created over a LAG interface with
|
||||||
# network type data
|
# network type data
|
||||||
def test_create_mgmt_vlan_over_data_lag(self):
|
def test_create_mgmt_vlan_over_data_lag(self):
|
||||||
bond_iface = self._create_compute_bond(
|
bond_iface = self._create_worker_bond(
|
||||||
'data', constants.NETWORK_TYPE_DATA,
|
'data', constants.NETWORK_TYPE_DATA,
|
||||||
constants.INTERFACE_CLASS_DATA, providernetworks='group0-ext1')
|
constants.INTERFACE_CLASS_DATA, providernetworks='group0-ext1')
|
||||||
self._create_compute_vlan(
|
self._create_worker_vlan(
|
||||||
'mgmt', constants.NETWORK_TYPE_MGMT,
|
'mgmt', constants.NETWORK_TYPE_MGMT,
|
||||||
constants.INTERFACE_CLASS_PLATFORM, 2,
|
constants.INTERFACE_CLASS_PLATFORM, 2,
|
||||||
lower_iface=bond_iface, providernetworks='group0-ext1',
|
lower_iface=bond_iface, providernetworks='group0-ext1',
|
||||||
@ -1579,7 +1579,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
# Expected message:
|
# Expected message:
|
||||||
# Provider network(s) not supported for non-data interfaces.
|
# Provider network(s) not supported for non-data interfaces.
|
||||||
def test_create_nondata_provider_network(self):
|
def test_create_nondata_provider_network(self):
|
||||||
self._create_compute_bond(
|
self._create_worker_bond(
|
||||||
'pxeboot', constants.NETWORK_TYPE_PXEBOOT,
|
'pxeboot', constants.NETWORK_TYPE_PXEBOOT,
|
||||||
constants.INTERFACE_CLASS_PLATFORM,
|
constants.INTERFACE_CLASS_PLATFORM,
|
||||||
providernetworks='group0-data0', expect_errors=True)
|
providernetworks='group0-data0', expect_errors=True)
|
||||||
@ -1608,7 +1608,7 @@ class TestPost(InterfaceTestCase):
|
|||||||
networktype=[constants.NETWORK_TYPE_MGMT,
|
networktype=[constants.NETWORK_TYPE_MGMT,
|
||||||
constants.NETWORK_TYPE_DATA],
|
constants.NETWORK_TYPE_DATA],
|
||||||
providernetworks='group0-data0',
|
providernetworks='group0-data0',
|
||||||
host=self.compute,
|
host=self.worker,
|
||||||
expect_errors=True)
|
expect_errors=True)
|
||||||
|
|
||||||
# Expected message:
|
# Expected message:
|
||||||
@ -1619,14 +1619,14 @@ class TestPost(InterfaceTestCase):
|
|||||||
networktype=[constants.NETWORK_TYPE_DATA,
|
networktype=[constants.NETWORK_TYPE_DATA,
|
||||||
constants.NETWORK_TYPE_PXEBOOT],
|
constants.NETWORK_TYPE_PXEBOOT],
|
||||||
providernetworks='group0-data0',
|
providernetworks='group0-data0',
|
||||||
host=self.compute,
|
host=self.worker,
|
||||||
expect_errors=True)
|
expect_errors=True)
|
||||||
|
|
||||||
|
|
||||||
class TestCpePost(InterfaceTestCase):
|
class TestCpePost(InterfaceTestCase):
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
super(TestCpePost, self).setUp()
|
super(TestCpePost, self).setUp()
|
||||||
self._create_host(constants.CONTROLLER, constants.COMPUTE,
|
self._create_host(constants.CONTROLLER, constants.WORKER,
|
||||||
admin=constants.ADMIN_LOCKED)
|
admin=constants.ADMIN_LOCKED)
|
||||||
|
|
||||||
# Expected message:
|
# Expected message:
|
||||||
@ -1790,7 +1790,7 @@ class TestCpePost(InterfaceTestCase):
|
|||||||
class TestCpePatch(InterfaceTestCase):
|
class TestCpePatch(InterfaceTestCase):
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
super(TestCpePatch, self).setUp()
|
super(TestCpePatch, self).setUp()
|
||||||
self._create_host(constants.CONTROLLER, constants.COMPUTE,
|
self._create_host(constants.CONTROLLER, constants.WORKER,
|
||||||
admin=constants.ADMIN_LOCKED)
|
admin=constants.ADMIN_LOCKED)
|
||||||
|
|
||||||
def test_create_invalid_infra_data_ethernet(self):
|
def test_create_invalid_infra_data_ethernet(self):
|
||||||
|
@@ -28,13 +28,13 @@ class InterfaceNetworkTestCase(base.FunctionalTest):
subfunctions=constants.CONTROLLER,
invprovision=constants.PROVISIONED,
)
- self.compute = dbutils.create_test_ihost(
+ self.worker = dbutils.create_test_ihost(
id='2',
uuid=None,
forisystemid=self.system.id,
- hostname='compute-0',
+ hostname='worker-0',
- personality=constants.COMPUTE,
+ personality=constants.WORKER,
- subfunctions=constants.COMPUTE,
+ subfunctions=constants.WORKER,
mgmt_mac='01:02.03.04.05.C0',
mgmt_ip='192.168.24.12',
invprovision=constants.PROVISIONED,
@@ -114,73 +114,73 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase):
controller_interface = dbutils.create_test_interface(
ifname='enp0s8',
forihostid=self.controller.id)
- compute_interface = dbutils.create_test_interface(
+ worker_interface = dbutils.create_test_interface(
ifname='enp0s8',
- forihostid=self.compute.id)
+ forihostid=self.worker.id)

controller_interface_network = dbutils.post_get_test_interface_network(
interface_uuid=controller_interface.uuid,
network_uuid=self.mgmt_network.uuid)
self._post_and_check(controller_interface_network, expect_errors=False)

- compute_interface_network = dbutils.post_get_test_interface_network(
+ worker_interface_network = dbutils.post_get_test_interface_network(
- interface_uuid=compute_interface.uuid,
+ interface_uuid=worker_interface.uuid,
network_uuid=self.mgmt_network.uuid)
- self._post_and_check(compute_interface_network, expect_errors=False)
+ self._post_and_check(worker_interface_network, expect_errors=False)

def test_create_infra_interface_network(self):
controller_interface = dbutils.create_test_interface(
ifname='enp0s8',
forihostid=self.controller.id)
- compute_interface = dbutils.create_test_interface(
+ worker_interface = dbutils.create_test_interface(
ifname='enp0s8',
- forihostid=self.compute.id)
+ forihostid=self.worker.id)

controller_interface_network = dbutils.post_get_test_interface_network(
interface_uuid=controller_interface.uuid,
network_uuid=self.infra_network.uuid)
self._post_and_check(controller_interface_network, expect_errors=False)

- compute_interface_network = dbutils.post_get_test_interface_network(
+ worker_interface_network = dbutils.post_get_test_interface_network(
- interface_uuid=compute_interface.uuid,
+ interface_uuid=worker_interface.uuid,
network_uuid=self.infra_network.uuid)
- self._post_and_check(compute_interface_network, expect_errors=False)
+ self._post_and_check(worker_interface_network, expect_errors=False)

def test_create_oam_interface_network(self):
controller_interface = dbutils.create_test_interface(
ifname='enp0s8',
forihostid=self.controller.id)
- compute_interface = dbutils.create_test_interface(
+ worker_interface = dbutils.create_test_interface(
ifname='enp0s8',
- forihostid=self.compute.id)
+ forihostid=self.worker.id)

controller_interface_network = dbutils.post_get_test_interface_network(
interface_uuid=controller_interface.uuid,
network_uuid=self.oam_network.uuid)
self._post_and_check(controller_interface_network, expect_errors=False)

- compute_interface_network = dbutils.post_get_test_interface_network(
+ worker_interface_network = dbutils.post_get_test_interface_network(
- interface_uuid=compute_interface.uuid,
+ interface_uuid=worker_interface.uuid,
network_uuid=self.oam_network.uuid)
- self._post_and_check(compute_interface_network, expect_errors=False)
+ self._post_and_check(worker_interface_network, expect_errors=False)

def test_create_pxeboot_interface_network(self):
controller_interface = dbutils.create_test_interface(
ifname='enp0s8',
forihostid=self.controller.id)
- compute_interface = dbutils.create_test_interface(
+ worker_interface = dbutils.create_test_interface(
ifname='enp0s8',
- forihostid=self.compute.id)
+ forihostid=self.worker.id)

controller_interface_network = dbutils.post_get_test_interface_network(
interface_uuid=controller_interface.uuid,
network_uuid=self.pxeboot_network.uuid)
self._post_and_check(controller_interface_network, expect_errors=False)

- compute_interface_network = dbutils.post_get_test_interface_network(
+ worker_interface_network = dbutils.post_get_test_interface_network(
- interface_uuid=compute_interface.uuid,
+ interface_uuid=worker_interface.uuid,
network_uuid=self.pxeboot_network.uuid)
- self._post_and_check(compute_interface_network, expect_errors=False)
+ self._post_and_check(worker_interface_network, expect_errors=False)

def test_create_mgmt_infra_interface_network(self):
controller_interface = dbutils.create_test_interface(
@@ -190,11 +190,11 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase):
interface_id=controller_interface.id,
network_id=self.mgmt_network.id)

- compute_interface = dbutils.create_test_interface(
+ worker_interface = dbutils.create_test_interface(
ifname='enp0s8',
- forihostid=self.compute.id)
+ forihostid=self.worker.id)
dbutils.create_test_interface_network(
- interface_id=compute_interface.id,
+ interface_id=worker_interface.id,
network_id=self.mgmt_network.id)

controller_interface_network = dbutils.post_get_test_interface_network(
@@ -202,10 +202,10 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase):
network_uuid=self.infra_network.uuid)
self._post_and_check(controller_interface_network, expect_errors=False)

- compute_interface_network = dbutils.post_get_test_interface_network(
+ worker_interface_network = dbutils.post_get_test_interface_network(
- interface_uuid=compute_interface.uuid,
+ interface_uuid=worker_interface.uuid,
network_uuid=self.infra_network.uuid)
- self._post_and_check(compute_interface_network, expect_errors=False)
+ self._post_and_check(worker_interface_network, expect_errors=False)

# Expected error:
# You cannot assign a network of type 'oam' to an interface
@@ -218,11 +218,11 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase):
interface_id=controller_interface.id,
network_id=self.mgmt_network.id)

- compute_interface = dbutils.create_test_interface(
+ worker_interface = dbutils.create_test_interface(
ifname='enp0s8',
- forihostid=self.compute.id)
+ forihostid=self.worker.id)
dbutils.create_test_interface_network(
- interface_id=compute_interface.id,
+ interface_id=worker_interface.id,
network_id=self.mgmt_network.id)

controller_interface_network = dbutils.post_get_test_interface_network(
@@ -230,10 +230,10 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase):
network_uuid=self.oam_network.uuid)
self._post_and_check(controller_interface_network, expect_errors=True)

- compute_interface_network = dbutils.post_get_test_interface_network(
+ worker_interface_network = dbutils.post_get_test_interface_network(
- interface_uuid=compute_interface.uuid,
+ interface_uuid=worker_interface.uuid,
network_uuid=self.oam_network.uuid)
- self._post_and_check(compute_interface_network, expect_errors=True)
+ self._post_and_check(worker_interface_network, expect_errors=True)

# Expected error:
# You cannot assign a network of type 'pxeboot' to an interface
@@ -246,11 +246,11 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase):
interface_id=controller_interface.id,
network_id=self.mgmt_network.id)

- compute_interface = dbutils.create_test_interface(
+ worker_interface = dbutils.create_test_interface(
ifname='enp0s8',
- forihostid=self.compute.id)
+ forihostid=self.worker.id)
dbutils.create_test_interface_network(
- interface_id=compute_interface.id,
+ interface_id=worker_interface.id,
network_id=self.mgmt_network.id)

controller_interface_network = dbutils.post_get_test_interface_network(
@@ -258,10 +258,10 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase):
network_uuid=self.pxeboot_network.uuid)
self._post_and_check(controller_interface_network, expect_errors=True)

- compute_interface_network = dbutils.post_get_test_interface_network(
+ worker_interface_network = dbutils.post_get_test_interface_network(
- interface_uuid=compute_interface.uuid,
+ interface_uuid=worker_interface.uuid,
network_uuid=self.pxeboot_network.uuid)
- self._post_and_check(compute_interface_network, expect_errors=True)
+ self._post_and_check(worker_interface_network, expect_errors=True)

# Expected error:
# Interface network with interface ID '%s' and
@@ -274,11 +274,11 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase):
interface_id=controller_interface.id,
network_id=self.mgmt_network.id)

- compute_interface = dbutils.create_test_interface(
+ worker_interface = dbutils.create_test_interface(
ifname='enp0s8',
- forihostid=self.compute.id)
+ forihostid=self.worker.id)
dbutils.create_test_interface_network(
- interface_id=compute_interface.id,
+ interface_id=worker_interface.id,
network_id=self.mgmt_network.id)

controller_interface_network = dbutils.post_get_test_interface_network(
@@ -286,7 +286,7 @@ class InterfaceNetworkCreateTestCase(InterfaceNetworkTestCase):
network_uuid=self.mgmt_network.uuid)
self._post_and_check(controller_interface_network, expect_errors=True)

- compute_interface_network = dbutils.post_get_test_interface_network(
+ worker_interface_network = dbutils.post_get_test_interface_network(
- interface_uuid=compute_interface.uuid,
+ interface_uuid=worker_interface.uuid,
network_uuid=self.mgmt_network.uuid)
- self._post_and_check(compute_interface_network, expect_errors=True)
+ self._post_and_check(worker_interface_network, expect_errors=True)

@@ -41,13 +41,13 @@ class ProfileTestCase(base.FunctionalTest):
subfunctions=constants.CONTROLLER,
invprovision=constants.PROVISIONED,
)
- self.compute = dbutils.create_test_ihost(
+ self.worker = dbutils.create_test_ihost(
id='2',
uuid=None,
forisystemid=self.system.id,
- hostname='compute-0',
+ hostname='worker-0',
- personality=constants.COMPUTE,
+ personality=constants.WORKER,
- subfunctions=constants.COMPUTE,
+ subfunctions=constants.WORKER,
mgmt_mac='01:02.03.04.05.C0',
mgmt_ip='192.168.24.12',
invprovision=constants.PROVISIONED,
@@ -76,27 +76,27 @@ class ProfileTestCase(base.FunctionalTest):
hugepages_configured=True,
forinodeid=self.ctrlcpu.forinodeid))

- self.compnode = self.dbapi.inode_create(self.compute.id,
+ self.compnode = self.dbapi.inode_create(self.worker.id,
dbutils.get_test_node(id=2))
self.compcpu = self.dbapi.icpu_create(
- self.compute.id,
+ self.worker.id,
dbutils.get_test_icpu(id=5, cpu=3,
forinodeid=self.compnode.id,
- forihostid=self.compute.id))
+ forihostid=self.worker.id))
self.compmemory = self.dbapi.imemory_create(
- self.compute.id,
+ self.worker.id,
dbutils.get_test_imemory(id=2, Hugepagesize=constants.MIB_1G,
forinodeid=self.compcpu.forinodeid))

self.disk = self.dbapi.idisk_create(
- self.compute.id,
+ self.worker.id,
dbutils.get_test_idisk(device_node='/dev/sdb',
device_type=constants.DEVICE_TYPE_HDD))
self.lvg = self.dbapi.ilvg_create(
- self.compute.id,
+ self.worker.id,
dbutils.get_test_lvg(lvm_vg_name=constants.LVG_NOVA_LOCAL))
self.pv = self.dbapi.ipv_create(
- self.compute.id,
+ self.worker.id,
dbutils.get_test_pv(lvm_vg_name=constants.LVG_NOVA_LOCAL,
disk_or_part_uuid=self.disk.uuid))

@@ -129,13 +129,13 @@ class ProfileCreateTestCase(ProfileTestCase):

def test_create_memory_success(self):
self.profile["profiletype"] = constants.PROFILE_TYPE_MEMORY
- self.profile["ihost_uuid"] = self.compute.uuid
+ self.profile["ihost_uuid"] = self.worker.uuid
response = self.post_json('%s' % self._get_path(), self.profile)
self.assertEqual(http_client.OK, response.status_int)

def test_create_storage_success(self):
self.profile["profiletype"] = constants.PROFILE_TYPE_STORAGE
- self.profile["ihost_uuid"] = self.compute.uuid
+ self.profile["ihost_uuid"] = self.worker.uuid
response = self.post_json('%s' % self._get_path(), self.profile)
self.assertEqual(http_client.OK, response.status_int)

@@ -176,7 +176,7 @@ class ProfileDeleteTestCase(ProfileTestCase):

def test_delete_storage_success(self):
self.profile["profiletype"] = constants.PROFILE_TYPE_STORAGE
- self.profile["ihost_uuid"] = self.compute.uuid
+ self.profile["ihost_uuid"] = self.worker.uuid
post_response = self.post_json('%s' % self._get_path(), self.profile)
profile_data = self.get_json('%s' % self._get_path())
storprofile_data = self.get_json(
@@ -227,7 +227,7 @@ class ProfileShowTestCase(ProfileTestCase):

def test_show_storage_success(self):
self.profile["profiletype"] = constants.PROFILE_TYPE_STORAGE
- self.profile["ihost_uuid"] = self.compute.uuid
+ self.profile["ihost_uuid"] = self.worker.uuid
self.post_json('%s' % self._get_path(), self.profile)
list_data = self.get_json('%s' % self._get_path())
profile_uuid = list_data['iprofiles'][0]['uuid']
@@ -272,7 +272,7 @@ class ProfileListTestCase(ProfileTestCase):

def test_list_storage_success(self):
self.profile["profiletype"] = constants.PROFILE_TYPE_STORAGE
- self.profile["ihost_uuid"] = self.compute.uuid
+ self.profile["ihost_uuid"] = self.worker.uuid
post_response = self.post_json('%s' % self._get_path(), self.profile)
list_data = self.get_json('%s' % self._get_path())
self.assertEqual(post_response.json['uuid'],
@@ -296,7 +296,7 @@ class ProfileApplyTestCase(ProfileTestCase):
self.assertEqual(http_client.OK, result.status_int)

hostcpu_r = self.get_json(
- '/ihosts/%s/icpus' % self.compute.uuid)
+ '/ihosts/%s/icpus' % self.worker.uuid)
profile_r = self.get_json(
'%s/icpus' % self._get_path(profile_uuid))
self.assertEqual(hostcpu_r['icpus'][0]['allocated_function'],
@@ -306,20 +306,20 @@ class ProfileApplyTestCase(ProfileTestCase):
def test_apply_memory_success(self, mock_is_virtual):
mock_is_virtual.return_value = True
self.profile["profiletype"] = constants.PROFILE_TYPE_MEMORY
- self.profile["ihost_uuid"] = self.compute.uuid
+ self.profile["ihost_uuid"] = self.worker.uuid
response = self.post_json('%s' % self._get_path(), self.profile)
self.assertEqual(http_client.OK, response.status_int)

list_data = self.get_json('%s' % self._get_path())
profile_uuid = list_data['iprofiles'][0]['uuid']
- result = self.patch_dict_json('/ihosts/%s' % self.compute.id,
+ result = self.patch_dict_json('/ihosts/%s' % self.worker.id,
headers=HEADER,
action=constants.APPLY_PROFILE_ACTION,
iprofile_uuid=profile_uuid)
self.assertEqual(http_client.OK, result.status_int)

hostmem_r = self.get_json(
- '/ihosts/%s/imemorys' % self.compute.uuid)
+ '/ihosts/%s/imemorys' % self.worker.uuid)
profile_r = self.get_json(
'%s/imemorys' % self._get_path(profile_uuid))
self.assertEqual(hostmem_r['imemorys'][0]['platform_reserved_mib'],
@@ -331,7 +331,7 @@ class ProfileApplyTestCase(ProfileTestCase):

def test_apply_storage_success(self):
self.profile["profiletype"] = constants.PROFILE_TYPE_LOCAL_STORAGE
- self.profile["ihost_uuid"] = self.compute.uuid
+ self.profile["ihost_uuid"] = self.worker.uuid
response = self.post_json('%s' % self._get_path(), self.profile)
self.assertEqual(http_client.OK, response.status_int)

@@ -346,21 +346,21 @@ class ProfileApplyTestCase(ProfileTestCase):
self.delete('/ilvgs/%s' % self.lvg.uuid)

# Apply storage profile
- result = self.patch_dict_json('/ihosts/%s' % self.compute.id,
+ result = self.patch_dict_json('/ihosts/%s' % self.worker.id,
headers=HEADER,
action=constants.APPLY_PROFILE_ACTION,
iprofile_uuid=profile_uuid)
self.assertEqual(http_client.OK, result.status_int)

hostdisk_r = self.get_json(
- '/ihosts/%s/idisks' % self.compute.uuid)
+ '/ihosts/%s/idisks' % self.worker.uuid)
profile_r = self.get_json(
'%s/idisks' % self._get_path(profile_uuid))
self.assertEqual(hostdisk_r['idisks'][0]['device_path'],
profile_r['idisks'][0]['device_path'])

hostpv_r = self.get_json(
- '/ihosts/%s/ipvs' % self.compute.uuid)
+ '/ihosts/%s/ipvs' % self.worker.uuid)
profile_r = self.get_json(
'%s/ipvs' % self._get_path(profile_uuid))
self.assertEqual(hostpv_r['ipvs'][1]['pv_type'],
@@ -370,7 +370,7 @@ class ProfileApplyTestCase(ProfileTestCase):
profile_r['ipvs'][0]['lvm_pv_name'])

hostlvg_r = self.get_json(
- '/ihosts/%s/ilvgs' % self.compute.uuid)
+ '/ihosts/%s/ilvgs' % self.worker.uuid)
profile_r = self.get_json(
'%s/ilvgs' % self._get_path(profile_uuid))
self.assertEqual(hostlvg_r['ilvgs'][0]['lvm_vg_name'],

@@ -101,7 +101,7 @@ class ManagerTestCase(base.DbTestCase):
'mgmt_ip': '1.2.3.4',
'hostname': 'newhost',
'invprovision': 'unprovisioned',
- 'personality': 'compute',
+ 'personality': 'worker',
'administrative': 'locked',
'operational': 'disabled',
'availability': 'not-installed',
@@ -126,7 +126,7 @@ class ManagerTestCase(base.DbTestCase):
ihost['mgmt_ip'] = '1.2.3.4'
ihost['hostname'] = 'newhost'
ihost['invprovision'] = 'unprovisioned'
- ihost['personality'] = 'compute'
+ ihost['personality'] = 'worker'
ihost['administrative'] = 'locked'
ihost['operational'] = 'disabled'
ihost['availability'] = 'not-installed'
@@ -142,7 +142,7 @@ class ManagerTestCase(base.DbTestCase):
self.assertEqual(res['mgmt_ip'], '1.2.3.4')
self.assertEqual(res['hostname'], 'newhost')
self.assertEqual(res['invprovision'], 'unprovisioned')
- self.assertEqual(res['personality'], 'compute')
+ self.assertEqual(res['personality'], 'worker')
self.assertEqual(res['administrative'], 'locked')
self.assertEqual(res['operational'], 'disabled')
self.assertEqual(res['availability'], 'not-installed')
@@ -179,7 +179,7 @@ class ManagerTestCase(base.DbTestCase):
# IOError: [Errno 13] Permission denied: '/tmp/dnsmasq.hosts'
self.skipTest("Skipping to prevent failure notification on Jenkins")
with open(self.dnsmasq_hosts_file, 'w') as f:
- f.write("dhcp-host=08:00:27:0a:fa:fa,compute-1,192.168.204.25,2h\n")
+ f.write("dhcp-host=08:00:27:0a:fa:fa,worker-1,192.168.204.25,2h\n")

ihost = self._create_test_ihost()

@@ -187,7 +187,7 @@ class ManagerTestCase(base.DbTestCase):
ihost['mgmt_ip'] = '1.2.3.4'
ihost['hostname'] = 'newhost'
ihost['invprovision'] = 'unprovisioned'
- ihost['personality'] = 'compute'
+ ihost['personality'] = 'worker'
ihost['administrative'] = 'locked'
ihost['operational'] = 'disabled'
ihost['availability'] = 'not-installed'
@@ -202,7 +202,7 @@ class ManagerTestCase(base.DbTestCase):
with open(self.dnsmasq_hosts_file, 'r') as f:
self.assertEqual(
f.readline(),
- "dhcp-host=08:00:27:0a:fa:fa,compute-1,192.168.204.25,2h\n")
+ "dhcp-host=08:00:27:0a:fa:fa,worker-1,192.168.204.25,2h\n")
self.assertEqual(
f.readline(),
"dhcp-host=00:11:22:33:44:55,newhost,1.2.3.4,2h\n")
@@ -215,7 +215,7 @@ class ManagerTestCase(base.DbTestCase):
self.skipTest("Skipping to prevent failure notification on Jenkins")
with open(self.dnsmasq_hosts_file, 'w') as f:
f.write("dhcp-host=00:11:22:33:44:55,oldhost,1.2.3.4,2h\n")
- f.write("dhcp-host=08:00:27:0a:fa:fa,compute-1,192.168.204.25,2h\n")
+ f.write("dhcp-host=08:00:27:0a:fa:fa,worker-1,192.168.204.25,2h\n")

ihost = self._create_test_ihost()

@@ -223,7 +223,7 @@ class ManagerTestCase(base.DbTestCase):
ihost['mgmt_ip'] = '1.2.3.42'
ihost['hostname'] = 'newhost'
ihost['invprovision'] = 'unprovisioned'
- ihost['personality'] = 'compute'
+ ihost['personality'] = 'worker'
ihost['administrative'] = 'locked'
ihost['operational'] = 'disabled'
ihost['availability'] = 'not-installed'
@@ -241,7 +241,7 @@ class ManagerTestCase(base.DbTestCase):
"dhcp-host=00:11:22:33:44:55,newhost,1.2.3.42,2h\n")
self.assertEqual(
f.readline(),
- "dhcp-host=08:00:27:0a:fa:fa,compute-1,192.168.204.25,2h\n")
+ "dhcp-host=08:00:27:0a:fa:fa,worker-1,192.168.204.25,2h\n")

def test_configure_ihost_no_hostname(self):
# Test skipped to prevent error message in Jenkins. Error thrown is:

@@ -94,4 +94,4 @@ class RPCAPITestCase(base.DbTestCase):
self._test_rpcapi('configure_ihost',
'call',
host=self.fake_ihost,
- do_compute_apply=False)
+ do_worker_apply=False)

@@ -392,12 +392,12 @@

200.012:
Type: Alarm
- Description: <hostname> controller function has in-service failure while compute services remain healthy.
+ Description: <hostname> controller function has in-service failure while worker services remain healthy.
Entity_Instance_ID: host=<hostname>
Severity: major
Proposed_Repair_Action: |-
Lock and then Unlock host to recover.
- Avoid using 'Force Lock' action as that will impact compute services running on this host,
+ Avoid using 'Force Lock' action as that will impact worker services running on this host,
If lock action fails then contact next level of support to investigate and recover.
Maintenance_Action: "degrade - requires manual action"
Inhibit_Alarms: false
@@ -408,10 +408,10 @@

200.013:
Type: Alarm
- Description: <hostname> compute service of the only available controller is not poperational. Auto-recovery is disabled. Deggrading host instead.
+ Description: <hostname> worker service of the only available controller is not poperational. Auto-recovery is disabled. Deggrading host instead.
Entity_Instance_ID: host=<hostname>
Severity: major
- Proposed_Repair_Action: Enable second controller and Switch Activity (Swact) over to it as soon as possible. Then Lock and Unlock host to recover its local compute service.
+ Proposed_Repair_Action: Enable second controller and Switch Activity (Swact) over to it as soon as possible. Then Lock and Unlock host to recover its local worker service.
Maintenance_Action: "degrade - requires manual action"
Inhibit_Alarms: false
Alarm_Type: operational-violation
@@ -674,8 +674,8 @@
# ---------------------------------------------------------------------------
270.001:
Type: Alarm
- Description: "Host <host_name> compute services failure[, reason = <reason_text>]"
+ Description: "Host <host_name> worker services failure[, reason = <reason_text>]"
- Entity_Instance_ID: host=<host_name>.services=compute
+ Entity_Instance_ID: host=<host_name>.services=worker
Severity: critical
Proposed_Repair_Action: Wait for host services recovery to complete; if problem persists contact next level of support
Maintenance_Action:
@@ -687,7 +687,7 @@

270.101:
Type: Log
- Description: "Host <host_name> compute services failure[, reason = <reason_text>]"
+ Description: "Host <host_name> worker services failure[, reason = <reason_text>]"
Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
Severity: critical
Alarm_Type: equipment
@@ -696,7 +696,7 @@

270.102:
Type: Log
- Description: Host <host_name> compute services enabled
+ Description: Host <host_name> worker services enabled
Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
Severity: critical
Alarm_Type: equipment
@@ -705,7 +705,7 @@

270.103:
Type: Log
- Description: Host <host_name> compute services disabled
+ Description: Host <host_name> worker services disabled
Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
Severity: critical
Alarm_Type: equipment
@@ -774,10 +774,10 @@

300.004:
Type: Alarm
- Description: No enabled compute host with connectivity to provider network.
+ Description: No enabled worker host with connectivity to provider network.
Entity_Instance_ID: host=<hostname>.providernet=<pnet-uuid>
Severity: major
- Proposed_Repair_Action: Enable compute hosts with required provider network connectivity.
+ Proposed_Repair_Action: Enable worker hosts with required provider network connectivity.
Maintenance_Action:
Inhibit_Alarms:
Alarm_Type: operational-violation

@@ -504,29 +504,29 @@ class InterfaceTestCase(BaseTestCase):
         self.assertEqual(index[constants.NETWORK_TYPE_OAM],
                          str(self.oam_gateway_address.ip))

-    def test_is_compute_subfunction_true(self):
-        self.host['personality'] = constants.COMPUTE
-        self.host['subfunctions'] = constants.COMPUTE
+    def test_is_worker_subfunction_true(self):
+        self.host['personality'] = constants.WORKER
+        self.host['subfunctions'] = constants.WORKER
         self._update_context()
-        self.assertTrue(interface.is_compute_subfunction(self.context))
+        self.assertTrue(interface.is_worker_subfunction(self.context))

-    def test_is_compute_subfunction_true_cpe(self):
+    def test_is_worker_subfunction_true_cpe(self):
         self.host['personality'] = constants.CONTROLLER
-        self.host['subfunctions'] = constants.COMPUTE
+        self.host['subfunctions'] = constants.WORKER
         self._update_context()
-        self.assertTrue(interface.is_compute_subfunction(self.context))
+        self.assertTrue(interface.is_worker_subfunction(self.context))

-    def test_is_compute_subfunction_false(self):
+    def test_is_worker_subfunction_false(self):
         self.host['personality'] = constants.STORAGE
         self.host['subfunctions'] = constants.STORAGE
         self._update_context()
-        self.assertFalse(interface.is_compute_subfunction(self.context))
+        self.assertFalse(interface.is_worker_subfunction(self.context))

-    def test_is_compute_subfunction_false_cpe(self):
+    def test_is_worker_subfunction_false_cpe(self):
         self.host['personality'] = constants.CONTROLLER
         self.host['subfunctions'] = constants.CONTROLLER
         self._update_context()
-        self.assertFalse(interface.is_compute_subfunction(self.context))
+        self.assertFalse(interface.is_worker_subfunction(self.context))

     def test_is_pci_interface_true(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_SRIOV

@@ -670,10 +670,10 @@ class InterfaceTestCase(BaseTestCase):
             self.context, self.iface)
         self.assertEqual(method, 'manual')

-    def test_get_interface_address_method_for_pxeboot_compute(self):
+    def test_get_interface_address_method_for_pxeboot_worker(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_PLATFORM
         self.iface['networktype'] = constants.NETWORK_TYPE_PXEBOOT
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         self._update_context()
         method = interface.get_interface_address_method(
             self.context, self.iface)

@@ -697,10 +697,10 @@ class InterfaceTestCase(BaseTestCase):
             self.context, self.iface)
         self.assertEqual(method, 'static')

-    def test_get_interface_address_method_for_mgmt_compute(self):
+    def test_get_interface_address_method_for_mgmt_worker(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_PLATFORM
         self.iface['networktype'] = constants.NETWORK_TYPE_MGMT
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         self._update_context()
         method = interface.get_interface_address_method(
             self.context, self.iface)

@@ -724,10 +724,10 @@ class InterfaceTestCase(BaseTestCase):
             self.context, self.iface)
         self.assertEqual(method, 'static')

-    def test_get_interface_address_method_for_infra_compute(self):
+    def test_get_interface_address_method_for_infra_worker(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_PLATFORM
         self.iface['networktype'] = constants.NETWORK_TYPE_INFRA
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         self._update_context()
         method = interface.get_interface_address_method(
             self.context, self.iface)
@@ -889,84 +889,84 @@ class InterfaceTestCase(BaseTestCase):
         needed = interface.needs_interface_config(self.context, self.iface)
         self.assertFalse(needed)

-    def test_needs_interface_config_data_slow_compute(self):
+    def test_needs_interface_config_data_slow_worker(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA
         self.iface['networktype'] = constants.NETWORK_TYPE_DATA
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         self.port['dpdksupport'] = False
         self._update_context()
         needed = interface.needs_interface_config(self.context, self.iface)
         self.assertTrue(needed)

-    def test_needs_interface_config_data_mlx4_compute(self):
+    def test_needs_interface_config_data_mlx4_worker(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA
         self.iface['networktype'] = constants.NETWORK_TYPE_DATA
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         self.port['driver'] = interface.DRIVER_MLX_CX3
         self._update_context()
         needed = interface.needs_interface_config(self.context, self.iface)
         self.assertTrue(needed)

-    def test_needs_interface_config_data_mlx5_compute(self):
+    def test_needs_interface_config_data_mlx5_worker(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA
         self.iface['networktype'] = constants.NETWORK_TYPE_DATA
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         self.port['driver'] = interface.DRIVER_MLX_CX4
         self._update_context()
         needed = interface.needs_interface_config(self.context, self.iface)
         self.assertTrue(needed)

-    def test_needs_interface_config_sriov_compute(self):
+    def test_needs_interface_config_sriov_worker(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_SRIOV
         self.iface['networktype'] = constants.NETWORK_TYPE_PCI_SRIOV
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         self._update_context()
         needed = interface.needs_interface_config(self.context, self.iface)
         self.assertTrue(needed)

-    def test_needs_interface_config_pthru_compute(self):
+    def test_needs_interface_config_pthru_worker(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_PASSTHROUGH
         self.iface['networktype'] = constants.NETWORK_TYPE_PCI_PASSTHROUGH
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         self._update_context()
         needed = interface.needs_interface_config(self.context, self.iface)
         self.assertTrue(needed)

-    def test_needs_interface_config_data_cpe_compute(self):
+    def test_needs_interface_config_data_cpe_worker(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA
         self.iface['networktype'] = constants.NETWORK_TYPE_DATA
         self.host['personality'] = constants.CONTROLLER
-        self.host['subfunctions'] = constants.COMPUTE
+        self.host['subfunctions'] = constants.WORKER
         self.port['dpdksupport'] = True
         self._update_context()
         needed = interface.needs_interface_config(self.context, self.iface)
         self.assertFalse(needed)

-    def test_needs_interface_config_data_slow_cpe_compute(self):
+    def test_needs_interface_config_data_slow_cpe_worker(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA
         self.iface['networktype'] = constants.NETWORK_TYPE_DATA
         self.host['personality'] = constants.CONTROLLER
-        self.host['subfunctions'] = constants.COMPUTE
+        self.host['subfunctions'] = constants.WORKER
         self.port['dpdksupport'] = False
         self._update_context()
         needed = interface.needs_interface_config(self.context, self.iface)
         self.assertTrue(needed)

-    def test_needs_interface_config_data_mlx4_cpe_compute(self):
+    def test_needs_interface_config_data_mlx4_cpe_worker(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA
         self.iface['networktype'] = constants.NETWORK_TYPE_DATA
         self.host['personality'] = constants.CONTROLLER
-        self.host['subfunctions'] = constants.COMPUTE
+        self.host['subfunctions'] = constants.WORKER
         self.port['driver'] = interface.DRIVER_MLX_CX3
         self._update_context()
         needed = interface.needs_interface_config(self.context, self.iface)
         self.assertTrue(needed)

-    def test_needs_interface_config_data_mlx5_cpe_compute(self):
+    def test_needs_interface_config_data_mlx5_cpe_worker(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA
         self.iface['networktype'] = constants.NETWORK_TYPE_DATA
         self.host['personality'] = constants.CONTROLLER
-        self.host['subfunctions'] = constants.COMPUTE
+        self.host['subfunctions'] = constants.WORKER
         self.port['driver'] = interface.DRIVER_MLX_CX4
         self._update_context()
         needed = interface.needs_interface_config(self.context, self.iface)
@@ -981,20 +981,20 @@ class InterfaceTestCase(BaseTestCase):
         needed = interface.needs_interface_config(self.context, self.iface)
         self.assertFalse(needed)

-    def test_needs_interface_config_sriov_cpe_compute(self):
+    def test_needs_interface_config_sriov_cpe_worker(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_SRIOV
         self.iface['networktype'] = constants.NETWORK_TYPE_PCI_SRIOV
         self.host['personality'] = constants.CONTROLLER
-        self.host['subfunctions'] = constants.COMPUTE
+        self.host['subfunctions'] = constants.WORKER
         self._update_context()
         needed = interface.needs_interface_config(self.context, self.iface)
         self.assertTrue(needed)

-    def test_needs_interface_config_pthru_cpe_compute(self):
+    def test_needs_interface_config_pthru_cpe_worker(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_PASSTHROUGH
         self.iface['networktype'] = constants.NETWORK_TYPE_PCI_PASSTHROUGH
         self.host['personality'] = constants.CONTROLLER
-        self.host['subfunctions'] = constants.COMPUTE
+        self.host['subfunctions'] = constants.WORKER
         self._update_context()
         needed = interface.needs_interface_config(self.context, self.iface)
         self.assertTrue(needed)
@@ -1192,10 +1192,10 @@ class InterfaceTestCase(BaseTestCase):
         print(expected)
         self.assertEqual(expected, config)

-    def test_get_compute_ethernet_config_mgmt(self):
+    def test_get_worker_ethernet_config_mgmt(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_PLATFORM
         self.iface['networktype'] = constants.NETWORK_TYPE_MGMT
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         for network in self.networks:
             if network['type'] == constants.NETWORK_TYPE_MGMT:
                 net_id = network['id']

@@ -1213,10 +1213,10 @@ class InterfaceTestCase(BaseTestCase):
         print(expected)
         self.assertEqual(expected, config)

-    def test_get_compute_ethernet_config_infra(self):
+    def test_get_worker_ethernet_config_infra(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_PLATFORM
         self.iface['networktype'] = constants.NETWORK_TYPE_INFRA
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         for network in self.networks:
             if network['type'] == constants.NETWORK_TYPE_INFRA:
                 net_id = network['id']

@@ -1234,10 +1234,10 @@ class InterfaceTestCase(BaseTestCase):
         print(expected)
         self.assertEqual(expected, config)

-    def test_get_compute_ethernet_config_pci_sriov(self):
+    def test_get_worker_ethernet_config_pci_sriov(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_SRIOV
         self.iface['networktype'] = constants.NETWORK_TYPE_PCI_SRIOV
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         self._update_context()
         config = interface.get_interface_network_config(
             self.context, self.iface)

@@ -1251,10 +1251,10 @@ class InterfaceTestCase(BaseTestCase):
         print(expected)
         self.assertEqual(expected, config)

-    def test_get_compute_ethernet_config_pci_pthru(self):
+    def test_get_worker_ethernet_config_pci_pthru(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_PASSTHROUGH
         self.iface['networktype'] = constants.NETWORK_TYPE_PCI_PASSTHROUGH
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         self._update_context()
         config = interface.get_interface_network_config(
             self.context, self.iface)

@@ -1268,11 +1268,11 @@ class InterfaceTestCase(BaseTestCase):
         print(expected)
         self.assertEqual(expected, config)

-    def test_get_compute_ethernet_config_data_slow(self):
+    def test_get_worker_ethernet_config_data_slow(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA
         self.iface['networktype'] = constants.NETWORK_TYPE_DATA
         self.port['dpdksupport'] = False
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         self._update_context()
         config = interface.get_interface_network_config(
             self.context, self.iface)

@@ -1284,10 +1284,10 @@ class InterfaceTestCase(BaseTestCase):
         print(expected)
         self.assertEqual(expected, config)

-    def test_get_compute_ethernet_config_data_slow_as_bond_slave(self):
+    def test_get_worker_ethernet_config_data_slow_as_bond_slave(self):
         bond = self._create_bond_test("data1", constants.INTERFACE_CLASS_DATA,
                                       constants.NETWORK_TYPE_DATA)
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         self._update_context()
         lower_ifname = bond['uses'][0]
         lower_iface = self.context['interfaces'][lower_ifname]

@@ -1305,11 +1305,11 @@ class InterfaceTestCase(BaseTestCase):
         print(expected)
         self.assertEqual(expected, config)

-    def test_get_compute_ethernet_config_data_slow_bridge(self):
+    def test_get_worker_ethernet_config_data_slow_bridge(self):
         self.iface['ifclass'] = constants.INTERFACE_CLASS_DATA
         self.iface['networktype'] = constants.NETWORK_TYPE_DATA
         self.port['dpdksupport'] = False
-        self.host['personality'] = constants.COMPUTE
+        self.host['personality'] = constants.WORKER
         self._update_context()
         avp_config, bridge_config = interface.get_bridged_network_config(
             self.context, self.iface)

@@ -1453,10 +1453,10 @@ class InterfaceTestCase(BaseTestCase):

 class InterfaceHostTestCase(BaseTestCase):
     def _setup_configuration(self):
-        # Personality is set to compute to avoid issues due to missing OAM
+        # Personality is set to worker to avoid issues due to missing OAM
         # interface in this empty/dummy configuration
         self._create_test_common()
-        self._create_test_host(constants.COMPUTE)
+        self._create_test_host(constants.WORKER)

     def _update_context(self):
         # ensure DB entries are updated prior to updating the context which
@@ -1572,7 +1572,7 @@ class InterfaceHostTestCase(BaseTestCase):
     def test_needs_interface_config(self):
         expected_configured = (self.expected_platform_interfaces +
                                [self.expected_bmc_interface])
-        if interface.is_compute_subfunction(self.context):
+        if interface.is_worker_subfunction(self.context):
             expected_configured += (self.expected_pci_interfaces +
                                     self.expected_slow_interfaces +
                                     self.expected_mlx_interfaces)
@@ -1670,9 +1670,9 @@ class InterfaceControllerVlanOverEthernet(InterfaceHostTestCase):
 class InterfaceComputeEthernet(InterfaceHostTestCase):
     def _setup_configuration(self):
         # Setup a sample configuration where the personality is set to a
-        # compute and all interfaces are ethernet interfaces.
+        # worker and all interfaces are ethernet interfaces.
         self._create_test_common()
-        self._create_test_host(constants.COMPUTE)
+        self._create_test_host(constants.WORKER)
         self._create_ethernet_test('mgmt', None, constants.NETWORK_TYPE_MGMT)
         self._create_ethernet_test('infra', None, constants.NETWORK_TYPE_INFRA)
         self._create_ethernet_test('data', constants.INTERFACE_CLASS_DATA,

@@ -1710,10 +1710,10 @@ class InterfaceComputeEthernet(InterfaceHostTestCase):
 class InterfaceComputeVlanOverEthernet(InterfaceHostTestCase):
     def _setup_configuration(self):
         # Setup a sample configuration where the personality is set to a
-        # compute and all interfaces are vlan interfaces over ethernet
+        # worker and all interfaces are vlan interfaces over ethernet
         # interfaces.
         self._create_test_common()
-        self._create_test_host(constants.COMPUTE)
+        self._create_test_host(constants.WORKER)
         port, iface = self._create_ethernet_test(
             'pxeboot', None, constants.NETWORK_TYPE_PXEBOOT)
         self._create_vlan_test('mgmt', None, constants.NETWORK_TYPE_MGMT, 2,

@@ -1739,8 +1739,8 @@ class InterfaceComputeBond(InterfaceHostTestCase):
     def _setup_configuration(self):
         # Setup a sample configuration where the personality is set to a
         self._create_test_common()
-        # compute and all interfaces are aggregated ethernet interfaces.
-        self._create_test_host(constants.COMPUTE)
+        # worker and all interfaces are aggregated ethernet interfaces.
+        self._create_test_host(constants.WORKER)
         self._create_bond_test('mgmt', None, constants.NETWORK_TYPE_MGMT)
         self._create_bond_test('infra', None, constants.NETWORK_TYPE_INFRA)
         self._create_bond_test('data', constants.INTERFACE_CLASS_DATA,

@@ -1768,10 +1768,10 @@ class InterfaceComputeBond(InterfaceHostTestCase):
 class InterfaceComputeVlanOverBond(InterfaceHostTestCase):
     def _setup_configuration(self):
         # Setup a sample configuration where the personality is set to a
-        # compute and all interfaces are vlan interfaces over ethernet
+        # worker and all interfaces are vlan interfaces over ethernet
         # interfaces.
         self._create_test_common()
-        self._create_test_host(constants.COMPUTE)
+        self._create_test_host(constants.WORKER)
         bond = self._create_bond_test('pxeboot', None,
                                       constants.NETWORK_TYPE_PXEBOOT)
         self._create_vlan_test('oam', None, constants.NETWORK_TYPE_OAM, 1, bond)

@@ -1937,10 +1937,10 @@ class InterfaceCpeVlanOverBond(InterfaceHostTestCase):
 class InterfaceCpeComputeEthernet(InterfaceHostTestCase):
     def _setup_configuration(self):
         # Setup a sample configuration where the personality is set to a
-        # controller with a compute subfunction and all interfaces are
+        # controller with a worker subfunction and all interfaces are
         # ethernet interfaces.
         self._create_test_common()
-        self._create_test_host(constants.CONTROLLER, constants.COMPUTE)
+        self._create_test_host(constants.CONTROLLER, constants.WORKER)
         self._create_ethernet_test('oam', None, constants.NETWORK_TYPE_OAM)
         self._create_ethernet_test('mgmt', None, constants.NETWORK_TYPE_MGMT)
         self._create_ethernet_test('infra', None, constants.NETWORK_TYPE_INFRA)

@@ -1979,10 +1979,10 @@ class InterfaceCpeComputeEthernet(InterfaceHostTestCase):
 class InterfaceCpeComputeVlanOverEthernet(InterfaceHostTestCase):
     def _setup_configuration(self):
         # Setup a sample configuration where the personality is set to a
-        # controller with a compute subfunction and all interfaces are
+        # controller with a worker subfunction and all interfaces are
         # vlan interfaces over ethernet interfaces.
         self._create_test_common()
-        self._create_test_host(constants.CONTROLLER, constants.COMPUTE)
+        self._create_test_host(constants.CONTROLLER, constants.WORKER)
         port, iface = self._create_ethernet_test(
             'pxeboot', None, constants.NETWORK_TYPE_PXEBOOT)
         self._create_vlan_test('oam', None, constants.NETWORK_TYPE_OAM, 1, iface)

@@ -2008,10 +2008,10 @@ class InterfaceCpeComputeVlanOverEthernet(InterfaceHostTestCase):
 class InterfaceCpeComputeBond(InterfaceHostTestCase):
     def _setup_configuration(self):
         # Setup a sample configuration where the personality is set to a
-        # controller with a compute subfunction and all interfaces are
+        # controller with a worker subfunction and all interfaces are
         # aggregated ethernet interfaces.
         self._create_test_common()
-        self._create_test_host(constants.CONTROLLER, constants.COMPUTE)
+        self._create_test_host(constants.CONTROLLER, constants.WORKER)
         self._create_bond_test('oam', None, constants.NETWORK_TYPE_OAM)
         self._create_bond_test('mgmt', None, constants.NETWORK_TYPE_MGMT)
         self._create_bond_test('infra', None, constants.NETWORK_TYPE_INFRA)

@@ -2038,10 +2038,10 @@ class InterfaceCpeComputeBond(InterfaceHostTestCase):
 class InterfaceCpeComputeVlanOverBond(InterfaceHostTestCase):
     def _setup_configuration(self):
         # Setup a sample configuration where the personality is set to a
-        # controller with a compute subfunction and all interfaces are
+        # controller with a worker subfunction and all interfaces are
        # vlan interfaces over aggregated ethernet interfaces.
         self._create_test_common()
-        self._create_test_host(constants.CONTROLLER, constants.COMPUTE)
+        self._create_test_host(constants.CONTROLLER, constants.WORKER)
         bond = self._create_bond_test('pxeboot', None,
                                       constants.NETWORK_TYPE_PXEBOOT)
         self._create_vlan_test('oam', None, constants.NETWORK_TYPE_OAM, 1, bond)
@@ -3,4 +3,4 @@
 .distro/centos7/rpmbuild/SRPMS
 .distro/centos7/rpmbuild/BUILD
 .distro/centos7/rpmbuild/BUILDROOT
-.distro/centos7/rpmbuild/SOURCES/compute-huge*tar.gz
+.distro/centos7/rpmbuild/SOURCES/worker-utils*tar.gz

worker-utils/centos/build_srpm.data (new file, 3 lines)
@@ -0,0 +1,3 @@
+SRC_DIR="worker-utils"
+COPY_LIST="$SRC_DIR/LICENSE"
+TIS_PATCH_VER=1
@@ -1,5 +1,5 @@
-Summary: Initial compute node hugepages and reserved cpus configuration
-Name: compute-huge
+Summary: Initial worker node resource reservation and misc. utilities
+Name: worker-utils
 Version: 1.0
 Release: %{tis_patch_ver}%{?_tis_dist}
 License: Apache-2.0

@@ -15,11 +15,11 @@ Requires: python
 Requires: /bin/systemctl

 %description
-Initial compute node hugepages and reserved cpus configuration
+Initial worker node resource reservation and misc. utilities

 %define local_bindir /usr/bin/
 %define local_etc_initd /etc/init.d/
-%define local_etc_nova /etc/nova/
+%define local_etc_platform /etc/platform/
 %define local_etc_goenabledd /etc/goenabled.d/

 %define debug_package %{nil}

@@ -34,7 +34,7 @@ make
 make install BINDIR=%{buildroot}%{local_bindir} \
              INITDDIR=%{buildroot}%{local_etc_initd} \
              GOENABLEDDIR=%{buildroot}%{local_etc_goenabledd} \
-             NOVACONFDIR=%{buildroot}%{local_etc_nova} \
+             PLATFORMCONFDIR=%{buildroot}%{local_etc_platform} \
              SYSTEMDDIR=%{buildroot}%{_unitdir}

 %post

@@ -50,6 +50,6 @@ rm -rf $RPM_BUILD_ROOT
 %{local_bindir}/*
 %{local_etc_initd}/*
 %{local_etc_goenabledd}/*
-%config(noreplace) %{local_etc_nova}/compute_reserved.conf
+%config(noreplace) %{local_etc_platform}/worker_reserved.conf

 %{_unitdir}/affine-platform.sh.service
@@ -5,7 +5,7 @@
 BINDIR ?= /usr/bin
 INITDDIR ?= /etc/init.d/
 GOENABLEDDIR ?= /etc/goenabled.d/
-NOVACONFDIR ?= /etc/nova
+PLATFORMCONFDIR ?= /etc/platform
 SYSTEMDDIR ?= /usr/lib/systemd/system/

 all:

@@ -15,7 +15,7 @@ install:
         install -d -m 755 $(BINDIR)
         install -d -m 755 $(INITDDIR)
         install -d -m 755 $(GOENABLEDDIR)
-        install -d -m 755 $(NOVACONFDIR)
+        install -d -m 755 $(PLATFORMCONFDIR)
         install -d -m 755 $(SYSTEMDDIR)
         install -p -D -m 755 affine-platform.sh $(INITDDIR)/affine-platform.sh
         install -p -D -m 755 cpumap_functions.sh $(INITDDIR)/cpumap_functions.sh

@@ -26,6 +26,6 @@ install:
         install -p -D -m 755 affine-interrupts.sh $(BINDIR)/affine-interrupts.sh
         install -p -D -m 755 set-cpu-wakeup-latency.sh $(BINDIR)/set-cpu-wakeup-latency.sh
         install -p -D -m 755 topology $(BINDIR)/topology
-        install -p -D -m 755 compute_reserved.conf $(NOVACONFDIR)/compute_reserved.conf
-        install -p -D -m 755 compute-huge-goenabled.sh $(GOENABLEDDIR)/compute-huge-goenabled.sh
+        install -p -D -m 755 worker_reserved.conf $(PLATFORMCONFDIR)/worker_reserved.conf
+        install -p -D -m 755 worker-goenabled.sh $(GOENABLEDDIR)/worker-goenabled.sh
         install -p -D -m 664 affine-platform.sh.service $(SYSTEMDDIR)/affine-platform.sh.service
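For reference, a minimal sketch of how the renamed install target above might be invoked; the variable values shown are assumptions mirroring the spec file macros, not values taken verbatim from this commit (the spec prefixes each path with %{buildroot}):

    # hypothetical packaging invocation of the worker-utils Makefile
    make install BINDIR=/usr/bin \
                 INITDDIR=/etc/init.d \
                 GOENABLEDDIR=/etc/goenabled.d \
                 PLATFORMCONFDIR=/etc/platform \
                 SYSTEMDDIR=/usr/lib/systemd/system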
@@ -64,7 +64,7 @@ function affine_tasks {
     for i in ${irqs[@]}; do
         /bin/bash -c "[[ -e /proc/irq/${i} ]] && echo ${CPULIST} > /proc/irq/${i}/smp_affinity_list" 2>/dev/null
     done
-    if [[ "$subfunction" == *"compute,lowlatency" ]]; then
+    if [[ "$subfunction" == *"worker,lowlatency" ]]; then
         # Affine work queues to platform cores
         echo ${PLATFORM_COREMASK} > /sys/devices/virtual/workqueue/cpumask
         echo ${PLATFORM_COREMASK} > /sys/bus/workqueue/devices/writeback/cpumask
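A minimal standalone sketch of the IRQ-affining pattern used in the hunk above; the IRQ number and CPU list are illustrative placeholders, not values from this change, and the write requires root:

    # affine one IRQ to the platform CPUs, mirroring the loop in affine_tasks
    CPULIST="0-1"   # assumed platform CPU list
    irq=53          # assumed IRQ number
    if [[ -e /proc/irq/${irq} ]]; then
        echo "${CPULIST}" > /proc/irq/${irq}/smp_affinity_list
    fi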
@@ -1,7 +1,7 @@
 [Unit]
 Description=Titanium Cloud Affine Platform
 After=syslog.service network.service dbus.service sw-patch.service
-Before=computeconfig.service
+Before=workerconfig.service

 [Service]
 Type=oneshot
@@ -1,6 +1,6 @@
 #!/bin/bash
 ################################################################################
-# Copyright (c) 2013-2015 Wind River Systems, Inc.
+# Copyright (c) 2013-2018 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #

@@ -239,8 +239,8 @@ function any_in_list {
 function get_platform_cpu_list {
     ## Define platform cpulist based on engineering a number of cores and
     ## whether this is a combo or not, and include SMT siblings.
-    if [[ $subfunction = *compute* ]]; then
-        RESERVE_CONF="/etc/nova/compute_reserved.conf"
+    if [[ $subfunction = *worker* ]]; then
+        RESERVE_CONF="/etc/platform/worker_reserved.conf"
         [[ -e ${RESERVE_CONF} ]] && source ${RESERVE_CONF}
         if [ -n "$PLATFORM_CPU_LIST" ];then
             echo "$PLATFORM_CPU_LIST"

@@ -265,7 +265,7 @@ function get_platform_cpu_list {
 function get_vswitch_cpu_list {
     ## Define default avp cpulist based on engineered number of platform cores,
     ## engineered avp cores, and include SMT siblings.
-    if [[ $subfunction = *compute* ]]; then
+    if [[ $subfunction = *worker* ]]; then
         VSWITCH_CONF="/etc/vswitch/vswitch.conf"
         [[ -e ${VSWITCH_CONF} ]] && source ${VSWITCH_CONF}
         if [ -n "$VSWITCH_CPU_LIST" ];then
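A minimal sketch of how a script could consume the relocated configuration, assuming a hypothetical PLATFORM_CPU_LIST value in /etc/platform/worker_reserved.conf (the variable name comes from the hunk above; the sample value is illustrative only):

    # read the reserved platform CPU list the same way get_platform_cpu_list does
    # hypothetical conf content:  PLATFORM_CPU_LIST="0-1"
    RESERVE_CONF="/etc/platform/worker_reserved.conf"
    if [[ -e ${RESERVE_CONF} ]]; then
        source ${RESERVE_CONF}
        echo "Reserved platform CPUs: ${PLATFORM_CPU_LIST:-<unset>}"
    fi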
Some files were not shown because too many files have changed in this diff.