Merge remote-tracking branch 'starlingx/master' into f/centos8

Change-Id: I3d182a19798182a62382921b45f84b75bb70f628
Signed-off-by: Saul Wold <sgw@linux.intel.com>
This commit is contained in:
Saul Wold 2020-02-04 13:15:59 -08:00
commit 8ac6ec70cb
194 changed files with 5590 additions and 17777 deletions

View File

@ -8,28 +8,28 @@
jobs: jobs:
- openstack-tox-linters - openstack-tox-linters
- sysinv-tox-py27 - sysinv-tox-py27
- sysinv-tox-py35 - sysinv-tox-py36
- sysinv-tox-flake8 - sysinv-tox-flake8
- sysinv-tox-pylint - sysinv-tox-pylint
- sysinv-tox-bandit - sysinv-tox-bandit
- controllerconfig-tox-flake8 - controllerconfig-tox-flake8
- controllerconfig-tox-py27
- controllerconfig-tox-pylint - controllerconfig-tox-pylint
- cgtsclient-tox-py27 - cgtsclient-tox-py27
- cgtsclient-tox-py36
- cgtsclient-tox-pep8 - cgtsclient-tox-pep8
- cgtsclient-tox-pylint - cgtsclient-tox-pylint
gate: gate:
jobs: jobs:
- openstack-tox-linters - openstack-tox-linters
- sysinv-tox-py27 - sysinv-tox-py27
- sysinv-tox-py35 - sysinv-tox-py36
- sysinv-tox-flake8 - sysinv-tox-flake8
- sysinv-tox-pylint - sysinv-tox-pylint
- sysinv-tox-bandit - sysinv-tox-bandit
- controllerconfig-tox-flake8 - controllerconfig-tox-flake8
- controllerconfig-tox-py27
- controllerconfig-tox-pylint - controllerconfig-tox-pylint
- cgtsclient-tox-py27 - cgtsclient-tox-py27
- cgtsclient-tox-py36
- cgtsclient-tox-pep8 - cgtsclient-tox-pep8
- cgtsclient-tox-pylint - cgtsclient-tox-pylint
@ -50,11 +50,11 @@
tox_extra_args: -c sysinv/sysinv/sysinv/tox.ini tox_extra_args: -c sysinv/sysinv/sysinv/tox.ini
- job: - job:
name: sysinv-tox-py35 name: sysinv-tox-py36
parent: tox parent: tox
description: | description: |
Run py35 test for sysinv Run py36 test for sysinv
nodeset: ubuntu-xenial nodeset: ubuntu-bionic
required-projects: required-projects:
- starlingx/fault - starlingx/fault
- starlingx/update - starlingx/update
@ -62,7 +62,7 @@
files: files:
- sysinv/sysinv/* - sysinv/sysinv/*
vars: vars:
tox_envlist: py35 tox_envlist: py36
tox_extra_args: -c sysinv/sysinv/sysinv/tox.ini tox_extra_args: -c sysinv/sysinv/sysinv/tox.ini
- job: - job:
@ -112,18 +112,6 @@
tox_envlist: flake8 tox_envlist: flake8
tox_extra_args: -c controllerconfig/controllerconfig/tox.ini tox_extra_args: -c controllerconfig/controllerconfig/tox.ini
- job:
name: controllerconfig-tox-py27
parent: tox
description: Run py27 tests for controllerconfig
required-projects:
- starlingx/fault
files:
- controllerconfig/*
vars:
tox_envlist: py27
tox_extra_args: -c controllerconfig/controllerconfig/tox.ini
- job: - job:
name: controllerconfig-tox-pylint name: controllerconfig-tox-pylint
parent: tox parent: tox
@ -171,6 +159,19 @@
tox_envlist: py27 tox_envlist: py27
tox_extra_args: -c sysinv/cgts-client/cgts-client/tox.ini tox_extra_args: -c sysinv/cgts-client/cgts-client/tox.ini
- job:
name: cgtsclient-tox-py36
parent: tox
description: |
Run py36 test for cgts-client
nodeset: ubuntu-bionic
files:
- sysinv/cgts-client/*
vars:
tox_envlist: py36
tox_extra_args: -c sysinv/cgts-client/cgts-client/tox.ini
- job: - job:
name: cgtsclient-tox-pep8 name: cgtsclient-tox-pep8
parent: tox parent: tox

View File

@ -51,7 +51,7 @@ master_doc = 'index'
# General information about the project. # General information about the project.
repository_name = 'openstack/stx-config' repository_name = 'openstack/stx-config'
project = u'stx-config' project = u'StarlingX Configuration'
bug_project = 'starlingx' bug_project = 'starlingx'
bug_tag = 'stx.config' bug_tag = 'stx.config'

View File

@ -1,12 +1,13 @@
======================== ===========================
stx-config API Reference Configuration API Reference
======================== ===========================
Use the StarlingX stx-config API for system configuration management. Use the StarlingX Configuration API for system configuration management.
stx-config API content can be searched using the :ref:`search page <search>`. Search Configuration API content using the :ref:`search page <search>`.
API Reference -------------
API reference
------------- -------------
.. toctree:: .. toctree::

View File

@ -1,2 +1,2 @@
SRC_DIR="controllerconfig" SRC_DIR="controllerconfig"
TIS_PATCH_VER=151 TIS_PATCH_VER=152

View File

@ -57,10 +57,7 @@ mkdir -p $RPM_BUILD_ROOT/wheels
install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/ install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/
install -d -m 755 %{buildroot}%{local_bindir} install -d -m 755 %{buildroot}%{local_bindir}
install -p -D -m 700 scripts/keyringstaging %{buildroot}%{local_bindir}/keyringstaging
install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
install -p -D -m 700 scripts/install_clone.py %{buildroot}%{local_bindir}/install_clone
install -p -D -m 700 scripts/finish_install_clone.sh %{buildroot}%{local_bindir}/finish_install_clone.sh
install -d -m 755 %{buildroot}%{local_goenabledd} install -d -m 755 %{buildroot}%{local_goenabledd}
install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh
@ -74,13 +71,12 @@ install -p -D -m 755 upgrade-scripts/* %{buildroot}%{local_etc_upgraded}/
install -d -m 755 %{buildroot}%{local_etc_systemd} install -d -m 755 %{buildroot}%{local_etc_systemd}
install -p -D -m 664 scripts/controllerconfig.service %{buildroot}%{local_etc_systemd}/controllerconfig.service install -p -D -m 664 scripts/controllerconfig.service %{buildroot}%{local_etc_systemd}/controllerconfig.service
#install -p -D -m 664 scripts/config.service %{buildroot}%{local_etc_systemd}/config.service
%post %post
systemctl enable controllerconfig.service systemctl enable controllerconfig.service
%clean %clean
rm -rf $RPM_BUILD_ROOT rm -rf $RPM_BUILD_ROOT
%files %files
%defattr(-,root,root,-) %defattr(-,root,root,-)

View File

@ -1,34 +1,10 @@
# #
# Copyright (c) 2015-2019 Wind River Systems, Inc. # Copyright (c) 2015-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
from controllerconfig.common.validator import validate # noqa: F401
from controllerconfig.common.configobjects import Network # noqa: F401
from controllerconfig.common.configobjects import DEFAULT_CONFIG # noqa: F401
from controllerconfig.common.configobjects import REGION_CONFIG # noqa: F401
from controllerconfig.common.configobjects import DEFAULT_NAMES # noqa: F401
from controllerconfig.common.configobjects import HP_NAMES # noqa: F401
from controllerconfig.common.configobjects import SUBCLOUD_CONFIG # noqa: F401
from controllerconfig.common.configobjects import MGMT_TYPE # noqa: F401
from controllerconfig.common.configobjects import INFRA_TYPE # noqa: F401
from controllerconfig.common.configobjects import OAM_TYPE # noqa: F401
from controllerconfig.common.configobjects import NETWORK_PREFIX_NAMES # noqa: F401
from controllerconfig.common.configobjects import HOST_XML_ATTRIBUTES # noqa: F401
from controllerconfig.common.configobjects import DEFAULT_DOMAIN_NAME # noqa: F401
from controllerconfig.common.exceptions import ConfigError # noqa: F401 from controllerconfig.common.exceptions import ConfigError # noqa: F401
from controllerconfig.common.exceptions import ConfigFail # noqa: F401
from controllerconfig.common.exceptions import ValidateFail # noqa: F401 from controllerconfig.common.exceptions import ValidateFail # noqa: F401
from controllerconfig.utils import is_valid_vlan # noqa: F401
from controllerconfig.utils import is_mtu_valid # noqa: F401
from controllerconfig.utils import validate_network_str # noqa: F401 from controllerconfig.utils import validate_network_str # noqa: F401
from controllerconfig.utils import validate_address_str # noqa: F401 from controllerconfig.utils import validate_address_str # noqa: F401
from controllerconfig.utils import validate_address # noqa: F401
from controllerconfig.utils import is_valid_url # noqa: F401
from controllerconfig.utils import is_valid_domain_or_ip # noqa: F401
from controllerconfig.utils import ip_version_to_string # noqa: F401
from controllerconfig.utils import lag_mode_to_str # noqa: F401
from controllerconfig.utils import validate_openstack_password # noqa: F401
from controllerconfig.utils import validate_nameserver_address_str # noqa: F401
from controllerconfig.utils import extract_openstack_password_rules_from_file # noqa: F401

View File

@ -1,712 +0,0 @@
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Clone a Configured System and Install the image on another
identical hardware or the same hardware.
"""
from __future__ import print_function
import os
import re
import glob
import time
import shutil
import netaddr
import tempfile
import fileinput
import subprocess
from controllerconfig.common import constants
from sysinv.common import constants as si_const
from controllerconfig import sysinv_api
import tsconfig.tsconfig as tsconfig
from controllerconfig.common import log
from controllerconfig.common.exceptions import CloneFail
from controllerconfig.common.exceptions import BackupFail
from controllerconfig import utils
from controllerconfig import backup_restore
# Set True to keep intermediate staging/iso directories for debugging.
DEBUG = False
LOG = log.get_logger(__name__)
# Shared sink for child-process output; opened for the module's lifetime.
DEVNULL = open(os.devnull, 'w')

# Directory (inside the iso) holding the clone archive and its ini file.
CLONE_ARCHIVE_DIR = "clone-archive"
# Metadata file describing the clone iso; written by create_ini_file() and
# read back by clone_status() after installation.
CLONE_ISO_INI = ".cloneiso.ini"
# Keys/values used inside the clone ini file.
NAME = "name"
INSTALLED = "installed_at"
RESULT = "result"
IN_PROGRESS = "in-progress"
FAIL = "failed"
OK = "ok"
def clone_status():
    """ Check status of last install-clone.

    Reads the clone ini file from the archive dir or the platform config
    path (first one found wins) and prints a human-readable status.
    """
    ini_candidates = [
        os.path.join("/", CLONE_ARCHIVE_DIR, CLONE_ISO_INI),
        os.path.join(tsconfig.PLATFORM_CONF_PATH, CLONE_ISO_INI),
    ]
    name = "unknown"
    result = "unknown"
    installed_at = "unknown time"
    for ini_file in ini_candidates:
        if not os.path.exists(ini_file):
            continue
        with open(ini_file) as f:
            contents = f.read()
        for line in contents.split("\n"):
            if line.startswith(NAME):
                name = line.split("=")[1].strip()
            elif line.startswith(RESULT):
                result = line.split("=")[1].strip()
            elif line.startswith(INSTALLED):
                installed_at = line.split("=")[1].strip()
        break  # one file was found, skip the other file
    if result == "unknown":
        print("\nCloned image is not installed on this node.\n")
    elif result == OK:
        print("\nInstallation of cloned image [{}] was successful at {}\n"
              .format(name, installed_at))
    elif result == FAIL:
        print("\nInstallation of cloned image [{}] failed at {}\n"
              .format(name, installed_at))
    else:
        print("\ninstall-clone is in progress.\n")
def check_size(archive_dir):
    """ Check if there is enough space to create iso.

    Estimates the clone iso size as the sum of all backup components plus
    the installer feed directory, and raises CloneFail if the archive
    directory's free space cannot hold it.
    """
    overhead_bytes = 1024 ** 3  # extra GB for staging directory
    # Size of the cloned iso is directly proportional to the
    # installed package repository (note that patches are a part of
    # the system archive size below).
    # 1G overhead size added (above) will accommodate the temporary
    # workspace (updating system archive etc) needed to create the iso.
    feed_dir = os.path.join('/www', 'pages', 'feed',
                            'rel-' + tsconfig.SW_VERSION)
    overhead_bytes += backup_restore.backup_std_dir_size(feed_dir)

    # Sum every component that backup_restore would include in the archive.
    clone_size = (
        overhead_bytes +
        backup_restore.backup_etc_size() +
        backup_restore.backup_config_size(tsconfig.CONFIG_PATH) +
        backup_restore.backup_puppet_data_size(constants.HIERADATA_PERMDIR) +
        backup_restore.backup_keyring_size(backup_restore.keyring_permdir) +
        backup_restore.backup_ldap_size() +
        backup_restore.backup_postgres_size() +
        backup_restore.backup_std_dir_size(backup_restore.home_permdir) +
        backup_restore.backup_std_dir_size(backup_restore.patching_permdir) +
        backup_restore.backup_std_dir_size(
            backup_restore.patching_repo_permdir) +
        backup_restore.backup_std_dir_size(backup_restore.extension_permdir) +
        backup_restore.backup_std_dir_size(
            backup_restore.patch_vault_permdir) +
        backup_restore.backup_armada_manifest_size(
            constants.ARMADA_PERMDIR) +
        backup_restore.backup_std_dir_size(
            constants.HELM_CHARTS_PERMDIR) +
        backup_restore.backup_mariadb_size())

    archive_dir_free_space = \
        utils.filesystem_get_free_space(archive_dir)

    if clone_size > archive_dir_free_space:
        print("\nArchive directory (%s) does not have enough free "
              "space (%s), estimated size to create image is %s." %
              (archive_dir,
               utils.print_bytes(archive_dir_free_space),
               utils.print_bytes(clone_size)))
        raise CloneFail("Not enough free space.\n")
def update_bootloader_default(bl_file, host):
    """ Update bootloader files for cloned image.

    Rewrites the default boot label, timeout, boot_device, rootfs_device
    and console settings in *bl_file* (either an isolinux .cfg or a
    grub.cfg) to match *host* (a controller inventory record with
    tboot/boot_device/rootfs_device/console attributes).
    Raises CloneFail if the file is missing or the rewrite fails.
    """
    if not os.path.exists(bl_file):
        LOG.error("{} does not exist".format(bl_file))
        raise CloneFail("{} does not exist".format(os.path.basename(bl_file)))

    # Tags should be in sync with common-bsp/files/centos.syslinux.cfg
    # and common-bsp/files/grub.cfg
    # Default labels are the isolinux numeric tags; they are replaced
    # below by the grub menu-path form when bl_file is a grub.cfg.
    STANDARD_STANDARD = '0'
    STANDARD_EXTENDED = 'S0'
    AIO_STANDARD = '2'
    AIO_EXTENDED = 'S2'
    AIO_LL_STANDARD = '4'
    AIO_LL_EXTENDED = 'S4'
    if "grub.cfg" in bl_file:
        STANDARD_STANDARD = 'standard>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_STANDARD
        STANDARD_EXTENDED = 'standard>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
        AIO_STANDARD = 'aio>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_STANDARD
        AIO_EXTENDED = 'aio>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
        AIO_LL_STANDARD = 'aio-lowlat>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_STANDARD
        AIO_LL_EXTENDED = 'aio-lowlat>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
        SUBMENUITEM_TBOOT = 'tboot'
        SUBMENUITEM_SECUREBOOT = 'secureboot'

    timeout_line = None
    default_line = None
    # Pick the label matching system type (AIO vs standard), low-latency
    # subfunction, and security profile.
    default_label_num = STANDARD_STANDARD
    if utils.get_system_type() == si_const.TIS_AIO_BUILD:
        if si_const.LOWLATENCY in tsconfig.subfunctions:
            default_label_num = AIO_LL_STANDARD
        else:
            default_label_num = AIO_STANDARD
    if (tsconfig.security_profile ==
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED):
        default_label_num = STANDARD_EXTENDED
        if utils.get_system_type() == si_const.TIS_AIO_BUILD:
            if si_const.LOWLATENCY in tsconfig.subfunctions:
                default_label_num = AIO_LL_EXTENDED
            else:
                default_label_num = AIO_EXTENDED
        if "grub.cfg" in bl_file:
            # Extended profile grub menus have a tboot/secureboot submenu.
            if host.tboot is not None:
                if host.tboot == "true":
                    default_label_num = default_label_num + '>' + \
                        SUBMENUITEM_TBOOT
                else:
                    default_label_num = default_label_num + '>' + \
                        SUBMENUITEM_SECUREBOOT

    try:
        with open(bl_file) as f:
            s = f.read()
            for line in s.split("\n"):
                if line.startswith("timeout"):
                    timeout_line = line
                elif line.startswith("default"):
                    default_line = line

            if "grub.cfg" in bl_file:
                replace = "default='{}'\ntimeout=10".format(default_label_num)
            else:  # isolinux format
                replace = "default {}\ntimeout 10".format(default_label_num)

            # Replace whichever of default/timeout lines exist; if neither
            # does, prepend the new settings.
            if default_line and timeout_line:
                s = s.replace(default_line, "")
                s = s.replace(timeout_line, replace)
            elif default_line:
                s = s.replace(default_line, replace)
            elif timeout_line:
                s = s.replace(timeout_line, replace)
            else:
                s = replace + s

            s = re.sub(r'boot_device=[^\s]*',
                       'boot_device=%s' % host.boot_device,
                       s)
            s = re.sub(r'rootfs_device=[^\s]*',
                       'rootfs_device=%s' % host.rootfs_device,
                       s)
            s = re.sub(r'console=[^\s]*',
                       'console=%s' % host.console,
                       s)

        with open(bl_file, "w") as f:
            LOG.info("rewriting {}: label={} find=[{}][{}] replace=[{}]"
                     .format(bl_file, default_label_num, timeout_line,
                             default_line, replace.replace('\n', '<newline>')))
            f.write(s)

    except Exception as e:
        LOG.error("update_bootloader_default failed: {}".format(e))
        raise CloneFail("Failed to update bootloader files")
def get_online_cpus():
    """ Get max cpu id.

    Reads /sys/devices/system/cpu/online (a range list such as "0-23")
    and returns the last range endpoint as a string.
    """
    # The original code had an unreachable ``return ""`` after the
    # ``return`` inside the ``with`` block; it has been removed.
    with open('/sys/devices/system/cpu/online') as f:
        s = f.read()
    max_cpu_id = s.split('-')[-1].strip()
    LOG.info("Max cpu id:{} [{}]".format(max_cpu_id, s.strip()))
    return max_cpu_id
def get_total_mem():
    """ Get total memory size.

    Returns the MemTotal value (in kB, as reported by /proc/meminfo)
    as a string, or "" if the field is not found.
    """
    with open('/proc/meminfo') as f:
        meminfo = f.read()
    for info_line in meminfo.split("\n"):
        if not info_line.startswith("MemTotal:"):
            continue
        mem_total = info_line.split()[1]
        LOG.info("MemTotal:[{}]".format(mem_total))
        return mem_total
    return ""
def get_disk_size(disk):
    """ Get the disk size.

    Runs lsblk to report the size (in bytes) of *disk*; raises CloneFail
    if the query fails.
    """
    lsblk_cmd = ['lsblk', '--nodeps', '--output', 'SIZE',
                 '--noheadings', '--bytes', disk]
    try:
        return subprocess.check_output(lsblk_cmd).strip()
    except Exception as err:
        LOG.exception(err)
        LOG.error("Failed to get disk size [{}]".format(disk))
        raise CloneFail("Failed to get disk size")
def create_ini_file(clone_archive_dir, iso_name):
    """Create clone ini file.

    Writes CLONE_ISO_INI into *clone_archive_dir*, recording this
    controller's hostname, interfaces, disks (path#size), cpu and memory
    details so the install side can validate the target hardware.
    """
    interfaces = ""
    my_hostname = utils.get_controller_hostname()
    macs = sysinv_api.get_mac_addresses(my_hostname)
    for intf in macs.keys():
        interfaces += intf + " "

    disk_paths = ""
    # Only top-level pci-attached, non-usb disks (no partitions).
    for _, _, files in os.walk('/dev/disk/by-path'):
        for f in files:
            if f.startswith("pci-") and "part" not in f and "usb" not in f:
                disk_size = get_disk_size('/dev/disk/by-path/' + f)
                disk_paths += f + "#" + disk_size + " "
        break  # no need to go into sub-dirs.

    LOG.info("create ini: {} {}".format(macs, files))
    with open(os.path.join(clone_archive_dir, CLONE_ISO_INI), 'w') as f:
        f.write('[clone_iso]\n')
        f.write('name=' + iso_name + '\n')
        f.write('host=' + my_hostname + '\n')
        f.write('created_at=' + time.strftime("%Y-%m-%d %H:%M:%S %Z")
                + '\n')
        f.write('interfaces=' + interfaces + '\n')
        f.write('disks=' + disk_paths + '\n')
        f.write('cpus=' + get_online_cpus() + '\n')
        f.write('mem=' + get_total_mem() + '\n')
    LOG.info("create ini: ({}) ({})".format(interfaces, disk_paths))
def create_iso(iso_name, archive_dir):
    """ Create iso image. This is modelled after
        the cgcs-root/build-tools/build-iso tool.

    Builds a bootable clone iso named *iso_name*.iso under *archive_dir*:
    stages kernel/initrd, feed repo, boot loaders (BIOS + EFI), patches,
    the clone ini file, then runs mkisofs/isohybrid/implantisomd5.
    Raises CloneFail on any failure.
    """
    try:
        controller_0 = sysinv_api.get_host_data('controller-0')
    except Exception as e:
        e_log = "Failed to retrieve controller-0 inventory details."
        LOG.exception(e_log)
        raise CloneFail(e_log)

    iso_dir = os.path.join(archive_dir, 'isolinux')
    clone_archive_dir = os.path.join(iso_dir, CLONE_ARCHIVE_DIR)
    output = None
    tmpdir = None
    total_steps = 6
    step = 1
    print ("\nCreating ISO:")

    # Add the correct kick-start file to the image
    ks_file = "controller_ks.cfg"
    if utils.get_system_type() == si_const.TIS_AIO_BUILD:
        if si_const.LOWLATENCY in tsconfig.subfunctions:
            ks_file = "smallsystem_lowlatency_ks.cfg"
        else:
            ks_file = "smallsystem_ks.cfg"

    try:
        # prepare the iso files
        images_dir = os.path.join(iso_dir, 'images')
        os.mkdir(images_dir, 0o644)
        pxe_dir = os.path.join('/pxeboot',
                               'rel-' + tsconfig.SW_VERSION)
        os.symlink(pxe_dir + '/installer-bzImage',
                   iso_dir + '/vmlinuz')
        os.symlink(pxe_dir + '/installer-initrd',
                   iso_dir + '/initrd.img')
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        # Link the installed package feed and stage the BIOS bootloader,
        # rewriting its default boot entry for the cloned host.
        feed_dir = os.path.join('/www', 'pages', 'feed',
                                'rel-' + tsconfig.SW_VERSION)
        os.symlink(feed_dir + '/Packages', iso_dir + '/Packages')
        os.symlink(feed_dir + '/repodata', iso_dir + '/repodata')
        os.symlink(feed_dir + '/LiveOS', iso_dir + '/LiveOS')
        shutil.copy2(feed_dir + '/isolinux.cfg', iso_dir)
        update_bootloader_default(iso_dir + '/isolinux.cfg', controller_0)
        shutil.copyfile('/usr/share/syslinux/isolinux.bin',
                        iso_dir + '/isolinux.bin')
        os.symlink('/usr/share/syslinux/vesamenu.c32',
                   iso_dir + '/vesamenu.c32')
        for filename in glob.glob(os.path.join(feed_dir, '*ks.cfg')):
            shutil.copy(os.path.join(feed_dir, filename), iso_dir)
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        # Stage the EFI boot files and grub config.
        efiboot_dir = os.path.join(iso_dir, 'EFI', 'BOOT')
        os.makedirs(efiboot_dir, 0o644)
        l_efi_dir = os.path.join('/boot', 'efi', 'EFI')
        shutil.copy2(l_efi_dir + '/BOOT/BOOTX64.EFI', efiboot_dir)
        shutil.copy2(l_efi_dir + '/centos/MokManager.efi', efiboot_dir)
        shutil.copy2(l_efi_dir + '/centos/grubx64.efi', efiboot_dir)
        shutil.copy2('/pxeboot/EFI/grub.cfg', efiboot_dir)
        update_bootloader_default(efiboot_dir + '/grub.cfg', controller_0)
        shutil.copytree(l_efi_dir + '/centos/fonts',
                        efiboot_dir + '/fonts')
        # copy EFI boot image and update the grub.cfg file
        # (loop-mount the vfat image, swap in the rewritten grub.cfg).
        efi_img = images_dir + '/efiboot.img'
        shutil.copy2(pxe_dir + '/efiboot.img', efi_img)
        tmpdir = tempfile.mkdtemp(dir=archive_dir)
        output = subprocess.check_output(
            ["mount", "-t", "vfat", "-o", "loop",
             efi_img, tmpdir],
            stderr=subprocess.STDOUT)
        # replace the grub.cfg file with the updated file
        efi_grub_f = os.path.join(tmpdir, 'EFI', 'BOOT', 'grub.cfg')
        os.remove(efi_grub_f)
        shutil.copy2(efiboot_dir + '/grub.cfg', efi_grub_f)
        subprocess.call(['umount', tmpdir])
        shutil.rmtree(tmpdir, ignore_errors=True)
        tmpdir = None

        epoch_time = "%.9f" % time.time()
        disc_info = [epoch_time, tsconfig.SW_VERSION, "x86_64"]
        with open(iso_dir + '/.discinfo', 'w') as f:
            f.write('\n'.join(disc_info))

        # copy the latest install_clone executable
        shutil.copy2('/usr/bin/install_clone', iso_dir)
        subprocess.check_output("cat /pxeboot/post_clone_iso_ks.cfg >> " +
                                iso_dir + "/" + ks_file, shell=True)
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        # copy patches
        iso_patches_dir = os.path.join(iso_dir, 'patches')
        iso_patch_repo_dir = os.path.join(iso_patches_dir, 'repodata')
        iso_patch_pkgs_dir = os.path.join(iso_patches_dir, 'Packages')
        iso_patch_metadata_dir = os.path.join(iso_patches_dir, 'metadata')
        iso_patch_applied_dir = os.path.join(iso_patch_metadata_dir, 'applied')
        iso_patch_committed_dir = os.path.join(iso_patch_metadata_dir,
                                               'committed')
        os.mkdir(iso_patches_dir, 0o755)
        os.mkdir(iso_patch_repo_dir, 0o755)
        os.mkdir(iso_patch_pkgs_dir, 0o755)
        os.mkdir(iso_patch_metadata_dir, 0o755)
        os.mkdir(iso_patch_applied_dir, 0o755)
        os.mkdir(iso_patch_committed_dir, 0o755)

        repodata = '/www/pages/updates/rel-%s/repodata/' % tsconfig.SW_VERSION
        pkgsdir = '/www/pages/updates/rel-%s/Packages/' % tsconfig.SW_VERSION
        patch_applied_dir = '/opt/patching/metadata/applied/'
        patch_committed_dir = '/opt/patching/metadata/committed/'
        subprocess.check_call(['rsync', '-a', repodata,
                               '%s/' % iso_patch_repo_dir])
        if os.path.exists(pkgsdir):
            subprocess.check_call(['rsync', '-a', pkgsdir,
                                   '%s/' % iso_patch_pkgs_dir])
        if os.path.exists(patch_applied_dir):
            subprocess.check_call(['rsync', '-a', patch_applied_dir,
                                   '%s/' % iso_patch_applied_dir])
        if os.path.exists(patch_committed_dir):
            subprocess.check_call(['rsync', '-a', patch_committed_dir,
                                   '%s/' % iso_patch_committed_dir])
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        create_ini_file(clone_archive_dir, iso_name)

        os.chmod(iso_dir + '/isolinux.bin', 0o664)
        iso_file = os.path.join(archive_dir, iso_name + ".iso")
        output = subprocess.check_output(
            ["nice", "mkisofs",
             "-o", iso_file, "-R", "-D",
             "-A", "oe_iso_boot", "-V", "oe_iso_boot",
             "-f", "-quiet",
             "-b", "isolinux.bin", "-c", "boot.cat", "-no-emul-boot",
             "-boot-load-size", "4", "-boot-info-table",
             "-eltorito-alt-boot", "-e", "images/efiboot.img",
             "-no-emul-boot",
             iso_dir],
            stderr=subprocess.STDOUT)
        LOG.info("{} created: [{}]".format(iso_file, output))
        utils.progress(total_steps, step, 'iso created', 'DONE')
        step += 1

        # Make the iso hybrid (bootable from USB) and implant a checksum.
        output = subprocess.check_output(
            ["nice", "isohybrid",
             "--uefi",
             iso_file],
            stderr=subprocess.STDOUT)
        LOG.debug("isohybrid: {}".format(output))

        output = subprocess.check_output(
            ["nice", "implantisomd5",
             iso_file],
            stderr=subprocess.STDOUT)
        LOG.debug("implantisomd5: {}".format(output))
        utils.progress(total_steps, step, 'checksum implanted', 'DONE')
        print("Cloned iso image created: {}".format(iso_file))

    except Exception as e:
        LOG.exception(e)
        e_log = "ISO creation ({}) failed".format(iso_name)
        if output:
            e_log += ' [' + output + ']'
        LOG.error(e_log)
        raise CloneFail("ISO creation failed.")

    finally:
        # Best-effort unmount if the EFI image is still mounted.
        if tmpdir:
            subprocess.call(['umount', tmpdir], stderr=DEVNULL)
            shutil.rmtree(tmpdir, ignore_errors=True)
def find_and_replace_in_file(target, find, replace):
    """ Find and replace a string in a file.

    Rewrites *target* in place, substituting whole-word occurrences of
    *find* with *replace*. Returns True if *find* appeared on any line
    (even if the word-boundary substitution matched nothing), else None.
    """
    matched = None
    # look for "find" string within word boundaries
    word_pattern = r'\b' + find + r'\b'
    try:
        for text_line in fileinput.FileInput(target, inplace=1):
            if find in text_line:
                text_line = re.sub(word_pattern, replace, text_line)
                matched = True
            print(text_line, end='')
    except Exception as err:
        LOG.error("Failed to replace [{}] with [{}] in [{}]: {}"
                  .format(find, replace, target, str(err)))
        matched = None
    finally:
        fileinput.close()
    return matched
def find_and_replace(target_list, find, replace):
    """ Find and replace a string in all files in a directory.

    Each entry of *target_list* may be a file (edited directly) or a
    directory (grep is used to locate candidate files). Logs which files
    were changed, or an error if *find* was not found anywhere.
    """
    found = False
    file_list = []
    for target in target_list:
        if os.path.isfile(target):
            if find_and_replace_in_file(target, find, replace):
                found = True
                file_list.append(target)
        elif os.path.isdir(target):
            try:
                # universal_newlines=True makes check_output return str
                # instead of bytes, so split('\n') below works on
                # Python 3 as well as Python 2.
                output = subprocess.check_output(
                    ['grep', '-rl', find, target],
                    universal_newlines=True)
                if output:
                    for line in output.split('\n'):
                        if line and find_and_replace_in_file(
                                line, find, replace):
                            found = True
                            file_list.append(line)
            except Exception:
                pass  # nothing found in that directory
    if not found:
        LOG.error("[{}] not found in backup".format(find))
    else:
        LOG.info("Replaced [{}] with [{}] in {}".format(
            find, replace, file_list))
def remove_from_archive(archive, unwanted):
    """ Remove a file from the archive.

    Deletes member *unwanted* from the (uncompressed) tar *archive*;
    raises CloneFail if tar fails.
    """
    delete_cmd = ["tar", "--delete", "--file=" + archive, unwanted]
    try:
        subprocess.check_call(delete_cmd)
    except subprocess.CalledProcessError as err:
        LOG.error("Delete of {} failed: {}".format(unwanted, err.output))
        raise CloneFail("Failed to modify backup archive")
def update_oamip_in_archive(tmpdir):
    """ Update OAM IP in system archive file.

    Replaces every configured OAM address found under *tmpdir* (the
    extracted backup) with a placeholder address in a documentation
    range, so the clone does not come up with the original's OAM IPs.
    Raises CloneFail if the OAM addresses cannot be read or rewritten.
    """
    oam_list = sysinv_api.get_oam_ip()
    if not oam_list:
        raise CloneFail("Failed to get OAM IP")
    for oamfind in [oam_list.oam_start_ip, oam_list.oam_end_ip,
                    oam_list.oam_subnet, oam_list.oam_floating_ip,
                    oam_list.oam_c0_ip, oam_list.oam_c1_ip]:
        if not oamfind:
            continue
        ip = netaddr.IPNetwork(oamfind)
        find_str = ""
        if ip.version == 4:
            # if ipv4, use 192.0.x.x as the temporary oam ip
            find_str = str(ip.ip)
            ipstr_list = find_str.split('.')
            ipstr_list[0] = '192'
            ipstr_list[1] = '0'
            repl_ipstr = ".".join(ipstr_list)
        else:
            # if ipv6, use 2001:db8:x as the temporary oam ip
            find_str = str(ip.ip)
            ipstr_list = find_str.split(':')
            ipstr_list[0] = '2001'
            ipstr_list[1] = 'db8'
            repl_ipstr = ":".join(ipstr_list)
        if repl_ipstr:
            # Rewrite the address everywhere it can appear in the backup:
            # system config files, hieradata and the postgres dumps.
            find_and_replace(
                [os.path.join(tmpdir, 'etc/hosts'),
                 os.path.join(tmpdir, 'etc/sysconfig/network-scripts'),
                 os.path.join(tmpdir, 'etc/nfv/vim/config.ini'),
                 os.path.join(tmpdir, 'etc/haproxy/haproxy.cfg'),
                 os.path.join(tmpdir, 'etc/heat/heat.conf'),
                 os.path.join(tmpdir, 'etc/keepalived/keepalived.conf'),
                 os.path.join(tmpdir, 'etc/vswitch/vswitch.ini'),
                 os.path.join(tmpdir, 'etc/nova/nova.conf'),
                 os.path.join(tmpdir, 'config/hosts'),
                 os.path.join(tmpdir, 'hieradata'),
                 os.path.join(tmpdir, 'postgres/keystone.sql.data'),
                 os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
                find_str, repl_ipstr)
        else:
            LOG.error("Failed to modify OAM IP:[{}]"
                      .format(oamfind))
            raise CloneFail("Failed to modify OAM IP")
def update_mac_in_archive(tmpdir):
    """ Update MAC addresses in system archive file.

    Replaces this controller's (and, on duplex systems, the mate
    controller's) interface MACs in the sysinv postgres dump with
    placeholder tokens the install side can fill back in.
    """
    sql_targets = [os.path.join(tmpdir, 'postgres/sysinv.sql.data')]
    hostnames = [utils.get_controller_hostname()]
    if tsconfig.system_mode in (si_const.SYSTEM_MODE_DUPLEX,
                                si_const.SYSTEM_MODE_DUPLEX_DIRECT):
        hostnames.append(utils.get_mate_controller_hostname())
    for hostname in hostnames:
        macs = sysinv_api.get_mac_addresses(hostname)
        for intf, mac in macs.items():
            find_and_replace(
                sql_targets, mac,
                "CLONEISOMAC_{}{}".format(hostname, intf))
def update_disk_serial_id_in_archive(tmpdir):
    """ Update disk serial id in system archive file.

    Replaces this controller's (and, on duplex systems, the mate
    controller's) disk serial ids in the sysinv postgres dump with
    placeholder tokens the install side can fill back in.
    """
    sql_targets = [os.path.join(tmpdir, 'postgres/sysinv.sql.data')]
    hostnames = [utils.get_controller_hostname()]
    if tsconfig.system_mode in (si_const.SYSTEM_MODE_DUPLEX,
                                si_const.SYSTEM_MODE_DUPLEX_DIRECT):
        hostnames.append(utils.get_mate_controller_hostname())
    for hostname in hostnames:
        disk_sids = sysinv_api.get_disk_serial_ids(hostname)
        for d_dnode, d_sid in disk_sids.items():
            find_and_replace(
                sql_targets, d_sid,
                "CLONEISODISKSID_{}{}".format(hostname, d_dnode))
def update_sysuuid_in_archive(tmpdir):
    """ Update system uuid in system archive file.

    Replaces the system uuid in the sysinv postgres dump with a
    placeholder token.
    """
    sql_file = os.path.join(tmpdir, 'postgres/sysinv.sql.data')
    system_uuid = sysinv_api.get_system_uuid()
    find_and_replace([sql_file], system_uuid, "CLONEISO_SYSTEM_UUID")
def update_backup_archive(backup_name, archive_dir):
    """ Update backup archive file to be included in clone-iso.

    Unzips <backup_name>.tgz in *archive_dir*, strips stale udev rules,
    rewrites host-specific identifiers (OAM IPs, MACs, disk serial ids,
    system uuid) inside the tar, then re-gzips it back to .tgz.
    Raises CloneFail on any failure.
    """
    path_to_archive = os.path.join(archive_dir, backup_name)
    tmpdir = tempfile.mkdtemp(dir=archive_dir)
    try:
        # Work on the raw .tar so members can be deleted/updated in place.
        subprocess.check_call(
            ['gunzip', path_to_archive + '.tgz'],
            stdout=DEVNULL, stderr=DEVNULL)
        # 70-persistent-net.rules with the correct MACs will be
        # generated on the linux boot on the cloned side. Remove
        # the stale file from original side.
        remove_from_archive(path_to_archive + '.tar',
                            'etc/udev/rules.d/70-persistent-net.rules')
        # Extract only a subset of directories which have files to be
        # updated for oam-ip and MAC addresses. After updating the files
        # these directories are added back to the archive.
        subprocess.check_call(
            ['tar', '-x',
             '--directory=' + tmpdir,
             '-f', path_to_archive + '.tar',
             'etc', 'postgres', 'config',
             'hieradata'],
            stdout=DEVNULL, stderr=DEVNULL)
        update_oamip_in_archive(tmpdir)
        update_mac_in_archive(tmpdir)
        update_disk_serial_id_in_archive(tmpdir)
        update_sysuuid_in_archive(tmpdir)
        subprocess.check_call(
            ['tar', '--update',
             '--directory=' + tmpdir,
             '-f', path_to_archive + '.tar',
             'etc', 'postgres', 'config',
             'hieradata'],
            stdout=DEVNULL, stderr=DEVNULL)
        subprocess.check_call(['gzip', path_to_archive + '.tar'])
        shutil.move(path_to_archive + '.tar.gz', path_to_archive + '.tgz')
    except Exception as e:
        LOG.error("Update of backup archive {} failed {}".format(
            path_to_archive, str(e)))
        raise CloneFail("Failed to update backup archive")
    finally:
        if not DEBUG:
            shutil.rmtree(tmpdir, ignore_errors=True)
def validate_controller_state():
    """ Cloning allowed now?

    Raises CloneFail unless this controller (and its mate, on duplex
    systems) is enabled and provisioned, the system is All-in-one, and
    no alarms are active.
    """
    # Check if this Controller is enabled and provisioned
    try:
        if not sysinv_api.controller_enabled_provisioned(
                utils.get_controller_hostname()):
            raise CloneFail("Controller is not enabled/provisioned")
        duplex_modes = (si_const.SYSTEM_MODE_DUPLEX,
                        si_const.SYSTEM_MODE_DUPLEX_DIRECT)
        if tsconfig.system_mode in duplex_modes:
            if not sysinv_api.controller_enabled_provisioned(
                    utils.get_mate_controller_hostname()):
                raise CloneFail("Mate controller is not enabled/provisioned")
    except CloneFail:
        raise
    except Exception:
        # Any inventory query failure is treated as "not ready to clone".
        raise CloneFail("Controller is not enabled/provisioned")
    if utils.get_system_type() != si_const.TIS_AIO_BUILD:
        raise CloneFail("Cloning supported only on All-in-one systems")
    if sysinv_api.get_alarms():
        raise CloneFail("There are active alarms on this system!")
def clone(backup_name, archive_dir):
    """ Do Cloning.

    Top-level entry point: validates the controller state and free
    space, takes a system backup into a fresh isolinux staging area,
    scrubs host-specific identifiers from it, and builds the clone iso.
    Raises CloneFail (wrapping BackupFail where applicable) on failure.
    """
    validate_controller_state()
    LOG.info("Cloning [{}] at [{}]".format(backup_name, archive_dir))
    check_size(archive_dir)
    isolinux_dir = os.path.join(archive_dir, 'isolinux')
    clone_archive_dir = os.path.join(isolinux_dir, CLONE_ARCHIVE_DIR)
    # Start from a clean staging directory.
    if os.path.exists(isolinux_dir):
        LOG.info("deleting old iso_dir %s" % isolinux_dir)
        shutil.rmtree(isolinux_dir, ignore_errors=True)
    os.makedirs(clone_archive_dir, 0o644)
    try:
        backup_restore.backup(backup_name, clone_archive_dir, clone=True)
        LOG.info("system backup done")
        update_backup_archive(backup_name + '_system', clone_archive_dir)
        create_iso(backup_name, archive_dir)
    except BackupFail as e:
        raise CloneFail(e.message)
    except CloneFail as e:
        raise
    finally:
        # Staging area is only kept when DEBUG is set.
        if not DEBUG:
            shutil.rmtree(isolinux_dir, ignore_errors=True)

View File

@ -1,371 +0,0 @@
"""
Copyright (c) 2015-2019 Wind River Systems, Inc.
SPDX-License-Identifier: Apache-2.0
"""
from netaddr import IPRange
from controllerconfig.common.exceptions import ConfigFail
from controllerconfig.common.exceptions import ValidateFail
from controllerconfig.utils import is_mtu_valid
from controllerconfig.utils import is_valid_vlan
from controllerconfig.utils import validate_network_str
from controllerconfig.utils import validate_address_str
# Configuration deployment types
DEFAULT_CONFIG = 0
REGION_CONFIG = 1
SUBCLOUD_CONFIG = 2

# Network types; used as the second index into NETWORK_PREFIX_NAMES
MGMT_TYPE = 0
INFRA_TYPE = 1
OAM_TYPE = 2
CLUSTER_TYPE = 3

# Config-file section-name prefixes, indexed by
# [naming type (DEFAULT_NAMES / HP_NAMES)][network type]
NETWORK_PREFIX_NAMES = [
    ('MGMT', 'INFRA', 'OAM', 'CLUSTER'),
    ('CLM', 'BLS', 'CAN', 'CLUSTER')
]

# Host attribute names; presumably the attributes accepted on a host
# entry in the hosts XML file -- verify against the parser that uses this.
HOST_XML_ATTRIBUTES = ['hostname', 'personality', 'subfunctions',
                       'mgmt_mac', 'mgmt_ip',
                       'bm_ip', 'bm_type', 'bm_username',
                       'bm_password', 'boot_device', 'rootfs_device',
                       'install_output', 'console', 'vsc_controllers',
                       'power_on', 'location']

# Network naming types
DEFAULT_NAMES = 0
HP_NAMES = 1

# well-known default domain name
DEFAULT_DOMAIN_NAME = 'Default'
class LogicalInterface(object):
    """Configuration for a single logical interface definition."""

    def __init__(self):
        # All attributes are filled in by parse_config().
        self.name = None
        self.mtu = None
        self.lag_interface = False
        self.lag_mode = None
        self.ports = None

    def parse_config(self, system_config, logical_interface):
        """Populate this object from the named config-file section.

        :param system_config: parsed configuration (ConfigParser-like)
        :param logical_interface: section name of the logical interface
        :raises ConfigFail: on missing or invalid configuration values
        """
        # The section itself must exist before anything can be read.
        if not system_config.has_section(logical_interface):
            raise ConfigFail("Missing config for logical interface %s." %
                             logical_interface)
        self.name = logical_interface

        # MTU must be an integer inside the platform-supported range.
        self.mtu = system_config.getint(logical_interface, 'INTERFACE_MTU')
        if not is_mtu_valid(self.mtu):
            raise ConfigFail("Invalid MTU value for %s. "
                             "Valid values: 576 - 9216" % logical_interface)

        # Ports come as a comma-separated list; drop empty entries.
        raw_ports = system_config.get(logical_interface,
                                      'INTERFACE_PORTS').split(',')
        self.ports = [port.strip() for port in raw_ports if port.strip()]

        # LAG configuration: 'Y' requires exactly two ports and a mode,
        # 'N' requires exactly one port.
        lag_setting = system_config.get(logical_interface, 'LAG_INTERFACE')
        if lag_setting.lower() == 'y':
            self.lag_interface = True
            if len(self.ports) != 2:
                raise ConfigFail(
                    "Invalid number of ports (%d) supplied for LAG "
                    "interface %s" % (len(self.ports), logical_interface))
            self.lag_mode = system_config.getint(logical_interface, 'LAG_MODE')
            if not 1 <= self.lag_mode <= 6:
                raise ConfigFail(
                    "Invalid LAG_MODE value of %d for %s. Valid values: 1-6" %
                    (self.lag_mode, logical_interface))
        elif lag_setting.lower() == 'n':
            if len(self.ports) > 1:
                raise ConfigFail(
                    "More than one interface supplied for non-LAG "
                    "interface %s" % logical_interface)
            if not self.ports:
                raise ConfigFail(
                    "No interfaces supplied for non-LAG "
                    "interface %s" % logical_interface)
        else:
            raise ConfigFail(
                "Invalid LAG_INTERFACE value of %s for %s. Valid values: "
                "Y or N" % (lag_setting, logical_interface))
class Network(object):
    """ Represents configuration for a network.
    """
    def __init__(self):
        # All attributes are populated by parse_config().
        self.vlan = None
        self.cidr = None
        self.multicast_cidr = None
        self.start_address = None
        self.end_address = None
        # True when an explicit start/end range was given in the config
        self.start_end_in_config = False
        self.floating_address = None
        self.address_0 = None
        self.address_1 = None
        self.dynamic_allocation = False
        self.gateway_address = None
        self.logical_interface = None

    def parse_config(self, system_config, config_type, network_type,
                     min_addresses=0, multicast_addresses=0, optional=False,
                     naming_type=DEFAULT_NAMES,
                     logical_interface_required=True):
        """Populate this network from *system_config*.

        :param system_config: parsed config file (ConfigParser-like)
        :param config_type: DEFAULT_CONFIG, REGION_CONFIG or SUBCLOUD_CONFIG
        :param network_type: index into NETWORK_PREFIX_NAMES (MGMT_TYPE, ...)
        :param min_addresses: minimum number of addresses the subnet must hold;
            1 selects the single-address (OAM 'IP_ADDRESS') parsing path
        :param multicast_addresses: minimum multicast subnet size (0 = none)
        :param optional: if True, silently return when the section is absent
        :param naming_type: DEFAULT_NAMES or HP_NAMES section naming scheme
        :param logical_interface_required: if False, LOGICAL_INTERFACE may be
            omitted from the section
        :raises ConfigFail: on any missing or invalid configuration value
        """
        network_prefix = NETWORK_PREFIX_NAMES[naming_type][network_type]
        network_name = network_prefix + '_NETWORK'

        # HP naming repeats the network prefix on every option key.
        if naming_type == HP_NAMES:
            attr_prefix = network_prefix + '_'
        else:
            attr_prefix = ''

        # Ensure network config is present
        if not system_config.has_section(network_name):
            if not optional:
                raise ConfigFail("Missing config for network %s." %
                                 network_name)
            else:
                # Optional interface - just return
                return

        # Parse/validate the VLAN
        if system_config.has_option(network_name, attr_prefix + 'VLAN'):
            self.vlan = system_config.getint(network_name,
                                             attr_prefix + 'VLAN')
        if self.vlan:
            if not is_valid_vlan(self.vlan):
                raise ConfigFail(
                    "Invalid %s value of %d for %s. Valid values: 1-4094" %
                    (attr_prefix + 'VLAN', self.vlan, network_name))

        # Parse/validate the cidr
        cidr_str = system_config.get(network_name, attr_prefix + 'CIDR')
        try:
            self.cidr = validate_network_str(
                cidr_str, min_addresses)
        except ValidateFail as e:
            raise ConfigFail(
                "Invalid %s value of %s for %s.\nReason: %s" %
                (attr_prefix + 'CIDR', cidr_str, network_name, e))

        # Parse/validate the multicast subnet (only when requested and present)
        if 0 < multicast_addresses and \
                system_config.has_option(network_name,
                                         attr_prefix + 'MULTICAST_CIDR'):
            multicast_cidr_str = system_config.get(network_name, attr_prefix +
                                                   'MULTICAST_CIDR')
            try:
                self.multicast_cidr = validate_network_str(
                    multicast_cidr_str, multicast_addresses, multicast=True)
            except ValidateFail as e:
                raise ConfigFail(
                    "Invalid %s value of %s for %s.\nReason: %s" %
                    (attr_prefix + 'MULTICAST_CIDR', multicast_cidr_str,
                     network_name, e))
            # Both subnets must be the same IP family (v4 vs v6).
            if self.cidr.version != self.multicast_cidr.version:
                raise ConfigFail(
                    "Invalid %s value of %s for %s. Multicast "
                    "subnet and network IP families must be the same." %
                    (attr_prefix + 'MULTICAST_CIDR', multicast_cidr_str,
                     network_name))

        # Parse/validate the hardwired controller addresses
        floating_address_str = None
        address_0_str = None
        address_1_str = None

        if min_addresses == 1:
            # Single-address network: only IP_ADDRESS is allowed.
            if (system_config.has_option(
                    network_name, attr_prefix + 'IP_FLOATING_ADDRESS') or
                system_config.has_option(
                    network_name, attr_prefix + 'IP_UNIT_0_ADDRESS') or
                system_config.has_option(
                    network_name, attr_prefix + 'IP_UNIT_1_ADDRESS') or
                system_config.has_option(
                    network_name, attr_prefix + 'IP_START_ADDRESS') or
                system_config.has_option(
                    network_name, attr_prefix + 'IP_END_ADDRESS')):
                raise ConfigFail(
                    "Only one IP address is required for OAM "
                    "network, use 'IP_ADDRESS' to specify the OAM IP "
                    "address")
            floating_address_str = system_config.get(
                network_name, attr_prefix + 'IP_ADDRESS')
            try:
                self.floating_address = validate_address_str(
                    floating_address_str, self.cidr)
            except ValidateFail as e:
                raise ConfigFail(
                    "Invalid %s value of %s for %s.\nReason: %s" %
                    (attr_prefix + 'IP_ADDRESS',
                     floating_address_str, network_name, e))
            # The single address serves as floating and both unit addresses.
            self.address_0 = self.floating_address
            self.address_1 = self.floating_address
        else:
            # Multi-address network: explicit floating/unit addresses
            # and/or a start/end range may be supplied.
            if system_config.has_option(
                    network_name, attr_prefix + 'IP_FLOATING_ADDRESS'):
                floating_address_str = system_config.get(
                    network_name, attr_prefix + 'IP_FLOATING_ADDRESS')
                try:
                    self.floating_address = validate_address_str(
                        floating_address_str, self.cidr)
                except ValidateFail as e:
                    raise ConfigFail(
                        "Invalid %s value of %s for %s.\nReason: %s" %
                        (attr_prefix + 'IP_FLOATING_ADDRESS',
                         floating_address_str, network_name, e))
            if system_config.has_option(
                    network_name, attr_prefix + 'IP_UNIT_0_ADDRESS'):
                address_0_str = system_config.get(
                    network_name, attr_prefix + 'IP_UNIT_0_ADDRESS')
                try:
                    self.address_0 = validate_address_str(
                        address_0_str, self.cidr)
                except ValidateFail as e:
                    raise ConfigFail(
                        "Invalid %s value of %s for %s.\nReason: %s" %
                        (attr_prefix + 'IP_UNIT_0_ADDRESS',
                         address_0_str, network_name, e))
            if system_config.has_option(
                    network_name, attr_prefix + 'IP_UNIT_1_ADDRESS'):
                address_1_str = system_config.get(
                    network_name, attr_prefix + 'IP_UNIT_1_ADDRESS')
                try:
                    self.address_1 = validate_address_str(
                        address_1_str, self.cidr)
                except ValidateFail as e:
                    raise ConfigFail(
                        "Invalid %s value of %s for %s.\nReason: %s" %
                        (attr_prefix + 'IP_UNIT_1_ADDRESS',
                         address_1_str, network_name, e))

            # Parse/validate the start/end addresses
            start_address_str = None
            end_address_str = None
            if system_config.has_option(
                    network_name, attr_prefix + 'IP_START_ADDRESS'):
                start_address_str = system_config.get(
                    network_name, attr_prefix + 'IP_START_ADDRESS')
                try:
                    self.start_address = validate_address_str(
                        start_address_str, self.cidr)
                except ValidateFail as e:
                    raise ConfigFail(
                        "Invalid %s value of %s for %s.\nReason: %s" %
                        (attr_prefix + 'IP_START_ADDRESS',
                         start_address_str, network_name, e))
            if system_config.has_option(
                    network_name, attr_prefix + 'IP_END_ADDRESS'):
                end_address_str = system_config.get(
                    network_name, attr_prefix + 'IP_END_ADDRESS')
                try:
                    self.end_address = validate_address_str(
                        end_address_str, self.cidr)
                except ValidateFail as e:
                    raise ConfigFail(
                        "Invalid %s value of %s for %s.\nReason: %s " %
                        (attr_prefix + 'IP_END_ADDRESS',
                         end_address_str, network_name, e))

            # A range must be complete, ordered, and large enough.
            if start_address_str or end_address_str:
                if not end_address_str:
                    raise ConfigFail("Missing attribute %s for %s_NETWORK" %
                                     (attr_prefix + 'IP_END_ADDRESS',
                                      network_name))
                if not start_address_str:
                    raise ConfigFail("Missing attribute %s for %s_NETWORK" %
                                     (attr_prefix + 'IP_START_ADDRESS',
                                      network_name))
                if not self.start_address < self.end_address:
                    raise ConfigFail(
                        "Start address %s not less than end address %s for %s."
                        % (str(self.start_address), str(self.end_address),
                           network_name))
                if not IPRange(start_address_str, end_address_str).size >= \
                        min_addresses:
                    raise ConfigFail("Address range for %s must contain at "
                                     "least %d addresses." %
                                     (network_name, min_addresses))
                self.start_end_in_config = True

            # Explicit addresses must be supplied as a complete trio.
            if floating_address_str or address_0_str or address_1_str:
                if not floating_address_str:
                    raise ConfigFail("Missing attribute %s for %s_NETWORK" %
                                     (attr_prefix + 'IP_FLOATING_ADDRESS',
                                      network_name))
                if not address_0_str:
                    raise ConfigFail("Missing attribute %s for %s_NETWORK" %
                                     (attr_prefix + 'IP_UNIT_0_ADDRESS',
                                      network_name))
                if not address_1_str:
                    raise ConfigFail("Missing attribute %s for %s_NETWORK" %
                                     (attr_prefix + 'IP_UNIT_1_ADDRESS',
                                      network_name))

            # Range and explicit-address styles are mutually exclusive.
            if start_address_str and floating_address_str:
                raise ConfigFail("Overspecified network: Can only set %s "
                                 "and %s OR %s, %s, and %s for "
                                 "%s_NETWORK" %
                                 (attr_prefix + 'IP_START_ADDRESS',
                                  attr_prefix + 'IP_END_ADDRESS',
                                  attr_prefix + 'IP_FLOATING_ADDRESS',
                                  attr_prefix + 'IP_UNIT_0_ADDRESS',
                                  attr_prefix + 'IP_UNIT_1_ADDRESS',
                                  network_name))

            # Default deployments fall back to a range derived from the CIDR.
            if config_type == DEFAULT_CONFIG:
                if not self.start_address:
                    self.start_address = self.cidr[2]
                if not self.end_address:
                    self.end_address = self.cidr[-2]

        # Parse/validate the dynamic IP address allocation
        if system_config.has_option(network_name,
                                    'DYNAMIC_ALLOCATION'):
            dynamic_allocation = system_config.get(network_name,
                                                   'DYNAMIC_ALLOCATION')
            if dynamic_allocation.lower() == 'y':
                self.dynamic_allocation = True
            elif dynamic_allocation.lower() == 'n':
                self.dynamic_allocation = False
            else:
                raise ConfigFail(
                    "Invalid DYNAMIC_ALLOCATION value of %s for %s. "
                    "Valid values: Y or N" %
                    (dynamic_allocation, network_name))

        # Parse/validate the gateway (optional)
        if system_config.has_option(network_name, attr_prefix + 'GATEWAY'):
            gateway_address_str = system_config.get(
                network_name, attr_prefix + 'GATEWAY')
            try:
                self.gateway_address = validate_address_str(
                    gateway_address_str, self.cidr)
            except ValidateFail as e:
                raise ConfigFail(
                    "Invalid %s value of %s for %s.\nReason: %s" %
                    (attr_prefix + 'GATEWAY',
                     gateway_address_str, network_name, e))

        # Parse/validate the logical interface
        if logical_interface_required or system_config.has_option(
                network_name, attr_prefix + 'LOGICAL_INTERFACE'):
            logical_interface_name = system_config.get(
                network_name, attr_prefix + 'LOGICAL_INTERFACE')
            self.logical_interface = LogicalInterface()
            self.logical_interface.parse_config(system_config,
                                                logical_interface_name)

View File

@ -1,10 +1,9 @@
# #
# Copyright (c) 2016-2019 Wind River Systems, Inc. # Copyright (c) 2016-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
from sysinv.common import constants as sysinv_constants
from tsconfig import tsconfig from tsconfig import tsconfig
@ -15,70 +14,9 @@ CONFIG_PERMDIR = tsconfig.CONFIG_PATH
HIERADATA_WORKDIR = '/tmp/hieradata' HIERADATA_WORKDIR = '/tmp/hieradata'
HIERADATA_PERMDIR = tsconfig.PUPPET_PATH + 'hieradata' HIERADATA_PERMDIR = tsconfig.PUPPET_PATH + 'hieradata'
ARMADA_PERMDIR = tsconfig.ARMADA_PATH
HELM_CHARTS_PERMDIR = tsconfig.PLATFORM_PATH + '/helm_charts'
HELM_OVERRIDES_PERMDIR = tsconfig.HELM_OVERRIDES_PATH
KEYRING_WORKDIR = '/tmp/python_keyring' KEYRING_WORKDIR = '/tmp/python_keyring'
KEYRING_PERMDIR = tsconfig.KEYRING_PATH KEYRING_PERMDIR = tsconfig.KEYRING_PATH
INITIAL_CONFIG_COMPLETE_FILE = '/etc/platform/.initial_config_complete' INITIAL_CONFIG_COMPLETE_FILE = '/etc/platform/.initial_config_complete'
CONFIG_FAIL_FILE = '/var/run/.config_fail'
COMMON_CERT_FILE = "/etc/ssl/private/server-cert.pem"
FIREWALL_RULES_FILE = '/etc/platform/iptables.rules'
OPENSTACK_PASSWORD_RULES_FILE = '/etc/keystone/password-rules.conf'
INSTALLATION_FAILED_FILE = '/etc/platform/installation_failed'
BACKUPS_PATH = '/opt/backups' BACKUPS_PATH = '/opt/backups'
INTERFACES_LOG_FILE = "/tmp/configure_interfaces.log"
LINK_MTU_DEFAULT = "1500"
CINDER_LVM_THIN = "thin"
CINDER_LVM_THICK = "thick"
DEFAULT_DATABASE_STOR_SIZE = \
sysinv_constants.DEFAULT_DATABASE_STOR_SIZE
DEFAULT_SMALL_DATABASE_STOR_SIZE = \
sysinv_constants.DEFAULT_SMALL_DATABASE_STOR_SIZE
DEFAULT_SMALL_BACKUP_STOR_SIZE = \
sysinv_constants.DEFAULT_SMALL_BACKUP_STOR_SIZE
DEFAULT_VIRTUAL_DATABASE_STOR_SIZE = \
sysinv_constants.DEFAULT_VIRTUAL_DATABASE_STOR_SIZE
DEFAULT_VIRTUAL_BACKUP_STOR_SIZE = \
sysinv_constants.DEFAULT_VIRTUAL_BACKUP_STOR_SIZE
DEFAULT_EXTENSION_STOR_SIZE = \
sysinv_constants.DEFAULT_EXTENSION_STOR_SIZE
DEFAULT_PLATFORM_STOR_SIZE = \
sysinv_constants.DEFAULT_PLATFORM_STOR_SIZE
SYSTEM_CONFIG_TIMEOUT = 420
SERVICE_ENABLE_TIMEOUT = 180
MINIMUM_ROOT_DISK_SIZE = 500
MAXIMUM_CGCS_LV_SIZE = 500
LDAP_CONTROLLER_CONFIGURE_TIMEOUT = 30
SYSADMIN_MAX_PASSWORD_AGE = 45 # 45 days
LAG_MODE_ACTIVE_BACKUP = "active-backup"
LAG_MODE_BALANCE_XOR = "balance-xor"
LAG_MODE_8023AD = "802.3ad"
LAG_TXHASH_LAYER2 = "layer2"
LAG_MIIMON_FREQUENCY = 100
LOOPBACK_IFNAME = 'lo'
DEFAULT_MULTICAST_SUBNET_IPV4 = '239.1.1.0/28'
DEFAULT_MULTICAST_SUBNET_IPV6 = 'ff08::1:1:0/124'
DEFAULT_MGMT_ON_LOOPBACK_SUBNET_IPV4 = '192.168.204.0/28'
DEFAULT_REGION_NAME = "RegionOne"
DEFAULT_SERVICE_PROJECT_NAME = "services"
SSH_WARNING_MESSAGE = "WARNING: Command should only be run from the " \
"console. Continuing with this terminal may cause " \
"loss of connectivity and configuration failure."
SSH_ERROR_MESSAGE = "ERROR: Command should only be run from the console."

View File

@ -1,102 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Routines for URL-safe encrypting/decrypting
Cloned from git/glance/common
"""
import base64
import os
import random
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import algorithms
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers import modes
from oslo_utils import encodeutils
import six
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
def urlsafe_encrypt(key, plaintext, blocksize=16):
    """Encrypts plaintext.

    Resulting ciphertext will contain URL-safe characters.
    If plaintext is Unicode, encode it to UTF-8 before encryption.

    :param key: AES secret key
    :param plaintext: Input text to be encrypted
    :param blocksize: Non-zero integer multiple of AES blocksize in bytes (16)

    :returns: Resulting ciphertext
    """
    def pad(text):
        """Pad *text* out to a whole number of cipher blocks."""
        pad_length = (blocksize - len(text) % blocksize)
        # chr(0) is the text/padding delimiter, so the random filler
        # bytes must never be zero themselves.
        rng = random.SystemRandom()
        filler = b''.join(six.int2byte(rng.randint(1, 0xFF))
                          for _ in range(pad_length - 1))
        return text + b'\0' + filler

    plaintext = encodeutils.to_utf8(plaintext)
    key = encodeutils.to_utf8(key)
    # Fresh random 16-byte initialization vector for CBC mode.
    init_vector = os.urandom(16)
    cypher = Cipher(algorithms.AES(key), modes.CBC(init_vector),
                    backend=default_backend())
    encryptor = cypher.encryptor()
    padded = encryptor.update(
        pad(six.binary_type(plaintext))) + encryptor.finalize()
    # IV is prepended so the decryptor can recover it.
    encoded = base64.urlsafe_b64encode(init_vector + padded)
    return encoded.decode('ascii') if six.PY3 else encoded
def urlsafe_decrypt(key, ciphertext):
    """Decrypts URL-safe base64 encoded ciphertext.

    On Python 3, the result is decoded from UTF-8.

    :param key: AES secret key
    :param ciphertext: The encrypted text to decrypt

    :returns: Resulting plaintext
    """
    # Normalize unicode inputs to bytes before base64-decoding.
    key = encodeutils.to_utf8(key)
    raw = base64.urlsafe_b64decode(encodeutils.to_utf8(ciphertext))
    # The first 16 bytes of the payload are the CBC initialization vector.
    cypher = Cipher(algorithms.AES(key), modes.CBC(raw[:16]),
                    backend=default_backend())
    decryptor = cypher.decryptor()
    padded = decryptor.update(raw[16:]) + decryptor.finalize()
    # Strip everything from the last chr(0) delimiter onward (the padding).
    text = padded[:padded.rfind(b'\0')]
    return text.decode('utf-8') if six.PY3 else text

View File

@ -1,44 +0,0 @@
#
# Copyright (c) 2017-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
DC Manager Interactions
"""
from controllerconfig.common import log
from Crypto.Hash import MD5
from controllerconfig.common import crypt
import json
LOG = log.get_logger(__name__)
class UserList(object):
    """Decrypted list of user records received from the DC Manager.

    The payload is encrypted with a key derived from *hash_string*
    (its MD5 hex digest) and decodes to a JSON list of user dicts,
    each read here via its 'name' and 'password' keys.
    """
    def __init__(self, user_data, hash_string):
        # Decrypt the data using input hash_string to generate
        # the key.  MD5.update() requires bytes under python3, so
        # encode a str hash_string before hashing.
        if isinstance(hash_string, str):
            hash_string = hash_string.encode('utf-8')
        h = MD5.new()
        h.update(hash_string)
        encryption_key = h.hexdigest()
        user_data_decrypted = crypt.urlsafe_decrypt(encryption_key,
                                                    user_data)
        self._data = json.loads(user_data_decrypted)

    def get_password(self, name):
        """Return the password for user *name*, or None if not found."""
        for user in self._data:
            if user['name'] == name:
                return user['password']
        return None

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2014-2019 Wind River Systems, Inc. # Copyright (c) 2014-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
@ -20,56 +20,21 @@ class ConfigError(Exception):
return self.message or "" return self.message or ""
class ConfigFail(ConfigError):
"""General configuration error."""
pass
class ValidateFail(ConfigError): class ValidateFail(ConfigError):
"""Validation of data failed.""" """Validation of data failed."""
pass pass
class BackupFail(ConfigError):
"""Backup error."""
pass
class UpgradeFail(ConfigError): class UpgradeFail(ConfigError):
"""Upgrade error.""" """Upgrade error."""
pass pass
class BackupWarn(ConfigError):
"""Backup warning."""
pass
class RestoreFail(ConfigError):
"""Backup error."""
pass
class KeystoneFail(ConfigError): class KeystoneFail(ConfigError):
"""Keystone error.""" """Keystone error."""
pass pass
class SysInvFail(ConfigError):
"""System Inventory error."""
pass
class UserQuit(ConfigError):
"""User initiated quit operation."""
pass
class CloneFail(ConfigError):
"""Clone error."""
pass
class TidyStorageFail(ConfigError): class TidyStorageFail(ConfigError):
"""Tidy storage error.""" """Tidy storage error."""
pass pass

View File

@ -12,10 +12,9 @@ import datetime
import iso8601 import iso8601
from controllerconfig.common.exceptions import KeystoneFail from controllerconfig.common.exceptions import KeystoneFail
from controllerconfig.common import log from oslo_log import log
LOG = log.getLogger(__name__)
LOG = log.get_logger(__name__)
class Token(object): class Token(object):

View File

@ -1,49 +0,0 @@
#
# Copyright (c) 2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Logging
"""
import logging
import logging.handlers
# Cache of loggers handed out so far, keyed by name.
_loggers = {}


def get_logger(name):
    """Return the cached logger for *name*, creating it on first use."""
    logger = _loggers.get(name)
    if logger is None:
        logger = logging.getLogger(name)
        _loggers[name] = logger
    return logger
def setup_logger(logger):
    """Attach the platform syslog handler to *logger* at INFO level."""
    # Send logs to /var/log/platform.log via the LOCAL1 syslog facility.
    formatter = logging.Formatter("configassistant[%(process)d] " +
                                  "%(pathname)s:%(lineno)s " +
                                  "%(levelname)8s [%(name)s] %(message)s")
    handler = logging.handlers.SysLogHandler(
        address='/dev/log',
        facility=logging.handlers.SysLogHandler.LOG_LOCAL1)
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
def configure():
    """Apply the platform log setup to every logger created so far."""
    for cached_logger in _loggers.values():
        setup_logger(cached_logger)

View File

@ -1,5 +1,5 @@
""" """
Copyright (c) 2015-2017 Wind River Systems, Inc. Copyright (c) 2015-2020 Wind River Systems, Inc.
SPDX-License-Identifier: Apache-2.0 SPDX-License-Identifier: Apache-2.0
@ -7,16 +7,15 @@ SPDX-License-Identifier: Apache-2.0
import json import json
from controllerconfig.common.exceptions import KeystoneFail from controllerconfig.common.exceptions import KeystoneFail
from controllerconfig.common import dcmanager
from controllerconfig.common import keystone from controllerconfig.common import keystone
from controllerconfig.common import log
from six.moves import http_client as httplib from six.moves import http_client as httplib
from six.moves.urllib import request as urlrequest from six.moves.urllib import request as urlrequest
from six.moves.urllib.error import HTTPError from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError from six.moves.urllib.error import URLError
from oslo_log import log
LOG = log.get_logger(__name__) LOG = log.getLogger(__name__)
def rest_api_request(token, method, api_cmd, api_cmd_headers=None, def rest_api_request(token, method, api_cmd, api_cmd_headers=None,
@ -324,16 +323,3 @@ def delete_project(token, api_url, id):
api_cmd = api_url + "/projects/" + id api_cmd = api_url + "/projects/" + id
response = rest_api_request(token, "DELETE", api_cmd,) response = rest_api_request(token, "DELETE", api_cmd,)
return keystone.Project(response) return keystone.Project(response)
def get_subcloud_config(token, api_url, subcloud_name,
hash_string):
"""
Ask DC Manager for our subcloud configuration
"""
api_cmd = api_url + "/subclouds/" + subcloud_name + "/config"
response = rest_api_request(token, "GET", api_cmd)
config = dict()
config['users'] = dcmanager.UserList(response['users'], hash_string)
return config

View File

@ -1,285 +0,0 @@
#
# Copyright (c) 2014-2015 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
OpenStack
"""
import os
import time
import subprocess
from controllerconfig.common import log
from controllerconfig.common.exceptions import SysInvFail
from controllerconfig.common.rest_api_utils import get_token
from controllerconfig import sysinv_api as sysinv
LOG = log.get_logger(__name__)
# Retry budget used by OpenStack._connect() while waiting for the
# keystone authentication server to come up.
KEYSTONE_AUTH_SERVER_RETRY_CNT = 60
KEYSTONE_AUTH_SERVER_WAIT = 1  # 1sec wait per retry
class OpenStack(object):
    """Context-manager wrapper around an authenticated OpenStack session.

    Reads admin credentials from /etc/platform/openrc, obtains a keystone
    token on entry, and exposes host maintenance helpers (lock, power-off,
    wait-for-disabled) plus a lazily created sysinv client.
    """

    def __init__(self):
        self.admin_token = None
        self.conf = {}
        self._sysinv = None

        # Source the openrc file in a subshell and scrape the resulting
        # environment for the admin credentials.
        source_command = 'source /etc/platform/openrc && env'
        with open(os.devnull, "w") as fnull:
            # universal_newlines=True decodes stdout to text so that
            # line.partition("=") below works under python 3 as well
            # (proc.stdout would otherwise yield bytes and raise TypeError).
            proc = subprocess.Popen(
                ['bash', '-c', source_command],
                stdout=subprocess.PIPE, stderr=fnull,
                universal_newlines=True)
            for line in proc.stdout:
                key, _, value = line.partition("=")
                if key == 'OS_USERNAME':
                    self.conf['admin_user'] = value.strip()
                elif key == 'OS_PASSWORD':
                    self.conf['admin_pwd'] = value.strip()
                elif key == 'OS_PROJECT_NAME':
                    self.conf['admin_tenant'] = value.strip()
                elif key == 'OS_AUTH_URL':
                    self.conf['auth_url'] = value.strip()
                elif key == 'OS_REGION_NAME':
                    self.conf['region_name'] = value.strip()
                elif key == 'OS_USER_DOMAIN_NAME':
                    self.conf['user_domain'] = value.strip()
                elif key == 'OS_PROJECT_DOMAIN_NAME':
                    self.conf['project_domain'] = value.strip()
            proc.communicate()

    def __enter__(self):
        if not self._connect():
            raise Exception('Failed to connect')
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._disconnect()

    def __del__(self):
        self._disconnect()

    def _connect(self):
        """ Connect to an OpenStack instance """
        if self.admin_token is not None:
            self._disconnect()

        # Try to obtain an admin token from keystone, retrying while the
        # authentication server comes up.
        for _ in range(KEYSTONE_AUTH_SERVER_RETRY_CNT):
            self.admin_token = get_token(self.conf['auth_url'],
                                         self.conf['admin_tenant'],
                                         self.conf['admin_user'],
                                         self.conf['admin_pwd'],
                                         self.conf['user_domain'],
                                         self.conf['project_domain'])
            if self.admin_token:
                break
            time.sleep(KEYSTONE_AUTH_SERVER_WAIT)

        return self.admin_token is not None

    def _disconnect(self):
        """ Disconnect from an OpenStack instance """
        self.admin_token = None

    def lock_hosts(self, exempt_hostnames=None, progress_callback=None,
                   timeout=60):
        """ Lock hosts of an OpenStack instance except for host names
            in the exempt list

        :returns: list of host names that could not be locked
        """
        failed_hostnames = []

        if exempt_hostnames is None:
            exempt_hostnames = []

        hosts = sysinv.get_hosts(self.admin_token, self.conf['region_name'])
        if not hosts:
            if progress_callback is not None:
                progress_callback(0, 0, None, None)
            return

        wait = False
        host_i = 0

        # First pass: request a force-lock on every unlocked host.
        for host in hosts:
            if host.name in exempt_hostnames:
                continue

            if host.is_unlocked():
                if not host.force_lock(self.admin_token,
                                       self.conf['region_name']):
                    failed_hostnames.append(host.name)
                    LOG.warning("Could not lock %s" % host.name)
                else:
                    wait = True
            else:
                host_i += 1
                if progress_callback is not None:
                    progress_callback(len(hosts), host_i,
                                      ('locking %s' % host.name),
                                      'DONE')

        if wait and timeout > 5:
            time.sleep(5)
            timeout -= 5

        # Second pass: poll once per second until all locks land or the
        # timeout expires.
        for _ in range(0, timeout):
            wait = False
            for host in hosts:
                if host.name in exempt_hostnames:
                    continue

                if (host.name not in failed_hostnames) and host.is_unlocked():
                    host.refresh_data(self.admin_token,
                                      self.conf['region_name'])

                    if host.is_locked():
                        LOG.info("Locked %s" % host.name)
                        host_i += 1
                        if progress_callback is not None:
                            progress_callback(len(hosts), host_i,
                                              ('locking %s' % host.name),
                                              'DONE')
                    else:
                        LOG.info("Waiting for lock of %s" % host.name)
                        wait = True

            if not wait:
                break

            time.sleep(1)
        else:
            # NOTE(review): on timeout this records only the host left in
            # the loop variable, not every still-unlocked host -- preserved
            # as original behavior, confirm before changing.
            failed_hostnames.append(host.name)
            LOG.warning("Wait failed for lock of %s" % host.name)

        return failed_hostnames

    def power_off_hosts(self, exempt_hostnames=None, progress_callback=None,
                        timeout=60):
        """ Power-off hosts of an OpenStack instance except for host names
            in the exempt list

        :raises SysInvFail: if a power-off request fails or times out
        """
        if exempt_hostnames is None:
            exempt_hostnames = []

        hosts = sysinv.get_hosts(self.admin_token, self.conf['region_name'])

        # Only hosts with board management support can be powered off.
        hosts[:] = [host for host in hosts if host.support_power_off()]
        if not hosts:
            if progress_callback is not None:
                progress_callback(0, 0, None, None)
            return

        wait = False
        host_i = 0

        # First pass: issue the power-off requests.
        for host in hosts:
            if host.name in exempt_hostnames:
                continue

            if host.is_powered_on():
                if not host.power_off(self.admin_token,
                                      self.conf['region_name']):
                    raise SysInvFail("Could not power-off %s" % host.name)
                wait = True
            else:
                host_i += 1
                if progress_callback is not None:
                    progress_callback(len(hosts), host_i,
                                      ('powering off %s' % host.name),
                                      'DONE')

        if wait and timeout > 5:
            time.sleep(5)
            timeout -= 5

        # Second pass: poll once per second until every host reports
        # powered-off or the timeout expires.
        for _ in range(0, timeout):
            wait = False
            for host in hosts:
                if host.name in exempt_hostnames:
                    continue

                if host.is_powered_on():
                    host.refresh_data(self.admin_token,
                                      self.conf['region_name'])

                    if host.is_powered_off():
                        LOG.info("Powered-Off %s" % host.name)
                        host_i += 1
                        if progress_callback is not None:
                            progress_callback(len(hosts), host_i,
                                              ('powering off %s' % host.name),
                                              'DONE')
                    else:
                        LOG.info("Waiting for power-off of %s" % host.name)
                        wait = True

            if not wait:
                break

            time.sleep(1)
        else:
            failed_hosts = [h.name for h in hosts if h.is_powered_on()]
            msg = "Wait timeout for power-off of %s" % failed_hosts
            LOG.info(msg)
            raise SysInvFail(msg)

    def wait_for_hosts_disabled(self, exempt_hostnames=None, timeout=300,
                                interval_step=10):
        """Wait for hosts to be identified as disabled.

        Run check every interval_step seconds

        :returns: True when all non-exempt hosts are disabled, False on timeout
        """
        if exempt_hostnames is None:
            exempt_hostnames = []

        # Integer (floor) division: '/' returns a float under python3,
        # which range() rejects with a TypeError.
        for _ in range(timeout // interval_step):
            hosts = sysinv.get_hosts(self.admin_token,
                                     self.conf['region_name'])
            if not hosts:
                time.sleep(interval_step)
                continue

            for host in hosts:
                if host.name in exempt_hostnames:
                    continue

                if host.is_enabled():
                    LOG.info("host %s is still enabled" % host.name)
                    break
            else:
                # for/else: no host broke out, so all are disabled.
                LOG.info("all hosts disabled.")
                return True

            time.sleep(interval_step)

        return False

    @property
    def sysinv(self):
        if self._sysinv is None:
            # TOX cannot import cgts_client and all the dependencies therefore
            # the client is being lazy loaded since TOX doesn't actually
            # require the cgtsclient module.
            from cgtsclient import client as cgts_client

            endpoint = self.admin_token.get_service_url(
                self.conf['region_name'], "sysinv", "platform", 'admin')
            self._sysinv = cgts_client.Client(
                sysinv.API_VERSION,
                endpoint=endpoint,
                token=self.admin_token.get_id())

        return self._sysinv

View File

@ -1,31 +0,0 @@
import sys
from controllerconfig.common import log
LOG = log.get_logger(__name__)
class ProgressRunner(object):
    """Run a sequence of (action, message) steps with console progress.

    Steps are registered with add() and executed in order by run(),
    which writes "NN/NN: message ... DONE" progress lines to stdout.
    """

    def __init__(self):
        # Per-instance step list.  This was previously a class attribute
        # (steps = []), which made every instance share -- and keep
        # accumulating into -- the same list.
        self.steps = []

    def add(self, action, message):
        """Register a step: *action* is a no-arg callable, *message* its label."""
        self.steps.append((action, message))

    def run(self):
        """Execute all registered steps in order, logging progress.

        Re-raises the first exception an action throws.
        """
        total = len(self.steps)
        for i, step in enumerate(self.steps, start=1):
            action, message = step
            LOG.info("Start step: %s" % message)
            sys.stdout.write(
                "\n%.2u/%.2u: %s ... " % (i, total, message))
            sys.stdout.flush()
            try:
                action()
                sys.stdout.write('DONE')
                sys.stdout.flush()
            except Exception:
                # Flush partial progress before propagating the failure.
                sys.stdout.flush()
                raise

            LOG.info("Finish step: %s" % message)
        sys.stdout.write("\n")
        sys.stdout.flush()

View File

@ -1,629 +0,0 @@
"""
Copyright (c) 2015-2019 Wind River Systems, Inc.
SPDX-License-Identifier: Apache-2.0
"""
from __future__ import print_function
from six.moves import configparser
import os
import subprocess
import sys
import textwrap
import time
from controllerconfig import utils
import uuid
from controllerconfig.common import constants
from controllerconfig.common import log
from controllerconfig.common import rest_api_utils as rutils
from controllerconfig.common.exceptions import KeystoneFail
from controllerconfig.common.configobjects import REGION_CONFIG
from controllerconfig.common.configobjects import SUBCLOUD_CONFIG
from controllerconfig import ConfigFail
from controllerconfig.configassistant import ConfigAssistant
from controllerconfig.systemconfig import parse_system_config
from controllerconfig.systemconfig import configure_management_interface
from controllerconfig.systemconfig import create_cgcs_config_file
from controllerconfig import DEFAULT_DOMAIN_NAME
# Temporary file for building cgcs_config
TEMP_CGCS_CONFIG_FILE = "/tmp/cgcs_config"

# For region mode, this is the list of users that we expect to find configured
# in the region config file as <USER>_USER_KEY and <USER>_PASSWORD.
# For distributed cloud, this is the list of users that we expect to find
# configured in keystone. The password for each user will be retrieved from
# the DC Manager in the system controller and added to the region config file.
# The format is:
#   REGION_NAME = key in region config file for this user's region
#   USER_KEY = key in region config file for this user's name
#   USER_NAME = user name in keystone

# Tuple indices for the EXPECTED_USERS entries below
REGION_NAME = 0
USER_KEY = 1
USER_NAME = 2

EXPECTED_USERS = [
    ('REGION_2_SERVICES', 'SYSINV', 'sysinv'),
    ('REGION_2_SERVICES', 'PATCHING', 'patching'),
    ('REGION_2_SERVICES', 'NFV', 'vim'),
    ('REGION_2_SERVICES', 'MTCE', 'mtce'),
    ('REGION_2_SERVICES', 'FM', 'fm'),
    ('REGION_2_SERVICES', 'BARBICAN', 'barbican')]

# This a description of the region 2 endpoints that we expect to configure or
# find configured in keystone. The format is as follows:
#   SERVICE_NAME = key in region config file for this service's name
#   SERVICE_TYPE = key in region config file for this service's type
#   PUBLIC_URL = required publicurl - {} is replaced with CAM floating IP
#   INTERNAL_URL = required internalurl - {} is replaced with CLM floating IP
#   ADMIN_URL = required adminurl - {} is replaced with CLM floating IP
#   DESCRIPTION = Description of the service (for automatic configuration)

# Tuple indices for the endpoint descriptions below
SERVICE_NAME = 0
SERVICE_TYPE = 1
PUBLIC_URL = 2
INTERNAL_URL = 3
ADMIN_URL = 4
DESCRIPTION = 5

EXPECTED_REGION2_ENDPOINTS = [
    ('SYSINV_SERVICE_NAME', 'SYSINV_SERVICE_TYPE',
     'http://{}:6385/v1',
     'http://{}:6385/v1',
     'http://{}:6385/v1',
     'SysInv Service'),
    ('PATCHING_SERVICE_NAME', 'PATCHING_SERVICE_TYPE',
     'http://{}:15491',
     'http://{}:5491',
     'http://{}:5491',
     'Patching Service'),
    ('NFV_SERVICE_NAME', 'NFV_SERVICE_TYPE',
     'http://{}:4545',
     'http://{}:4545',
     'http://{}:4545',
     'Virtual Infrastructure Manager'),
    ('FM_SERVICE_NAME', 'FM_SERVICE_TYPE',
     'http://{}:18002',
     'http://{}:18002',
     'http://{}:18002',
     'Fault Management Service'),
    ('BARBICAN_SERVICE_NAME', 'BARBICAN_SERVICE_TYPE',
     'http://{}:9311',
     'http://{}:9311',
     'http://{}:9311',
     'OpenStack Key Manager Service'),
]

# Keystone itself always lives in region 1; same tuple layout as above.
EXPECTED_KEYSTONE_ENDPOINT = (
    'KEYSTONE_SERVICE_NAME', 'KEYSTONE_SERVICE_TYPE',
    'http://{}:8081/keystone/main/v2.0',
    'http://{}:8081/keystone/main/v2.0',
    'http://{}:8081/keystone/admin/v2.0',
    'OpenStack Identity')

LOG = log.get_logger(__name__)
def validate_region_one_keystone_config(region_config, token, api_url, users,
                                        services, endpoints, create=False,
                                        config_type=REGION_CONFIG,
                                        user_config=None):
    """Validate that the required region one keystone configuration is in
    place.

    If create is True, any missing entries will be set up to be added
    to keystone later on by puppet.

    :param region_config: parsed region/subcloud configuration
    :param token: admin keystone token
    :param api_url: keystone v3 API URL
    :param users: keystone users collection (rutils.get_users)
    :param services: keystone services collection (rutils.get_services)
    :param endpoints: keystone endpoints collection (rutils.get_endpoints)
    :param create: when True, prepare missing entries for later creation
    :param config_type: REGION_CONFIG or SUBCLOUD_CONFIG
    :param user_config: subcloud user/password config (subclouds only)
    :raises ConfigFail: when a required keystone entry is missing or wrong
    """
    region_1_name = region_config.get('SHARED_SERVICES', 'REGION_NAME')
    region_2_name = region_config.get('REGION_2_SERVICES', 'REGION_NAME')

    # Determine what keystone entries are expected
    expected_users = EXPECTED_USERS
    expected_region_2_endpoints = EXPECTED_REGION2_ENDPOINTS
    # Keystone is always in region 1
    expected_region_1_endpoints = [EXPECTED_KEYSTONE_ENDPOINT]

    domains = rutils.get_domains(token, api_url)
    # Verify service project domain, creating if necessary
    if region_config.has_option('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME'):
        project_domain = region_config.get('REGION_2_SERVICES',
                                           'PROJECT_DOMAIN_NAME')
    else:
        project_domain = DEFAULT_DOMAIN_NAME
    project_domain_id = domains.get_domain_id(project_domain)
    if not project_domain_id:
        if create and config_type == REGION_CONFIG:
            region_config.set('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME',
                              project_domain)
        else:
            raise ConfigFail(
                "Keystone configuration error: service project domain '%s' is "
                "not configured." % project_domain)

    # Verify service project, creating if necessary
    if region_config.has_option('SHARED_SERVICES',
                                'SERVICE_PROJECT_NAME'):
        service_project = region_config.get('SHARED_SERVICES',
                                            'SERVICE_PROJECT_NAME')
    else:
        service_project = region_config.get('SHARED_SERVICES',
                                            'SERVICE_TENANT_NAME')
    projects = rutils.get_projects(token, api_url)
    project_id = projects.get_project_id(service_project)
    if not project_id:
        if create and config_type == REGION_CONFIG:
            region_config.set('SHARED_SERVICES', 'SERVICE_TENANT_NAME',
                              service_project)
        else:
            raise ConfigFail(
                "Keystone configuration error: service project '%s' is not "
                "configured." % service_project)

    # Verify and retrieve the id of the admin role (only needed when creating)
    roles = rutils.get_roles(token, api_url)
    role_id = roles.get_role_id('admin')
    if not role_id and create:
        raise ConfigFail("Keystone configuration error: No admin role present")

    # verify that the service user domain is configured, creating if necessary
    if region_config.has_option('REGION_2_SERVICES', 'USER_DOMAIN_NAME'):
        user_domain = region_config.get('REGION_2_SERVICES',
                                        'USER_DOMAIN_NAME')
    else:
        user_domain = DEFAULT_DOMAIN_NAME
    domains = rutils.get_domains(token, api_url)
    user_domain_id = domains.get_domain_id(user_domain)
    if not user_domain_id:
        if create and config_type == REGION_CONFIG:
            # Bug fix: the option value was previously omitted, which left
            # USER_DOMAIN_NAME set to None in the region configuration.
            region_config.set('REGION_2_SERVICES',
                              'USER_DOMAIN_NAME', user_domain)
        else:
            raise ConfigFail(
                "Unable to obtain id for %s domain. Please ensure "
                "keystone configuration is correct." % user_domain)

    auth_url = region_config.get('SHARED_SERVICES', 'KEYSTONE_ADMINURL')
    if config_type == REGION_CONFIG:
        # Verify that all users are configured and can retrieve a token,
        # Optionally set up to create missing users + their admin role
        for user in expected_users:
            auth_user = region_config.get(user[REGION_NAME],
                                          user[USER_KEY] + '_USER_NAME')
            user_id = users.get_user_id(auth_user)
            auth_password = None
            if not user_id and create:
                if not region_config.has_option(
                        user[REGION_NAME], user[USER_KEY] + '_PASSWORD'):
                    # Generate random password for new user via
                    # /dev/urandom if necessary
                    try:
                        region_config.set(
                            user[REGION_NAME], user[USER_KEY] + '_PASSWORD',
                            uuid.uuid4().hex[:10] + "TiC2*")
                    except Exception as e:
                        raise ConfigFail("Failed to generate random user "
                                         "password: %s" % e)
            elif (user_id and user_domain_id and
                    project_id and project_domain_id):
                # If there is a user_id existing then we cannot use
                # a randomized password as it was either created by
                # a previous run of regionconfig or was created as
                # part of Titanium Cloud Primary region config
                if not region_config.has_option(
                        user[REGION_NAME], user[USER_KEY] + '_PASSWORD'):
                    raise ConfigFail("Failed to find configured password "
                                     "for pre-defined user %s" % auth_user)
                auth_password = region_config.get(user[REGION_NAME],
                                                  user[USER_KEY] + '_PASSWORD')
                # Verify that the existing user can seek an auth token
                user_token = rutils.get_token(auth_url, service_project,
                                              auth_user,
                                              auth_password, user_domain,
                                              project_domain)
                if not user_token:
                    raise ConfigFail(
                        "Unable to obtain keystone token for %s user. "
                        "Please ensure keystone configuration is correct."
                        % auth_user)
    else:
        # For subcloud configs we re-use the users from the system controller
        # (the primary region).
        for user in expected_users:
            auth_user = user[USER_NAME]
            user_id = users.get_user_id(auth_user)
            auth_password = None

            if user_id:
                # Add the password to the region config so it will be used when
                # configuring services.
                auth_password = user_config.get_password(user[USER_NAME])
                region_config.set(user[REGION_NAME],
                                  user[USER_KEY] + '_PASSWORD',
                                  auth_password)
            else:
                raise ConfigFail(
                    "Unable to obtain user (%s). Please ensure "
                    "keystone configuration is correct." % user[USER_NAME])

            # Verify that the existing user can seek an auth token
            user_token = rutils.get_token(auth_url, service_project, auth_user,
                                          auth_password, user_domain,
                                          project_domain)
            if not user_token:
                raise ConfigFail(
                    "Unable to obtain keystone token for %s user. "
                    "Please ensure keystone configuration is correct." %
                    auth_user)

    # Verify that region one endpoints & services for shared services
    # match our requirements
    for endpoint in expected_region_1_endpoints:
        service_name = region_config.get('SHARED_SERVICES',
                                         endpoint[SERVICE_NAME])
        service_type = region_config.get('SHARED_SERVICES',
                                         endpoint[SERVICE_TYPE])
        try:
            service_id = services.get_service_id(service_name, service_type)
        except KeystoneFail:
            # No option to create services for region one, if those are not
            # present, something is seriously wrong
            raise

        # Extract region one url information from the existing endpoint entry:
        try:
            endpoints.get_service_url(
                region_1_name, service_id, "public")
            endpoints.get_service_url(
                region_1_name, service_id, "internal")
            endpoints.get_service_url(
                region_1_name, service_id, "admin")
        except KeystoneFail:
            # Fail since shared services endpoints are not found
            raise ConfigFail("Endpoint for shared service %s "
                             "is not configured" % service_name)

    # Verify that region two endpoints & services match our requirements,
    # optionally creating missing entries
    public_address = utils.get_optional(region_config, 'CAN_NETWORK',
                                        'CAN_IP_START_ADDRESS')
    if not public_address:
        public_address = utils.get_optional(region_config, 'CAN_NETWORK',
                                            'CAN_IP_FLOATING_ADDRESS')
    if not public_address:
        public_address = utils.get_optional(region_config, 'OAM_NETWORK',
                                            'IP_START_ADDRESS')
    if not public_address:
        # AIO-SX configuration
        public_address = utils.get_optional(region_config, 'OAM_NETWORK',
                                            'IP_ADDRESS')
    if not public_address:
        public_address = region_config.get('OAM_NETWORK',
                                           'IP_FLOATING_ADDRESS')

    if region_config.has_section('CLM_NETWORK'):
        internal_address = region_config.get('CLM_NETWORK',
                                             'CLM_IP_START_ADDRESS')
    else:
        internal_address = region_config.get('MGMT_NETWORK',
                                             'IP_START_ADDRESS')

    for endpoint in expected_region_2_endpoints:
        service_name = utils.get_service(region_config, 'REGION_2_SERVICES',
                                         endpoint[SERVICE_NAME])
        service_type = utils.get_service(region_config, 'REGION_2_SERVICES',
                                         endpoint[SERVICE_TYPE])
        service_id = services.get_service_id(service_name, service_type)

        expected_public_url = endpoint[PUBLIC_URL].format(public_address)
        expected_internal_url = endpoint[INTERNAL_URL].format(internal_address)
        expected_admin_url = endpoint[ADMIN_URL].format(internal_address)

        try:
            public_url = endpoints.get_service_url(region_2_name, service_id,
                                                   "public")
            internal_url = endpoints.get_service_url(region_2_name, service_id,
                                                     "internal")
            admin_url = endpoints.get_service_url(region_2_name, service_id,
                                                  "admin")
        except KeystoneFail:
            # The endpoint will be created optionally
            if not create:
                raise ConfigFail("Keystone configuration error: Unable to "
                                 "find endpoints for service %s"
                                 % service_name)
            continue

        # Validate the existing endpoints
        for endpointtype, found, expected in [
                ('public', public_url, expected_public_url),
                ('internal', internal_url, expected_internal_url),
                ('admin', admin_url, expected_admin_url)]:
            if found != expected:
                raise ConfigFail(
                    "Keystone configuration error for:\nregion ({}), "
                    "service name ({}), service type ({})\n"
                    "expected {}: {}\nconfigured {}: {}".format(
                        region_2_name, service_name, service_type,
                        endpointtype, expected, endpointtype, found))
def validate_region_one_ldap_config(region_config):
    """Validate that the shared LDAP service is reachable via an ldapsearch.

    :param region_config: parsed region configuration
    :raises ConfigFail: when the LDAP server cannot be queried
    """
    ldap_url = region_config.get('SHARED_SERVICES', 'LDAP_SERVICE_URL')
    search_cmd = ["ldapsearch", "-xH", ldap_url,
                  "-b", "dc=cgcs,dc=local", "(objectclass=*)"]
    try:
        # Discard all ldapsearch output; only the exit status matters.
        subprocess.check_call(search_cmd,
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.DEVNULL)
    except subprocess.CalledProcessError:
        raise ConfigFail("LDAP configuration error: not accessible")
def set_subcloud_config_defaults(region_config):
    """Seed region_config with the defaults every subcloud uses.

    :param region_config: parsed subcloud configuration (mutated in place)
    """
    # Endpoints are always created for subclouds
    region_config.set('REGION_2_SERVICES', 'CREATE', 'Y')

    # Subclouds use the default service project
    region_config.set('SHARED_SERVICES', 'SERVICE_PROJECT_NAME',
                      constants.DEFAULT_SERVICE_PROJECT_NAME)

    # Register the expected users so that validation passes; the services
    # are later configured to use the users from the system controller.
    for expected in EXPECTED_USERS:
        region_config.set(expected[REGION_NAME],
                          expected[USER_KEY] + '_USER_NAME',
                          expected[USER_NAME])
def configure_region(config_file, config_type=REGION_CONFIG):
    """Configure the region (or subcloud).

    Parses and validates the given config file, brings up the management
    interface, authenticates with the region one keystone, validates (or
    prepares creation of) keystone entries, then builds the cgcs_config
    apply file and applies it via ConfigAssistant.

    :param config_file: path to the region/subcloud configuration file
    :param config_type: REGION_CONFIG or SUBCLOUD_CONFIG
    :raises ConfigFail: on any parsing, validation or configuration failure
    """
    # Parse the region/subcloud config file
    print("Parsing configuration file... ", end=' ')
    region_config = parse_system_config(config_file)
    print("DONE")

    if config_type == SUBCLOUD_CONFIG:
        # Set defaults in region_config for subclouds
        set_subcloud_config_defaults(region_config)

    # Validate the region/subcloud config file
    print("Validating configuration file... ", end=' ')
    try:
        # validate_only: no file is written on this first pass
        create_cgcs_config_file(None, region_config, None, None, None,
                                config_type=config_type,
                                validate_only=True)
    except configparser.Error as e:
        raise ConfigFail("Error parsing configuration file %s: %s" %
                         (config_file, e))
    print("DONE")

    # Bring up management interface to allow us to reach Region 1
    print("Configuring management interface... ", end=' ')
    configure_management_interface(region_config, config_type=config_type)
    print("DONE")

    # Get token from keystone
    print("Retrieving keystone token...", end=' ')
    sys.stdout.flush()
    auth_url = region_config.get('SHARED_SERVICES', 'KEYSTONE_ADMINURL')
    # ADMIN_TENANT_NAME is the legacy key; fall back to ADMIN_PROJECT_NAME
    if region_config.has_option('SHARED_SERVICES', 'ADMIN_TENANT_NAME'):
        auth_project = region_config.get('SHARED_SERVICES',
                                         'ADMIN_TENANT_NAME')
    else:
        auth_project = region_config.get('SHARED_SERVICES',
                                         'ADMIN_PROJECT_NAME')
    auth_user = region_config.get('SHARED_SERVICES', 'ADMIN_USER_NAME')
    auth_password = region_config.get('SHARED_SERVICES', 'ADMIN_PASSWORD')
    if region_config.has_option('SHARED_SERVICES', 'ADMIN_USER_DOMAIN'):
        admin_user_domain = region_config.get('SHARED_SERVICES',
                                              'ADMIN_USER_DOMAIN')
    else:
        admin_user_domain = DEFAULT_DOMAIN_NAME
    if region_config.has_option('SHARED_SERVICES',
                                'ADMIN_PROJECT_DOMAIN'):
        admin_project_domain = region_config.get('SHARED_SERVICES',
                                                 'ADMIN_PROJECT_DOMAIN')
    else:
        admin_project_domain = DEFAULT_DOMAIN_NAME

    attempts = 0
    token = None
    # Wait for connectivity to region one. It can take some time, especially if
    # we have LAG on the management network.
    while not token:
        token = rutils.get_token(auth_url, auth_project, auth_user,
                                 auth_password, admin_user_domain,
                                 admin_project_domain)
        if not token:
            attempts += 1
            if attempts < 10:
                # Retry for up to ~90 seconds, showing progress dots
                print("\rRetrieving keystone token...{}".format(
                    '.' * attempts), end=' ')
                sys.stdout.flush()
                time.sleep(10)
            else:
                raise ConfigFail(
                    "Unable to obtain keystone token. Please ensure "
                    "networking and keystone configuration is correct.")
    print("DONE")

    # Get services, endpoints, users and domains from keystone
    print("Retrieving services, endpoints and users from keystone... ",
          end=' ')
    region_name = region_config.get('SHARED_SERVICES', 'REGION_NAME')
    service_name = region_config.get('SHARED_SERVICES',
                                     'KEYSTONE_SERVICE_NAME')
    service_type = region_config.get('SHARED_SERVICES',
                                     'KEYSTONE_SERVICE_TYPE')

    # The catalog publishes the v2.0 admin URL; switch to the v3 API
    api_url = token.get_service_url(
        region_name, service_name, service_type, "admin").replace(
            'v2.0', 'v3')

    services = rutils.get_services(token, api_url)
    endpoints = rutils.get_endpoints(token, api_url)
    users = rutils.get_users(token, api_url)
    domains = rutils.get_domains(token, api_url)
    if not services or not endpoints or not users:
        raise ConfigFail(
            "Unable to retrieve services, endpoints or users from keystone. "
            "Please ensure networking and keystone configuration is correct.")
    print("DONE")

    user_config = None
    if config_type == SUBCLOUD_CONFIG:
        # Retrieve subcloud configuration from dcmanager
        print("Retrieving configuration from dcmanager... ", end=' ')
        dcmanager_url = token.get_service_url(
            'SystemController', 'dcmanager', 'dcmanager', "admin")
        subcloud_name = region_config.get('REGION_2_SERVICES',
                                          'REGION_NAME')
        subcloud_management_subnet = region_config.get('MGMT_NETWORK',
                                                       'CIDR')
        # hash_string authenticates this subcloud to dcmanager
        hash_string = subcloud_name + subcloud_management_subnet
        subcloud_config = rutils.get_subcloud_config(token, dcmanager_url,
                                                     subcloud_name,
                                                     hash_string)
        user_config = subcloud_config['users']
        print("DONE")

    try:
        # Configure missing region one keystone entries
        create = True
        # Prepare region configuration for puppet to create keystone identities
        if (region_config.has_option('REGION_2_SERVICES', 'CREATE') and
                region_config.get('REGION_2_SERVICES', 'CREATE') == 'Y'):
            print("Preparing keystone configuration... ", end=' ')
        # If keystone configuration for this region already in place,
        # validate it only
        else:
            # Validate region one keystone config
            create = False
            print("Validating keystone configuration... ", end=' ')

        validate_region_one_keystone_config(region_config, token, api_url,
                                            users, services, endpoints, create,
                                            config_type=config_type,
                                            user_config=user_config)
        print("DONE")

        # validate ldap if it is shared
        if region_config.has_option('SHARED_SERVICES', 'LDAP_SERVICE_URL'):
            print("Validating ldap configuration... ", end=' ')
            validate_region_one_ldap_config(region_config)
            print("DONE")

        # Create cgcs_config file
        print("Creating config apply file... ", end=' ')
        try:
            create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, region_config,
                                    services, endpoints, domains,
                                    config_type=config_type)
        except configparser.Error as e:
            raise ConfigFail("Error parsing configuration file %s: %s" %
                             (config_file, e))
        print("DONE")

        # Configure controller
        assistant = ConfigAssistant()
        assistant.configure(TEMP_CGCS_CONFIG_FILE, display_config=False)
    except ConfigFail as e:
        print("A configuration failure has occurred.", end=' ')
        raise e
def show_help_region():
    """Print command-line usage for the region configuration command."""
    script_name = sys.argv[0]
    print("Usage: %s [OPTIONS] <CONFIG_FILE>" % script_name)
    summary = ("Perform region configuration using the region "
               "configuration from CONFIG_FILE.")
    print(textwrap.fill(summary, 80))
    print("--allow-ssh Allow configuration to be executed in "
          "ssh\n")
def show_help_subcloud():
    """Print command-line usage for the subcloud configuration command."""
    script_name = sys.argv[0]
    print("Usage: %s [OPTIONS] <CONFIG_FILE>" % script_name)
    summary = ("Perform subcloud configuration using the subcloud "
               "configuration from CONFIG_FILE.")
    print(textwrap.fill(summary, 80))
    print("--allow-ssh Allow configuration to be executed in "
          "ssh\n")
def config_main(config_type=REGION_CONFIG):
    """Entry point for region/subcloud configuration.

    Parses command-line options, checks the execution environment (console
    vs ssh), runs the configuration, and always removes the temporary
    cgcs_config file afterwards.

    :param config_type: REGION_CONFIG or SUBCLOUD_CONFIG
    :raises ConfigFail: when config_type is not a recognized value
    """
    allow_ssh = False
    if config_type == REGION_CONFIG:
        config_file = "/home/sysadmin/region_config"
    elif config_type == SUBCLOUD_CONFIG:
        config_file = "/home/sysadmin/subcloud_config"
    else:
        raise ConfigFail("Invalid config_type: %s" % config_type)

    argv = sys.argv
    for idx in range(1, len(argv)):
        option = argv[idx]
        if option in ('--help', '-h', '-?'):
            if config_type == REGION_CONFIG:
                show_help_region()
            else:
                show_help_subcloud()
            exit(1)
        elif option == "--allow-ssh":
            allow_ssh = True
        elif idx == len(argv) - 1:
            # The final positional argument is the configuration file
            config_file = option
        else:
            print("Invalid option. Use --help for more information.")
            exit(1)

    log.configure()

    # Check if that the command is being run from the console
    if utils.is_ssh_parent():
        if allow_ssh:
            print(textwrap.fill(constants.SSH_WARNING_MESSAGE, 80))
            print('')
        else:
            print(textwrap.fill(constants.SSH_ERROR_MESSAGE, 80))
            exit(1)

    if not os.path.isfile(config_file):
        print("Config file %s does not exist." % config_file)
        exit(1)

    try:
        configure_region(config_file, config_type=config_type)
    except KeyboardInterrupt:
        print("\nAborting configuration")
    except ConfigFail as e:
        LOG.exception(e)
        print("\nConfiguration failed: {}".format(e))
    except Exception as e:
        LOG.exception(e)
        print("\nConfiguration failed: {}".format(e))
    else:
        print("\nConfiguration finished successfully.")
    finally:
        if os.path.isfile(TEMP_CGCS_CONFIG_FILE):
            os.remove(TEMP_CGCS_CONFIG_FILE)
def region_main():
    """Console entry point for region configuration."""
    config_main(config_type=REGION_CONFIG)
def subcloud_main():
    """Console entry point for subcloud configuration."""
    config_main(config_type=SUBCLOUD_CONFIG)

View File

@ -1,579 +0,0 @@
#
# Copyright (c) 2014-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
System Inventory Interactions
"""
import json
import openstack
from six.moves.urllib import request as urlrequest
from six.moves.urllib.error import URLError
from six.moves.urllib.error import HTTPError
from controllerconfig.common import log
from controllerconfig.common.exceptions import KeystoneFail
# Module-level logger.
LOG = log.get_logger(__name__)

# Version of the System Inventory REST API targeted by this module.
API_VERSION = 1

# Host Personality Constants
HOST_PERSONALITY_NOT_SET = ""
HOST_PERSONALITY_UNKNOWN = "unknown"
HOST_PERSONALITY_CONTROLLER = "controller"
HOST_PERSONALITY_WORKER = "worker"
HOST_PERSONALITY_STORAGE = "storage"

# Host Administrative State Constants
HOST_ADMIN_STATE_NOT_SET = ""
HOST_ADMIN_STATE_UNKNOWN = "unknown"
HOST_ADMIN_STATE_LOCKED = "locked"
HOST_ADMIN_STATE_UNLOCKED = "unlocked"

# Host Operational State Constants
HOST_OPERATIONAL_STATE_NOT_SET = ""
HOST_OPERATIONAL_STATE_UNKNOWN = "unknown"
HOST_OPERATIONAL_STATE_ENABLED = "enabled"
HOST_OPERATIONAL_STATE_DISABLED = "disabled"

# Host Availability State Constants
HOST_AVAIL_STATE_NOT_SET = ""
HOST_AVAIL_STATE_UNKNOWN = "unknown"
HOST_AVAIL_STATE_AVAILABLE = "available"
HOST_AVAIL_STATE_ONLINE = "online"
HOST_AVAIL_STATE_OFFLINE = "offline"
HOST_AVAIL_STATE_POWERED_OFF = "powered-off"
HOST_AVAIL_STATE_POWERED_ON = "powered-on"

# Host Board Management Constants
HOST_BM_TYPE_NOT_SET = ""
HOST_BM_TYPE_UNKNOWN = "unknown"
HOST_BM_TYPE_ILO3 = 'ilo3'
HOST_BM_TYPE_ILO4 = 'ilo4'

# Host invprovision state
HOST_PROVISIONING = "provisioning"
HOST_PROVISIONED = "provisioned"
class Host(object):
    """A single host as reported by System Inventory (sysinv).

    Holds a snapshot of the host's state and provides helpers to query
    that state and to request state transitions (lock/unlock, power
    on/off) through the sysinv REST API.
    """

    def __init__(self, hostname, host_data=None):
        """Initialize the host, optionally from a sysinv ihost dict."""
        self.name = hostname
        self.personality = HOST_PERSONALITY_NOT_SET
        self.admin_state = HOST_ADMIN_STATE_NOT_SET
        self.operational_state = HOST_OPERATIONAL_STATE_NOT_SET
        self.avail_status = []
        self.bm_type = HOST_BM_TYPE_NOT_SET
        self.uuid = None
        self.config_status = None
        self.invprovision = None
        self.boot_device = None
        self.rootfs_device = None
        self.console = None
        self.tboot = None

        if host_data is not None:
            self.__host_set_state__(host_data)

    def __host_set_state__(self, host_data):
        """Update the cached state from a sysinv ihost response dict."""
        if host_data is None:
            self.admin_state = HOST_ADMIN_STATE_UNKNOWN
            self.operational_state = HOST_OPERATIONAL_STATE_UNKNOWN
            self.avail_status = []
            self.bm_type = HOST_BM_TYPE_NOT_SET
            # Bug fix: previously execution fell through and dereferenced
            # host_data below, raising TypeError whenever a refresh failed.
            return

        # Set personality
        if host_data['personality'] == "controller":
            self.personality = HOST_PERSONALITY_CONTROLLER
        elif host_data['personality'] == "worker":
            self.personality = HOST_PERSONALITY_WORKER
        elif host_data['personality'] == "storage":
            self.personality = HOST_PERSONALITY_STORAGE
        else:
            self.personality = HOST_PERSONALITY_UNKNOWN

        # Set administrative state
        if host_data['administrative'] == "locked":
            self.admin_state = HOST_ADMIN_STATE_LOCKED
        elif host_data['administrative'] == "unlocked":
            self.admin_state = HOST_ADMIN_STATE_UNLOCKED
        else:
            self.admin_state = HOST_ADMIN_STATE_UNKNOWN

        # Set operational state
        if host_data['operational'] == "enabled":
            self.operational_state = HOST_OPERATIONAL_STATE_ENABLED
        elif host_data['operational'] == "disabled":
            self.operational_state = HOST_OPERATIONAL_STATE_DISABLED
        else:
            self.operational_state = HOST_OPERATIONAL_STATE_UNKNOWN

        # Set availability status
        self.avail_status[:] = []
        if host_data['availability'] == "available":
            self.avail_status.append(HOST_AVAIL_STATE_AVAILABLE)
        elif host_data['availability'] == "online":
            self.avail_status.append(HOST_AVAIL_STATE_ONLINE)
        elif host_data['availability'] == "offline":
            self.avail_status.append(HOST_AVAIL_STATE_OFFLINE)
        elif host_data['availability'] == "power-on":
            self.avail_status.append(HOST_AVAIL_STATE_POWERED_ON)
        elif host_data['availability'] == "power-off":
            self.avail_status.append(HOST_AVAIL_STATE_POWERED_OFF)
        else:
            self.avail_status.append(HOST_AVAIL_STATE_AVAILABLE)

        # Set board management type
        if host_data['bm_type'] is None:
            self.bm_type = HOST_BM_TYPE_NOT_SET
        elif host_data['bm_type'] == 'ilo3':
            self.bm_type = HOST_BM_TYPE_ILO3
        elif host_data['bm_type'] == 'ilo4':
            self.bm_type = HOST_BM_TYPE_ILO4
        else:
            self.bm_type = HOST_BM_TYPE_UNKNOWN

        if host_data['invprovision'] == 'provisioned':
            self.invprovision = HOST_PROVISIONED
        else:
            self.invprovision = HOST_PROVISIONING

        self.uuid = host_data['uuid']
        self.config_status = host_data['config_status']
        self.boot_device = host_data['boot_device']
        self.rootfs_device = host_data['rootfs_device']
        self.console = host_data['console']
        self.tboot = host_data['tboot']

    def __host_update__(self, admin_token, region_name):
        """GET this host's ihost record; return the dict or None on error."""
        try:
            url = admin_token.get_service_admin_url("platform", "sysinv",
                                                    region_name)
            url += "/ihosts/" + self.name
            request_info = urlrequest.Request(url)
            request_info.add_header("X-Auth-Token", admin_token.get_id())
            request_info.add_header("Accept", "application/json")
            request = urlrequest.urlopen(request_info)
            response = json.loads(request.read())
            request.close()
            return response
        except KeystoneFail as e:
            LOG.error("Keystone authentication failed:{} ".format(e))
            return None
        except HTTPError as e:
            LOG.error("%s, %s" % (e.code, e.read()))
            if e.code == 401:
                # Token rejected; force re-authentication on next use
                admin_token.set_expired()
            return None
        except URLError as e:
            LOG.error(e)
            return None

    def __host_action__(self, admin_token, action, region_name):
        """PATCH an action (JSON string) to this host; return success bool."""
        try:
            url = admin_token.get_service_admin_url("platform", "sysinv",
                                                    region_name)
            url += "/ihosts/" + self.name
            request_info = urlrequest.Request(url)
            request_info.get_method = lambda: 'PATCH'
            request_info.add_header("X-Auth-Token", admin_token.get_id())
            request_info.add_header("Content-type", "application/json")
            request_info.add_header("Accept", "application/json")
            # Bug fix: Request.add_data() does not exist on Python 3; set
            # the request body directly (works on both Python 2 and 3) and
            # encode it since urlopen requires bytes on Python 3.
            request_info.data = action.encode()
            request = urlrequest.urlopen(request_info)
            request.close()
            return True
        except KeystoneFail as e:
            LOG.error("Keystone authentication failed:{} ".format(e))
            return False
        except HTTPError as e:
            LOG.error("%s, %s" % (e.code, e.read()))
            if e.code == 401:
                admin_token.set_expired()
            return False
        except URLError as e:
            LOG.error(e)
            return False

    def is_unlocked(self):
        """Return True when the host is administratively unlocked."""
        return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED)

    def is_locked(self):
        """Return True when the host is administratively locked."""
        return(not self.is_unlocked())

    def is_enabled(self):
        """Return True when the host is unlocked and enabled."""
        return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED and
               self.operational_state == HOST_OPERATIONAL_STATE_ENABLED)

    def is_controller_enabled_provisioned(self):
        """Return True for an unlocked/enabled/provisioned controller."""
        return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED and
               self.operational_state == HOST_OPERATIONAL_STATE_ENABLED and
               self.personality == HOST_PERSONALITY_CONTROLLER and
               self.invprovision == HOST_PROVISIONED)

    def is_disabled(self):
        """Return True when the host is not enabled."""
        return(not self.is_enabled())

    def support_power_off(self):
        """Return True when board management (power control) is configured."""
        return(HOST_BM_TYPE_NOT_SET != self.bm_type)

    def is_powered_off(self):
        """Return True when the host is locked, disabled and powered off."""
        for status in self.avail_status:
            if status == HOST_AVAIL_STATE_POWERED_OFF:
                return(self.admin_state == HOST_ADMIN_STATE_LOCKED and
                       self.operational_state ==
                       HOST_OPERATIONAL_STATE_DISABLED)
        return False

    def is_powered_on(self):
        """Return True when the host is not powered off."""
        return not self.is_powered_off()

    def refresh_data(self, admin_token, region_name):
        """ Ask the System Inventory for an update view of the host """
        host_data = self.__host_update__(admin_token, region_name)
        self.__host_set_state__(host_data)

    def lock(self, admin_token, region_name):
        """ Asks the Platform to perform a lock against a host """
        if self.is_unlocked():
            action = json.dumps([{"path": "/action",
                                  "value": "lock", "op": "replace"}])
            return self.__host_action__(admin_token, action, region_name)
        return True

    def force_lock(self, admin_token, region_name):
        """ Asks the Platform to perform a force lock against a host """
        if self.is_unlocked():
            action = json.dumps([{"path": "/action",
                                  "value": "force-lock", "op": "replace"}])
            return self.__host_action__(admin_token, action, region_name)
        return True

    def unlock(self, admin_token, region_name):
        """ Asks the Platform to perform an ulock against a host """
        if self.is_locked():
            action = json.dumps([{"path": "/action",
                                  "value": "unlock", "op": "replace"}])
            return self.__host_action__(admin_token, action, region_name)
        return True

    def power_off(self, admin_token, region_name):
        """ Asks the Platform to perform a power-off against a host """
        if self.is_powered_on():
            action = json.dumps([{"path": "/action",
                                  "value": "power-off", "op": "replace"}])
            return self.__host_action__(admin_token, action, region_name)
        return True

    def power_on(self, admin_token, region_name):
        """ Asks the Platform to perform a power-on against a host """
        if self.is_powered_off():
            action = json.dumps([{"path": "/action",
                                  "value": "power-on", "op": "replace"}])
            return self.__host_action__(admin_token, action, region_name)
        return True
def get_hosts(admin_token, region_name, personality=None,
              exclude_hostnames=None):
    """Ask System Inventory for the list of hosts.

    :param admin_token: keystone admin token
    :param region_name: region to query
    :param personality: optional personality constant to filter on
    :param exclude_hostnames: hostnames to leave out of the result
    :returns: list of Host objects ([] on any request failure)
    """
    if exclude_hostnames is None:
        exclude_hostnames = []
    try:
        url = admin_token.get_service_admin_url("platform", "sysinv",
                                                region_name)
        url += "/ihosts/"
        req = urlrequest.Request(url)
        req.add_header("X-Auth-Token", admin_token.get_id())
        req.add_header("Accept", "application/json")
        handle = urlrequest.urlopen(req)
        response = json.loads(handle.read())
        handle.close()

        selected = []
        for host_data in response['ihosts']:
            if host_data['hostname'] in exclude_hostnames:
                continue
            if personality is None:
                selected.append(Host(host_data['hostname'], host_data))
                continue
            # Only keep hosts whose personality matches the requested one
            wanted = (
                (host_data['personality'] == "controller" and
                 personality == HOST_PERSONALITY_CONTROLLER) or
                (host_data['personality'] == "worker" and
                 personality == HOST_PERSONALITY_WORKER) or
                (host_data['personality'] == "storage" and
                 personality == HOST_PERSONALITY_STORAGE))
            if wanted:
                selected.append(Host(host_data['hostname'], host_data))
        return selected
    except KeystoneFail as exc:
        LOG.error("Keystone authentication failed:{} ".format(exc))
        return []
    except HTTPError as exc:
        LOG.error("%s, %s" % (exc.code, exc.read()))
        if exc.code == 401:
            admin_token.set_expired()
        return []
    except URLError as exc:
        LOG.error(exc)
        return []
def dict_to_patch(values, install_action=False):
    """Convert a dict of field values into a sysinv JSON-patch list.

    :param values: mapping of field name to new value
    :param install_action: when True, also include the default
        'action': 'install' entry in the patch
    :returns: list of {'op': 'replace', 'path': '/<key>', 'value': <value>}
    """
    # Work on a copy so the caller's dict is not mutated (the original
    # implementation injected 'action' into the caller's dict).
    fields = dict(values)
    if install_action:
        # install default action
        fields['action'] = 'install'
    return [{'op': 'replace', 'path': '/' + key, 'value': value}
            for key, value in fields.items()]
def get_shared_services():
    """Return the system's 'shared_services' capability string.

    Returns "" when no system record exists.
    """
    shared = ""
    try:
        with openstack.OpenStack() as client:
            system_list = client.sysinv.isystem.list()
            if system_list:
                capabilities = system_list[0].capabilities
                shared = capabilities.get("shared_services", "")
    except Exception as exc:
        LOG.exception("failed to get shared services")
        raise exc
    return shared
def get_alarms():
    """Return the list of all active alarms."""
    try:
        with openstack.OpenStack() as client:
            return client.sysinv.ialarm.list()
    except Exception as exc:
        LOG.exception("failed to get alarms")
        raise exc
def controller_enabled_provisioned(hostname):
    """Return True when the named controller is unlocked, enabled and
    provisioned; False otherwise.
    """
    try:
        with openstack.OpenStack() as client:
            region = client.conf['region_name']
            for host in get_hosts(client.admin_token, region):
                if (hostname == host.name and
                        host.is_controller_enabled_provisioned()):
                    LOG.info("host %s is enabled/provisioned" % host.name)
                    return True
    except Exception as exc:
        LOG.exception("failed to check if host is enabled/provisioned")
        raise exc
    return False
def get_system_uuid():
    """Return the UUID of the system record, or "" when none exists."""
    system_uuid = ""
    try:
        with openstack.OpenStack() as client:
            system_list = client.sysinv.isystem.list()
            if system_list:
                system_uuid = system_list[0].uuid
    except Exception as exc:
        LOG.exception("failed to get system uuid")
        raise exc
    return system_uuid
def get_oam_ip():
    """Return the first OAM network record, or None when absent."""
    try:
        with openstack.OpenStack() as client:
            oam_records = client.sysinv.iextoam.list()
            if oam_records:
                return oam_records[0]
    except Exception as exc:
        LOG.exception("failed to get OAM IP")
        raise exc
    return None
def get_mac_addresses(hostname):
    """Return {port_name: mac} for the ethernet ports of the given host.

    Returns {} when the host is not found.
    """
    mac_map = {}
    try:
        with openstack.OpenStack() as client:
            region = client.conf['region_name']
            for host in get_hosts(client.admin_token, region):
                if hostname == host.name:
                    ports = client.sysinv.ethernet_port.list(host.uuid)
                    mac_map = {port.name: port.mac for port in ports}
    except Exception as exc:
        LOG.exception("failed to get MAC addresses")
        raise exc
    return mac_map
def get_disk_serial_ids(hostname):
    """Return {device_node: serial_id} for the disks of the given host.

    Returns {} when the host is not found.
    """
    serial_ids = {}
    try:
        with openstack.OpenStack() as client:
            region = client.conf['region_name']
            for host in get_hosts(client.admin_token, region):
                if hostname == host.name:
                    disks = client.sysinv.idisk.list(host.uuid)
                    serial_ids = {disk.device_node: disk.serial_id
                                  for disk in disks}
    except Exception as exc:
        LOG.exception("failed to get disks")
        raise exc
    return serial_ids
def update_clone_system(descr, hostname):
    """Update system parameters after a clone installation.

    Renames the system, sets its description, and clears the location and
    serial id of the named host.

    :returns: False when no system record exists, True otherwise
    """
    try:
        with openstack.OpenStack() as client:
            system_list = client.sysinv.isystem.list()
            if not system_list:
                return False
            system_values = {
                'name': "Cloned_system",
                'description': descr
            }
            system_patch = dict_to_patch(system_values)
            LOG.info("Updating system: {} [{}]".format(system_list[0].name,
                                                       system_patch))
            client.sysinv.isystem.update(system_list[0].uuid, system_patch)

            region = client.conf['region_name']
            for host in get_hosts(client.admin_token, region):
                if hostname == host.name:
                    # The cloned host no longer matches the donor hardware
                    host_values = {
                        'location': {},
                        'serialid': ""
                    }
                    host_patch = dict_to_patch(host_values)
                    client.sysinv.ihost.update(host.uuid, host_patch)
                    LOG.info("Updating host: {} [{}]".format(host, host_patch))
    except Exception as exc:
        LOG.exception("failed to update system parameters")
        raise exc
    return True
def get_config_status(hostname):
    """Return the config_status of the named host, or None if not found."""
    try:
        with openstack.OpenStack() as client:
            region = client.conf['region_name']
            for host in get_hosts(client.admin_token, region):
                if hostname == host.name:
                    return host.config_status
    except Exception as exc:
        LOG.exception("failed to get config status")
        raise exc
    return None
def get_host_data(hostname):
    """Return the Host object for the named host, or None if not found."""
    try:
        with openstack.OpenStack() as client:
            region = client.conf['region_name']
            for host in get_hosts(client.admin_token, region):
                if hostname == host.name:
                    return host
    except Exception as exc:
        LOG.exception("failed to get host data")
        raise exc
    return None
def do_worker_config_complete(hostname):
    """Enable worker functionality on the named host.

    Triggers creation/application of the worker manifests via sysinv.
    """
    try:
        with openstack.OpenStack() as client:
            region = client.conf['region_name']
            for host in get_hosts(client.admin_token, region):
                if hostname == host.name:
                    # Create/apply worker manifests
                    action_values = {
                        'action': "subfunction_config"
                    }
                    action_patch = dict_to_patch(action_values)
                    LOG.info("Applying worker manifests: {} [{}]"
                             .format(host, action_patch))
                    client.sysinv.ihost.update(host.uuid, action_patch)
    except Exception as exc:
        LOG.exception("worker_config_complete failed")
        raise exc
def get_storage_backend_services():
    """Return {backend_name: services} for all configured storage backends."""
    backend_services = {}
    try:
        with openstack.OpenStack() as client:
            for backend in client.sysinv.storage_backend.list():
                backend_services[backend.backend] = backend.services
    except Exception as exc:
        LOG.exception("failed to get storage backend services")
        raise exc
    return backend_services

View File

@ -1,499 +0,0 @@
"""
Copyright (c) 2015-2019 Wind River Systems, Inc.
SPDX-License-Identifier: Apache-2.0
"""
from __future__ import print_function
from six.moves import configparser
import os
import readline
import sys
import textwrap
from controllerconfig.common import constants
from controllerconfig.common import log
from controllerconfig.common.exceptions import BackupFail
from controllerconfig.common.exceptions import RestoreFail
from controllerconfig.common.exceptions import UserQuit
from controllerconfig.common.exceptions import CloneFail
from controllerconfig import lag_mode_to_str
from controllerconfig import Network
from controllerconfig import validate
from controllerconfig import ConfigFail
from controllerconfig import DEFAULT_CONFIG
from controllerconfig import REGION_CONFIG
from controllerconfig import SUBCLOUD_CONFIG
from controllerconfig import MGMT_TYPE
from controllerconfig import HP_NAMES
from controllerconfig import DEFAULT_NAMES
from controllerconfig.configassistant import ConfigAssistant
from controllerconfig import backup_restore
from controllerconfig import utils
from controllerconfig import clone
# Temporary file for building cgcs_config
TEMP_CGCS_CONFIG_FILE = "/tmp/cgcs_config"

# Module-level logger for this module
LOG = log.get_logger(__name__)
def parse_system_config(config_file):
    """Parse the system config file.

    :param config_file: path to the system configuration file
    :returns: populated RawConfigParser instance
    :raises ConfigFail: if the file cannot be read or parsed
    """
    system_config = configparser.RawConfigParser()
    try:
        # read() silently returns an empty list of parsed files when the
        # file is missing or unreadable; treat that as a failure so the
        # caller gets a clear error instead of an empty configuration.
        parsed = system_config.read(config_file)
        if not parsed:
            raise ConfigFail("Error parsing system config file")
    except ConfigFail:
        raise
    except Exception as e:
        LOG.exception(e)
        raise ConfigFail("Error parsing system config file")

    # Dump configuration for debugging
    # for section in config.sections():
    #     print "Section: %s" % section
    #     for (name, value) in config.items(section):
    #         print "name: %s, value: %s" % (name, value)
    return system_config
def configure_management_interface(region_config, config_type=REGION_CONFIG):
    """Bring up management interface

    Parses the management network settings from region_config, rewrites
    the host's interface configuration files and restarts networking.

    :param region_config: parsed region configuration (configparser)
    :param config_type: REGION_CONFIG (default) or SUBCLOUD_CONFIG
    :raises ConfigFail: on parse or interface configuration failure
    """
    mgmt_network = Network()
    # An HP/CLM-style config file is identified by its CLM_NETWORK
    # section and uses the alternate section/option naming scheme.
    if region_config.has_section('CLM_NETWORK'):
        naming_type = HP_NAMES
    else:
        naming_type = DEFAULT_NAMES

    # Subcloud configs are validated against a smaller minimum address
    # pool than standard region configs (5 vs 8 addresses).
    if config_type == SUBCLOUD_CONFIG:
        min_addresses = 5
    else:
        min_addresses = 8
    try:
        mgmt_network.parse_config(region_config, config_type, MGMT_TYPE,
                                  min_addresses=min_addresses,
                                  naming_type=naming_type)
    except ConfigFail:
        raise
    except Exception as e:
        LOG.exception("Error parsing configuration file")
        raise ConfigFail("Error parsing configuration file: %s" % e)

    try:
        # Remove interface config files currently installed
        utils.remove_interface_config_files()

        # Create the management interface configuration files.
        # Code based on ConfigAssistant._write_interface_config_management
        parameters = utils.get_interface_config_static(
            mgmt_network.start_address,
            mgmt_network.cidr,
            mgmt_network.gateway_address)

        # LAG interfaces are always materialized as 'bond0'; otherwise
        # the first (only) port of the logical interface is used.
        if mgmt_network.logical_interface.lag_interface:
            management_interface = 'bond0'
        else:
            management_interface = mgmt_network.logical_interface.ports[0]

        if mgmt_network.vlan:
            # VLAN sits on top of the physical/bond interface; the
            # static address parameters go on the VLAN interface.
            management_interface_name = "%s.%s" % (management_interface,
                                                   mgmt_network.vlan)
            utils.write_interface_config_vlan(
                management_interface_name,
                mgmt_network.logical_interface.mtu,
                parameters)

            # underlying interface has no additional parameters
            parameters = None
        else:
            management_interface_name = management_interface

        if mgmt_network.logical_interface.lag_interface:
            utils.write_interface_config_bond(
                management_interface,
                mgmt_network.logical_interface.mtu,
                lag_mode_to_str(mgmt_network.logical_interface.lag_mode),
                None,
                constants.LAG_MIIMON_FREQUENCY,
                mgmt_network.logical_interface.ports[0],
                mgmt_network.logical_interface.ports[1],
                parameters)
        else:
            utils.write_interface_config_ethernet(
                management_interface,
                mgmt_network.logical_interface.mtu,
                parameters)

        # Restart networking with the new management interface configuration
        utils.restart_networking()

        # Send a GARP for floating address. Doing this to help in
        # cases where we are re-installing in a lab and another node
        # previously held the floating address.
        # NOTE(review): GARP is only sent for IPv4 management subnets.
        if mgmt_network.cidr.version == 4:
            utils.send_interface_garp(management_interface_name,
                                      mgmt_network.start_address)
    except Exception:
        LOG.exception("Failed to configure management interface")
        raise ConfigFail("Failed to configure management interface")
def create_cgcs_config_file(output_file, system_config,
                            services, endpoints, domains,
                            config_type=REGION_CONFIG, validate_only=False):
    """
    Create cgcs_config file or just perform validation of the system_config if
    validate_only=True.
    :param output_file: filename of output cgcs_config file
    :param system_config: system configuration
    :param services: keystone services (not used if validate_only)
    :param endpoints: keystone endpoints (not used if validate_only)
    :param domains: keystone domains (not used if validate_only)
    :param config_type: specify region, subcloud or standard config
    :param validate_only: used to validate the input system_config
    :return:
    """
    # NOTE(review): 'domains' is accepted but never referenced in this
    # function body.
    cgcs_config = None
    if not validate_only:
        cgcs_config = configparser.RawConfigParser()
        # Preserve option-name case when writing the output file.
        cgcs_config.optionxform = str

    # general error checking, if not validate_only cgcs config data is returned
    validate(system_config, config_type, cgcs_config)

    # Region configuration: services, endpoints and domain
    if config_type in [REGION_CONFIG, SUBCLOUD_CONFIG] and not validate_only:
        # The services and endpoints are not available in the validation phase
        region_1_name = system_config.get('SHARED_SERVICES', 'REGION_NAME')
        keystone_service_name = system_config.get('SHARED_SERVICES',
                                                  'KEYSTONE_SERVICE_NAME')
        keystone_service_type = system_config.get('SHARED_SERVICES',
                                                  'KEYSTONE_SERVICE_TYPE')
        # Resolve the keystone endpoints of the primary region from the
        # supplied services/endpoints collections.
        keystone_service_id = services.get_service_id(keystone_service_name,
                                                      keystone_service_type)
        keystone_admin_url = endpoints.get_service_url(region_1_name,
                                                       keystone_service_id,
                                                       "admin")
        keystone_internal_url = endpoints.get_service_url(region_1_name,
                                                          keystone_service_id,
                                                          "internal")
        keystone_public_url = endpoints.get_service_url(region_1_name,
                                                        keystone_service_id,
                                                        "public")

        # AUTH uses the internal endpoint; IDENTITY and ADMIN both use
        # the admin endpoint.
        cgcs_config.set('cREGION', 'KEYSTONE_AUTH_URI', keystone_internal_url)
        cgcs_config.set('cREGION', 'KEYSTONE_IDENTITY_URI', keystone_admin_url)
        cgcs_config.set('cREGION', 'KEYSTONE_ADMIN_URI', keystone_admin_url)
        cgcs_config.set('cREGION', 'KEYSTONE_INTERNAL_URI',
                        keystone_internal_url)
        cgcs_config.set('cREGION', 'KEYSTONE_PUBLIC_URI', keystone_public_url)

        # if ldap is a shared service
        if (system_config.has_option('SHARED_SERVICES', 'LDAP_SERVICE_URL')):
            ldap_service_url = system_config.get('SHARED_SERVICES',
                                                 'LDAP_SERVICE_URL')
            cgcs_config.set('cREGION', 'LDAP_SERVICE_URI', ldap_service_url)
            cgcs_config.set('cREGION', 'LDAP_SERVICE_NAME', 'open-ldap')
            cgcs_config.set('cREGION', 'LDAP_REGION_NAME', region_1_name)

        # If primary region is non-TiC and keystone entries already created,
        # the flag will tell puppet not to create them.
        if (system_config.has_option('REGION_2_SERVICES', 'CREATE') and
                system_config.get('REGION_2_SERVICES', 'CREATE') == 'Y'):
            cgcs_config.set('cREGION', 'REGION_SERVICES_CREATE', 'True')

    # System Timezone configuration
    # NOTE(review): this only validates that the zoneinfo file exists;
    # the timezone value is not written into cgcs_config here.
    if system_config.has_option('SYSTEM', 'TIMEZONE'):
        timezone = system_config.get('SYSTEM', 'TIMEZONE')
        if not os.path.isfile("/usr/share/zoneinfo/%s" % timezone):
            raise ConfigFail(
                "Timezone file %s does not exist" % timezone)

    # Dump results for debugging
    # for section in cgcs_config.sections():
    #     print "[%s]" % section
    #     for (name, value) in cgcs_config.items(section):
    #         print "%s=%s" % (name, value)

    if not validate_only:
        # Write config file
        with open(output_file, 'w') as config_file:
            cgcs_config.write(config_file)
def configure_system(config_file):
    """Parse, validate and stage the system configuration.

    Reads config_file, validates it, then writes the intermediate
    cgcs_config apply file to TEMP_CGCS_CONFIG_FILE.

    :param config_file: path to the system configuration file
    :raises ConfigFail: if the configuration cannot be parsed
    """
    def _parse_failure(err):
        # Shared error wrapper for both validation and creation phases.
        raise ConfigFail("Error parsing configuration file %s: %s" %
                         (config_file, err))

    # Phase 1: parse the raw system config file.
    print("Parsing system configuration file... ", end=' ')
    system_config = parse_system_config(config_file)
    print("DONE")

    # Phase 2: validation pass only (no output written).
    print("Validating system configuration file... ", end=' ')
    try:
        create_cgcs_config_file(None, system_config, None, None, None,
                                DEFAULT_CONFIG, validate_only=True)
    except configparser.Error as err:
        _parse_failure(err)
    print("DONE")

    # Phase 3: write the temporary cgcs_config apply file.
    print("Creating config apply file... ", end=' ')
    try:
        create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, system_config,
                                None, None, None, DEFAULT_CONFIG)
    except configparser.Error as err:
        _parse_failure(err)
    print("DONE")
def show_help():
    """Print usage text for the customer-visible command-line options."""
    usage = (
        "Usage: %s\n"
        "--backup <name> Backup configuration using the given "
        "name\n"
        "--clone-iso <name> Clone and create an image with "
        "the given file name\n"
        "--clone-status Status of the last installation of "
        "cloned image\n"
        "--restore-system "
        "<include-storage-reinstall | exclude-storage-reinstall> "
        "<name>\n"
        " Restore system configuration from backup "
        "file with\n"
        " the given name, full path required\n"
    )
    print(usage % sys.argv[0])
def show_help_lab_only():
    """Print usage text for the lab-only command-line options."""
    usage = (
        "Usage: %s\n"
        "Perform initial configuration\n"
        "\nThe following options are for lab use only:\n"
        "--answerfile <file> Apply the configuration from the specified "
        "file without\n"
        " any validation or user interaction\n"
        "--default Apply default configuration with no NTP or "
        "DNS server\n"
        " configuration (suitable for testing in a "
        "virtual\n"
        " environment)\n"
        "--archive-dir <dir> Directory to store the archive in\n"
        "--provision Provision initial system data only\n"
    )
    print(usage % sys.argv[0])
def no_complete(text, state):
    """Readline completer that never offers any completions.

    Installed via readline.set_completer() so the default completer
    (which suggests python identifiers) is suppressed.
    """
    return None
def main():
    """Entry point for the controller_config command-line tool.

    Scans sys.argv by hand (no argparse), dispatches to the backup,
    restore, clone or provisioning flows, and restores the kernel
    printk console log level before exiting.
    """
    options = {}
    answerfile = None
    backup_name = None
    archive_dir = constants.BACKUPS_PATH
    do_default_config = False
    do_backup = False
    do_system_restore = False
    include_storage_reinstall = False
    do_clone = False
    do_non_interactive = False
    do_provision = False
    system_config_file = "/home/sysadmin/system_config"
    allow_ssh = False

    # Disable completion as the default completer shows python commands
    readline.set_completer(no_complete)

    # remove any previous config fail flag file
    if os.path.exists(constants.CONFIG_FAIL_FILE) is True:
        os.remove(constants.CONFIG_FAIL_FILE)

    # CGCS_LABMODE in the environment enables lab-only behaviour.
    if os.environ.get('CGCS_LABMODE'):
        options['labmode'] = True

    # Hand-rolled argument scan: 'arg' indexes sys.argv and is advanced
    # manually so options can consume a following value argument.
    arg = 1
    while arg < len(sys.argv):
        if sys.argv[arg] == "--answerfile":
            arg += 1
            if arg < len(sys.argv):
                answerfile = sys.argv[arg]
            else:
                print("--answerfile option requires a file to be specified")
                exit(1)
        elif sys.argv[arg] == "--backup":
            arg += 1
            if arg < len(sys.argv):
                backup_name = sys.argv[arg]
            else:
                print("--backup requires the name of the backup")
                exit(1)
            do_backup = True
        elif sys.argv[arg] == "--restore-system":
            # Optional first value selects whether storage hosts are
            # reinstalled; the (next) value is the backup filename.
            arg += 1
            if arg < len(sys.argv):
                if sys.argv[arg] in ["include-storage-reinstall",
                                     "exclude-storage-reinstall"]:
                    if sys.argv[arg] == "include-storage-reinstall":
                        include_storage_reinstall = True
                    arg += 1
                    if arg < len(sys.argv):
                        backup_name = sys.argv[arg]
                    else:
                        print(textwrap.fill(
                            "--restore-system requires the filename "
                            " of the backup", 80))
                        exit(1)
                else:
                    backup_name = sys.argv[arg]
            else:
                print(textwrap.fill(
                    "--restore-system requires the filename "
                    "of the backup", 80))
                exit(1)
            do_system_restore = True
        elif sys.argv[arg] == "--archive-dir":
            arg += 1
            if arg < len(sys.argv):
                archive_dir = sys.argv[arg]
            else:
                print("--archive-dir requires a directory")
                exit(1)
        elif sys.argv[arg] == "--clone-iso":
            arg += 1
            if arg < len(sys.argv):
                backup_name = sys.argv[arg]
            else:
                print("--clone-iso requires the name of the image")
                exit(1)
            do_clone = True
        elif sys.argv[arg] == "--clone-status":
            # Immediate action: report clone status and exit.
            clone.clone_status()
            exit(0)
        elif sys.argv[arg] == "--default":
            do_default_config = True
        elif sys.argv[arg] == "--config-file":
            arg += 1
            if arg < len(sys.argv):
                system_config_file = sys.argv[arg]
            else:
                print("--config-file requires the filename of the config file")
                exit(1)
            do_non_interactive = True
        elif sys.argv[arg] in ["--help", "-h", "-?"]:
            show_help()
            exit(1)
        elif sys.argv[arg] == "--labhelp":
            show_help_lab_only()
            exit(1)
        elif sys.argv[arg] == "--provision":
            do_provision = True
        elif sys.argv[arg] == "--allow-ssh":
            allow_ssh = True
        elif sys.argv[arg] == "--kubernetes":
            # This is a temporary flag for use during development. Once things
            # are stable, we will remove it and make kubernetes the default.
            options['kubernetes'] = True
        else:
            print("Invalid option. Use --help for more information.")
            exit(1)
        arg += 1

    # The five primary modes are mutually exclusive.
    if [do_backup,
            do_system_restore,
            do_clone,
            do_default_config,
            do_non_interactive].count(True) > 1:
        print("Invalid combination of options selected")
        exit(1)

    # --answerfile cannot be combined with any of the primary modes.
    if answerfile and [do_backup,
                       do_system_restore,
                       do_clone,
                       do_default_config,
                       do_non_interactive].count(True) > 0:
        print("The --answerfile option cannot be used with the selected "
              "option")
        exit(1)

    log.configure()

    if not do_backup and not do_clone:
        # Check if that the command is being run from the console
        if utils.is_ssh_parent():
            if allow_ssh:
                print(textwrap.fill(constants.SSH_WARNING_MESSAGE, 80))
                print('')
            else:
                print(textwrap.fill(constants.SSH_ERROR_MESSAGE, 80))
                exit(1)

    # Reduce the printk console log level to avoid noise during configuration
    printk_levels = ''
    with open('/proc/sys/kernel/printk', 'r') as f:
        printk_levels = f.readline()

    # Only the first field (console loglevel) is lowered to 3.
    temp_printk_levels = '3' + printk_levels[1:]
    with open('/proc/sys/kernel/printk', 'w') as f:
        f.write(temp_printk_levels)

    try:
        if do_backup:
            backup_restore.backup(backup_name, archive_dir)
            print("\nBackup complete")
        elif do_system_restore:
            backup_restore.restore_system(backup_name,
                                          include_storage_reinstall)
            print("\nSystem restore complete")
        elif do_clone:
            clone.clone(backup_name, archive_dir)
            print("\nCloning complete")
        elif do_provision:
            assistant = ConfigAssistant(**options)
            assistant.provision(answerfile)
        else:
            print(textwrap.fill(
                "Please use bootstrap playbook to configure the "
                "first controller.", 80))
            exit(1)

        # NOTE(review): because the 'else' branch above exits, this
        # non-interactive block is only reached when --provision was
        # also given alongside --config-file.
        if do_non_interactive:
            if not os.path.isfile(system_config_file):
                raise ConfigFail("Config file %s does not exist." %
                                 system_config_file)
            if (os.path.exists(constants.CGCS_CONFIG_FILE) or
                    os.path.exists(constants.CONFIG_PERMDIR) or
                    os.path.exists(
                        constants.INITIAL_CONFIG_COMPLETE_FILE)):
                raise ConfigFail("Configuration has already been done "
                                 "and cannot be repeated.")
            configure_system(system_config_file)
            answerfile = TEMP_CGCS_CONFIG_FILE
            assistant = ConfigAssistant(**options)
            assistant.configure(answerfile, do_default_config)
            print("\nConfiguration was applied\n")
            print(textwrap.fill(
                "Please complete any out of service commissioning steps "
                "with system commands and unlock controller to proceed.", 80))
            assistant.check_required_interfaces_status()

    except KeyboardInterrupt:
        print("\nAborting configuration")
    except BackupFail as e:
        print("\nBackup failed: {}".format(e))
    except RestoreFail as e:
        print("\nRestore failed: {}".format(e))
    except ConfigFail as e:
        print("\nConfiguration failed: {}".format(e))
    except CloneFail as e:
        print("\nCloning failed: {}".format(e))
    except UserQuit:
        print("\nAborted configuration")
    finally:
        # Always clean up the temporary apply file.
        if os.path.isfile(TEMP_CGCS_CONFIG_FILE):
            os.remove(TEMP_CGCS_CONFIG_FILE)

    # Restore the printk console log level
    with open('/proc/sys/kernel/printk', 'w') as f:
        f.write(printk_levels)

View File

@ -1,78 +0,0 @@
[SYSTEM]
SYSTEM_MODE=duplex
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0
[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1
[LOGICAL_INTERFACE_3]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth2
[MGMT_NETWORK]
VLAN=121
IP_START_ADDRESS=192.168.204.102
IP_END_ADDRESS=192.168.204.199
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
;GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
DYNAMIC_ALLOCATION=N
[OAM_NETWORK]
;VLAN=
IP_START_ADDRESS=10.10.10.2
IP_END_ADDRESS=10.10.10.99
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
[REGION2_PXEBOOT_NETWORK]
PXEBOOT_CIDR=192.168.203.0/24
[SHARED_SERVICES]
REGION_NAME=RegionOne
ADMIN_PROJECT_NAME=admin
ADMIN_USER_NAME=admin
ADMIN_USER_DOMAIN=admin_domain
ADMIN_PROJECT_DOMAIN=admin_domain
ADMIN_PASSWORD=Li69nux*
KEYSTONE_ADMINURL=http://192.168.204.12:35357/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
SERVICE_PROJECT_NAME=FULL_TEST
[REGION_2_SERVICES]
REGION_NAME=RegionTwo
USER_DOMAIN_NAME=service_domain
PROJECT_DOMAIN_NAME=service_domain
SYSINV_USER_NAME=sysinvTWO
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
PATCHING_USER_NAME=patchingTWO
PATCHING_PASSWORD=password2WO*
PATCHING_SERVICE_NAME=patching
PATCHING_SERVICE_TYPE=patching
NFV_USER_NAME=vimTWO
NFV_PASSWORD=password2WO*
MTCE_USER_NAME=mtceTWO
MTCE_PASSWORD=password2WO*
FM_USER_NAME=fmTWO
FM_PASSWORD=password2WO*
BARBICAN_USER_NAME=barbican
BARBICAN_PASSWORD=barbican2WO*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,78 +0,0 @@
[cSYSTEM]
TIMEZONE = UTC
SYSTEM_MODE = duplex
[cPXEBOOT]
PXEBOOT_SUBNET = 192.168.203.0/24
CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2
CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3
CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller
[cMGMT]
MANAGEMENT_MTU = 1500
MANAGEMENT_SUBNET = 192.168.204.0/24
LAG_MANAGEMENT_INTERFACE = no
MANAGEMENT_INTERFACE = eth0
MANAGEMENT_VLAN = 121
MANAGEMENT_INTERFACE_NAME = eth0.121
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
CONTROLLER_0_ADDRESS = 192.168.204.103
CONTROLLER_1_ADDRESS = 192.168.204.104
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
CONTROLLER_FLOATING_HOSTNAME = controller
CONTROLLER_HOSTNAME_PREFIX = controller-
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
DYNAMIC_ADDRESS_ALLOCATION = no
MANAGEMENT_START_ADDRESS = 192.168.204.102
MANAGEMENT_END_ADDRESS = 192.168.204.199
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28
[cEXT_OAM]
EXTERNAL_OAM_MTU = 1500
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
LAG_EXTERNAL_OAM_INTERFACE = no
EXTERNAL_OAM_INTERFACE = eth1
EXTERNAL_OAM_INTERFACE_NAME = eth1
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4
[cREGION]
REGION_CONFIG = True
REGION_1_NAME = RegionOne
REGION_2_NAME = RegionTwo
ADMIN_USER_NAME = admin
ADMIN_USER_DOMAIN = admin_domain
ADMIN_PROJECT_NAME = admin
ADMIN_PROJECT_DOMAIN = admin_domain
SERVICE_PROJECT_NAME = FULL_TEST
KEYSTONE_SERVICE_NAME = keystone
KEYSTONE_SERVICE_TYPE = identity
PATCHING_USER_NAME = patchingTWO
PATCHING_PASSWORD = password2WO*
SYSINV_USER_NAME = sysinvTWO
SYSINV_PASSWORD = password2WO*
SYSINV_SERVICE_NAME = sysinv
SYSINV_SERVICE_TYPE = platform
NFV_USER_NAME = vimTWO
NFV_PASSWORD = password2WO*
MTCE_USER_NAME = mtceTWO
MTCE_PASSWORD = password2WO*
FM_USER_NAME = fmTWO
FM_PASSWORD = password2WO*
BARBICAN_USER_NAME = barbican
BARBICAN_PASSWORD = barbican2WO*
USER_DOMAIN_NAME = service_domain
PROJECT_DOMAIN_NAME = service_domain
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
[cAUTHENTICATION]
ADMIN_PASSWORD = Li69nux*

View File

@ -1,77 +0,0 @@
[SYSTEM]
SYSTEM_MODE = duplex
[STORAGE]
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0
[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1
[LOGICAL_INTERFACE_3]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth2
[MGMT_NETWORK]
VLAN=121
IP_START_ADDRESS=192.168.204.102
IP_END_ADDRESS=192.168.204.199
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
;GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
DYNAMIC_ALLOCATION=N
[OAM_NETWORK]
;VLAN=
IP_START_ADDRESS=10.10.10.2
IP_END_ADDRESS=10.10.10.99
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
[REGION2_PXEBOOT_NETWORK]
PXEBOOT_CIDR=192.168.203.0/24
[SHARED_SERVICES]
REGION_NAME=RegionOne
ADMIN_PROJECT_NAME=admin
ADMIN_USER_NAME=admin
ADMIN_PASSWORD=Li69nux*
KEYSTONE_ADMINURL=http://192.168.204.12:35357/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
SERVICE_PROJECT_NAME=FULL_TEST
LDAP_SERVICE_URL=ldap://192.168.204.12:389
[REGION_2_SERVICES]
REGION_NAME=RegionTwo
SYSINV_USER_NAME=sysinvTWO
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
PATCHING_USER_NAME=patchingTWO
PATCHING_PASSWORD=password2WO*
PATCHING_SERVICE_NAME=patching
PATCHING_SERVICE_TYPE=patching
NFV_USER_NAME=vimTWO
NFV_PASSWORD=password2WO*
MTCE_USER_NAME=mtceTWO
MTCE_PASSWORD=password2WO*
FM_USER_NAME=fmTWO
FM_PASSWORD=password2WO*
BARBICAN_USER_NAME=barbican
BARBICAN_PASSWORD=barbican2WO*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,81 +0,0 @@
[cSYSTEM]
TIMEZONE = UTC
SYSTEM_MODE = duplex
[cPXEBOOT]
PXEBOOT_SUBNET = 192.168.203.0/24
CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2
CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3
CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller
[cMGMT]
MANAGEMENT_MTU = 1500
MANAGEMENT_SUBNET = 192.168.204.0/24
LAG_MANAGEMENT_INTERFACE = no
MANAGEMENT_INTERFACE = eth0
MANAGEMENT_VLAN = 121
MANAGEMENT_INTERFACE_NAME = eth0.121
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
CONTROLLER_0_ADDRESS = 192.168.204.103
CONTROLLER_1_ADDRESS = 192.168.204.104
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
CONTROLLER_FLOATING_HOSTNAME = controller
CONTROLLER_HOSTNAME_PREFIX = controller-
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
DYNAMIC_ADDRESS_ALLOCATION = no
MANAGEMENT_START_ADDRESS = 192.168.204.102
MANAGEMENT_END_ADDRESS = 192.168.204.199
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28
[cEXT_OAM]
EXTERNAL_OAM_MTU = 1500
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
LAG_EXTERNAL_OAM_INTERFACE = no
EXTERNAL_OAM_INTERFACE = eth1
EXTERNAL_OAM_INTERFACE_NAME = eth1
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4
[cREGION]
REGION_CONFIG = True
REGION_1_NAME = RegionOne
REGION_2_NAME = RegionTwo
ADMIN_USER_NAME = admin
ADMIN_USER_DOMAIN = Default
ADMIN_PROJECT_NAME = admin
ADMIN_PROJECT_DOMAIN = Default
SERVICE_PROJECT_NAME = FULL_TEST
KEYSTONE_SERVICE_NAME = keystone
KEYSTONE_SERVICE_TYPE = identity
PATCHING_USER_NAME = patchingTWO
PATCHING_PASSWORD = password2WO*
SYSINV_USER_NAME = sysinvTWO
SYSINV_PASSWORD = password2WO*
SYSINV_SERVICE_NAME = sysinv
SYSINV_SERVICE_TYPE = platform
NFV_USER_NAME = vimTWO
NFV_PASSWORD = password2WO*
MTCE_USER_NAME = mtceTWO
MTCE_PASSWORD = password2WO*
FM_USER_NAME = fmTWO
FM_PASSWORD = password2WO*
BARBICAN_USER_NAME = barbican
BARBICAN_PASSWORD = barbican2WO*
USER_DOMAIN_NAME = Default
PROJECT_DOMAIN_NAME = Default
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
LDAP_SERVICE_URI = ldap://192.168.204.12:389
LDAP_SERVICE_NAME = open-ldap
LDAP_REGION_NAME = RegionOne
[cAUTHENTICATION]
ADMIN_PASSWORD = Li69nux*

View File

@ -1 +0,0 @@
# Dummy certificate file

View File

@ -1,62 +0,0 @@
[cSYSTEM]
# System Configuration
SYSTEM_MODE=duplex
TIMEZONE=UTC
[cPXEBOOT]
# PXEBoot Network Support Configuration
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller
[cMGMT]
# Management Network Configuration
MANAGEMENT_INTERFACE_NAME=eth1
MANAGEMENT_INTERFACE=eth1
MANAGEMENT_MTU=1500
MANAGEMENT_SUBNET=192.168.204.0/24
LAG_MANAGEMENT_INTERFACE=no
CONTROLLER_FLOATING_ADDRESS=192.168.204.2
CONTROLLER_0_ADDRESS=192.168.204.3
CONTROLLER_1_ADDRESS=192.168.204.4
NFS_MANAGEMENT_ADDRESS_1=192.168.204.7
NFS_MANAGEMENT_ADDRESS_2=192.168.204.8
CONTROLLER_FLOATING_HOSTNAME=controller
CONTROLLER_HOSTNAME_PREFIX=controller-
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
DYNAMIC_ADDRESS_ALLOCATION=yes
MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28
[cCLUSTER]
# Cluster Host Network Configuration
CLUSTER_INTERFACE_NAME=eth1
CLUSTER_INTERFACE=eth1
CLUSTER_VLAN=NC
CLUSTER_MTU=1500
CLUSTER_SUBNET=192.168.206.0/24
LAG_CLUSTER_INTERFACE=no
[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
EXTERNAL_OAM_INTERFACE=eth0
EXTERNAL_OAM_VLAN=NC
EXTERNAL_OAM_MTU=1500
LAG_EXTERNAL_OAM_INTERFACE=no
EXTERNAL_OAM_SUBNET=10.10.10.0/24
EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
EXTERNAL_OAM_1_ADDRESS=10.10.10.4
[cDNS]
# DNS Configuration
NAMESERVER_1=8.8.8.8
NAMESERVER_2=8.8.4.4
NAMESERVER_3=NC
[cSECURITY]
[cREGION]
# Region Configuration
REGION_CONFIG=False
[cAUTHENTICATION]
ADMIN_PASSWORD=Li69nux*

View File

@ -1,62 +0,0 @@
[cSYSTEM]
# System Configuration
SYSTEM_MODE=duplex
TIMEZONE=UTC
[cPXEBOOT]
# PXEBoot Network Support Configuration
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller
[cMGMT]
# Management Network Configuration
MANAGEMENT_INTERFACE_NAME=eth1
MANAGEMENT_INTERFACE=eth1
MANAGEMENT_MTU=1500
MANAGEMENT_SUBNET=192.168.204.0/24
LAG_MANAGEMENT_INTERFACE=no
CONTROLLER_FLOATING_ADDRESS=192.168.204.2
CONTROLLER_0_ADDRESS=192.168.204.3
CONTROLLER_1_ADDRESS=192.168.204.4
NFS_MANAGEMENT_ADDRESS_1=192.168.204.5
NFS_MANAGEMENT_ADDRESS_2=192.168.204.6
CONTROLLER_FLOATING_HOSTNAME=controller
CONTROLLER_HOSTNAME_PREFIX=controller-
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
DYNAMIC_ADDRESS_ALLOCATION=yes
MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28
[cCLUSTER]
# Cluster Host Network Configuration
CLUSTER_INTERFACE_NAME=eth1
CLUSTER_INTERFACE=eth1
CLUSTER_VLAN=NC
CLUSTER_MTU=1500
CLUSTER_SUBNET=192.168.206.0/24
LAG_CLUSTER_INTERFACE=no
[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
EXTERNAL_OAM_INTERFACE=eth0
EXTERNAL_OAM_VLAN=NC
EXTERNAL_OAM_MTU=1500
LAG_EXTERNAL_OAM_INTERFACE=no
EXTERNAL_OAM_SUBNET=10.10.10.0/24
EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
EXTERNAL_OAM_1_ADDRESS=10.10.10.4
[cDNS]
# DNS Configuration
NAMESERVER_1=8.8.8.8
NAMESERVER_2=8.8.4.4
NAMESERVER_3=NC
[cSECURITY]
[cREGION]
# Region Configuration
REGION_CONFIG=False
[cAUTHENTICATION]
ADMIN_PASSWORD=Li69nux*

View File

@ -1,62 +0,0 @@
[cSYSTEM]
# System Configuration
SYSTEM_MODE=duplex
TIMEZONE=UTC
[cPXEBOOT]
# PXEBoot Network Support Configuration
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller
[cMGMT]
# Management Network Configuration
MANAGEMENT_INTERFACE_NAME=eth1
MANAGEMENT_INTERFACE=eth1
MANAGEMENT_MTU=1500
MANAGEMENT_SUBNET=1234::/64
LAG_MANAGEMENT_INTERFACE=no
CONTROLLER_FLOATING_ADDRESS=1234::2
CONTROLLER_0_ADDRESS=1234::3
CONTROLLER_1_ADDRESS=1234::4
NFS_MANAGEMENT_ADDRESS_1=1234::5
NFS_MANAGEMENT_ADDRESS_2=1234::6
CONTROLLER_FLOATING_HOSTNAME=controller
CONTROLLER_HOSTNAME_PREFIX=controller-
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
DYNAMIC_ADDRESS_ALLOCATION=yes
MANAGEMENT_MULTICAST_SUBNET=ff08::1:1:0/124
[cCLUSTER]
# Cluster Host Network Configuration
CLUSTER_INTERFACE_NAME=eth1
CLUSTER_INTERFACE=eth1
CLUSTER_VLAN=NC
CLUSTER_MTU=1500
CLUSTER_SUBNET=192.168.206.0/24
LAG_CLUSTER_INTERFACE=no
[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
EXTERNAL_OAM_INTERFACE=eth0
EXTERNAL_OAM_VLAN=NC
EXTERNAL_OAM_MTU=1500
LAG_EXTERNAL_OAM_INTERFACE=no
EXTERNAL_OAM_SUBNET=abcd::/64
EXTERNAL_OAM_GATEWAY_ADDRESS=abcd::1
EXTERNAL_OAM_FLOATING_ADDRESS=abcd::2
EXTERNAL_OAM_0_ADDRESS=abcd::3
EXTERNAL_OAM_1_ADDRESS=abcd::4
[cDNS]
# DNS Configuration
NAMESERVER_1=8.8.8.8
NAMESERVER_2=8.8.4.4
NAMESERVER_3=NC
[cSECURITY]
[cREGION]
# Region Configuration
REGION_CONFIG=False
[cAUTHENTICATION]
ADMIN_PASSWORD=Li69nux*

View File

@ -1,76 +0,0 @@
[cSYSTEM]
# System Configuration
SYSTEM_MODE=duplex
TIMEZONE=UTC
[cPXEBOOT]
# PXEBoot Network Support Configuration
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller
[cMGMT]
# Management Network Configuration
MANAGEMENT_INTERFACE_NAME=eth1
MANAGEMENT_INTERFACE=eth1
MANAGEMENT_MTU=1500
MANAGEMENT_SUBNET=192.168.204.0/24
LAG_MANAGEMENT_INTERFACE=no
CONTROLLER_FLOATING_ADDRESS=192.168.204.2
CONTROLLER_0_ADDRESS=192.168.204.3
CONTROLLER_1_ADDRESS=192.168.204.4
NFS_MANAGEMENT_ADDRESS_1=192.168.204.5
NFS_MANAGEMENT_ADDRESS_2=192.168.204.6
CONTROLLER_FLOATING_HOSTNAME=controller
CONTROLLER_HOSTNAME_PREFIX=controller-
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
DYNAMIC_ADDRESS_ALLOCATION=yes
MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28
[cCLUSTER]
# Cluster Host Network Configuration
CLUSTER_INTERFACE_NAME=eth1
CLUSTER_INTERFACE=eth1
CLUSTER_VLAN=NC
CLUSTER_MTU=1500
CLUSTER_SUBNET=192.168.206.0/24
LAG_CLUSTER_INTERFACE=no
[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
EXTERNAL_OAM_INTERFACE=eth0
EXTERNAL_OAM_VLAN=NC
EXTERNAL_OAM_MTU=1500
LAG_EXTERNAL_OAM_INTERFACE=no
EXTERNAL_OAM_SUBNET=10.10.10.0/24
EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
EXTERNAL_OAM_1_ADDRESS=10.10.10.4
[cDNS]
# DNS Configuration
NAMESERVER_1=1.2.3.4
NAMESERVER_2=5.6.7.8
NAMESERVER_3=NC
[cDOCKER_PROXY]
# Docker Proxy Configuration
DOCKER_HTTP_PROXY=http://proxy.com:123
DOCKER_HTTPS_PROXY=https://proxy.com:123
DOCKER_NO_PROXY=localhost,127.0.0.1,192.168.204.2
[cDOCKER_REGISTRY]
# Docker Registry Configuration
DOCKER_K8S_REGISTRY=my.registry.com:5000
DOCKER_GCR_REGISTRY=my.registry.com
DOCKER_QUAY_REGISTRY=1.2.3.4:5000
DOCKER_DOCKER_REGISTRY=[1:2:3:4:a:b:c:d]:5000
IS_SECURE_REGISTRY=False
[cSECURITY]
[cREGION]
# Region Configuration
REGION_CONFIG=False
[cAUTHENTICATION]
ADMIN_PASSWORD=Li69nux*

View File

@ -1,94 +0,0 @@
[cSYSTEM]
# System Configuration
SYSTEM_MODE=duplex
TIMEZONE=UTC
[cPXEBOOT]
# PXEBoot Network Support Configuration
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller
[cMGMT]
# Management Network Configuration
MANAGEMENT_INTERFACE_NAME=eth1
MANAGEMENT_INTERFACE=eth1
MANAGEMENT_MTU=1500
MANAGEMENT_SUBNET=192.168.204.0/24
LAG_MANAGEMENT_INTERFACE=no
CONTROLLER_FLOATING_ADDRESS=192.168.204.102
CONTROLLER_0_ADDRESS=192.168.204.103
CONTROLLER_1_ADDRESS=192.168.204.104
NFS_MANAGEMENT_ADDRESS_1=192.168.204.105
NFS_MANAGEMENT_ADDRESS_2=192.168.204.106
CONTROLLER_FLOATING_HOSTNAME=controller
CONTROLLER_HOSTNAME_PREFIX=controller-
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
DYNAMIC_ADDRESS_ALLOCATION=yes
MANAGEMENT_START_ADDRESS=192.168.204.102
MANAGEMENT_END_ADDRESS=192.168.204.199
MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28
[cCLUSTER]
# Cluster Host Network Configuration
CLUSTER_INTERFACE_NAME=eth1
CLUSTER_INTERFACE=eth1
CLUSTER_VLAN=NC
CLUSTER_MTU=1500
CLUSTER_SUBNET=192.168.206.0/24
LAG_CLUSTER_INTERFACE=no
[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
EXTERNAL_OAM_INTERFACE=eth0
EXTERNAL_OAM_VLAN=NC
EXTERNAL_OAM_MTU=1500
LAG_EXTERNAL_OAM_INTERFACE=no
EXTERNAL_OAM_SUBNET=10.10.10.0/24
EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
EXTERNAL_OAM_1_ADDRESS=10.10.10.4
[cDNS]
# DNS Configuration
NAMESERVER_1=8.8.8.8
NAMESERVER_2=8.8.4.4
NAMESERVER_3=NC
[cSECURITY]
[cREGION]
# Region Configuration
REGION_CONFIG=True
REGION_1_NAME=RegionOne
REGION_2_NAME=RegionTwo
ADMIN_USER_NAME=admin
ADMIN_USER_DOMAIN=Default
ADMIN_PROJECT_NAME=admin
ADMIN_PROJECT_DOMAIN=Default
SERVICE_PROJECT_NAME=service
SERVICE_USER_DOMAIN=Default
SERVICE_PROJECT_DOMAIN=Default
KEYSTONE_AUTH_URI=http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI=http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI=http://10.10.10.2:8081/keystone/main/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
PATCHING_USER_NAME=patching
PATCHING_PASSWORD=password2WO*
SYSINV_USER_NAME=sysinv
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
NFV_USER_NAME=vim
NFV_PASSWORD=password2WO*
MTCE_USER_NAME=mtce
MTCE_PASSWORD=password2WO*
FM_USER_NAME=fm
FM_PASSWORD=password2WO*
BARBICAN_USER_NAME=barbican
BARBICAN_PASSWORD=barbican2WO*
[cAUTHENTICATION]
ADMIN_PASSWORD=Li69nux*

View File

@ -1,94 +0,0 @@
[cSYSTEM]
# System Configuration
SYSTEM_MODE=duplex
TIMEZONE=UTC
[cPXEBOOT]
# PXEBoot Network Support Configuration
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller
[cMGMT]
# Management Network Configuration
MANAGEMENT_INTERFACE_NAME=eth1
MANAGEMENT_INTERFACE=eth1
MANAGEMENT_MTU=1500
MANAGEMENT_SUBNET=192.168.204.0/24
LAG_MANAGEMENT_INTERFACE=no
CONTROLLER_FLOATING_ADDRESS=192.168.204.102
CONTROLLER_0_ADDRESS=192.168.204.103
CONTROLLER_1_ADDRESS=192.168.204.104
NFS_MANAGEMENT_ADDRESS_1=192.168.204.105
NFS_MANAGEMENT_ADDRESS_2=192.168.204.106
CONTROLLER_FLOATING_HOSTNAME=controller
CONTROLLER_HOSTNAME_PREFIX=controller-
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
DYNAMIC_ADDRESS_ALLOCATION=yes
MANAGEMENT_START_ADDRESS=192.168.204.102
MANAGEMENT_END_ADDRESS=192.168.204.199
MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28
[cCLUSTER]
# Cluster Host Network Configuration
CLUSTER_INTERFACE_NAME=eth1
CLUSTER_INTERFACE=eth1
CLUSTER_VLAN=NC
CLUSTER_MTU=1500
CLUSTER_SUBNET=192.168.206.0/24
LAG_CLUSTER_INTERFACE=no
[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
EXTERNAL_OAM_INTERFACE=eth0
EXTERNAL_OAM_VLAN=NC
EXTERNAL_OAM_MTU=1500
LAG_EXTERNAL_OAM_INTERFACE=no
EXTERNAL_OAM_SUBNET=10.10.10.0/24
EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
EXTERNAL_OAM_1_ADDRESS=10.10.10.4
[cDNS]
# DNS Configuration
NAMESERVER_1=8.8.8.8
NAMESERVER_2=8.8.4.4
NAMESERVER_3=NC
[cSECURITY]
[cREGION]
# Region Configuration
REGION_CONFIG=True
REGION_1_NAME=RegionOne
REGION_2_NAME=RegionTwo
ADMIN_USER_NAME=admin
ADMIN_USER_DOMAIN=Default
ADMIN_PROJECT_NAME=admin
ADMIN_PROJECT_DOMAIN=Default
SERVICE_PROJECT_NAME=service
SERVICE_USER_DOMAIN=Default
SERVICE_PROJECT_DOMAIN=Default
KEYSTONE_AUTH_URI=http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI=http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI=http://10.10.10.2:8081/keystone/main/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
PATCHING_USER_NAME=patching
PATCHING_PASSWORD=password2WO*
SYSINV_USER_NAME=sysinv
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
NFV_USER_NAME=vim
NFV_PASSWORD=password2WO*
MTCE_USER_NAME=mtce
MTCE_PASSWORD=password2WO*
FM_USER_NAME=fm
FM_PASSWORD=password2WO*
BARBICAN_USER_NAME=barbican
BARBICAN_PASSWORD=barbican2WO*
[cAUTHENTICATION]
ADMIN_PASSWORD=Li69nux*

View File

@ -1,72 +0,0 @@
[SYSTEM]
SYSTEM_MODE=duplex
TIMEZONE=UTC
[STORAGE]
;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=Y
LAG_MODE=4
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1,eth2
[CLM_NETWORK]
CLM_VLAN=123
CLM_IP_START_ADDRESS=192.168.204.102
CLM_IP_END_ADDRESS=192.168.204.199
CLM_CIDR=192.168.204.0/24
CLM_MULTICAST_CIDR=239.1.1.0/28
CLM_GATEWAY=192.168.204.12
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[CAN_NETWORK]
CAN_VLAN=125
CAN_IP_START_ADDRESS=10.10.10.2
CAN_IP_END_ADDRESS=10.10.10.4
CAN_CIDR=10.10.10.0/24
;CAN_GATEWAY=10.10.10.1
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[REGION2_PXEBOOT_NETWORK]
PXEBOOT_CIDR=192.168.203.0/24
[SHARED_SERVICES]
REGION_NAME=RegionOne
ADMIN_PROJECT_NAME=admin
ADMIN_USER_NAME=admin
ADMIN_PASSWORD=Li69nux*
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
SERVICE_PROJECT_NAME=service
[REGION_2_SERVICES]
REGION_NAME=RegionTwo
SYSINV_USER_NAME=sysinv
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
PATCHING_USER_NAME=patching
PATCHING_PASSWORD=password2WO*
PATCHING_SERVICE_NAME=patching
PATCHING_SERVICE_TYPE=patching
NFV_USER_NAME=vim
NFV_PASSWORD=password2WO*
MTCE_USER_NAME=mtce
MTCE_PASSWORD=password2WO*
FM_USER_NAME=fm
FM_PASSWORD=password2WO*
BARBICAN_USER_NAME=barbican
BARBICAN_PASSWORD=barbican2WO*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,82 +0,0 @@
[cSYSTEM]
TIMEZONE = UTC
SYSTEM_MODE = duplex
[cPXEBOOT]
PXEBOOT_SUBNET = 192.168.203.0/24
CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2
CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3
CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller
[cMGMT]
MANAGEMENT_MTU = 1500
MANAGEMENT_SUBNET = 192.168.204.0/24
LAG_MANAGEMENT_INTERFACE = yes
MANAGEMENT_BOND_MEMBER_0 = eth1
MANAGEMENT_BOND_MEMBER_1 = eth2
MANAGEMENT_BOND_POLICY = 802.3ad
MANAGEMENT_INTERFACE = bond0
MANAGEMENT_VLAN = 123
MANAGEMENT_INTERFACE_NAME = bond0.123
MANAGEMENT_GATEWAY_ADDRESS = 192.168.204.12
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
CONTROLLER_0_ADDRESS = 192.168.204.103
CONTROLLER_1_ADDRESS = 192.168.204.104
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
CONTROLLER_FLOATING_HOSTNAME = controller
CONTROLLER_HOSTNAME_PREFIX = controller-
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
DYNAMIC_ADDRESS_ALLOCATION = no
MANAGEMENT_START_ADDRESS = 192.168.204.102
MANAGEMENT_END_ADDRESS = 192.168.204.199
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28
[cEXT_OAM]
EXTERNAL_OAM_MTU = 1500
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
LAG_EXTERNAL_OAM_INTERFACE = no
EXTERNAL_OAM_INTERFACE = bond0
EXTERNAL_OAM_VLAN = 125
EXTERNAL_OAM_INTERFACE_NAME = bond0.125
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4
[cREGION]
REGION_CONFIG = True
REGION_1_NAME = RegionOne
REGION_2_NAME = RegionTwo
ADMIN_USER_NAME = admin
ADMIN_USER_DOMAIN = Default
ADMIN_PROJECT_NAME = admin
ADMIN_PROJECT_DOMAIN = Default
SERVICE_PROJECT_NAME = service
KEYSTONE_SERVICE_NAME = keystone
KEYSTONE_SERVICE_TYPE = identity
PATCHING_USER_NAME = patching
PATCHING_PASSWORD = password2WO*
SYSINV_USER_NAME = sysinv
SYSINV_PASSWORD = password2WO*
SYSINV_SERVICE_NAME = sysinv
SYSINV_SERVICE_TYPE = platform
NFV_USER_NAME = vim
NFV_PASSWORD = password2WO*
MTCE_USER_NAME = mtce
MTCE_PASSWORD = password2WO*
FM_USER_NAME = fm
FM_PASSWORD = password2WO*
BARBICAN_USER_NAME = barbican
BARBICAN_PASSWORD = barbican2WO*
USER_DOMAIN_NAME = Default
PROJECT_DOMAIN_NAME = Default
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
[cAUTHENTICATION]
ADMIN_PASSWORD = Li69nux*

View File

@ -1,81 +0,0 @@
[SYSTEM]
SYSTEM_MODE = duplex
[STORAGE]
;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1
[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0
[CLM_NETWORK]
;CLM_VLAN=123
CLM_IP_START_ADDRESS=192.168.204.102
CLM_IP_END_ADDRESS=192.168.204.199
CLM_CIDR=192.168.204.0/24
CLM_MULTICAST_CIDR=239.1.1.0/28
;CLM_GATEWAY=192.168.204.12
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[CAN_NETWORK]
;CAN_VLAN=
CAN_IP_START_ADDRESS=10.10.10.2
CAN_IP_END_ADDRESS=10.10.10.4
CAN_CIDR=10.10.10.0/24
CAN_GATEWAY=10.10.10.1
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
;[REGION2_PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24
[NETWORK]
VSWITCH_TYPE=nuage_vrs
METADATA_PROXY_SHARED_SECRET=NuageNetworksSharedSecret
[SHARED_SERVICES]
REGION_NAME=RegionOne
ADMIN_PROJECT_NAME=admin
ADMIN_USER_NAME=admin
ADMIN_PASSWORD=Li69nux*
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
SERVICE_PROJECT_NAME=service
[REGION_2_SERVICES]
REGION_NAME=RegionTwo
SYSINV_USER_NAME=sysinv
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
PATCHING_USER_NAME=patching
PATCHING_PASSWORD=password2WO*
PATCHING_SERVICE_NAME=patching
PATCHING_SERVICE_TYPE=patching
NFV_USER_NAME=vim
NFV_PASSWORD=password2WO*
MTCE_USER_NAME=mtce
MTCE_PASSWORD=password2WO*
FM_USER_NAME=fm
FM_PASSWORD=password2WO*
BARBICAN_USER_NAME=barbican
BARBICAN_PASSWORD=barbican2WO*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,73 +0,0 @@
[cSYSTEM]
TIMEZONE = UTC
SYSTEM_MODE = duplex
[cPXEBOOT]
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller
[cMGMT]
MANAGEMENT_MTU = 1500
MANAGEMENT_SUBNET = 192.168.204.0/24
LAG_MANAGEMENT_INTERFACE = no
MANAGEMENT_INTERFACE = eth1
MANAGEMENT_INTERFACE_NAME = eth1
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
CONTROLLER_0_ADDRESS = 192.168.204.103
CONTROLLER_1_ADDRESS = 192.168.204.104
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
CONTROLLER_FLOATING_HOSTNAME = controller
CONTROLLER_HOSTNAME_PREFIX = controller-
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
DYNAMIC_ADDRESS_ALLOCATION = no
MANAGEMENT_START_ADDRESS = 192.168.204.102
MANAGEMENT_END_ADDRESS = 192.168.204.199
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28
[cEXT_OAM]
EXTERNAL_OAM_MTU = 1500
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
LAG_EXTERNAL_OAM_INTERFACE = no
EXTERNAL_OAM_INTERFACE = eth0
EXTERNAL_OAM_INTERFACE_NAME = eth0
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4
[cREGION]
REGION_CONFIG = True
REGION_1_NAME = RegionOne
REGION_2_NAME = RegionTwo
ADMIN_USER_NAME = admin
ADMIN_USER_DOMAIN = Default
ADMIN_PROJECT_NAME = admin
ADMIN_PROJECT_DOMAIN = Default
SERVICE_PROJECT_NAME = service
KEYSTONE_SERVICE_NAME = keystone
KEYSTONE_SERVICE_TYPE = identity
PATCHING_USER_NAME = patching
PATCHING_PASSWORD = password2WO*
SYSINV_USER_NAME = sysinv
SYSINV_PASSWORD = password2WO*
SYSINV_SERVICE_NAME = sysinv
SYSINV_SERVICE_TYPE = platform
NFV_USER_NAME = vim
NFV_PASSWORD = password2WO*
MTCE_USER_NAME = mtce
MTCE_PASSWORD = password2WO*
FM_USER_NAME = fm
FM_PASSWORD = password2WO*
BARBICAN_USER_NAME = barbican
BARBICAN_PASSWORD = barbican2WO*
USER_DOMAIN_NAME = Default
PROJECT_DOMAIN_NAME = Default
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
[cAUTHENTICATION]
ADMIN_PASSWORD = Li69nux*

View File

@ -1,77 +0,0 @@
[SYSTEM]
SYSTEM_MODE = duplex
[STORAGE]
;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1
[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0
[CLM_NETWORK]
;CLM_VLAN=123
CLM_IP_START_ADDRESS=192.168.204.102
CLM_IP_END_ADDRESS=192.168.204.199
CLM_CIDR=192.168.204.0/24
CLM_MULTICAST_CIDR=239.1.1.0/28
;CLM_GATEWAY=192.168.204.12
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[CAN_NETWORK]
;CAN_VLAN=
CAN_IP_START_ADDRESS=10.10.10.2
CAN_IP_END_ADDRESS=10.10.10.4
CAN_CIDR=10.10.10.0/24
CAN_GATEWAY=10.10.10.1
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
;[REGION2_PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24
[SHARED_SERVICES]
REGION_NAME=RegionOne
ADMIN_PROJECT_NAME=admin
ADMIN_USER_NAME=admin
ADMIN_PASSWORD=Li69nux*
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
SERVICE_PROJECT_NAME=service
[REGION_2_SERVICES]
REGION_NAME=RegionTwo
SYSINV_USER_NAME=sysinv
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
PATCHING_USER_NAME=patching
PATCHING_PASSWORD=password2WO*
PATCHING_SERVICE_NAME=patching
PATCHING_SERVICE_TYPE=patching
NFV_USER_NAME=vim
NFV_PASSWORD=password2WO*
MTCE_USER_NAME=mtce
MTCE_PASSWORD=password2WO*
FM_USER_NAME=fm
FM_PASSWORD=password2WO*
BARBICAN_USER_NAME=barbican
BARBICAN_PASSWORD=barbican2WO*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,73 +0,0 @@
[cSYSTEM]
TIMEZONE = UTC
SYSTEM_MODE = duplex
[cPXEBOOT]
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller
[cMGMT]
MANAGEMENT_MTU = 1500
MANAGEMENT_SUBNET = 192.168.204.0/24
LAG_MANAGEMENT_INTERFACE = no
MANAGEMENT_INTERFACE = eth1
MANAGEMENT_INTERFACE_NAME = eth1
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
CONTROLLER_0_ADDRESS = 192.168.204.103
CONTROLLER_1_ADDRESS = 192.168.204.104
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
CONTROLLER_FLOATING_HOSTNAME = controller
CONTROLLER_HOSTNAME_PREFIX = controller-
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
DYNAMIC_ADDRESS_ALLOCATION = no
MANAGEMENT_START_ADDRESS = 192.168.204.102
MANAGEMENT_END_ADDRESS = 192.168.204.199
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28
[cEXT_OAM]
EXTERNAL_OAM_MTU = 1500
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
LAG_EXTERNAL_OAM_INTERFACE = no
EXTERNAL_OAM_INTERFACE = eth0
EXTERNAL_OAM_INTERFACE_NAME = eth0
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4
[cREGION]
REGION_CONFIG = True
REGION_1_NAME = RegionOne
REGION_2_NAME = RegionTwo
ADMIN_USER_NAME = admin
ADMIN_USER_DOMAIN = Default
ADMIN_PROJECT_NAME = admin
ADMIN_PROJECT_DOMAIN = Default
SERVICE_PROJECT_NAME = service
KEYSTONE_SERVICE_NAME = keystone
KEYSTONE_SERVICE_TYPE = identity
PATCHING_USER_NAME = patching
PATCHING_PASSWORD = password2WO*
SYSINV_USER_NAME = sysinv
SYSINV_PASSWORD = password2WO*
SYSINV_SERVICE_NAME = sysinv
SYSINV_SERVICE_TYPE = platform
NFV_USER_NAME = vim
NFV_PASSWORD = password2WO*
MTCE_USER_NAME = mtce
MTCE_PASSWORD = password2WO*
FM_USER_NAME = fm
FM_PASSWORD = password2WO*
BARBICAN_USER_NAME = barbican
BARBICAN_PASSWORD = barbican2WO*
USER_DOMAIN_NAME = Default
PROJECT_DOMAIN_NAME = Default
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
[cAUTHENTICATION]
ADMIN_PASSWORD = Li69nux*

View File

@ -1,77 +0,0 @@
[SYSTEM]
SYSTEM_MODE = duplex
[STORAGE]
;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1
[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0
[CLM_NETWORK]
;CLM_VLAN=123
CLM_IP_START_ADDRESS=192.168.204.102
CLM_IP_END_ADDRESS=192.168.204.199
CLM_CIDR=192.168.204.0/24
CLM_MULTICAST_CIDR=239.1.1.0/28
;CLM_GATEWAY=192.168.204.12
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[CAN_NETWORK]
;CAN_VLAN=
CAN_IP_START_ADDRESS=10.10.10.2
CAN_IP_END_ADDRESS=10.10.10.4
CAN_CIDR=10.10.10.0/24
CAN_GATEWAY=10.10.10.1
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
;[REGION2_PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24
[SHARED_SERVICES]
REGION_NAME=RegionOne
ADMIN_PROJECT_NAME=admin
ADMIN_USER_NAME=admin
ADMIN_PASSWORD=Li69nux*
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
SERVICE_PROJECT_NAME=service
[REGION_2_SERVICES]
REGION_NAME=RegionTwo
SYSINV_USER_NAME=sysinv
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
PATCHING_USER_NAME=patching
PATCHING_PASSWORD=password2WO*
PATCHING_SERVICE_NAME=patching
PATCHING_SERVICE_TYPE=patching
NFV_USER_NAME=vim
NFV_PASSWORD=password2WO*
MTCE_USER_NAME=mtce
MTCE_PASSWORD=password2WO*
FM_USER_NAME=fm
FM_PASSWORD=password2WO*
BARBICAN_USER_NAME=barbican
BARBICAN_PASSWORD=barbican2WO*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,78 +0,0 @@
[SYSTEM]
SYSTEM_MODE = duplex
[STORAGE]
;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1
[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0
[CLM_NETWORK]
;CLM_VLAN=123
CLM_IP_START_ADDRESS=192.168.204.102
CLM_IP_END_ADDRESS=192.168.204.199
CLM_CIDR=192.168.204.0/24
CLM_MULTICAST_CIDR=239.1.1.0/28
;CLM_GATEWAY=192.168.204.12
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[CAN_NETWORK]
;CAN_VLAN=
CAN_IP_FLOATING_ADDRESS=10.10.10.2
CAN_IP_UNIT_0_ADDRESS=10.10.10.3
CAN_IP_UNIT_1_ADDRESS=10.10.10.4
CAN_CIDR=10.10.10.0/24
CAN_GATEWAY=10.10.10.1
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
;[REGION2_PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24
[SHARED_SERVICES]
REGION_NAME=RegionOne
ADMIN_PROJECT_NAME=admin
ADMIN_USER_NAME=admin
ADMIN_PASSWORD=Li69nux*
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
SERVICE_PROJECT_NAME=service
[REGION_2_SERVICES]
REGION_NAME=RegionTwo
SYSINV_USER_NAME=sysinv
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
PATCHING_USER_NAME=patching
PATCHING_PASSWORD=password2WO*
PATCHING_SERVICE_NAME=patching
PATCHING_SERVICE_TYPE=patching
NFV_USER_NAME=vim
NFV_PASSWORD=password2WO*
MTCE_USER_NAME=mtce
MTCE_PASSWORD=password2WO*
FM_USER_NAME=fm
FM_PASSWORD=password2WO*
BARBICAN_USER_NAME=barbican
BARBICAN_PASSWORD=barbican2WO*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,73 +0,0 @@
[cSYSTEM]
TIMEZONE = UTC
SYSTEM_MODE = duplex
[cPXEBOOT]
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller
[cMGMT]
MANAGEMENT_MTU = 1500
MANAGEMENT_SUBNET = 192.168.204.0/24
LAG_MANAGEMENT_INTERFACE = no
MANAGEMENT_INTERFACE = eth1
MANAGEMENT_INTERFACE_NAME = eth1
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
CONTROLLER_0_ADDRESS = 192.168.204.103
CONTROLLER_1_ADDRESS = 192.168.204.104
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
CONTROLLER_FLOATING_HOSTNAME = controller
CONTROLLER_HOSTNAME_PREFIX = controller-
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
DYNAMIC_ADDRESS_ALLOCATION = no
MANAGEMENT_START_ADDRESS = 192.168.204.102
MANAGEMENT_END_ADDRESS = 192.168.204.199
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28
[cEXT_OAM]
EXTERNAL_OAM_MTU = 1500
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
LAG_EXTERNAL_OAM_INTERFACE = no
EXTERNAL_OAM_INTERFACE = eth0
EXTERNAL_OAM_INTERFACE_NAME = eth0
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4
[cREGION]
REGION_CONFIG = True
REGION_1_NAME = RegionOne
REGION_2_NAME = RegionTwo
ADMIN_USER_NAME = admin
ADMIN_USER_DOMAIN = Default
ADMIN_PROJECT_NAME = admin
ADMIN_PROJECT_DOMAIN = Default
SERVICE_PROJECT_NAME = service
KEYSTONE_SERVICE_NAME = keystone
KEYSTONE_SERVICE_TYPE = identity
PATCHING_USER_NAME = patching
PATCHING_PASSWORD = password2WO*
SYSINV_USER_NAME = sysinv
SYSINV_PASSWORD = password2WO*
SYSINV_SERVICE_NAME = sysinv
SYSINV_SERVICE_TYPE = platform
NFV_USER_NAME = vim
NFV_PASSWORD = password2WO*
MTCE_USER_NAME = mtce
MTCE_PASSWORD = password2WO*
FM_USER_NAME = fm
FM_PASSWORD = password2WO*
BARBICAN_USER_NAME = barbican
BARBICAN_PASSWORD = barbican2WO*
USER_DOMAIN_NAME = Default
PROJECT_DOMAIN_NAME = Default
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
[cAUTHENTICATION]
ADMIN_PASSWORD = Li69nux*

View File

@ -1,55 +0,0 @@
[SYSTEM]
SYSTEM_MODE = duplex
;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1
[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0
[MGMT_NETWORK]
;VLAN=123
IP_START_ADDRESS=192.168.204.2
IP_END_ADDRESS=192.168.204.99
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
DYNAMIC_ALLOCATION=Y
;GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[OAM_NETWORK]
;VLAN=
IP_START_ADDRESS=10.10.10.2
IP_END_ADDRESS=10.10.10.4
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
;[PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24
;[BOARD_MANAGEMENT_NETWORK]
;VLAN=1
;MTU=1496
;SUBNET=192.168.203.0/24
[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,53 +0,0 @@
;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1
[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0
[MGMT_NETWORK]
VLAN=123
CIDR=1234::/64
MULTICAST_CIDR=ff08::1:1:0/124
DYNAMIC_ALLOCATION=Y
;GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[OAM_NETWORK]
;VLAN=
;IP_START_ADDRESS=abcd::2
;IP_END_ADDRESS=abcd::4
IP_FLOATING_ADDRESS=abcd::2
IP_UNIT_0_ADDRESS=abcd::3
IP_UNIT_1_ADDRESS=abcd::4
CIDR=abcd::/64
GATEWAY=abcd::1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
[PXEBOOT_NETWORK]
PXEBOOT_CIDR=192.168.203.0/24
;[BOARD_MANAGEMENT_NETWORK]
;VLAN=1
;MTU=1496
;SUBNET=192.168.203.0/24
[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,70 +0,0 @@
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
INTERFACE_MTU=1500
INTERFACE_LINK_CAPACITY=1000
INTERFACE_PORTS=eth1
[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
;INTERFACE_LINK_CAPACITY=
INTERFACE_PORTS=eth0
[MGMT_NETWORK]
;VLAN=123
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
DYNAMIC_ALLOCATION=Y
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[CLUSTER_NETWORK]
CIDR=192.168.206.0/24
DYNAMIC_ALLOCATION=Y
IP_START_ADDRESS=192.168.206.2
IP_END_ADDRESS=192.168.206.245
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[OAM_NETWORK]
;VLAN=
;IP_START_ADDRESS=10.10.10.2
;IP_END_ADDRESS=10.10.10.4
IP_FLOATING_ADDRESS=10.10.10.20
IP_UNIT_0_ADDRESS=10.10.10.30
IP_UNIT_1_ADDRESS=10.10.10.40
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
[DNS]
# DNS Configuration
NAMESERVER_1=1.2.3.4
NAMESERVER_2=5.6.7.8
[DOCKER_PROXY]
# Docker Proxy Configuration
DOCKER_HTTP_PROXY=http://proxy.com:123
DOCKER_HTTPS_PROXY=https://proxy.com:123
DOCKER_NO_PROXY=localhost,127.0.0.1,192.168.204.2
[DOCKER_REGISTRY]
# Docker Registry Configuration
DOCKER_K8S_REGISTRY=my.registry.com:5000
DOCKER_GCR_REGISTRY=my.registry.com
DOCKER_QUAY_REGISTRY=1.2.3.4:5000
DOCKER_DOCKER_REGISTRY=[1:2:3:4:a:b:c:d]:5000
IS_SECURE_REGISTRY=False
;[PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24
;[BOARD_MANAGEMENT_NETWORK]
;VLAN=1
;MTU=1496
;SUBNET=192.168.203.0/24
[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,55 +0,0 @@
[SYSTEM]
SYSTEM_MODE=duplex
;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=Y
LAG_MODE=4
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1,eth2
[MGMT_NETWORK]
VLAN=123
IP_START_ADDRESS=192.168.204.102
IP_END_ADDRESS=192.168.204.199
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[CLUSTER_NETWORK]
VLAN=126
IP_START_ADDRESS=192.168.206.102
IP_END_ADDRESS=192.168.206.199
CIDR=192.168.206.0/24
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[OAM_NETWORK]
VLAN=125
IP_START_ADDRESS=10.10.10.2
IP_END_ADDRESS=10.10.10.4
CIDR=10.10.10.0/24
;GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[PXEBOOT_NETWORK]
PXEBOOT_CIDR=192.168.203.0/24
;[BOARD_MANAGEMENT_NETWORK]
;VLAN=1
;MTU=1496
;SUBNET=192.168.203.0/24
[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,49 +0,0 @@
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
INTERFACE_MTU=1500
INTERFACE_LINK_CAPACITY=1000
INTERFACE_PORTS=eth1
[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
;INTERFACE_LINK_CAPACITY=
INTERFACE_PORTS=eth0
[PXEBOOT_NETWORK]
PXEBOOT_CIDR=192.168.102.0/24
IP_START_ADDRESS=192.168.102.32
IP_END_ADDRESS=192.168.102.54
[MGMT_NETWORK]
VLAN=123
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
DYNAMIC_ALLOCATION=Y
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[OAM_NETWORK]
;VLAN=
;IP_START_ADDRESS=10.10.10.2
;IP_END_ADDRESS=10.10.10.4
IP_FLOATING_ADDRESS=10.10.10.20
IP_UNIT_0_ADDRESS=10.10.10.30
IP_UNIT_1_ADDRESS=10.10.10.40
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
;[PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24
;[BOARD_MANAGEMENT_NETWORK]
;VLAN=1
;MTU=1496
;SUBNET=192.168.203.0/24
[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,51 +0,0 @@
;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1
[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0
[MGMT_NETWORK]
;VLAN=123
IP_START_ADDRESS=192.168.204.102
IP_END_ADDRESS=192.168.204.199
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
;GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[OAM_NETWORK]
;VLAN=
IP_START_ADDRESS=10.10.10.2
IP_END_ADDRESS=10.10.10.4
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
;[PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24
[BOARD_MANAGEMENT_NETWORK]
VLAN=1
MTU=1496
SUBNET=192.168.203.0/24
[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,63 +0,0 @@
;[DNS]
;NAMESERVER_1=8.8.8.8
;NAMESERVER_2=8.8.4.4
;NAMESERVER_3=
;[NTP]
;NTP_SERVER_1=0.pool.ntp.org
;NTP_SERVER_2=1.pool.ntp.org
;NTP_SERVER_3=2.pool.ntp.org
;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1
[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0
[MGMT_NETWORK]
;VLAN=123
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
DYNAMIC_ALLOCATION=Y
;GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[OAM_NETWORK]
;VLAN=
;IP_START_ADDRESS=10.10.10.2
;IP_END_ADDRESS=10.10.10.4
IP_FLOATING_ADDRESS=10.10.10.20
IP_UNIT_0_ADDRESS=10.10.10.30
IP_UNIT_1_ADDRESS=10.10.10.40
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
;[PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24
;[BOARD_MANAGEMENT_NETWORK]
;VLAN=1
;MTU=1496
;SUBNET=192.168.203.0/24
[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,46 +0,0 @@
;[DNS]
;NAMESERVER_1=8.8.8.8
;NAMESERVER_2=8.8.4.4
;NAMESERVER_3=
;[NTP]
;NTP_SERVER_1=0.pool.ntp.org
;NTP_SERVER_2=1.pool.ntp.org
;NTP_SERVER_3=2.pool.ntp.org
;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1
[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0
[OAM_NETWORK]
IP_ADDRESS=10.10.10.20
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
[VERSION]
RELEASE = TEST.SW.VERSION
[SYSTEM]
SYSTEM_TYPE=All-in-one
SYSTEM_MODE=simplex

View File

@ -1,24 +0,0 @@
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0
[MGMT_NETWORK]
CIDR=192.168.42.0/28
[OAM_NETWORK]
IP_ADDRESS=10.10.10.20
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
[VERSION]
RELEASE = TEST.SW.VERSION
[SYSTEM]
SYSTEM_TYPE=All-in-one
SYSTEM_MODE=simplex

View File

@ -1,52 +0,0 @@
;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1
[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0
[MGMT_NETWORK]
;VLAN=123
IP_START_ADDRESS=192.168.204.20
IP_END_ADDRESS=192.168.204.99
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
DYNAMIC_ALLOCATION=N
;GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
[OAM_NETWORK]
;VLAN=
IP_START_ADDRESS=10.10.10.2
IP_END_ADDRESS=10.10.10.4
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
;[PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24
;[BOARD_MANAGEMENT_NETWORK]
;VLAN=1
;MTU=1496
;SUBNET=192.168.203.0/24
[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
[VERSION]
RELEASE = TEST.SW.VERSION

View File

@ -1,103 +0,0 @@
"""
Copyright (c) 2014 Wind River Systems, Inc.
SPDX-License-Identifier: Apache-2.0
"""
import difflib
import filecmp
import os
from mock import patch
import controllerconfig.configassistant as ca
import controllerconfig.common.constants as constants
@patch('controllerconfig.configassistant.get_rootfs_node')
@patch('controllerconfig.configassistant.get_net_device_list')
def _test_answerfile(tmpdir, filename,
                     mock_get_net_device_list,
                     mock_get_rootfs_node,
                     compare_results=True,
                     ca_options=None):
    """ Test import and generation of answerfile

    Reads the named answerfile from controllerconfig/tests/files/, feeds
    it to a ConfigAssistant (with the hardware probes mocked out), writes
    the resulting config file under tmpdir and, when compare_results is
    True, asserts the generated file is byte-identical to the answerfile.

    :param tmpdir: pytest tmpdir fixture; work area for the generated config
    :param filename: answerfile name under controllerconfig/tests/files/
    :param compare_results: when True, fail the test if the generated
        config file differs from the input answerfile
    :param ca_options: optional keyword arguments for ConfigAssistant
        (e.g. {"kubernetes": True}); defaults to no options
    """
    # Use a None sentinel instead of a mutable {} default so every call
    # gets a fresh dict (avoids the shared-mutable-default pitfall).
    if ca_options is None:
        ca_options = {}
    mock_get_net_device_list.return_value = \
        ['eth0', 'eth1', 'eth2']
    mock_get_rootfs_node.return_value = '/dev/sda'
    assistant = ca.ConfigAssistant(**ca_options)
    # Create the path to the answerfile
    answerfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", filename)
    # Input the config from the answerfile
    assistant.input_config_from_file(answerfile)
    # Test the display method
    print("Output from display_config:")
    assistant.display_config()
    # Ensure we can write the configuration
    constants.CONFIG_WORKDIR = os.path.join(str(tmpdir), 'config_workdir')
    constants.CGCS_CONFIG_FILE = os.path.join(constants.CONFIG_WORKDIR,
                                              'cgcs_config')
    assistant.write_config_file()
    # Add the password to the generated file so it can be compared with the
    # answerfile
    with open(constants.CGCS_CONFIG_FILE, 'a') as f:
        f.write("\n[cAUTHENTICATION]\nADMIN_PASSWORD=Li69nux*\n")
    # Do a diff between the answerfile and the generated config file
    print("\n\nDiff of answerfile vs. generated config file:\n")
    with open(answerfile) as a, open(constants.CGCS_CONFIG_FILE) as b:
        a_lines = a.readlines()
        b_lines = b.readlines()
        differ = difflib.Differ()
        diff = differ.compare(a_lines, b_lines)
        print(''.join(diff))
    if compare_results:
        # Fail the testcase if the answerfile and generated config file don't
        # match.
        assert filecmp.cmp(answerfile, constants.CGCS_CONFIG_FILE)
def test_answerfile_default(tmpdir):
    """Verify an answerfile holding default values round-trips cleanly."""
    answerfile_name = "cgcs_config.default"
    _test_answerfile(tmpdir, answerfile_name)
def test_answerfile_ipv6(tmpdir):
    """Verify an answerfile holding IPv6 OAM values round-trips cleanly."""
    answerfile_name = "cgcs_config.ipv6"
    _test_answerfile(tmpdir, answerfile_name)
def test_answerfile_ceph(tmpdir):
    """Verify an answerfile holding ceph backend values round-trips cleanly."""
    answerfile_name = "cgcs_config.ceph"
    _test_answerfile(tmpdir, answerfile_name)
def test_answerfile_region(tmpdir):
    """Verify an answerfile holding region values round-trips cleanly."""
    answerfile_name = "cgcs_config.region"
    _test_answerfile(tmpdir, answerfile_name)
def test_answerfile_region_nuage_vrs(tmpdir):
    """Verify an answerfile holding nuage_vrs region values round-trips."""
    answerfile_name = "cgcs_config.region_nuage_vrs"
    _test_answerfile(tmpdir, answerfile_name)
def test_answerfile_kubernetes(tmpdir):
    """Verify an answerfile holding kubernetes values round-trips cleanly."""
    answerfile_name = "cgcs_config.kubernetes"
    _test_answerfile(tmpdir, answerfile_name,
                     ca_options={"kubernetes": True})

View File

@ -1,759 +0,0 @@
"""
Copyright (c) 2014-2019 Wind River Systems, Inc.
SPDX-License-Identifier: Apache-2.0
"""
from __future__ import print_function
from six.moves import configparser
import difflib
import filecmp
import fileinput
import mock
from mock import patch
import os
import pytest
import shutil
import sys
import controllerconfig.common.exceptions as exceptions
from controllerconfig import REGION_CONFIG
from controllerconfig import validate
import controllerconfig.common.keystone as keystone
from controllerconfig.tests import test_answerfile
sys.modules['fm_core'] = mock.Mock()
import controllerconfig.systemconfig as cr # noqa: E402
# Canned keystone "list services" response, consumed via
# keystone.ServiceList(FAKE_SERVICE_DATA) in _test_region_config below.
# The u'id' values here are cross-referenced by the u'service_id' fields
# in FAKE_ENDPOINT_DATA.
FAKE_SERVICE_DATA = {u'services': [
    {u'type': u'keystore', u'description': u'Barbican Key Management Service',
     u'enabled': True, u'id': u'9029af23540f4eecb0b7f70ac5e00152',
     u'name': u'barbican'},
    {u'type': u'network', u'description': u'OpenStack Networking service',
     u'enabled': True, u'id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'name': u'neutron'}, {u'type': u'cloudformation',
                            u'description':
                            u'OpenStack Cloudformation Service',
                            u'enabled': True,
                            u'id': u'abbf431acb6d45919cfbefe55a0f27fa',
                            u'name': u'heat-cfn'},
    {u'type': u'object-store', u'description': u'OpenStack object-store',
     u'enabled': True, u'id': u'd588956f759f4bbda9e65a1019902b9c',
     u'name': u'swift'},
    {u'type': u'volumev2',
     u'description': u'OpenStack Volume Service v2.0 API',
     u'enabled': True, u'id': u'e6e356112daa4af588d9b9dadcf98bc4',
     u'name': u'cinderv2'},
    {u'type': u'volume', u'description': u'OpenStack Volume Service',
     u'enabled': True, u'id': u'505aa37457774e55b545654aa8630822',
     u'name': u'cinder'}, {u'type': u'orchestration',
                           u'description': u'OpenStack Orchestration Service',
                           u'enabled': True,
                           u'id': u'5765bee52eec43bb8e0632ecb225d0e3',
                           u'name': u'heat'},
    {u'type': u'compute', u'description': u'OpenStack Compute Service',
     u'enabled': True, u'id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'name': u'nova'},
    {u'type': u'identity', u'description': u'OpenStack Identity',
     u'enabled': True, u'id': u'1fe7b1de187b47228fe853fbbd149664',
     u'name': u'keystone'},
    {u'type': u'image', u'description': u'OpenStack Image Service',
     u'enabled': True, u'id': u'd41750c98a864fdfb25c751b4ad84996',
     u'name': u'glance'},
    {u'type': u'database', u'description': u'Trove Database As A Service',
     u'enabled': True, u'id': u'82265e39a77b4097bd8aee4f78e13867',
     u'name': u'trove'},
    {u'type': u'patching', u'description': u'Patching Service',
     u'enabled': True, u'id': u'8515c4f28f9346199eb8704bca4f5db4',
     u'name': u'patching'},
    {u'type': u'platform', u'description': u'SysInv Service', u'enabled': True,
     u'id': u'08758bed8d894ddaae744a97db1080b3', u'name': u'sysinv'},
    {u'type': u'computev3', u'description': u'Openstack Compute Service v3',
     u'enabled': True, u'id': u'959f2214543a47549ffd8c66f98d27d4',
     u'name': u'novav3'}]}
FAKE_ENDPOINT_DATA = {u'endpoints': [
{u'url': u'http://192.168.204.12:8776/v1/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'505aa37457774e55b545654aa8630822',
u'id': u'de19beb4a4924aa1ba25af3ee64e80a0',
u'interface': u'admin'},
{u'url': u'http://192.168.204.12:8776/v1/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'505aa37457774e55b545654aa8630822',
u'id': u'de19beb4a4924aa1ba25af3ee64e80a1',
u'interface': u'internal'},
{u'url': u'http://10.10.10.2:8776/v1/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'505aa37457774e55b545654aa8630822',
u'id': u'de19beb4a4924aa1ba25af3ee64e80a2',
u'interface': u'public'},
{u'url': u'http://192.168.204.102:8774/v2/%(tenant_id)s',
u'region': u'RegionTwo', u'enabled': True,
u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
u'id': u'373259a6bbcf493b86c9f9530e86d323',
u'interface': u'admin'},
{u'url': u'http://192.168.204.102:8774/v2/%(tenant_id)s',
u'region': u'RegionTwo', u'enabled': True,
u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
u'id': u'373259a6bbcf493b86c9f9530e86d324',
u'interface': u'internal'},
{u'url': u'http://10.10.10.2:8774/v2/%(tenant_id)s',
u'region': u'RegionTwo', u'enabled': True,
u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
u'id': u'373259a6bbcf493b86c9f9530e86d324',
u'interface': u'public'},
{u'url': u'http://192.168.204.102:8004/v1/%(tenant_id)s',
u'region': u'RegionTwo', u'enabled': True,
u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
u'id': u'c51dc9354b5a41c9883ec3871b9fd271',
u'interface': u'admin'},
{u'url': u'http://192.168.204.102:8004/v1/%(tenant_id)s',
u'region': u'RegionTwo', u'enabled': True,
u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
u'id': u'c51dc9354b5a41c9883ec3871b9fd272',
u'interface': u'internal'},
{u'url': u'http://10.10.10.2:8004/v1/%(tenant_id)s',
u'region': u'RegionTwo', u'enabled': True,
u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
u'id': u'c51dc9354b5a41c9883ec3871b9fd273',
u'interface': u'public'},
{u'url': u'http://192.168.204.12:8000/v1', u'region': u'RegionOne',
u'enabled': True, u'interface': u'admin',
u'id': u'e132bb9dd0fe459687c3b04074bcb1ac',
u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'},
{u'url': u'http://192.168.204.12:8000/v1', u'region': u'RegionOne',
u'enabled': True, u'interface': u'internal',
u'id': u'e132bb9dd0fe459687c3b04074bcb1ad',
u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'},
{u'url': u'http://10.10.10.2:8000/v1', u'region': u'RegionOne',
u'enabled': True, u'interface': u'public',
u'id': u'e132bb9dd0fe459687c3b04074bcb1ae',
u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'},
{u'url': u'http://192.168.204.102:8774/v3', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'959f2214543a47549ffd8c66f98d27d4',
u'id': u'031bfbfd581f4a42b361f93fdc4fe266',
u'interface': u'admin'},
{u'url': u'http://192.168.204.102:8774/v3', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'959f2214543a47549ffd8c66f98d27d4',
u'id': u'031bfbfd581f4a42b361f93fdc4fe267',
u'interface': u'internal'},
{u'url': u'http://10.10.10.2:8774/v3', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'959f2214543a47549ffd8c66f98d27d4',
u'id': u'031bfbfd581f4a42b361f93fdc4fe268',
u'interface': u'public'},
{u'url': u'http://192.168.204.12:8081/keystone/admin/v2.0',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'1fe7b1de187b47228fe853fbbd149664',
u'id': u'6fa36df1cc4f4e97a1c12767c8a1159f',
u'interface': u'admin'},
{u'url': u'http://192.168.204.12:8081/keystone/main/v2.0',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'1fe7b1de187b47228fe853fbbd149664',
u'id': u'6fa36df1cc4f4e97a1c12767c8a11510',
u'interface': u'internal'},
{u'url': u'http://10.10.10.2:8081/keystone/main/v2.0',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'1fe7b1de187b47228fe853fbbd149664',
u'id': u'6fa36df1cc4f4e97a1c12767c8a11512',
u'interface': u'public'},
{u'url': u'http://192.168.204.102:9696/', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
u'id': u'74a7a918dd854b66bb33f1e4e0e768bc',
u'interface': u'admin'},
{u'url': u'http://192.168.204.102:9696/', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
u'id': u'74a7a918dd854b66bb33f1e4e0e768bd',
u'interface': u'internal'},
{u'url': u'http://10.10.10.2:9696/', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
u'id': u'74a7a918dd854b66bb33f1e4e0e768be',
u'interface': u'public'},
{u'url': u'http://192.168.204.102:6385/v1', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'08758bed8d894ddaae744a97db1080b3',
u'id': u'd8ae3a69f08046d1a8f031bbd65381a3',
u'interface': u'admin'},
{u'url': u'http://192.168.204.102:6385/v1', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'08758bed8d894ddaae744a97db1080b3',
u'id': u'd8ae3a69f08046d1a8f031bbd65381a4',
u'interface': u'internal'},
{u'url': u'http://10.10.10.2:6385/v1', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'08758bed8d894ddaae744a97db1080b5',
u'id': u'd8ae3a69f08046d1a8f031bbd65381a3',
u'interface': u'public'},
{u'url': u'http://192.168.204.12:8004/v1/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
u'id': u'61ad227efa3b4cdd867618041a7064dc',
u'interface': u'admin'},
{u'url': u'http://192.168.204.12:8004/v1/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
u'id': u'61ad227efa3b4cdd867618041a7064dd',
u'interface': u'internal'},
{u'url': u'http://10.10.10.2:8004/v1/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
u'id': u'61ad227efa3b4cdd867618041a7064de',
u'interface': u'public'},
{u'url': u'http://192.168.204.12:8888/v1', u'region': u'RegionOne',
u'enabled': True,
u'service_id': u'd588956f759f4bbda9e65a1019902b9c',
u'id': u'be557ddb742e46328159749a21e6e286',
u'interface': u'admin'},
{u'url': u'http://192.168.204.12:8888/v1/AUTH_$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'd588956f759f4bbda9e65a1019902b9c',
u'id': u'be557ddb742e46328159749a21e6e287',
u'interface': u'internal'},
{u'url': u'http://10.10.10.12:8888/v1/AUTH_$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'd588956f759f4bbda9e65a1019902b9c',
u'id': u'be557ddb742e46328159749a21e6e288',
u'interface': u'public'},
{u'url': u'http://192.168.204.102:8777', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
u'id': u'050d07db8c5041288f29020079177f0b',
u'interface': u'admin'},
{u'url': u'http://192.168.204.102:8777', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
u'id': u'050d07db8c5041288f29020079177f0c',
u'interface': u'internal'},
{u'url': u'http://10.10.10.2:8777', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
u'id': u'050d07db8c5041288f29020079177f0d',
u'interface': u'public'},
{u'url': u'http://192.168.204.102:5491', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'8515c4f28f9346199eb8704bca4f5db4',
u'id': u'53af565e4d7245929df7af2ba0ff46db',
u'interface': u'admin'},
{u'url': u'http://192.168.204.102:5491', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'8515c4f28f9346199eb8704bca4f5db4',
u'id': u'53af565e4d7245929df7af2ba0ff46dc',
u'interface': u'internal'},
{u'url': u'http://10.10.10.2:5491', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'8515c4f28f9346199eb8704bca4f5db4',
u'id': u'53af565e4d7245929df7af2ba0ff46dd',
u'interface': u'public'},
{u'url': u'http://192.168.204.12:8779/v1.0/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'82265e39a77b4097bd8aee4f78e13867',
u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe',
u'interface': u'admin'},
{u'url': u'http://192.168.204.12:8779/v1.0/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'82265e39a77b4097bd8aee4f78e13867',
u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe',
u'interface': u'internal'},
{u'url': u'http://10.10.10.2:8779/v1.0/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'82265e39a77b4097bd8aee4f78e13867',
u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe',
u'interface': u'public'},
{u'url': u'http://192.168.204.12:9292/v2', u'region': u'RegionOne',
u'enabled': True,
u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
u'id': u'06fdb367cb63414987ee1653a016d10a',
u'interface': u'admin'},
{u'url': u'http://192.168.204.12:9292/v2', u'region': u'RegionOne',
u'enabled': True,
u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
u'id': u'06fdb367cb63414987ee1653a016d10b',
u'interface': u'internal'},
{u'url': u'http://10.10.10.2:9292/v2', u'region': u'RegionOne',
u'enabled': True,
u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
u'id': u'06fdb367cb63414987ee1653a016d10c',
u'interface': u'public'},
{u'url': u'http://192.168.204.102:9292/v2', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
u'id': u'06fdb367cb63414987ee1653a016d10a',
u'interface': u'admin'},
{u'url': u'http://192.168.204.102:9292/v2', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
u'id': u'06fdb367cb63414987ee1653a016d10b',
u'interface': u'internal'},
{u'url': u'http://10.10.10.12:9292/v2', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
u'id': u'06fdb367cb63414987ee1653a016d10c',
u'interface': u'public'},
{u'url': u'http://192.168.204.12:8777/', u'region': u'RegionOne',
u'enabled': True,
u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
u'id': u'f15d22a9526648ff8833460e2dce1431',
u'interface': u'admin'},
{u'url': u'http://192.168.204.12:8777/', u'region': u'RegionOne',
u'enabled': True,
u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
u'id': u'f15d22a9526648ff8833460e2dce1432',
u'interface': u'internal'},
{u'url': u'http://10.10.10.12:8777/', u'region': u'RegionOne',
u'enabled': True,
u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
u'id': u'f15d22a9526648ff8833460e2dce1433',
u'interface': u'public'},
{u'url': u'http://192.168.204.102:8000/v1/', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa',
u'id': u'5e6c6ffdbcd544f8838430937a0d81a7',
u'interface': u'admin'},
{u'url': u'http://192.168.204.102:8000/v1/', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa',
u'id': u'5e6c6ffdbcd544f8838430937a0d81a8',
u'interface': u'internal'},
{u'url': u'http://10.10.10.2:8000/v1/', u'region': u'RegionTwo',
u'enabled': True,
u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa',
u'id': u'5e6c6ffdbcd544f8838430937a0d81a9',
u'interface': u'public'},
{u'url': u'http://192.168.204.12:8774/v2/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
u'id': u'87dc648502ee49fb86a4ca87d8d6028d',
u'interface': u'admin'},
{u'url': u'http://192.168.204.12:8774/v2/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
u'id': u'87dc648502ee49fb86a4ca87d8d6028e',
u'interface': u'internal'},
{u'url': u'http://10.10.10.2:8774/v2/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
u'id': u'87dc648502ee49fb86a4ca87d8d6028f',
u'interface': u'public'},
{u'url': u'http://192.168.204.12:9696/', u'region': u'RegionOne',
u'enabled': True,
u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
u'id': u'd326bf63f6f94b12924b03ff42ba63bd',
u'interface': u'admin'},
{u'url': u'http://192.168.204.12:9696/', u'region': u'RegionOne',
u'enabled': True,
u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
u'id': u'd326bf63f6f94b12924b03ff42ba63be',
u'interface': u'internal'},
{u'url': u'http://10.10.10.12:9696/', u'region': u'RegionOne',
u'enabled': True,
u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
u'id': u'd326bf63f6f94b12924b03ff42ba63bf',
u'interface': u'public'},
{u'url': u'http://192.168.204.12:8776/v2/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4',
u'id': u'61b8bb77edf644f1ad4edf9b953d44c7',
u'interface': u'admin'},
{u'url': u'http://192.168.204.12:8776/v2/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4',
u'id': u'61b8bb77edf644f1ad4edf9b953d44c8',
u'interface': u'internal'},
{u'url': u'http://10.10.10.12:8776/v2/$(tenant_id)s',
u'region': u'RegionOne', u'enabled': True,
u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4',
u'id': u'61b8bb77edf644f1ad4edf9b953d44c9',
u'interface': u'public'},
{u'url': u'http://192.168.204.12:9312/v1', u'region': u'RegionOne',
u'enabled': True,
u'service_id': u'9029af23540f4eecb0b7f70ac5e00152',
u'id': u'a1aa2af22caf460eb421d75ab1ce6125',
u'interface': u'admin'},
{u'url': u'http://192.168.204.12:9312/v1', u'region': u'RegionOne',
u'enabled': True,
u'service_id': u'9029af23540f4eecb0b7f70ac5e00152',
u'id': u'a1aa2af22caf460eb421d75ab1ce6126',
u'interface': u'internal'},
{u'url': u'http://10.10.10.12:9312/v1', u'region': u'RegionOne',
u'enabled': True,
u'service_id': u'9029af23540f4eecb0b7f70ac5e00152',
u'id': u'a1aa2af22caf460eb421d75ab1ce6127',
u'interface': u'public'}]}
# Canned keystone "list domains" response, consumed via
# keystone.DomainList(FAKE_DOMAIN_DATA) in _test_region_config below.
FAKE_DOMAIN_DATA = {u'domains': [
    {u'id': u'default', u'enabled': True,
     u'description':
     u'Owns users and tenants (i.e. projects) available on Identity API '
     u'v2.',
     u'links': {
         u'self':
         u'http://192.168.204.12:8081/keystone/main/v3/domains/default'},
     u'name': u'Default'},
    {u'id': u'05d847889e9a4cb9aa94f541eb6b9e2e',
     u'enabled': True,
     u'description': u'Contains users and projects created by heat',
     u'links': {
         u'self':
         u'http://192.168.204.12:8081/keystone/main/v3/domains/'
         u'05d847889e9a4cb9aa94f541eb6b9e2e'},
     u'name': u'heat'}],
    u'links': {
        u'self': u'http://192.168.204.12:8081/keystone/main/v3/domains',
        u'next': None,
        u'previous': None}}
def _dump_config(config):
    """Print every section and option of *config* in INI-like form."""
    for section_name in config.sections():
        print("[%s]" % section_name)
        for option, option_value in config.items(section_name):
            print("%s=%s" % (option, option_value))
def _replace_in_file(filename, old, new):
    """Replace every occurrence of *old* with *new* in file *filename*.

    The file is rewritten in place: fileinput's inplace mode redirects
    stdout into the file, so each (possibly substituted) line is echoed
    back via print().
    """
    # The context manager guarantees the stream is closed and stdout is
    # restored even if an error occurs mid-rewrite; the original code only
    # called fileinput.close() on the success path.
    with fileinput.FileInput(filename, inplace=True) as f:
        for line in f:
            print(line.replace(old, new), end='')
@patch('controllerconfig.configassistant.ConfigAssistant.get_sysadmin_sig')
def _test_region_config(tmpdir, inputfile, resultfile,
                        mock_get_sysadmin_sig):
    """ Test import and generation of answerfile

    Parses the region_config file *inputfile*, generates a cgcs_config
    file from it using the canned keystone service/endpoint/domain data
    defined above, and requires the output to match *resultfile* exactly.
    mock_get_sysadmin_sig is injected by the @patch decorator; callers
    do not supply it.
    """
    mock_get_sysadmin_sig.return_value = None
    # Create the path to the output file
    outputfile = os.path.join(str(tmpdir), 'output')
    # Parse the region_config file
    region_config = cr.parse_system_config(inputfile)
    # Dump results for debugging
    print("Parsed region_config:\n")
    _dump_config(region_config)
    # Validate the region config file
    cr.create_cgcs_config_file(outputfile, region_config,
                               keystone.ServiceList(FAKE_SERVICE_DATA),
                               keystone.EndpointList(FAKE_ENDPOINT_DATA),
                               keystone.DomainList(FAKE_DOMAIN_DATA))
    # Make a local copy of the results file
    local_resultfile = os.path.join(str(tmpdir), 'result')
    shutil.copyfile(resultfile, local_resultfile)
    # Do a diff between the output and the expected results
    print("\n\nDiff of output file vs. expected results file:\n")
    with open(outputfile) as a, open(local_resultfile) as b:
        a_lines = a.readlines()
        b_lines = b.readlines()
        differ = difflib.Differ()
        diff = differ.compare(a_lines, b_lines)
        print(''.join(diff))
    # Fail the testcase if the output doesn't match the expected results
    assert filecmp.cmp(outputfile, local_resultfile)
    # Now test that configassistant can parse this answerfile. We can't
    # compare the resulting cgcs_config file because the ordering, spacing
    # and comments are different between the answerfile generated by
    # systemconfig and ConfigAssistant.
    test_answerfile._test_answerfile(tmpdir, outputfile, compare_results=False)
    # Validate the region config file.
    # Using onboard validation since the validator's reference version number
    # is only set at build-time when validating offboard
    validate(region_config, REGION_CONFIG, None, False)
def test_region_config_simple(tmpdir):
    """Verify the simple region_config sample imports and validates."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    regionfile = os.path.join(files_dir, "region_config.simple")
    resultfile = os.path.join(files_dir, "region_config.simple.result")
    _test_region_config(tmpdir, regionfile, resultfile)
def test_region_config_simple_can_ips(tmpdir):
    """Verify a simple region_config that uses unit IPs for the CAN."""
    print("IN TEST ################################################")
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    regionfile = os.path.join(files_dir, "region_config.simple.can_ips")
    resultfile = os.path.join(files_dir, "region_config.simple.result")
    _test_region_config(tmpdir, regionfile, resultfile)
def test_region_config_lag_vlan(tmpdir):
    """Verify a region_config sample that uses LAG and VLAN interfaces."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    regionfile = os.path.join(files_dir, "region_config.lag.vlan")
    resultfile = os.path.join(files_dir, "region_config.lag.vlan.result")
    _test_region_config(tmpdir, regionfile, resultfile)
def test_region_config_security(tmpdir):
    """Verify a region_config sample carrying security configuration."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    regionfile = os.path.join(files_dir, "region_config.security")
    resultfile = os.path.join(files_dir, "region_config.security.result")
    _test_region_config(tmpdir, regionfile, resultfile)
def test_region_config_nuage_vrs(tmpdir):
    """Verify a region_config sample carrying nuage vrs configuration."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    regionfile = os.path.join(files_dir, "region_config.nuage_vrs")
    resultfile = os.path.join(files_dir, "region_config.nuage_vrs.result")
    _test_region_config(tmpdir, regionfile, resultfile)
def test_region_config_share_keystone_only(tmpdir):
    """Verify a Titanium Cloud region_config sample with shared keystone."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    regionfile = os.path.join(files_dir,
                              "TiS_region_config.share.keystoneonly")
    resultfile = os.path.join(files_dir,
                              "TiS_region_config.share.keystoneonly.result")
    _test_region_config(tmpdir, regionfile, resultfile)
def test_region_config_share_keystone_glance_cinder(tmpdir):
    """Verify a Titanium Cloud region_config sample sharing keystone,
    glance and cinder."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    regionfile = os.path.join(files_dir, "TiS_region_config.shareall")
    resultfile = os.path.join(files_dir, "TiS_region_config.shareall.result")
    _test_region_config(tmpdir, regionfile, resultfile)
def _assert_region_config_fails(region_config, offboard=False):
    """Assert that *region_config* is rejected with ConfigFail by both
    validation paths: cgcs config file generation (validate-only mode)
    and direct validate().

    offboard: passed as the last argument of validate(); the default
    False selects onboard validation.
    """
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, offboard)


def test_region_config_validation():
    """ Test detection of various errors in region_config file """
    # Create the path to the region_config files
    simple_regionfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "region_config.simple")
    lag_vlan_regionfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "region_config.lag.vlan")
    # Test detection of non-required CINDER_* parameters
    region_config = cr.parse_system_config(simple_regionfile)
    region_config.set('STORAGE', 'CINDER_BACKEND', 'lvm')
    # This first case exercises offboard validation; all others onboard.
    _assert_region_config_fails(region_config, offboard=True)
    region_config = cr.parse_system_config(simple_regionfile)
    region_config.set('STORAGE', 'CINDER_DEVICE',
                      '/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0')
    _assert_region_config_fails(region_config)
    region_config = cr.parse_system_config(simple_regionfile)
    region_config.set('STORAGE', 'CINDER_STORAGE', '10')
    _assert_region_config_fails(region_config)
    # Test detection of an invalid PXEBOOT_CIDR
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.set('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR',
                      'FD00::0000/64')
    _assert_region_config_fails(region_config)
    region_config.set('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR',
                      '192.168.1.0/29')
    _assert_region_config_fails(region_config)
    # A missing PXEBOOT_CIDR surfaces as NoOptionError, not ConfigFail,
    # so this stanza cannot use the helper above.
    region_config.remove_option('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR')
    with pytest.raises(configparser.NoOptionError):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(configparser.NoOptionError):
        validate(region_config, REGION_CONFIG, None, False)
    # Test overlap of CLM_CIDR
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.set('CLM_NETWORK', 'CLM_CIDR', '192.168.203.0/26')
    _assert_region_config_fails(region_config)
    # Test invalid CLM LAG_MODE
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.set('LOGICAL_INTERFACE_1', 'LAG_MODE', '2')
    _assert_region_config_fails(region_config)
    # Test CLM_VLAN not allowed
    region_config = cr.parse_system_config(simple_regionfile)
    region_config.set('CLM_NETWORK', 'CLM_VLAN', '123')
    _assert_region_config_fails(region_config)
    # Test CLM_VLAN missing
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.remove_option('CLM_NETWORK', 'CLM_VLAN')
    _assert_region_config_fails(region_config)
    # Test overlap of CAN_CIDR against the CLM, PXEBOOT and neighbouring
    # subnets in turn (same parsed config, successive overrides).
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    for overlapping_cidr in ('192.168.203.0/26', '192.168.204.0/26',
                             '192.168.205.0/26'):
        region_config.set('CAN_NETWORK', 'CAN_CIDR', overlapping_cidr)
        _assert_region_config_fails(region_config)
    # Test invalid CAN LAG_MODE
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.add_section('LOGICAL_INTERFACE_2')
    region_config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y')
    region_config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3')
    region_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500')
    region_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4')
    region_config.set('CAN_NETWORK', 'CAN_LOGICAL_INTERFACE',
                      'LOGICAL_INTERFACE_2')
    _assert_region_config_fails(region_config)
    # Test CAN_VLAN overlap
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.set('CAN_NETWORK', 'CAN_VLAN', '123')
    _assert_region_config_fails(region_config)
    # Test CAN_VLAN missing
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.remove_option('CAN_NETWORK', 'CAN_VLAN')
    _assert_region_config_fails(region_config)
    # Test missing gateway
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.remove_option('CLM_NETWORK', 'CLM_GATEWAY')
    _assert_region_config_fails(region_config)
    # Test two gateways
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.set('CAN_NETWORK', 'CAN_GATEWAY', '10.10.10.1')
    _assert_region_config_fails(region_config)

View File

@ -1,601 +0,0 @@
"""
Copyright (c) 2014-2019 Wind River Systems, Inc.
SPDX-License-Identifier: Apache-2.0
"""
from six.moves import configparser
import mock
import os
import pytest
import sys
import controllerconfig.common.exceptions as exceptions
from controllerconfig import validate
from controllerconfig import DEFAULT_CONFIG
sys.modules['fm_core'] = mock.Mock()
import controllerconfig.systemconfig as cr # noqa: E402
def _dump_config(config):
    """Write the given config object to stdout, section by section."""
    for sec in config.sections():
        print("[%s]" % sec)
        for key, val in config.items(sec):
            print("%s=%s" % (key, val))
def _test_system_config(filename):
    """Parse *filename* and check it passes both validation paths."""
    parsed = cr.parse_system_config(filename)
    # Echo the parsed configuration to aid debugging on failure
    print("Parsed system_config:\n")
    _dump_config(parsed)
    # First path: cgcs config file generation in validate-only mode
    cr.create_cgcs_config_file(None, parsed, None, None, None, 0,
                               validate_only=True)
    # Second path: onboard validation (the validator's reference version
    # number is only set at build time when validating offboard)
    validate(parsed, DEFAULT_CONFIG, None, False)
def test_system_config_simple():
    """Verify the simple system_config sample passes validation."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    _test_system_config(os.path.join(files_dir, "system_config.simple"))
def test_system_config_ipv6():
    """Verify a system_config sample with an IPv6 OAM passes validation."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    _test_system_config(os.path.join(files_dir, "system_config.ipv6"))
def test_system_config_lag_vlan():
    """Verify a system_config sample using LAG and VLAN passes validation."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    _test_system_config(os.path.join(files_dir, "system_config.lag.vlan"))
def test_system_config_security():
    """Verify a system_config sample with security config passes validation."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    _test_system_config(os.path.join(files_dir, "system_config.security"))
def test_system_config_ceph():
    """Test import of the system_config fixture with ceph config."""
    # Build the path to the fixture and run the shared import checks
    config_file = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "system_config.ceph")
    _test_system_config(config_file)
def test_system_config_simplex():
    """Test import of the AIO-simplex system_config fixture."""
    # Build the path to the fixture and run the shared import checks
    config_file = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "system_config.simplex")
    _test_system_config(config_file)
def test_system_config_simplex_mgmt():
    """Test import of an AIO-simplex system_config fixture that carries a
    management network configuration, plus rejection of illegal variants.
    """
    systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "system_config.simplex_mgmt")
    _test_system_config(systemfile)

    def expect_config_fail(config):
        # Both the answerfile generator and the onboard validator must
        # reject the modified configuration.
        with pytest.raises(exceptions.ConfigFail):
            cr.create_cgcs_config_file(None, config, None, None, None, 0,
                                       validate_only=True)
        with pytest.raises(exceptions.ConfigFail):
            validate(config, DEFAULT_CONFIG, None, False)

    # GATEWAY is a MGMT_NETWORK parameter that is not allowed here
    config = cr.parse_system_config(systemfile)
    config.set('MGMT_NETWORK', 'GATEWAY', '192.168.42.1')
    expect_config_fail(config)

    # LOGICAL_INTERFACE is likewise not allowed
    config = cr.parse_system_config(systemfile)
    config.set('MGMT_NETWORK', 'LOGICAL_INTERFACE', 'LOGICAL_INTERFACE_1')
    expect_config_fail(config)

    # Management CIDR must not overlap with the OAM network
    config = cr.parse_system_config(systemfile)
    config.set('MGMT_NETWORK', 'CIDR', '10.10.10.0/24')
    expect_config_fail(config)

    # An IPv6 management CIDR is not supported
    config = cr.parse_system_config(systemfile)
    config.set('MGMT_NETWORK', 'CIDR', 'FD01::0000/64')
    expect_config_fail(config)

    # A management CIDR that is too small is rejected
    config = cr.parse_system_config(systemfile)
    config.set('MGMT_NETWORK', 'CIDR', '192.168.42.0/29')
    expect_config_fail(config)
def test_system_config_validation():
    """Test detection of various errors in system_config files.

    Each case parses a pristine fixture, applies one invalid (or valid)
    mutation, and checks that both the answerfile generator and the onboard
    validator agree on accepting or rejecting it.
    """
    # Paths to the system_config fixtures used below
    simple_systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "system_config.simple")
    ipv6_systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "system_config.ipv6")
    lag_vlan_systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "system_config.lag.vlan")
    ceph_systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "system_config.ceph")
    static_addr_systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "system_config.static_addr")

    def expect_config_fail(config):
        # Both checks must reject the modified configuration.
        with pytest.raises(exceptions.ConfigFail):
            cr.create_cgcs_config_file(None, config, None, None, None, 0,
                                       validate_only=True)
        with pytest.raises(exceptions.ConfigFail):
            validate(config, DEFAULT_CONFIG, None, False)

    def expect_config_ok(config):
        # Both checks must accept the modified configuration.
        cr.create_cgcs_config_file(None, config, None, None, None, 0,
                                   validate_only=True)
        validate(config, DEFAULT_CONFIG, None, False)

    # Floating address outside of OAM_NETWORK CIDR
    config = cr.parse_system_config(ipv6_systemfile)
    config.set('OAM_NETWORK', 'IP_FLOATING_ADDRESS', '5555::5')
    expect_config_fail(config)

    # Non-ipv6 unit address on an IPv6 OAM network
    config = cr.parse_system_config(ipv6_systemfile)
    config.set('OAM_NETWORK', 'IP_UNIT_0_ADDRESS', '10.10.10.3')
    expect_config_fail(config)

    # Missing pxeboot network when using an IPv6 management network
    config = cr.parse_system_config(ipv6_systemfile)
    config.remove_section('PXEBOOT_NETWORK')
    expect_config_fail(config)

    # A ridiculously sized management network is still accepted
    config = cr.parse_system_config(ipv6_systemfile)
    config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '1234::b:0:0:0')
    config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '1234::b:ffff:ffff:ffff')
    config.remove_option('MGMT_NETWORK', 'IP_FLOATING_ADDRESS')
    config.remove_option('MGMT_NETWORK', 'IP_UNIT_0_ADDRESS')
    config.remove_option('MGMT_NETWORK', 'IP_UNIT_1_ADDRESS')
    expect_config_ok(config)

    # Using start/end addresses on the OAM network is accepted
    config = cr.parse_system_config(ipv6_systemfile)
    config.set('OAM_NETWORK', 'IP_START_ADDRESS', 'abcd::2')
    config.set('OAM_NETWORK', 'IP_END_ADDRESS', 'abcd::4')
    config.remove_option('OAM_NETWORK', 'IP_FLOATING_ADDRESS')
    config.remove_option('OAM_NETWORK', 'IP_UNIT_0_ADDRESS')
    config.remove_option('OAM_NETWORK', 'IP_UNIT_1_ADDRESS')
    expect_config_ok(config)

    # An IPv6 PXEBOOT_CIDR is invalid
    config = cr.parse_system_config(lag_vlan_systemfile)
    config.set('PXEBOOT_NETWORK', 'PXEBOOT_CIDR', 'FD00::0000/64')
    expect_config_fail(config)

    # A PXEBOOT_CIDR that is too small is invalid
    config.set('PXEBOOT_NETWORK', 'PXEBOOT_CIDR', '192.168.1.0/29')
    expect_config_fail(config)

    # A missing PXEBOOT_CIDR surfaces as the parser's own error
    config.remove_option('PXEBOOT_NETWORK', 'PXEBOOT_CIDR')
    with pytest.raises(configparser.NoOptionError):
        cr.create_cgcs_config_file(None, config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(configparser.NoOptionError):
        validate(config, DEFAULT_CONFIG, None, False)

    # Overlap of MGMT_NETWORK CIDR
    config = cr.parse_system_config(lag_vlan_systemfile)
    config.set('MGMT_NETWORK', 'CIDR', '192.168.203.0/26')
    expect_config_fail(config)

    # Invalid MGMT_NETWORK LAG_MODE
    config = cr.parse_system_config(lag_vlan_systemfile)
    config.set('LOGICAL_INTERFACE_1', 'LAG_MODE', '2')
    expect_config_fail(config)

    # MGMT_NETWORK VLAN not allowed on this configuration
    config = cr.parse_system_config(simple_systemfile)
    config.set('MGMT_NETWORK', 'VLAN', '123')
    expect_config_fail(config)

    # MGMT_NETWORK VLAN missing where required
    config = cr.parse_system_config(lag_vlan_systemfile)
    config.remove_option('MGMT_NETWORK', 'VLAN')
    expect_config_fail(config)

    # MGMT_NETWORK start address specified without end address
    config = cr.parse_system_config(simple_systemfile)
    config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2')
    expect_config_fail(config)

    # MGMT_NETWORK end address specified without start address
    config = cr.parse_system_config(simple_systemfile)
    config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.200')
    expect_config_fail(config)

    # MGMT_NETWORK start/end range does not have enough addresses
    config = cr.parse_system_config(static_addr_systemfile)
    config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2')
    config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.8')
    expect_config_fail(config)

    # MGMT_NETWORK start address not in subnet
    config = cr.parse_system_config(simple_systemfile)
    config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.200.2')
    config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.254')
    expect_config_fail(config)

    # MGMT_NETWORK end address not in subnet
    config = cr.parse_system_config(simple_systemfile)
    config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2')
    config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.214.254')
    expect_config_fail(config)

    # Overlap of CLUSTER_NETWORK CIDR (two overlapping candidates)
    config = cr.parse_system_config(lag_vlan_systemfile)
    config.set('CLUSTER_NETWORK', 'CIDR', '192.168.203.0/26')
    expect_config_fail(config)
    config.set('CLUSTER_NETWORK', 'CIDR', '192.168.204.0/26')
    expect_config_fail(config)

    # Invalid CLUSTER_NETWORK LAG_MODE
    config = cr.parse_system_config(lag_vlan_systemfile)
    config.add_section('LOGICAL_INTERFACE_2')
    config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y')
    config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3')
    config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500')
    config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4')
    config.set('CLUSTER_NETWORK', 'LOGICAL_INTERFACE',
               'LOGICAL_INTERFACE_2')
    expect_config_fail(config)

    # CLUSTER_NETWORK VLAN overlap
    config = cr.parse_system_config(lag_vlan_systemfile)
    config.set('CLUSTER_NETWORK', 'VLAN', '123')
    expect_config_fail(config)

    # Overlap of OAM_NETWORK CIDR (three overlapping candidates)
    config = cr.parse_system_config(lag_vlan_systemfile)
    config.set('OAM_NETWORK', 'CIDR', '192.168.203.0/26')
    expect_config_fail(config)
    config.set('OAM_NETWORK', 'CIDR', '192.168.204.0/26')
    expect_config_fail(config)
    config.set('OAM_NETWORK', 'CIDR', '192.168.205.0/26')
    expect_config_fail(config)

    # Invalid OAM_NETWORK LAG_MODE
    config = cr.parse_system_config(lag_vlan_systemfile)
    config.add_section('LOGICAL_INTERFACE_2')
    config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y')
    config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3')
    config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500')
    config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4')
    config.set('OAM_NETWORK', 'LOGICAL_INTERFACE',
               'LOGICAL_INTERFACE_2')
    expect_config_fail(config)

    # OAM_NETWORK VLAN overlap (two clashing values)
    config = cr.parse_system_config(lag_vlan_systemfile)
    config.set('OAM_NETWORK', 'VLAN', '123')
    expect_config_fail(config)
    config.set('OAM_NETWORK', 'VLAN', '126')
    expect_config_fail(config)

    # OAM_NETWORK VLAN missing where required
    config = cr.parse_system_config(lag_vlan_systemfile)
    config.remove_option('OAM_NETWORK', 'VLAN')
    expect_config_fail(config)

    # Missing gateway
    config = cr.parse_system_config(lag_vlan_systemfile)
    config.remove_option('MGMT_NETWORK', 'GATEWAY')
    expect_config_fail(config)

    # Two gateways
    config = cr.parse_system_config(lag_vlan_systemfile)
    config.set('OAM_NETWORK', 'GATEWAY', '10.10.10.1')
    expect_config_fail(config)

    # Unsupported NTP NTP_SERVER; only the answerfile generator is
    # exercised for this case (no onboard validation, as in the original)
    config = cr.parse_system_config(simple_systemfile)
    config.add_section('NTP')
    config.set('NTP', 'NTP_SERVER_1', '0.pool.ntp.org')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, config, None, None, None, 0,
                                   validate_only=True)

    # Overspecification of MGMT network addresses
    # NOTE(review): 'IP_IP_UNIT_*' looks like a typo for 'IP_UNIT_*', so
    # the failure is presumably triggered by the floating address alone -
    # confirm against the validator before relying on this case.
    config = cr.parse_system_config(ceph_systemfile)
    config.set('MGMT_NETWORK', 'IP_FLOATING_ADDRESS', '192.168.204.3')
    config.set('MGMT_NETWORK', 'IP_IP_UNIT_0_ADDRESS', '192.168.204.6')
    config.set('MGMT_NETWORK', 'IP_IP_UNIT_1_ADDRESS', '192.168.204.9')
    expect_config_fail(config)

    # Overspecification of OAM network addresses
    # NOTE(review): this case sets MGMT_NETWORK options, not OAM_NETWORK,
    # which looks like a copy-paste slip - confirm the intended section.
    config = cr.parse_system_config(ceph_systemfile)
    config.set('MGMT_NETWORK', 'IP_FLOATING_ADDRESS', '10.10.10.2')
    config.set('MGMT_NETWORK', 'IP_IP_UNIT_0_ADDRESS', '10.10.10.3')
    config.set('MGMT_NETWORK', 'IP_IP_UNIT_1_ADDRESS', '10.10.10.4')
    expect_config_fail(config)

    # Invalid release version
    config = cr.parse_system_config(ceph_systemfile)
    config.set('VERSION', 'RELEASE', '15.12')
    expect_config_fail(config)
def test_pxeboot_range():
    """Test import of the PXEBoot-network system_config fixture and
    rejection of invalid PXEBoot address ranges."""
    systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "system_config.pxeboot")

    # The unmodified fixture must import and generate an answerfile
    _test_system_config(systemfile)

    # Each (option, value) pair below makes the PXEBoot range invalid:
    # malformed start address, malformed end address, end address smaller
    # than the start, and a range below the minimum required size (8).
    bad_settings = [
        ('IP_START_ADDRESS', '8.123.122.345'),
        ('IP_END_ADDRESS', '128.123.122.345'),
        ('IP_END_ADDRESS', '192.168.102.30'),
        ('IP_END_ADDRESS', '128.123.122.34'),
    ]
    for option, value in bad_settings:
        config = cr.parse_system_config(systemfile)
        config.set('PXEBOOT_NETWORK', option, value)
        with pytest.raises(exceptions.ConfigFail):
            validate(config, DEFAULT_CONFIG, None, False)
def test_kubernetes():
    """Test import of the kubernetes system_config fixture, rejection of
    bad CLUSTER_NETWORK address settings, and acceptance when optional
    sections are absent."""
    systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "system_config.kubernetes")

    # The unmodified fixture must import and generate an answerfile
    _test_system_config(systemfile)

    def expect_config_fail(config):
        # Both checks must reject the modified configuration.
        with pytest.raises(exceptions.ConfigFail):
            cr.create_cgcs_config_file(None, config, None, None, None, 0,
                                       validate_only=True)
        with pytest.raises(exceptions.ConfigFail):
            validate(config, DEFAULT_CONFIG, None, False)

    def expect_config_ok(config):
        # Both checks must accept the modified configuration.
        cr.create_cgcs_config_file(None, config, None, None, None, 0,
                                   validate_only=True)
        validate(config, DEFAULT_CONFIG, None, False)

    # CLUSTER_NETWORK start address specified without end address
    config = cr.parse_system_config(systemfile)
    config.set('CLUSTER_NETWORK', 'IP_START_ADDRESS', '192.168.204.2')
    expect_config_fail(config)

    # CLUSTER_NETWORK end address specified without start address
    config = cr.parse_system_config(systemfile)
    config.set('CLUSTER_NETWORK', 'IP_END_ADDRESS', '192.168.204.200')
    expect_config_fail(config)

    # Overspecification of CLUSTER network addresses
    # NOTE(review): 'IP_IP_UNIT_*' looks like a typo for 'IP_UNIT_*', so
    # the failure is presumably triggered by the floating address alone -
    # confirm against the validator.
    config = cr.parse_system_config(systemfile)
    config.set('CLUSTER_NETWORK', 'IP_FLOATING_ADDRESS',
               '192.168.206.103')
    config.set('CLUSTER_NETWORK', 'IP_IP_UNIT_0_ADDRESS',
               '192.168.206.106')
    config.set('CLUSTER_NETWORK', 'IP_IP_UNIT_1_ADDRESS',
               '192.168.206.109')
    expect_config_fail(config)

    # DNS, docker proxy and docker registry configuration are all optional
    for optional_section in ('DNS', 'DOCKER_PROXY', 'DOCKER_REGISTRY'):
        config = cr.parse_system_config(systemfile)
        config.remove_section(optional_section)
        expect_config_ok(config)

View File

@ -19,11 +19,12 @@ from cinderclient.v3 import client as cinder_client_v3
from glanceclient import Client from glanceclient import Client
from cinderclient import utils as c_utils from cinderclient import utils as c_utils
from controllerconfig.common import log
from controllerconfig.common.rest_api_utils import get_token from controllerconfig.common.rest_api_utils import get_token
from controllerconfig.common.exceptions import TidyStorageFail from controllerconfig.common.exceptions import TidyStorageFail
LOG = log.get_logger(__name__) from oslo_log import log
LOG = log.getLogger(__name__)
KEYSTONE_AUTH_SERVER_RETRY_CNT = 60 KEYSTONE_AUTH_SERVER_RETRY_CNT = 60
KEYSTONE_AUTH_SERVER_WAIT = 1 # 1sec wait per retry KEYSTONE_AUTH_SERVER_WAIT = 1 # 1sec wait per retry
@ -565,8 +566,6 @@ def main():
show_help() show_help()
exit(1) exit(1)
log.configure()
result_file = sys.argv[1] result_file = sys.argv[1]
try: try:

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2016-2019 Wind River Systems, Inc. # Copyright (c) 2016-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
@ -42,13 +42,12 @@ from tsconfig.tsconfig import CONTROLLER_UPGRADE_STARTED_FLAG
from tsconfig.tsconfig import RESTORE_IN_PROGRESS_FLAG from tsconfig.tsconfig import RESTORE_IN_PROGRESS_FLAG
from controllerconfig.common import constants from controllerconfig.common import constants
from controllerconfig.common import log
from controllerconfig import utils as cutils from controllerconfig import utils as cutils
from controllerconfig import backup_restore
from controllerconfig.upgrades import utils from controllerconfig.upgrades import utils
LOG = log.get_logger(__name__) from oslo_log import log
LOG = log.getLogger(__name__)
POSTGRES_MOUNT_PATH = '/mnt/postgresql' POSTGRES_MOUNT_PATH = '/mnt/postgresql'
POSTGRES_DUMP_MOUNT_PATH = '/mnt/db_dump' POSTGRES_DUMP_MOUNT_PATH = '/mnt/db_dump'
@ -865,8 +864,6 @@ def main():
exit(1) exit(1)
arg += 1 arg += 1
log.configure()
if not from_release or not to_release: if not from_release or not to_release:
print("Both the FROM_RELEASE and TO_RELEASE must be specified") print("Both the FROM_RELEASE and TO_RELEASE must be specified")
exit(1) exit(1)
@ -955,9 +952,10 @@ def extract_data_from_archive(archive, staging_dir, from_release, to_release):
extract_relative_directory(archive, 'config/ssh_config', extract_relative_directory(archive, 'config/ssh_config',
tmp_config_path + '/ssh_config') tmp_config_path + '/ssh_config')
# TODO: Switch this over to use Ansible
# Restore certificate files if they are in the archive # Restore certificate files if they are in the archive
backup_restore.restore_etc_ssl_dir(archive, # backup_restore.restore_etc_ssl_dir(archive,
configpath=tmp_config_path) # configpath=tmp_config_path)
# Extract etc files # Extract etc files
archive.extract('etc/hostname', '/') archive.extract('etc/hostname', '/')
@ -975,11 +973,12 @@ def extract_data_from_archive(archive, staging_dir, from_release, to_release):
path = 'config/' + file path = 'config/' + file
extract_relative_file(archive, path, tmp_config_path) extract_relative_file(archive, path, tmp_config_path)
# TODO: Switch this over to use Ansible
# Extract distributed cloud addn_hosts file if present in archive. # Extract distributed cloud addn_hosts file if present in archive.
if backup_restore.file_exists_in_archive( # if backup_restore.file_exists_in_archive(
archive, 'config/dnsmasq.addn_hosts_dc'): # archive, 'config/dnsmasq.addn_hosts_dc'):
extract_relative_file( # extract_relative_file(
archive, 'config/dnsmasq.addn_hosts_dc', tmp_config_path) # archive, 'config/dnsmasq.addn_hosts_dc', tmp_config_path)
def extract_postgres_data(archive): def extract_postgres_data(archive):
@ -1114,7 +1113,8 @@ def upgrade_controller_simplex(backup_file):
to_release = metadata['upgrade']['to_release'] to_release = metadata['upgrade']['to_release']
check_load_version(to_release) check_load_version(to_release)
backup_restore.check_load_subfunctions(archive, staging_dir) # TODO: Switch this over to use Ansible
# backup_restore.check_load_subfunctions(archive, staging_dir)
# Patching is potentially a multi-phase step. # Patching is potentially a multi-phase step.
# If the controller is impacted by patches from the backup, # If the controller is impacted by patches from the backup,
@ -1271,7 +1271,8 @@ def upgrade_controller_simplex(backup_file):
LOG.info("Generating manifests for %s" % LOG.info("Generating manifests for %s" %
sysinv_constants.CONTROLLER_0_HOSTNAME) sysinv_constants.CONTROLLER_0_HOSTNAME)
backup_restore.configure_loopback_interface(archive) # TODO: Switch this over to use Ansible
# backup_restore.configure_loopback_interface(archive)
print_log_info("Creating configs...") print_log_info("Creating configs...")
cutils.create_system_config() cutils.create_system_config()
@ -1301,10 +1302,10 @@ def upgrade_controller_simplex(backup_file):
cutils.apply_banner_customization() cutils.apply_banner_customization()
backup_restore.restore_ldap(archive, backup_restore.ldap_permdir, # TODO: Switch this over to use Ansible
staging_dir) # backup_restore.restore_ldap(archive, backup_restore.ldap_permdir,
# staging_dir)
backup_restore.restore_std_dir(archive, backup_restore.home_permdir) # backup_restore.restore_std_dir(archive, backup_restore.home_permdir)
archive.close() archive.close()
shutil.rmtree(staging_dir, ignore_errors=True) shutil.rmtree(staging_dir, ignore_errors=True)
@ -1352,8 +1353,6 @@ def simplex_main():
exit(1) exit(1)
arg += 1 arg += 1
log.configure()
# Enforce that the command is being run from the console # Enforce that the command is being run from the console
if cutils.is_ssh_parent(): if cutils.is_ssh_parent():
print ( print (

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2015-2019 Wind River Systems, Inc. # Copyright (c) 2015-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
@ -15,13 +15,13 @@ import subprocess
import tsconfig.tsconfig as tsc import tsconfig.tsconfig as tsc
from controllerconfig import backup_restore
from controllerconfig.common import log
from controllerconfig.common import constants from controllerconfig.common import constants
from sysinv.common import constants as sysinv_constants from sysinv.common import constants as sysinv_constants
from controllerconfig.upgrades import utils from controllerconfig.upgrades import utils
LOG = log.get_logger(__name__) from oslo_log import log
LOG = log.getLogger(__name__)
def get_upgrade_databases(shared_services): def get_upgrade_databases(shared_services):
@ -197,8 +197,9 @@ def create_simplex_backup(software_upgrade):
with open(metadata_filename, 'w') as metadata_file: with open(metadata_filename, 'w') as metadata_file:
metadata_file.write(json_data) metadata_file.write(json_data)
backup_filename = get_upgrade_backup_filename(software_upgrade) # TODO: Switch this over to use Ansible
backup_restore.backup(backup_filename, constants.BACKUPS_PATH) # backup_filename = get_upgrade_backup_filename(software_upgrade)
# backup_restore.backup(backup_filename, constants.BACKUPS_PATH)
LOG.info("Create simplex backup complete") LOG.info("Create simplex backup complete")

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2016-2019 Wind River Systems, Inc. # Copyright (c) 2016-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
@ -21,14 +21,13 @@ import yaml
from tsconfig.tsconfig import SW_VERSION from tsconfig.tsconfig import SW_VERSION
from tsconfig.tsconfig import PLATFORM_PATH from tsconfig.tsconfig import PLATFORM_PATH
from controllerconfig import DEFAULT_DOMAIN_NAME
from controllerconfig import utils as cutils from controllerconfig import utils as cutils
from controllerconfig.common import log
from controllerconfig.common import constants from controllerconfig.common import constants
from sysinv.common import constants as sysinv_constants from sysinv.common import constants as sysinv_constants
from oslo_log import log
LOG = log.get_logger(__name__) LOG = log.getLogger(__name__)
POSTGRES_PATH = '/var/lib/postgresql' POSTGRES_PATH = '/var/lib/postgresql'
POSTGRES_DATA_DIR = os.path.join(POSTGRES_PATH, SW_VERSION) POSTGRES_DATA_DIR = os.path.join(POSTGRES_PATH, SW_VERSION)
@ -36,6 +35,9 @@ RABBIT_PATH = '/var/lib/rabbitmq'
CONTROLLER_1_HOSTNAME = "controller-1" CONTROLLER_1_HOSTNAME = "controller-1"
DB_CONNECTION = "postgresql://%s:%s@127.0.0.1/%s\n" DB_CONNECTION = "postgresql://%s:%s@127.0.0.1/%s\n"
# well-known default domain name
DEFAULT_DOMAIN_NAME = 'Default'
# Migration script actions # Migration script actions
ACTION_START = "start" ACTION_START = "start"
ACTION_MIGRATE = "migrate" ACTION_MIGRATE = "migrate"

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2014-2019 Wind River Systems, Inc. # Copyright (c) 2014-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
@ -8,151 +8,27 @@
Utilities Utilities
""" """
import collections
import errno
import glob import glob
import os import os
import shutil import shutil
import socket
import subprocess import subprocess
import time import time
import sys
import yaml import yaml
from six.moves import configparser
import re import re
import six import six
import netaddr import netaddr
from tsconfig import tsconfig from tsconfig import tsconfig
from sysinv.common import constants as sysinv_constants
from controllerconfig.common import constants from controllerconfig.common import constants
from controllerconfig.common import log
from controllerconfig.common.exceptions import ValidateFail from controllerconfig.common.exceptions import ValidateFail
from oslo_log import log
LOOPBACK_IFNAME = 'lo' LOG = log.getLogger(__name__)
NETWORK_SCRIPTS_PATH = '/etc/sysconfig/network-scripts'
NETWORK_SCRIPTS_PREFIX = 'ifcfg'
NETWORK_SCRIPTS_LOOPBACK = '%s-%s' % (NETWORK_SCRIPTS_PREFIX, LOOPBACK_IFNAME)
BOND_MIIMON_DEFAULT = 100
LOG = log.get_logger(__name__)
DEVNULL = open(os.devnull, 'w') DEVNULL = open(os.devnull, 'w')
EXPECTED_SERVICE_NAME_AND_TYPE = (
{"KEYSTONE_SERVICE_NAME": "keystone",
"KEYSTONE_SERVICE_TYPE": "identity",
"SYSINV_SERVICE_NAME": "sysinv",
"SYSINV_SERVICE_TYPE": "platform",
"PATCHING_SERVICE_NAME": "patching",
"PATCHING_SERVICE_TYPE": "patching",
"NFV_SERVICE_NAME": "vim",
"NFV_SERVICE_TYPE": "nfv",
"FM_SERVICE_NAME": "fm",
"FM_SERVICE_TYPE": "faultmanagement",
"BARBICAN_SERVICE_NAME": "barbican",
"BARBICAN_SERVICE_TYPE": "key-manager",
})
def filesystem_get_free_space(path):
    """Return the free space (in bytes) of the filesystem holding *path*."""
    stats = os.statvfs(path)
    return stats.f_bavail * stats.f_frsize
def directory_get_size(start_dir, regex=None):
    """
    Get total size of a directory tree in bytes.

    :param start_dir: top of tree
    :param regex: only include files matching this compiled regex (if provided)
    :return: cumulative size in bytes of all matching files
    """
    total_size = 0
    for dirpath, _, filenames in os.walk(start_dir):
        for filename in filenames:
            if regex is None or regex.match(filename):
                filep = os.path.join(dirpath, filename)
                try:
                    total_size += os.path.getsize(filep)
                except OSError as e:
                    # Files can legitimately vanish while we walk the tree;
                    # ignore those, re-raise anything else with the original
                    # traceback (bare 'raise', not 'raise e').
                    if e.errno != errno.ENOENT:
                        raise
    return total_size
def print_bytes(sizeof):
    """Return a human-readable string for a byte count (e.g. '1.5 KB').

    Bug fix: values of one petabyte or more previously fell off the end
    of the loop and returned None; they are now reported in PB.
    """
    for size in ['Bytes', 'KB', 'MB', 'GB', 'TB']:
        if abs(sizeof) < 1024.0:
            return "%3.1f %s" % (sizeof, size)
        sizeof /= 1024.0
    # Anything still >= 1024 TB after the loop is reported in petabytes.
    return "%3.1f %s" % (sizeof, 'PB')
def modprobe_drbd():
    """Load DRBD module.

    :raises subprocess.CalledProcessError: if drbdadm cannot report the
        module parameters
    """
    try:
        # drbdadm computes the module parameters that match the DRBD config.
        mod_parms = subprocess.check_output(['drbdadm', 'sh-mod-parms'],
                                            close_fds=True).rstrip()
        # modprobe failures are intentionally not checked here (plain call).
        subprocess.call(["modprobe", "-s", "drbd", mod_parms], stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to load drbd module")
        raise
def drbd_start(resource):
    """Start drbd resource.

    Brings the resource up, then promotes it to primary on this node.

    :raises subprocess.CalledProcessError: if either drbdadm step fails
    """
    try:
        subprocess.check_call(["drbdadm", "up", resource],
                              stdout=DEVNULL)
        subprocess.check_call(["drbdadm", "primary", resource],
                              stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to start drbd %s" % resource)
        raise
def drbd_stop(resource):
    """Stop drbd resource.

    Demotes the resource to secondary, then takes it down.

    :raises subprocess.CalledProcessError: if either drbdadm step fails
    """
    try:
        subprocess.check_call(["drbdadm", "secondary", resource],
                              stdout=DEVNULL)
        # Allow time for demotion to be processed
        time.sleep(1)
        subprocess.check_call(["drbdadm", "down", resource], stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to stop drbd %s" % resource)
        raise
def mount(device, directory):
    """Mount *device* at *directory*.

    :raises subprocess.CalledProcessError: if the mount command fails
    """
    try:
        subprocess.check_call(["mount", device, directory], stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to mount %s filesystem" % directory)
        raise
def umount(directory):
    """Unmount *directory*.

    :raises subprocess.CalledProcessError: if the umount command fails
    """
    try:
        subprocess.check_call(["umount", directory], stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to umount %s filesystem" % directory)
        raise
def start_service(name): def start_service(name):
""" Start a systemd service """ """ Start a systemd service """
@ -181,48 +57,6 @@ def restart_service(name):
raise raise
def start_lsb_service(name):
    """Start a Linux Standard Base service.

    :param name: init script name under /etc/init.d
    :raises subprocess.CalledProcessError: if the script exits non-zero
    """
    try:
        script = os.path.join("/etc/init.d", name)
        # Call the script with SYSTEMCTL_SKIP_REDIRECT=1 in the environment
        subprocess.check_call([script, "start"],
                              env=dict(os.environ,
                                       **{"SYSTEMCTL_SKIP_REDIRECT": "1"}),
                              stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to start %s service" % name)
        raise
def stop_lsb_service(name):
    """Stop a Linux Standard Base service.

    :param name: init script name under /etc/init.d
    :raises subprocess.CalledProcessError: if the script exits non-zero
    """
    try:
        script = os.path.join("/etc/init.d", name)
        # Call the script with SYSTEMCTL_SKIP_REDIRECT=1 in the environment
        subprocess.check_call([script, "stop"],
                              env=dict(os.environ,
                                       **{"SYSTEMCTL_SKIP_REDIRECT": "1"}),
                              stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to stop %s service" % name)
        raise
def restart_lsb_service(name):
    """Restart a Linux Standard Base service.

    :param name: init script name under /etc/init.d
    :raises subprocess.CalledProcessError: if the script exits non-zero
    """
    try:
        script = os.path.join("/etc/init.d", name)
        # Call the script with SYSTEMCTL_SKIP_REDIRECT=1 in the environment
        subprocess.check_call([script, "restart"],
                              env=dict(os.environ,
                                       **{"SYSTEMCTL_SKIP_REDIRECT": "1"}),
                              stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to restart %s service" % name)
        raise
def check_sm_service(service, state): def check_sm_service(service, state):
""" Check whether an SM service has the supplied state """ """ Check whether an SM service has the supplied state """
try: try:
@ -245,34 +79,6 @@ def wait_sm_service(service, timeout=180):
return False return False
def is_active(service):
    """Return True when the given SM service is in the enabled-active state."""
    active_state = 'enabled-active'
    return check_sm_service(service, active_state)
def get_controller_hostname():
    """Return this controller's hostname.

    :return: the local hostname as reported by the system
    """
    return socket.gethostname()
def get_mate_controller_hostname():
    """Return the mate controller's hostname.

    Controllers are named with a '-0'/'-1' suffix; the mate simply
    carries the other suffix.

    :raises Exception: if the local hostname ends in neither suffix
    """
    hostname = socket.gethostname()
    if hostname.endswith('-0'):
        mate_suffix = '-1'
    elif hostname.endswith('-1'):
        mate_suffix = '-0'
    else:
        raise Exception("Invalid controller hostname")
    return hostname.rsplit('-', 1)[0] + mate_suffix
def get_address_from_hosts_file(hostname): def get_address_from_hosts_file(hostname):
""" """
Get the IP address of a host from the /etc/hosts file Get the IP address of a host from the /etc/hosts file
@ -286,303 +92,6 @@ def get_address_from_hosts_file(hostname):
raise Exception("Hostname %s not found in /etc/hosts" % hostname) raise Exception("Hostname %s not found in /etc/hosts" % hostname)
def validate_and_normalize_mac(address):
    """Validate a MAC address and return its normalized (lower-case) form.

    :param address: MAC address to be validated and normalized.
    :returns: Normalized and validated MAC address.
    :raises Exception: if the MAC address is not valid.
    """
    if is_valid_mac(address):
        return address.lower()
    raise Exception("InvalidMAC %s" % address)
def is_valid_ip(address):
    """Return True when *address* is a valid IPv4 or IPv6 address."""
    return is_valid_ipv4(address) or is_valid_ipv6(address)
def lag_mode_to_str(lag_mode):
    """Map a numeric bonding mode (0-6) to its kernel bonding-driver name.

    :raises Exception: for any value outside 0-6
    """
    mode_names = {
        0: "balance-rr",
        1: "active-backup",
        2: "balance-xor",
        3: "broadcast",
        4: "802.3ad",
        5: "balance-tlb",
        6: "balance-alb",
    }
    name = mode_names.get(lag_mode)
    if name is None:
        raise Exception(
            "Invalid LAG_MODE value of %d. Valid values: 0-6" % lag_mode)
    return name
def is_combined_load():
    """Return True when this node carries the worker subfunction (AIO)."""
    subfunctions = tsconfig.subfunctions
    return 'worker' in subfunctions
def get_system_type():
    """Return the system build type: AIO when combined, standard otherwise."""
    return (sysinv_constants.TIS_AIO_BUILD if is_combined_load()
            else sysinv_constants.TIS_STD_BUILD)
def get_security_profile():
    """Return the configured security profile (extended or standard)."""
    extended = sysinv_constants.SYSTEM_SECURITY_PROFILE_EXTENDED
    if tsconfig.security_profile == extended:
        return extended
    return sysinv_constants.SYSTEM_SECURITY_PROFILE_STANDARD
def is_cpe():
    """Return True when running the combined (AIO) load."""
    system_type = get_system_type()
    return system_type == sysinv_constants.TIS_AIO_BUILD
def get_interface_config_common(device, mtu=None):
    """
    Build the ifcfg parameters shared by every interface device type.
    """
    params = collections.OrderedDict([
        ('BOOTPROTO', 'none'),
        ('ONBOOT', 'yes'),
        ('DEVICE', device),
        # Increased to accommodate devices that require more time to
        # complete link auto-negotiation
        ('LINKDELAY', '20'),
    ])
    if mtu:
        params['MTU'] = mtu
    return params
def get_interface_config_ipv4(ip_address, ip_subnet, ip_gateway):
    """
    Build the ifcfg parameters for static IPv4 addressing.
    """
    entries = [('IPADDR', ip_address),
               ('NETMASK', ip_subnet.netmask),
               ('BROADCAST', ip_subnet.broadcast)]
    if ip_gateway:
        entries.append(('GATEWAY', ip_gateway))
    return collections.OrderedDict(entries)
def get_interface_config_ipv6(ip_address, ip_subnet, ip_gateway):
    """
    Build the ifcfg parameters for static IPv6 addressing.
    """
    cidr = netaddr.IPNetwork('%s/%u' % (ip_address, ip_subnet.prefixlen))
    params = collections.OrderedDict([('IPV6INIT', 'yes'),
                                      ('IPV6ADDR', cidr)])
    if ip_gateway:
        params['IPV6_DEFAULTGW'] = ip_gateway
    return params
def get_interface_config_static(ip_address, ip_subnet, ip_gateway=None):
    """
    Build the ifcfg parameters for static addressing of either IP version.
    """
    version = netaddr.IPAddress(ip_address).version
    builder = (get_interface_config_ipv4 if version == 4
               else get_interface_config_ipv6)
    return builder(ip_address, ip_subnet, ip_gateway)
def write_interface_config_file(device, parameters):
    """
    Write interface configuration parameters to the network scripts
    directory named after the supplied device.

    :param device: device name as str
    :param parameters: dict of parameters (written as KEY=value lines)
    :raises IOError: if the ifcfg file cannot be created
    """
    filename = os.path.join(NETWORK_SCRIPTS_PATH, "%s-%s" %
                            (NETWORK_SCRIPTS_PREFIX, device))
    try:
        with open(filename, 'w') as f:
            for parameter, value in parameters.items():
                f.write("%s=%s\n" % (parameter, str(value)))
    except IOError:
        LOG.error("Failed to create file: %s" % filename)
        raise
def write_interface_config_ethernet(device, mtu=None, parameters=None):
    """Write the interface configuration for an Ethernet device."""
    config = get_interface_config_common(device, mtu)
    config.update(parameters or {})
    write_interface_config_file(device, config)
def write_interface_config_vlan(device, mtu, parameters=None):
    """Write the interface configuration for a VLAN device."""
    config = get_interface_config_vlan()
    config.update(parameters or {})
    write_interface_config_ethernet(device, mtu, parameters=config)
def write_interface_config_slave(device, master, parameters=None):
    """Write the interface configuration for a bond slave device."""
    config = get_interface_config_slave(master)
    config.update(parameters or {})
    write_interface_config_ethernet(device, parameters=config)
def write_interface_config_bond(device, mtu, mode, txhash, miimon,
                                member1, member2, parameters=None):
    """Write the interface configuration for a bond master device
    and its slave devices."""
    config = get_interface_config_bond(mode, txhash, miimon)
    config.update(parameters or {})
    write_interface_config_ethernet(device, mtu, parameters=config)
    # create slave device configuration files
    for member in (member1, member2):
        if member:
            write_interface_config_slave(member, device)
def get_interface_config_vlan():
    """
    Build the ifcfg parameter that marks a device as a VLAN interface.
    """
    return collections.OrderedDict([('VLAN', 'yes')])
def get_interface_config_slave(master):
    """
    Build the ifcfg parameters for a bond slave device.
    """
    return collections.OrderedDict([
        ('MASTER', master),
        ('SLAVE', 'yes'),
        ('PROMISC', 'yes'),
    ])
def get_interface_config_bond(mode, txhash, miimon):
    """
    Build the BONDING_OPTS ifcfg parameter for a bond master device.
    """
    options = ["mode=%s" % mode, "miimon=%s" % miimon]
    if txhash:
        options.append("xmit_hash_policy=%s" % txhash)
    if mode == constants.LAG_MODE_8023AD:
        options.append("lacp_rate=fast")
    parameters = collections.OrderedDict()
    parameters['BONDING_OPTS'] = "\"%s\"" % " ".join(options)
    return parameters
def remove_interface_config_files(stdout=None, stderr=None):
    """
    Remove all existing interface configuration files.

    Each non-loopback interface is brought down with ifdown before its
    ifcfg file is deleted; the loopback config is preserved.
    """
    files = glob.glob1(NETWORK_SCRIPTS_PATH, "%s-*" % NETWORK_SCRIPTS_PREFIX)
    for file in [f for f in files if f != NETWORK_SCRIPTS_LOOPBACK]:
        ifname = file[len(NETWORK_SCRIPTS_PREFIX) + 1:]  # remove prefix
        subprocess.check_call(["ifdown", ifname],
                              stdout=stdout, stderr=stderr)
        os.remove(os.path.join(NETWORK_SCRIPTS_PATH, file))
def remove_interface_ip_address(device, ip_address, ip_subnet,
                                stdout=None, stderr=None):
    """Remove an IP address from an interface.

    :raises subprocess.CalledProcessError: if 'ip addr del' fails
    """
    subprocess.check_call(
        ["ip", "addr", "del",
         str(ip_address) + "/" + str(ip_subnet.prefixlen),
         "dev", device],
        stdout=stdout, stderr=stderr)
def send_interface_garp(device, ip_address, stdout=None, stderr=None):
    """Send a GARP message for the supplied address.

    Best-effort: arping failures are deliberately ignored (plain call).
    """
    subprocess.call(
        ["arping", "-c", "3", "-A", "-q", "-I",
         device, str(ip_address)],
        stdout=stdout, stderr=stderr)
def restart_networking(stdout=None, stderr=None):
    """
    Restart networking services.

    Flushes every non-loopback interface and then restarts the 'network'
    systemd unit.

    :raises subprocess.CalledProcessError: if the systemctl restart fails
    """
    # Kill any leftover dhclient process from the boot
    subprocess.call(["pkill", "dhclient"])
    # remove any existing IP addresses
    ifs = glob.glob1('/sys/class/net', "*")
    for i in [i for i in ifs if i != LOOPBACK_IFNAME]:
        subprocess.call(
            ["ip", "link", "set", "dev", i, "down"])
        subprocess.call(
            ["ip", "addr", "flush", "dev", i])
        subprocess.call(
            ["ip", "-6", "addr", "flush", "dev", i])
    subprocess.check_call(["systemctl", "restart", "network"],
                          stdout=stdout, stderr=stderr)
def output_to_dict(output):
    """Parse two-column command output into a {name: value} dict.

    Each non-empty line must contain exactly two whitespace-separated
    fields; the second field becomes the key and the first the value.

    :raises Exception: if a line does not have exactly two fields
    """
    # Renamed from 'dict' to avoid shadowing the builtin.
    result = {}
    for row in [_f for _f in output.split('\n') if _f]:
        values = row.split()
        if len(values) != 2:
            raise Exception("The following output does not respect the "
                            "format: %s" % row)
        result[values[1]] = values[0]
    return result
def get_install_uuid():
    """Get the install uuid from the feed directory.

    :returns: the install UUID string, stripped of trailing whitespace
    :raises Exception: if the install_uuid file cannot be read
    """
    uuid_fname = None
    try:
        # Feed directory for the currently-running software version.
        uuid_dir = '/www/pages/feed/rel-' + tsconfig.SW_VERSION
        uuid_fname = os.path.join(uuid_dir, 'install_uuid')
        with open(uuid_fname, 'r') as uuid_file:
            install_uuid = uuid_file.readline().rstrip()
    except IOError:
        LOG.error("Failed to open file: %s", uuid_fname)
        raise Exception("Failed to retrieve install UUID")
    return install_uuid
def write_simplex_flag(): def write_simplex_flag():
""" Write simplex flag. """ """ Write simplex flag. """
simplex_flag = "/etc/platform/simplex" simplex_flag = "/etc/platform/simplex"
@ -634,37 +143,6 @@ def apply_manifest(controller_address_0, personality, manifest, hieradata,
raise Exception(msg) raise Exception(msg)
def create_system_controller_config(filename):
    """Create any additional parameters needed for system controller.

    Writes a small hieradata YAML file pinning the keystone endpoint
    and sysinv region names to the system-controller region.

    :param filename: path of the YAML file to write
    """
    # set keystone endpoint region name and sysinv keystone authtoken
    # region name
    config = {
        'keystone::endpoint::region':
            sysinv_constants.SYSTEM_CONTROLLER_REGION,
        'sysinv::region_name':
            sysinv_constants.SYSTEM_CONTROLLER_REGION,
    }
    try:
        with open(filename, 'w') as f:
            yaml.dump(config, f, default_flow_style=False)
    except Exception:
        LOG.exception("failed to write config file: %s" % filename)
        raise
def create_static_config():
    """Generate the puppet hiera static configuration via sysinv-puppet.

    :raises Exception: if the hieradata directory cannot be created or
        the sysinv-puppet command fails
    """
    cmd = ["/usr/bin/sysinv-puppet",
           "create-static-config",
           constants.HIERADATA_WORKDIR]
    try:
        os.makedirs(constants.HIERADATA_WORKDIR)
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        msg = "Failed to create puppet hiera static config"
        print(msg)
        raise Exception(msg)
def create_system_config(): def create_system_config():
cmd = ["/usr/bin/sysinv-puppet", cmd = ["/usr/bin/sysinv-puppet",
"create-system-config", "create-system-config",
@ -692,34 +170,6 @@ def create_host_config(hostname=None):
raise Exception(msg) raise Exception(msg)
def shutdown_file_systems():
    """Shutdown filesystems.

    Unmounts and demotes each DRBD-backed filesystem in turn; mounts
    are released before their DRBD resources are stopped.
    """
    umount("/var/lib/postgresql")
    drbd_stop("drbd-pgsql")
    stop_service("www-pages-helm_charts.mount")
    umount("/opt/platform")
    drbd_stop("drbd-platform")
    umount("/opt/extension")
    drbd_stop("drbd-extension")
    # The patch-vault filesystem is optional.
    if os.path.exists("/opt/patch-vault"):
        umount("/opt/patch-vault")
        drbd_stop("drbd-patch-vault")
    umount("/var/lib/rabbitmq")
    drbd_stop("drbd-rabbit")
    stop_service("etcd.service")
    stop_service("opt-etcd.mount")
    drbd_stop("drbd-etcd")
    umount("/var/lib/docker-distribution")
    drbd_stop("drbd-dockerdistribution")
def persist_config(): def persist_config():
"""Copy temporary config files into new DRBD filesystem""" """Copy temporary config files into new DRBD filesystem"""
@ -862,24 +312,6 @@ def configure_hostname(hostname):
raise Exception("Failed to configure hostname") raise Exception("Failed to configure hostname")
def progress(steps, step, action, result, newline=False):
    """Display a single-line progress bar on stdout.

    :param steps: total number of steps (0 means show 100% immediately)
    :param step: current step number
    :param action: current action (unused; kept for interface compat)
    :param result: action result (unused; kept for interface compat)
    :param newline: force a trailing newline even mid-progress
    """
    if steps == 0:
        hashes = 45
        percentage = 100
    else:
        # Integer division: under Python 3 the old '/' produced floats,
        # which broke both the '#' repetition and the {:d} formatting.
        hashes = (step * 45) // steps
        percentage = (step * 100) // steps
    sys.stdout.write("\rStep {0:{width}d} of {1:d} [{2:45s}] "
                     "[{3:d}%]".format(min(step, steps), steps,
                                       '#' * hashes, percentage,
                                       width=len(str(steps))))
    if step == steps or newline:
        sys.stdout.write("\n")
    sys.stdout.flush()
def touch(fname): def touch(fname):
with open(fname, 'a'): with open(fname, 'a'):
os.utime(fname, None) os.utime(fname, None)
@ -898,47 +330,6 @@ def is_ssh_parent():
return False return False
def is_valid_vlan(vlan):
    """Determine whether *vlan* is a valid VLAN id (1-4094)."""
    try:
        value = int(vlan)
    except (ValueError, TypeError):
        return False
    return 0 < value < 4095
def is_mtu_valid(mtu):
    """Determine whether an MTU value lies within the supported 576-9216 range."""
    try:
        value = int(mtu)
    except (ValueError, TypeError):
        return False
    return 576 <= value <= 9216
def is_valid_hostname(hostname):
    """Determine whether a hostname is valid as per RFC 1123."""
    # Maximum total length of 255 characters; empty names are invalid.
    if not hostname or len(hostname) > 255:
        return False
    # A single trailing dot (fully-qualified form) is permitted.
    name = hostname[:-1] if hostname.endswith(".") else hostname
    # Each label: 1-63 chars, letters/digits/dashes, no leading or
    # trailing dash.
    label_re = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(label_re.match(label) for label in name.split("."))
def is_valid_mac(mac): def is_valid_mac(mac):
"""Verify the format of a MAC addres.""" """Verify the format of a MAC addres."""
if not mac: if not mac:
@ -969,32 +360,6 @@ def validate_network_str(network_str, minimum_size,
"Invalid subnet - not a valid IP subnet") "Invalid subnet - not a valid IP subnet")
def is_valid_filename(filename):
    """A filename is valid as long as it contains no NUL characters."""
    return filename.find('\0') == -1
def is_valid_by_path(filename):
    """True for a /dev/disk/by-path device path that is not a partition."""
    is_by_path = "/dev/disk/by-path" in filename
    is_partition = "-part" in filename
    return is_by_path and not is_partition
def is_valid_url(url_str):
    """Validate a URL string using the Django URL validation patterns."""
    pattern = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)'  # domain...
        r'+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    return pattern.match(url_str) is not None
def is_valid_domain(url_str): def is_valid_domain(url_str):
r = re.compile( r = re.compile(
r'^(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)' # domain... r'^(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)' # domain...
@ -1010,54 +375,6 @@ def is_valid_domain(url_str):
return False return False
def is_valid_ipv4(address):
    """Verify that address represents a valid IPv4 address.

    :param address: candidate address string
    :returns: True if valid; False otherwise (including on parse errors)
    """
    try:
        return netaddr.valid_ipv4(address)
    except Exception:
        return False
def is_valid_ipv6(address):
    """Verify that address represents a valid IPv6 address.

    :param address: candidate address string
    :returns: True if valid; False otherwise (including on parse errors)
    """
    try:
        return netaddr.valid_ipv6(address)
    except Exception:
        return False
def is_valid_domain_or_ip(url_str):
    """Validate a domain name, bare IP, or IP-with-port string.

    Accepts a valid domain name, 'ipv4', 'ipv4:port', a bare IPv6
    address, or '[ipv6]:port'.  Empty input returns False.
    """
    if url_str:
        if is_valid_domain(url_str):
            return True
        ip_with_port = url_str.split(':')
        if len(ip_with_port) <= 2:
            # check ipv4 or ipv4 with port
            return is_valid_ipv4(ip_with_port[0])
        else:
            # check ipv6 with port
            if '[' in url_str:
                try:
                    bkt_idx = url_str.index(']')
                    if bkt_idx + 1 == len(url_str):
                        # brackets without port
                        return False
                    else:
                        return is_valid_ipv6(url_str[1:bkt_idx])
                except Exception:
                    return False
            else:
                # check ipv6 without port
                return is_valid_ipv6(url_str)
    else:
        return False
def is_valid_bool_str(val):
    """Check if the provided string is a valid bool string or not."""
    return str(val).lower() in ('true', 'false')
def validate_address_str(ip_address_str, network): def validate_address_str(ip_address_str, network):
"""Determine whether an address is valid.""" """Determine whether an address is valid."""
try: try:
@ -1087,125 +404,3 @@ def ip_version_to_string(ip_version):
return "IPv6" return "IPv6"
else: else:
return "IP" return "IP"
def validate_nameserver_address_str(ip_address_str, subnet_version=None):
    """Determine whether a nameserver address is valid.

    :param ip_address_str: address to validate
    :param subnet_version: if given (4 or 6), the address must match the
        OAM subnet's IP version
    :returns: the parsed netaddr.IPAddress
    :raises ValidateFail: on a malformed address or version mismatch
    """
    try:
        ip_address = netaddr.IPAddress(ip_address_str)
        if subnet_version is not None and ip_address.version != subnet_version:
            msg = ("Invalid IP version - must match OAM subnet version " +
                   ip_version_to_string(subnet_version))
            raise ValidateFail(msg)
        return ip_address
    except netaddr.AddrFormatError:
        msg = "Invalid address - not a valid %s address" % \
            ip_version_to_string(subnet_version)
        raise ValidateFail(msg)
def validate_address(ip_address, network):
    """Determine whether an address is usable within a network.

    :raises ValidateFail: on version mismatch, network/broadcast address,
        or an address outside the subnet
    """
    if ip_address.version != network.version:
        raise ValidateFail("Invalid IP version - must match network version " +
                           ip_version_to_string(network.version))
    if ip_address == network:
        raise ValidateFail("Cannot use network address")
    if ip_address == network.broadcast:
        raise ValidateFail("Cannot use broadcast address")
    if ip_address not in network:
        raise ValidateFail("Address must be in subnet %s" % str(network))
def check_network_overlap(new_network, configured_networks):
    """Validate that new_network does not overlap any configured_networks.

    :raises ValidateFail: if the new subnet's address falls inside any
        already-configured subnet
    """
    for subnet in configured_networks:
        if new_network.ip in subnet:
            raise ValidateFail(
                "Subnet %s overlaps with another configured subnet"
                % new_network)
def validate_openstack_password(password, rules_file,
                                section="security_compliance"):
    """Validate *password* against the regex rules in *rules_file*.

    :param password: candidate password (must be a string)
    :param rules_file: ini file containing 'password_regex' (and
        optionally 'password_regex_description') under *section*
    :returns: (True, "") on success, or (False, description) when the
        password fails the regex
    :raises Exception: any internal failure is collapsed into a generic
        "Password validation failed" exception by design
    """
    try:
        config = configparser.RawConfigParser()
        parsed_config = config.read(rules_file)
        if not parsed_config:
            msg = ("Cannot parse rules file: %s" % rules_file)
            raise Exception(msg)
        if not config.has_section(section):
            msg = ("Required section '%s' not found in rules file" % section)
            raise Exception(msg)
        password_regex = get_optional(config, section, 'password_regex')
        password_regex_description = get_optional(config, section,
                                                  'password_regex_description')
        if not password_regex:
            msg = ("Required option 'password_regex' not found in "
                   "rule file: %s" % rules_file)
            raise Exception(msg)
        # Even if regex_description is not found, we will proceed
        # and give a generic failure warning instead
        if not password_regex_description:
            password_regex_description = ("Password does not meet "
                                          "complexity criteria")
        if not isinstance(password, six.string_types):
            msg = "Password must be a string type"
            raise Exception(msg)
        try:
            # config parser would read in the string as a literal
            # representation which would fail regex matching
            password_regex = password_regex.strip('"')
            if not re.match(password_regex, password):
                return False, password_regex_description
        except re.error:
            msg = ("Unable to validate password due to invalid "
                   "complexity criteria ('password_regex')")
            raise Exception(msg)
    except Exception:
        raise Exception("Password validation failed")
    return True, ""
def extract_openstack_password_rules_from_file(
        rules_file, section="security_compliance"):
    """Read every option of *section* from *rules_file* into a dict.

    :raises Exception: on any parse failure; details are deliberately
        collapsed into one generic message, as callers expect
    """
    try:
        config = configparser.RawConfigParser()
        if not config.read(rules_file):
            raise Exception("Cannot parse rules file: %s" % rules_file)
        if not config.has_section(section):
            raise Exception(
                "Required section '%s' not found in rules file" % section)
        rules = config.items(section)
        if not rules:
            raise Exception(
                "section '%s' contains no configuration options" % section)
        return dict(rules)
    except Exception:
        raise Exception("Failed to extract password rules from file")
def get_optional(conf, section, key):
    """Return conf[section][key] if the option exists, otherwise None."""
    if not conf.has_option(section, key):
        return None
    return conf.get(section, key)
def get_service(conf, section, key):
    """Fetch a service name/type option, enforcing expected values.

    Known keys must either be absent (the expected default is returned)
    or match the expected value exactly; unknown keys are read straight
    from the config.

    :raises ValidateFail: when a known key carries an unexpected value
    """
    expected = EXPECTED_SERVICE_NAME_AND_TYPE.get(key)
    if expected is None:
        return conf.get(section, key)
    if not conf.has_option(section, key):
        return expected
    value = conf.get(section, key)
    if value != expected:
        raise ValidateFail("Unsupported %s: %s " % (key, value))
    return value

View File

@ -35,22 +35,14 @@ load-plugins=
# fixme: (notes, xxx, fixme) # fixme: (notes, xxx, fixme)
# W0101: unreachable-code # W0101: unreachable-code
# W0105: pointless-string-statement # W0105: pointless-string-statement
# W0110: deprecated-lambda
# W0120: useless-else-on-loop
# W0201: attribute-defined-outside-init
# W0211: bad-staticmethod-argument
# W0212: protected-access
# W0311: bad-indentation # W0311: bad-indentation
# W0403: relative-import # W0403: relative-import
# W0612: unused-variable
# W0613: unused-argument # W0613: unused-argument
# W0621: redefined-outer-name # W0621: redefined-outer-name
# W0622: redefined-builtin # W0622: redefined-builtin
# W0631: undefined-loop-variable
# W0703: broad-except # W0703: broad-except
# W1501: bad-open-mode # W1501: bad-open-mode
disable=C, R, fixme, W0101, W0105, W0110, W0120, W0201, W0211, W0212, W0311, W0403, disable=C, R, fixme, W0101, W0105, W0311, W0403, W0613, W0621, W0622, W0703, W1501
W0612, W0613, W0621, W0622, W0631, W0703, W1501
[REPORTS] [REPORTS]

View File

@ -1,42 +0,0 @@
#! /bin/bash
########################################################################
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
########################################################################
# Deletes all OpenStack servers and volumes using admin credentials.
PLATFORMOPENRC="/etc/platform/openrc"
if [ -e ${PLATFORMOPENRC} ] ; then
    source ${PLATFORMOPENRC} &>/dev/null
else
    echo "Admin credentials not found"
    exit
fi
# Delete all the servers
echo "Deleting all servers [`openstack server list --all`]"
found=false
for i in $(openstack server list --all -c ID -f value); do
    `openstack server delete $i &> /dev/null`
    echo $i deleted
    found=true
done
# Give deletions time to settle before reporting the final state.
if $found; then
    sleep 30
fi
echo "Deleted all servers [`openstack server list --all`]"
# Delete all the volumes
echo "Deleting all volumes [`openstack volume list --all`]"
found=false
for i in $(openstack volume list --all -c ID -f value); do
    `openstack volume delete $i &> /dev/null`
    echo $i deleted
    found=true
done
# Give deletions time to settle before reporting the final state.
if $found; then
    sleep 30
fi
echo "Deleted all volumes [`openstack volume list --all`]"

View File

@ -1,321 +0,0 @@
#!/usr/bin/python3
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
import time
import uuid
import shutil
import tempfile
import subprocess
from six.moves import configparser
import tsconfig.tsconfig as tsconfig
from controllerconfig.common import log
import controllerconfig.utils as utils
import controllerconfig.sysinv_api as sysinv
import controllerconfig.backup_restore as backup_restore
import controllerconfig.clone as clone
from controllerconfig.common.exceptions import CloneFail
from sysinv.common import constants as si_const
LOG = log.get_logger("cloning")
DEVNULL = open(os.devnull, 'w')
INI_FILE = os.path.join("/", clone.CLONE_ARCHIVE_DIR, clone.CLONE_ISO_INI)
SECTION = "clone_iso"
parser = configparser.SafeConfigParser()
clone_name = ""
def console_log(str, err=False):
    """Log a message and echo it to the console.

    Error messages are prefixed so the console reader sees the failure
    context.  (NOTE: the parameter name shadows the builtin 'str'; kept
    for interface compatibility.)
    """
    message = "Failed to install clone-image. " + str if err else str
    if err:
        LOG.error(message)
    else:
        LOG.info(message)
    print("\n" + message)
def persist(key, value):
    """Write a key/value into the clone ini file.

    Updates the module-level parser and rewrites INI_FILE so progress
    survives a reboot.
    """
    parser.set(SECTION, key, value)
    with open(INI_FILE, 'w') as f:
        parser.write(f)
def set_result(value):
    """Record the result of the clone-image installation.

    Persists both the result value and the completion timestamp.
    """
    persist(clone.RESULT, value)
    persist(clone.INSTALLED, time.strftime("%Y-%m-%d %H:%M:%S %Z"))
def validate_hardware_compatibility():
    """Validate if cloned-image can be installed on this h/w.

    Checks disks (path and size), network interfaces, CPU count and
    total memory against the values recorded in the clone ini file.
    On any failure the result is persisted as FAIL and the process
    exits with status 1.
    """
    valid = True
    # --- disks: every recorded 'path#size' entry must exist and be
    # --- at least as large as the donor's disk
    disk_paths = ""
    if parser.has_option(SECTION, "disks"):
        disk_paths = parser.get(SECTION, "disks")
    if not disk_paths:
        console_log("Missing value [disks] in ini file")
        valid = False
    for d in disk_paths.split():
        disk_path, size = d.split('#')
        if os.path.exists('/dev/disk/by-path/' + disk_path):
            LOG.info("Disk [{}] exists".format(disk_path))
            disk_size = clone.get_disk_size('/dev/disk/by-path/' +
                                            disk_path)
            if int(disk_size) >= int(size):
                LOG.info("Disk size is good: {} >= {}"
                         .format(utils.print_bytes(int(disk_size)),
                                 utils.print_bytes(int(size))))
            else:
                console_log("Not enough disk size[{}], "
                            "found:{} looking_for:{}".format(
                                disk_path, utils.print_bytes(int(disk_size)),
                                utils.print_bytes(int(size))), err=True)
                valid = False
        else:
            console_log("Disk [{}] does not exist!"
                        .format(disk_path), err=True)
            valid = False
    # --- interfaces: each recorded interface name must be present
    interfaces = ""
    if parser.has_option(SECTION, "interfaces"):
        interfaces = parser.get(SECTION, "interfaces")
    if not interfaces:
        console_log("Missing value [interfaces] in ini file")
        valid = False
    for f in interfaces.split():
        if os.path.exists('/sys/class/net/' + f):
            LOG.info("Interface [{}] exists".format(f))
        else:
            console_log("Interface [{}] does not exist!"
                        .format(f), err=True)
            valid = False
    # --- cpus: this host must have at least as many online CPUs
    maxcpuid = ""
    if parser.has_option(SECTION, "cpus"):
        maxcpuid = parser.get(SECTION, "cpus")
    if not maxcpuid:
        console_log("Missing value [cpus] in ini file")
        valid = False
    else:
        my_maxcpuid = clone.get_online_cpus()
        if int(maxcpuid) <= int(my_maxcpuid):
            LOG.info("Got enough cpus {},{}".format(
                maxcpuid, my_maxcpuid))
        else:
            console_log("Not enough CPUs, found:{} looking_for:{}"
                        .format(my_maxcpuid, maxcpuid), err=True)
            valid = False
    # --- memory: must be within 1 GiB of the donor's total
    mem_total = ""
    if parser.has_option(SECTION, "mem"):
        mem_total = parser.get(SECTION, "mem")
    if not mem_total:
        console_log("Missing value [mem] in ini file")
        valid = False
    else:
        my_mem_total = clone.get_total_mem()
        # relaxed RAM check: within 1 GiB
        if (int(mem_total) - (1024 * 1024)) <= int(my_mem_total):
            LOG.info("Got enough memory {},{}".format(
                mem_total, my_mem_total))
        else:
            console_log("Not enough memory; found:{} kB, "
                        "looking for a minimum of {} kB"
                        .format(my_mem_total, mem_total), err=True)
            valid = False
    if not valid:
        console_log("Validation failure!")
        set_result(clone.FAIL)
        # Leave the failure message visible before exiting.
        time.sleep(20)
        exit(1)
    console_log("Successful validation")
def update_sysuuid_in_archive(tmpdir):
    """Update system uuid in system archive file.

    Replaces the CLONEISO_SYSTEM_UUID placeholder in the extracted
    postgres dump with a freshly generated UUID.
    """
    sysuuid = str(uuid.uuid4())
    clone.find_and_replace(
        [os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
        "CLONEISO_SYSTEM_UUID", sysuuid)
    LOG.info("System uuid updated [%s]" % sysuuid)
def update_db(archive_dir, backup_name):
    """Update DB before restore.

    Unzips the system archive, extracts only the postgres directory to
    refresh the system UUID, updates the tarball in place and re-gzips
    it.

    :param archive_dir: directory containing the archive
    :param backup_name: archive basename (without extension)
    :raises CloneFail: if any step of the archive update fails
    """
    path_to_archive = os.path.join(archive_dir, backup_name)
    LOG.info("Updating system archive [%s] DB." % path_to_archive)
    tmpdir = tempfile.mkdtemp(dir=archive_dir)
    try:
        subprocess.check_call(
            ['gunzip', path_to_archive + '.tgz'],
            stdout=DEVNULL, stderr=DEVNULL)
        # Extract only postgres dir to update system uuid
        subprocess.check_call(
            ['tar', '-x',
             '--directory=' + tmpdir,
             '-f', path_to_archive + '.tar',
             'postgres'],
            stdout=DEVNULL, stderr=DEVNULL)
        update_sysuuid_in_archive(tmpdir)
        subprocess.check_call(
            ['tar', '--update',
             '--directory=' + tmpdir,
             '-f', path_to_archive + '.tar',
             'postgres'],
            stdout=DEVNULL, stderr=DEVNULL)
        subprocess.check_call(['gzip', path_to_archive + '.tar'])
        shutil.move(path_to_archive + '.tar.gz', path_to_archive + '.tgz')
    except Exception as e:
        LOG.error("Update of system archive {} failed {}".format(
            path_to_archive, str(e)))
        raise CloneFail("Failed to update system archive")
    finally:
        shutil.rmtree(tmpdir, ignore_errors=True)
def config_worker():
    """
    Enable worker functionality for AIO system.

    :return: True if worker-config-complete is executed
    :raises CloneFail: if worker config does not complete (the node is
        normally expected to reboot during the wait loop)
    """
    if utils.get_system_type() == si_const.TIS_AIO_BUILD:
        console_log("Applying worker manifests for {}. "
                    "Node will reboot on completion."
                    .format(utils.get_controller_hostname()))
        sysinv.do_worker_config_complete(utils.get_controller_hostname())
        time.sleep(30)
        # worker-config-complete has no logs to console. So, wait
        # for some time before showing the login prompt.
        for i in range(1, 10):
            console_log("worker-config in progress..")
            time.sleep(30)
        console_log("Timed out on do_worker_config_complete")
        raise CloneFail("Timed out on do_worker_config_complete")
        # NOTE(review): this return is unreachable — the raise above
        # always fires; the documented True return relies on the node
        # rebooting before the loop ends. TODO confirm intent.
        return True
    else:
        # worker_config_complete is not needed.
        return False
def finalize_install():
    """Complete the installation.

    Removes stale dnsmasq leases, retries the sysinv system-parameter
    update until sysinv is ready, then runs the post-install cleanup
    script (best-effort).

    :raises CloneFail: if the system update never succeeds
    """
    subprocess.call(["rm", "-f", tsconfig.CONFIG_PATH + '/dnsmasq.leases'])
    console_log("Updating system parameters...")
    i = 1
    system_update = False
    # Retries if sysinv is not yet ready
    while i < 10:
        time.sleep(20)
        LOG.info("Attempt %d to update system parameters..." % i)
        try:
            if sysinv.update_clone_system('Cloned_from_' + clone_name,
                                          utils.get_controller_hostname()):
                system_update = True
                break
        except Exception:
            # Sysinv might not be ready yet
            pass
        i += 1
    if not system_update:
        LOG.error("System update failed")
        raise CloneFail("System update failed")
    try:
        output = subprocess.check_output(["finish_install_clone.sh"],
                                         stderr=subprocess.STDOUT)
        LOG.info("finish_install_clone out: {}".format(output))
    except Exception:
        # Best-effort cleanup; failure only warrants a console warning.
        console_log("Failed to cleanup stale OpenStack resources. "
                    "Manually delete the Volumes and Instances.")
def cleanup():
    """Cleanup after installation: disable the one-shot service and
    archive the clone INI file under the platform config path."""
    LOG.info("Cleaning up...")
    # Best effort; errors from systemctl are discarded.
    subprocess.call(['systemctl', 'disable', 'install-clone'], stderr=DEVNULL)
    stale_ini = os.path.join(tsconfig.PLATFORM_CONF_PATH, clone.CLONE_ISO_INI)
    if os.path.exists(stale_ini):
        os.remove(stale_ini)
    if os.path.exists(INI_FILE):
        # Preserve the INI read-only for post-mortem inspection.
        os.chmod(INI_FILE, 0o400)
        shutil.move(INI_FILE, tsconfig.PLATFORM_CONF_PATH)
    shutil.rmtree(os.path.join("/", clone.CLONE_ARCHIVE_DIR),
                  ignore_errors=True)
# Script entry point: runs at boot (via the install-clone service) to
# install a cloned system image described by INI_FILE.
log.configure()
if os.path.exists(INI_FILE):
    try:
        parser.read(INI_FILE)
        if parser.has_section(SECTION):
            clone_name = parser.get(SECTION, clone.NAME)
            LOG.info("System archive [%s] to be installed." % clone_name)
            first_boot = False
            last_result = clone.IN_PROGRESS
            if not parser.has_option(SECTION, clone.RESULT):
                # first boot after cloning
                first_boot = True
            else:
                last_result = parser.get(SECTION, clone.RESULT)
                LOG.info("Last attempt to install clone was [{}]"
                         .format(last_result))
            if last_result == clone.IN_PROGRESS:
                if first_boot:
                    # First boot: load the cloned sysinv DB from the archive.
                    update_db(os.path.join("/", clone.CLONE_ARCHIVE_DIR),
                              clone_name + '_system')
                else:
                    # Booting up after patch application, do validation
                    validate_hardware_compatibility()
                console_log("+++++ Starting to install clone-image [{}] +++++"
                            .format(clone_name))
                set_result(clone.IN_PROGRESS)
                clone_arch_path = os.path.join("/", clone.CLONE_ARCHIVE_DIR,
                                               clone_name)
                if (backup_restore.RESTORE_RERUN_REQUIRED ==
                        backup_restore.restore_system(
                            clone_arch_path + "_system.tgz",
                            clone=True)):
                    # If there are no patches to be applied, run validation
                    # code and resume restore. If patches were applied, the
                    # node reboots and validation runs after reboot.
                    validate_hardware_compatibility()
                    LOG.info("validate passed, resuming restore...")
                    backup_restore.restore_system(
                        clone_arch_path + "_system.tgz", clone=True)
                console_log("System archive installed from [%s]" % clone_name)
                backup_restore.restore_images(clone_arch_path + "_images.tgz",
                                              clone=True)
                console_log("Images archive installed from [%s]" % clone_name)
                finalize_install()
                set_result(clone.OK)
                if not config_worker():
                    # do cleanup if worker_config_complete is not required
                    cleanup()
            elif last_result == clone.OK:
                # Installation completed successfully before last reboot
                cleanup()
            # NOTE(review): a last_result of FAIL falls through silently --
            # presumably intentional so a failed install is not retried.
        else:
            # INI file exists but has no clone section: corrupted input.
            LOG.error("Bad file: {}".format(INI_FILE))
            set_result(clone.FAIL)
            exit(1)
    except Exception as e:
        # Any unexpected failure marks the clone install as failed.
        console_log("Clone [%s] installation failed" % clone_name)
        LOG.exception("install failed")
        set_result(clone.FAIL)
        exit(1)
else:
    console_log("nothing to do, Not installing clone?")

View File

@ -1,30 +0,0 @@
#!/usr/bin/python3
#
# Copyright (c) 2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import keyring
import os
import sys
def get_stealth_password():
    """Get the stealth password vault for manifest to run.

    Temporarily points XDG_DATA_HOME at /tmp so keyring reads the staging
    vault, then restores the caller's environment.
    """
    orig_root = os.environ.get('XDG_DATA_HOME', None)
    os.environ["XDG_DATA_HOME"] = "/tmp"
    try:
        stealth_pw = keyring.get_password("CGCS", "admin")
    finally:
        # Restore the environment even if keyring raises.
        if orig_root is not None:
            # Bug fix: os.environ is a mapping, not callable -- the
            # original `os.environ("XDG_DATA_HOME", orig_root)` raised
            # TypeError instead of restoring the variable.
            os.environ["XDG_DATA_HOME"] = orig_root
        else:
            del os.environ["XDG_DATA_HOME"]
    return stealth_pw
if __name__ == "__main__":
    # Emit the password on stdout with no trailing newline so callers
    # (e.g. shell command substitution) capture it verbatim.
    sys.stdout.write(get_stealth_password())
    sys.stdout.flush()
    sys.exit(0)

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2015-2017 Wind River Systems, Inc. # Copyright (c) 2015-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
@ -18,9 +18,6 @@ setup(
include_package_data=False, include_package_data=False,
entry_points={ entry_points={
'console_scripts': [ 'console_scripts': [
'config_controller = controllerconfig.systemconfig:main',
'config_region = controllerconfig.regionconfig:region_main',
'config_subcloud = controllerconfig.regionconfig:subcloud_main',
'config_management = controllerconfig.config_management:main', 'config_management = controllerconfig.config_management:main',
'upgrade_controller = controllerconfig.upgrades.controller:main', 'upgrade_controller = controllerconfig.upgrades.controller:main',
'upgrade_controller_simplex = ' 'upgrade_controller_simplex = '

View File

@ -4,7 +4,7 @@
# and then run "tox" from this directory. # and then run "tox" from this directory.
[tox] [tox]
envlist = flake8, py27, pylint envlist = flake8, pylint
# Tox does not work if the path to the workdir is too long, so move it to /tmp # Tox does not work if the path to the workdir is too long, so move it to /tmp
toxworkdir = /tmp/{env:USER}_cctox toxworkdir = /tmp/{env:USER}_cctox
stxdir = {toxinidir}/../../.. stxdir = {toxinidir}/../../..
@ -41,21 +41,13 @@ commands = flake8 {posargs}
# H101: Use TODO(NAME) # H101: Use TODO(NAME)
# H102: Apache 2.0 license header not found # H102: Apache 2.0 license header not found
# H104: File contains nothing but comments # H104: File contains nothing but comments
# H238: old style class declaration, use new style (inherit from `object`)
# H306: imports not in alphabetical order # H306: imports not in alphabetical order
# H401: docstring should not start with a space # H401: docstring should not start with a space
# H403: multi line docstrings should end on a new line
# H404: multi line docstring should start without a leading new line # H404: multi line docstring should start without a leading new line
# H405: multi line docstring summary not separated with an empty line # H405: multi line docstring summary not separated with an empty line
ignore = H101,H102,H104,H238,H306,H401,H403,H404,H405 ignore = H101,H102,H104,H306,H401,H404,H405
exclude = build exclude = build
[testenv:py27]
basepython = python2.7
commands =
find . -type f -name "*.pyc" -delete
py.test {posargs}
[testenv:cover] [testenv:cover]
basepython = python2.7 basepython = python2.7
deps = {[testenv]deps} deps = {[testenv]deps}

View File

@ -14,9 +14,9 @@ import psycopg2
import sys import sys
from psycopg2.extras import RealDictCursor from psycopg2.extras import RealDictCursor
from controllerconfig.common import log from oslo_log import log
LOG = log.get_logger(__name__) LOG = log.getLogger(__name__)
def main(): def main():

View File

@ -13,9 +13,9 @@ import sys
from sysinv.common import constants from sysinv.common import constants
from psycopg2.extras import RealDictCursor from psycopg2.extras import RealDictCursor
from controllerconfig.common import log from oslo_log import log
LOG = log.get_logger(__name__) LOG = log.getLogger(__name__)
# Sections that need to be removed from retired Ceph cache tiering feature # Sections that need to be removed from retired Ceph cache tiering feature
SERVICE_PARAM_SECTION_CEPH_CACHE_TIER = 'cache_tiering' SERVICE_PARAM_SECTION_CEPH_CACHE_TIER = 'cache_tiering'

View File

@ -55,10 +55,7 @@ Configuration for the Controller node.
#install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/ #install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/
install -d -m 755 %{buildroot}%{local_bindir} install -d -m 755 %{buildroot}%{local_bindir}
install -p -D -m 700 scripts/keyringstaging %{buildroot}%{local_bindir}/keyringstaging
install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
install -p -D -m 700 scripts/install_clone.py %{buildroot}%{local_bindir}/install_clone
install -p -D -m 700 scripts/finish_install_clone.sh %{buildroot}%{local_bindir}/finish_install_clone.sh
install -d -m 755 %{buildroot}%{local_goenabledd} install -d -m 755 %{buildroot}%{local_goenabledd}
install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh

View File

@ -21,7 +21,7 @@
# -- Project information ----------------------------------------------------- # -- Project information -----------------------------------------------------
project = u'stx-config' project = u'StarlingX Configuration'
copyright = u'2018, StarlingX' copyright = u'2018, StarlingX'
author = u'StarlingX' author = u'StarlingX'

View File

@ -1,32 +1,35 @@
======================== ===============================
stx-config Documentation StarlingX Configuration Project
======================== ===============================
This is the documentation for StarlingX system configuration management. The starlingx/config project provides system configuration management.
Release Notes -------------
Release notes
------------- -------------
.. toctree:: .. toctree::
:maxdepth: 1 :maxdepth: 1
Release Notes <https://docs.starlingx.io/releasenotes/stx-config> Release notes <https://docs.starlingx.io/releasenotes/config>
API Reference -------------
API reference
------------- -------------
.. toctree:: .. toctree::
:maxdepth: 1 :maxdepth: 1
API Reference <https://docs.starlingx.io/api-ref/stx-config> API reference <https://docs.starlingx.io/api-ref/config>
-----
Links Links
----- -----
* Source: `stx-config`_ * Source: `starlingx/config`_
* Code Review: `Gerrit`_ * Code review: `Gerrit`_
* Bugs: `Storyboard`_ * Project tracking: `Storyboard`_
.. _stx-config: https://opendev.org/starlingx/config/ .. _starlingx/config: https://opendev.org/starlingx/config/
.. _Gerrit: https://review.opendev.org/#/q/project:starlingx/config .. _Gerrit: https://review.opendev.org/#/q/project:starlingx/config
.. _Storyboard: https://storyboard.openstack.org/#!/project/starlingx/config .. _Storyboard: https://storyboard.openstack.org/#!/project/starlingx/config

View File

@ -57,7 +57,7 @@ source_suffix = '.rst'
master_doc = 'index' master_doc = 'index'
# General information about the project. # General information about the project.
project = 'stx-config' project = u'StarlingX Configuration'
# Release notes are version independent, no need to set version and release # Release notes are version independent, no need to set version and release
release = '' release = ''

View File

@ -1,6 +1,6 @@
======================== =====================================
stx-config Release Notes StarlingX Configuration Release Notes
======================== =====================================
.. toctree:: .. toctree::
:maxdepth: 2 :maxdepth: 2

View File

@ -1,2 +1,2 @@
SRC_DIR="cgts-client" SRC_DIR="cgts-client"
TIS_PATCH_VER=73 TIS_PATCH_VER=74

View File

@ -18,6 +18,9 @@ Requires: python3-prettytable
Requires: bash-completion Requires: bash-completion
Requires: python3-neutronclient Requires: python3-neutronclient
Requires: python3-keystoneclient Requires: python3-keystoneclient
Requires: python3-oslo-i18n
Requires: python3-oslo-serialization
Requires: python3-oslo-utils
# Needed for python2 and python3 compatible # Needed for python2 and python3 compatible
Requires: python3-six Requires: python3-six

View File

@ -0,0 +1,8 @@
[run]
branch = True
source = cgtsclient
omit = cgtsclient/tests/*
[report]
ignore_errors = True

View File

@ -0,0 +1,32 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""oslo.i18n integration module.
See https://docs.openstack.org/oslo.i18n/latest/user/usage.html
"""
import oslo_i18n
# Translation domain used for this client's message catalogs.
DOMAIN = 'python-cgtsclient'

_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)

# The primary translation function using the well-known name "_"
_ = _translators.primary

# The contextual translation function using the name "_C"
# requires oslo.i18n >=2.1.0
_C = _translators.contextual_form

# The plural translation function using the name "_P"
# requires oslo.i18n >=2.1.0
_P = _translators.plural_form


def get_available_languages():
    """Return the languages that have translations for this domain."""
    return oslo_i18n.get_available_languages(DOMAIN)

View File

@ -4,9 +4,9 @@
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
from cgtsclient._i18n import _
from cgtsclient.common import utils from cgtsclient.common import utils
from cgtsclient import exc from cgtsclient import exc
from cgtsclient.openstack.common.gettextutils import _
def _get_ksclient(**kwargs): def _get_ksclient(**kwargs):

View File

@ -119,6 +119,22 @@ class Resource(object):
else: else:
return self.__dict__[k] return self.__dict__[k]
# deepcopy is invoked on this object which causes infinite recursion in python3
# unless the copy and deepcopy methods are overridden
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
def __repr__(self): def __repr__(self):
reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and
k != 'manager') k != 'manager')

View File

@ -528,7 +528,7 @@ class ResponseBodyIterator(object):
def __iter__(self): def __iter__(self):
while True: while True:
yield self.next() yield six.next()
def next(self): def next(self):
chunk = self.resp.read(CHUNKSIZE) chunk = self.resp.read(CHUNKSIZE)

View File

@ -41,13 +41,10 @@ from prettytable import NONE
from datetime import datetime from datetime import datetime
from dateutil import parser from dateutil import parser
from cgtsclient import exc
from cgtsclient.openstack.common import importutils
from functools import wraps from functools import wraps
# noinspection PyProtectedMember from cgtsclient import exc
from wrapping_formatters import _get_width from oslo_utils import importutils
from cgtsclient.common import wrapping_formatters from cgtsclient.common import wrapping_formatters
from six.moves import input from six.moves import input
@ -300,7 +297,7 @@ def pt_builder(field_labels, fields, formatters, paging, printer=default_printer
output = self.pt.get_string() output = self.pt.get_string()
if wrapping_formatters.is_nowrap_set(): if wrapping_formatters.is_nowrap_set():
return output return output
output_width = _get_width(output) output_width = wrapping_formatters._get_width(output)
if output_width <= self.terminal_width: if output_width <= self.terminal_width:
return output return output
# At this point pretty Table (self.pt) does not fit the terminal width so let's # At this point pretty Table (self.pt) does not fit the terminal width so let's
@ -476,7 +473,7 @@ def print_dict_with_format(data, wrap=0, output_format=None):
def print_dict_value(d): def print_dict_value(d):
# Print values on a single line separated by spaces # Print values on a single line separated by spaces
# e.g. 'available ntp' # e.g. 'available ntp'
print (' '.join(map(str, d.values()))) print(' '.join(map(str, d.values())))
def print_dict(d, dict_property="Property", wrap=0): def print_dict(d, dict_property="Property", wrap=0):

View File

@ -801,6 +801,7 @@ def _simpleTestHarness(no_wrap):
print("nowrap = {}".format(is_nowrap_set())) print("nowrap = {}".format(is_nowrap_set()))
if __name__ == "__main__": if __name__ == "__main__":
_simpleTestHarness(True) _simpleTestHarness(True)
_simpleTestHarness(False) _simpleTestHarness(False)

View File

@ -1,14 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,14 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,254 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 SINA Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Zhongyue Luo, SINA Corporation.
#
"""Extracts OpenStack config option info from module(s)."""
import imp
import os
import re
import six
import socket
import sys
import textwrap
from oslo_config import cfg
from cgtsclient.openstack.common import gettextutils
from cgtsclient.openstack.common import importutils
gettextutils.install('python-cgtsclient')
# Names of the oslo.config option classes, matched against str(type(opt)).
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
MULTISTROPT = "MultiStrOpt"

# Human-readable description appended to each option's help text.
OPT_TYPES = {
    STROPT: 'string value',
    BOOLOPT: 'boolean value',
    INTOPT: 'integer value',
    FLOATOPT: 'floating point value',
    LISTOPT: 'list value',
    MULTISTROPT: 'multi valued',
}

# Running total of printed options; updated by print_group_opts().
OPTION_COUNT = 0

# Extracts the option class name from str(type(opt)).
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
                                              FLOATOPT, LISTOPT,
                                              MULTISTROPT]))

PY_EXT = ".py"
# Assumed repository root, four directory levels above this file.
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                       "../../../../"))
# Column width used when wrapping option help text.
WORDWRAP_WIDTH = 60
def generate(srcfiles):
    """Print a sample configuration built from the options in *srcfiles*."""
    # Group module path strings by their top-level package name.
    mods_by_pkg = {}
    for filepath in srcfiles:
        parts = filepath.split(os.sep)
        pkg_name = parts[1]
        mod_str = '.'.join(['.'.join(parts[:-1]),
                            os.path.basename(filepath).split('.')[0]])
        mods_by_pkg.setdefault(pkg_name, []).append(mod_str)
    # NOTE(lzyeval): place top level modules before packages
    pkg_names = sorted(x for x in mods_by_pkg if x.endswith(PY_EXT))
    ext_names = sorted(x for x in mods_by_pkg if x not in pkg_names)
    pkg_names.extend(ext_names)
    # opts_by_group maps group name -> list of (module, options) tuples
    opts_by_group = {'DEFAULT': []}
    for pkg_name in pkg_names:
        for mod_str in sorted(mods_by_pkg.get(pkg_name)):
            if mod_str.endswith('.__init__'):
                mod_str = mod_str[:mod_str.rfind(".")]
            mod_obj = _import_module(mod_str)
            if not mod_obj:
                # Modules that fail to import are skipped silently here;
                # _import_module already reported the error.
                continue
            for group, opts in _list_opts(mod_obj):
                opts_by_group.setdefault(group, []).append((mod_str, opts))
    print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
    for group, opts in opts_by_group.items():
        print_group_opts(group, opts)
    print("# Total option count: %d" % OPTION_COUNT)
def _import_module(mod_str):
    """Import *mod_str* and return the module object, or None on failure."""
    try:
        if mod_str.startswith('bin.'):
            # Scripts under bin/ are loaded from source rather than sys.path.
            imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
            return sys.modules[mod_str[4:]]
        return importutils.import_module(mod_str)
    except ImportError as ie:
        sys.stderr.write("%s\n" % str(ie))
        return None
    except Exception:
        # Any other import-time failure is skipped without a message.
        return None
def _is_in_group(opt, group):
"Check if opt is in group."
for key, value in group._opts.items():
if value['opt'] == opt:
return True
return False
def _guess_groups(opt, mod_obj):
    """Return the name of the config group *opt* is registered under."""
    # The DEFAULT group is checked first.
    if _is_in_group(opt, cfg.CONF):
        return 'DEFAULT'
    # Otherwise scan every group attribute registered on the global CONF.
    for key, value in cfg.CONF.items():
        if (isinstance(value, cfg.CONF.GroupAttr)
                and _is_in_group(opt, value._group)):
            return value._group.name
    raise RuntimeError(
        "Unable to find group for option %s, "
        "maybe it's defined twice in the same group?"
        % opt.name
    )
def _list_opts(obj):
    """Collect cfg.Opt instances on *obj*, grouped by their config group."""
    def _is_option(candidate):
        # SubCommandOpt instances cannot be rendered in a sample config.
        return (isinstance(candidate, cfg.Opt) and
                not isinstance(candidate, cfg.SubCommandOpt))

    found = []
    for attr_name in dir(obj):
        attr = getattr(obj, attr_name)
        if _is_option(attr):
            found.append(attr)
        elif isinstance(attr, list) and all(_is_option(x) for x in attr):
            found.extend(attr)
    grouped = {}
    for opt in found:
        grouped.setdefault(_guess_groups(opt, obj), []).append(opt)
    return grouped.items()
def print_group_opts(group, opts_by_module):
    """Print one [group] section listing the options from each module."""
    global OPTION_COUNT
    print("[%s]" % group)
    print('')
    for mod, opts in opts_by_module:
        OPTION_COUNT += len(opts)
        # Banner naming the module the following options come from.
        for banner_line in ('#', '# Options defined in %s' % mod, '#', ''):
            print(banner_line)
        for opt in opts:
            _print_opt(opt)
        print('')
def _get_my_ip():
    """Best-effort discovery of this host's outbound IP; None on failure."""
    try:
        probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Connecting a UDP socket performs no traffic; it only selects the
        # local interface that would route to the target.
        probe.connect(('8.8.8.8', 80))
        addr, _port = probe.getsockname()
        probe.close()
        return addr
    except socket.error:
        return None
def _sanitize_default(s):
    """Set up a reasonably sensible default for pybasedir, my_ip and host."""
    if s.startswith(BASEDIR):
        return s.replace(BASEDIR, '/usr/lib/python/site-packages')
    if BASEDIR in s:
        return s.replace(BASEDIR, '')
    if s == _get_my_ip():
        return '10.0.0.1'
    if s == socket.gethostname():
        return 'python-cgtsclient'
    if s.strip() != s:
        # Quote values with leading/trailing whitespace so it survives.
        return '"%s"' % s
    return s
def _print_opt(opt):
    """Print one option as commented sample-config lines.

    Writes a warning to stderr for missing help text; exits the process
    on an unrecognized option type or a default that fails its type
    assertion.
    """
    opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
    if not opt_help:
        sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
        # Bug fix: opt.help may be None, and appending the type suffix to
        # None raised TypeError; fall back to an empty string.
        opt_help = ''
    opt_type = None
    try:
        # The option class name (StrOpt, IntOpt, ...) is recovered from
        # the repr of the option's type.
        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
    except (ValueError, AttributeError) as err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    opt_help += ' (' + OPT_TYPES[opt_type] + ')'
    print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
    try:
        if opt_default is None:
            print('#%s=<None>' % opt_name)
        elif opt_type == STROPT:
            assert(isinstance(opt_default, six.string_types))
            print('#%s=%s' % (opt_name, _sanitize_default(opt_default)))
        elif opt_type == BOOLOPT:
            assert(isinstance(opt_default, bool))
            print('#%s=%s' % (opt_name, str(opt_default).lower()))
        elif opt_type == INTOPT:
            assert(isinstance(opt_default, int) and
                   not isinstance(opt_default, bool))
            print('#%s=%s' % (opt_name, opt_default))
        elif opt_type == FLOATOPT:
            assert(isinstance(opt_default, float))
            print('#%s=%s' % (opt_name, opt_default))
        elif opt_type == LISTOPT:
            assert(isinstance(opt_default, list))
            print('#%s=%s' % (opt_name, ','.join(opt_default)))
        elif opt_type == MULTISTROPT:
            assert(isinstance(opt_default, list))
            if not opt_default:
                opt_default = ['']
            for default in opt_default:
                print('#%s=%s' % (opt_name, default))
        print('')
    except Exception:
        sys.stderr.write('Error in option "%s"\n' % opt_name)
        sys.exit(1)
def main():
    """CLI entry point: print a sample config for the given source files."""
    if len(sys.argv) < 2:
        print("usage: %s [srcfile]...\n" % sys.argv[0])
        sys.exit(0)
    generate(sys.argv[1:])


if __name__ == '__main__':
    main()

View File

@ -1,50 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from cgts.openstack.common.gettextutils import _
"""
import gettext
import os
# Locale directory override via the CGTSCLIENT_LOCALEDIR env variable.
_localedir = os.environ.get('cgtsclient'.upper() + '_LOCALEDIR')
_t = gettext.translation('cgtsclient', localedir=_localedir, fallback=True)


def _(msg):
    # NOTE(review): .ugettext exists only on Python 2 translation objects;
    # Python 3 would need .gettext -- confirm the intended interpreter.
    return _t.ugettext(msg)
def install(domain):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).
    """
    # NOTE(review): the `unicode` keyword was removed from
    # gettext.install() in Python 3; this call is Python-2-only.
    gettext.install(domain,
                    localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
                    unicode=True)

View File

@ -1,66 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
    """Return a class given its fully qualified dotted path."""
    module_path, _unused, class_name = import_str.rpartition('.')
    try:
        __import__(module_path)
        module = sys.modules[module_path]
        return getattr(module, class_name)
    except (ValueError, AttributeError):
        # ValueError: empty module path; AttributeError: missing class.
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_name,
                           traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
    """Import a class and return an instance of it."""
    cls = import_class(import_str)
    return cls(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
    """Instantiate a class, preferring *name_space* as a path prefix.

    First tries ``name_space.import_str``; if that cannot be imported,
    falls back to treating *import_str* as a full dotted path.
    """
    qualified = "%s.%s" % (name_space, import_str)
    try:
        return import_class(qualified)(*args, **kwargs)
    except ImportError:
        return import_class(import_str)(*args, **kwargs)
def import_module(import_str):
    """Import and return the module named by *import_str*."""
    __import__(import_str)
    module = sys.modules[import_str]
    return module
def try_import(import_str, default=None):
    """Attempt an import, returning *default* when the import fails."""
    try:
        module = import_module(import_str)
    except ImportError:
        return default
    return module

View File

@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,118 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Root wrapper for OpenStack services
"""
from __future__ import print_function
import logging
import os
import pwd
import signal
import subprocess
import sys
from six.moves import configparser
# Exit codes returned to the caller when the wrapped command cannot run.
RC_UNAUTHORIZED = 99   # command rejected by every configured filter
RC_NOCOMMAND = 98      # no command supplied on the command line
RC_BADCONFIG = 97      # rootwrap configuration file missing or invalid
RC_NOEXECFOUND = 96    # a filter matched but its executable was not found
def _subprocess_setup():
    """preexec_fn: reset SIGPIPE to the OS default in the child process."""
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def _exit_error(execname, message, errorcode, log=True):
print("%s: %s" % (execname, message))
if log:
logging.error(message)
sys.exit(errorcode)
def main():
    """Rootwrap entry point: run a whitelisted command with privileges.

    argv layout: <execname> <configfile> <command> [args...]
    Exits with the wrapped command's return code, or an RC_* code when
    the command cannot be validated or executed.
    """
    # Split arguments, require at least a command
    execname = sys.argv.pop(0)
    if len(sys.argv) < 2:
        _exit_error(execname, "No command specified", RC_NOCOMMAND, log=False)
    configfile = sys.argv.pop(0)
    userargs = sys.argv[:]
    # Add ../ to sys.path to allow running from branch
    possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname),
                                                    os.pardir, os.pardir))
    if os.path.exists(os.path.join(possible_topdir, "cgtsclient",
                                   "__init__.py")):
        sys.path.insert(0, possible_topdir)
    # Imported late so the sys.path adjustment above takes effect.
    from cgtsclient.openstack.common.rootwrap import wrapper
    # Load configuration
    try:
        rawconfig = configparser.RawConfigParser()
        rawconfig.read(configfile)
        config = wrapper.RootwrapConfig(rawconfig)
    except ValueError as exc:
        # NOTE(review): exc.message is Python-2-only; on Python 3 this
        # line itself would raise AttributeError -- confirm interpreter.
        msg = "Incorrect value in %s: %s" % (configfile, exc.message)
        _exit_error(execname, msg, RC_BADCONFIG, log=False)
    except configparser.Error:
        _exit_error(execname, "Incorrect configuration file: %s" % configfile,
                    RC_BADCONFIG, log=False)
    if config.use_syslog:
        wrapper.setup_syslog(execname,
                             config.syslog_log_facility,
                             config.syslog_log_level)
    # Execute command if it matches any of the loaded filters
    filters = wrapper.load_filters(config.filters_path)
    try:
        filtermatch = wrapper.match_filter(filters, userargs,
                                           exec_dirs=config.exec_dirs)
        if filtermatch:
            command = filtermatch.get_command(userargs,
                                              exec_dirs=config.exec_dirs)
            if config.use_syslog:
                logging.info("(%s > %s) Executing %s (filter match = %s)" % (
                    os.getlogin(), pwd.getpwuid(os.getuid())[0],
                    command, filtermatch.name))
            # Run the privileged command inheriting our stdio; the child
            # resets SIGPIPE to the default via _subprocess_setup.
            obj = subprocess.Popen(command,
                                   stdin=sys.stdin,
                                   stdout=sys.stdout,
                                   stderr=sys.stderr,
                                   preexec_fn=_subprocess_setup,
                                   env=filtermatch.get_environment(userargs))
            obj.wait()
            sys.exit(obj.returncode)
    except wrapper.FilterMatchNotExecutable as exc:
        msg = ("Executable not found: %s (filter match = %s)"
               % (exc.match.exec_path, exc.match.name))
        _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog)
    except wrapper.NoFilterMatched:
        msg = ("Unauthorized command: %s (no filter matched)"
               % ' '.join(userargs))
        _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog)

View File

@ -1,228 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
class CommandFilter(object):
    """Command filter only checking that the 1st argument matches exec_path."""

    def __init__(self, exec_path, run_as, *args):
        self.name = ''
        self.exec_path = exec_path
        self.run_as = run_as
        self.args = args
        self.real_exec = None  # cached by get_exec()

    def get_exec(self, exec_dirs=None):
        """Returns existing executable, or empty string if none found.

        The result is cached on the instance after the first call.
        """
        # Bug fix: the original default `exec_dirs=[]` was a shared
        # mutable default argument; use a None sentinel instead.
        exec_dirs = exec_dirs or []
        if self.real_exec is not None:
            return self.real_exec
        self.real_exec = ""
        if self.exec_path.startswith('/'):
            # Absolute path: accept it only if it is executable.
            if os.access(self.exec_path, os.X_OK):
                self.real_exec = self.exec_path
        else:
            # Relative path: search the configured executable directories.
            for binary_path in exec_dirs:
                expanded_path = os.path.join(binary_path, self.exec_path)
                if os.access(expanded_path, os.X_OK):
                    self.real_exec = expanded_path
                    break
        return self.real_exec

    def match(self, userargs):
        """Only check that the first argument (command) matches exec_path."""
        return os.path.basename(self.exec_path) == userargs[0]

    def get_command(self, userargs, exec_dirs=None):
        """Returns command to execute (with sudo -u if run_as != root)."""
        to_exec = self.get_exec(exec_dirs=exec_dirs or []) or self.exec_path
        if (self.run_as != 'root'):
            # Used to run commands at lesser privileges
            return ['sudo', '-u', self.run_as, to_exec] + userargs[1:]
        return [to_exec] + userargs[1:]

    def get_environment(self, userargs):
        """Returns specific environment to set, None if none."""
        return None
class RegExpFilter(CommandFilter):
    """Command filter matching every argument against a regexp."""
    def match(self, userargs):
        # Argument count must line up exactly with the configured patterns.
        if len(self.args) != len(userargs):
            # DENY: argument numbers don't match
            return False
        # Anchor each pattern at the end of the string so partial matches
        # are rejected; a malformed pattern denies the command outright.
        for pattern, candidate in zip(self.args, userargs):
            try:
                if not re.match(pattern + '$', candidate):
                    # DENY: this argument did not match its pattern
                    return False
            except re.error:
                # DENY: badly-formed filter pattern
                return False
        # ALLOW: all arguments matched
        return True
class PathFilter(CommandFilter):
    """Command filter checking that path arguments stay inside given dirs.

    Each configured argument constraint is one of:
      1) 'pass'    - accept the argument unchanged
      2) a string  - the argument must equal it exactly
      3) abs path  - the (canonicalized) argument must live under that dir

    A typical rootwrapper filter entry looks like this:
    # cmdname: filter name, raw command, user, arg_i_constraint [, ...]
    chown: PathFilter, /bin/chown, root, nova, /var/lib/images
    """
    def match(self, userargs):
        command, arguments = userargs[0], userargs[1:]
        counts_ok = len(self.args) == len(arguments)
        # The executable itself is checked by the base class.
        exec_ok = super(PathFilter, self).match(userargs)
        # Non-path constraints: literal equality, or the 'pass' wildcard.
        literals_ok = all(
            spec == 'pass' or spec == value
            for spec, value in zip(self.args, arguments)
            if not os.path.isabs(spec)  # arguments not specifying abs paths
        )
        # Path constraints: the canonicalized argument must keep the base
        # directory as a common prefix.
        paths_ok = all(
            os.path.commonprefix([spec, os.path.realpath(value)]) == spec
            for spec, value in zip(self.args, arguments)
            if os.path.isabs(spec)  # arguments specifying abs paths
        )
        return (counts_ok and
                exec_ok and
                literals_ok and
                paths_ok)
    def get_command(self, userargs, exec_dirs=[]):
        command, arguments = userargs[0], userargs[1:]
        # Canonicalize path-constrained values; copy other args as is.
        cooked = [os.path.realpath(value) if os.path.isabs(spec) else value
                  for spec, value in zip(self.args, arguments)]
        return super(PathFilter, self).get_command([command] + cooked,
                                                   exec_dirs)
class DnsmasqFilter(CommandFilter):
    """Filter for dnsmasq invocations carrying their config via env vars.

    Expected command shape:
    env CONFIG_FILE=<path> NETWORK_ID=<id> dnsmasq <args...>
    """
    CONFIG_FILE_ARG = 'CONFIG_FILE'
    def match(self, userargs):
        # Accept only the exact 'env CONFIG_FILE=.. NETWORK_ID=.. dnsmasq'
        # prefix; anything else is denied.
        return (userargs[0] == 'env' and
                userargs[1].startswith(self.CONFIG_FILE_ARG) and
                userargs[2].startswith('NETWORK_ID=') and
                userargs[3] == 'dnsmasq')
    def get_command(self, userargs, exec_dirs=[]):
        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
        # Strip everything up to and including the 'dnsmasq' token.
        dnsmasq_pos = userargs.index('dnsmasq')
        return [to_exec] + userargs[dnsmasq_pos + 1:]
    def get_environment(self, userargs):
        """Return the current environment plus the two expected variables."""
        environment = os.environ.copy()
        environment[self.CONFIG_FILE_ARG] = userargs[1].split('=')[-1]
        environment['NETWORK_ID'] = userargs[2].split('=')[-1]
        return environment
class DeprecatedDnsmasqFilter(DnsmasqFilter):
    """Variant of dnsmasq filter to support old-style FLAGFILE."""
    # Legacy deployments exported the config path as FLAGFILE instead of
    # CONFIG_FILE; only the expected env variable name differs from the
    # parent class.
    CONFIG_FILE_ARG = 'FLAGFILE'
class KillFilter(CommandFilter):
    """Specific filter for the kill calls.

    1st argument is the user to run /bin/kill under
    2nd argument is the location of the affected executable
    Subsequent arguments list the accepted signals (if any)

    This filter relies on /proc to accurately determine affected
    executable, so it will only work on procfs-capable systems (not OSX).
    """
    def __init__(self, *args):
        # The executable is pinned to /bin/kill; configuration supplies
        # run_as, the target binary path and the optional signal list.
        super(KillFilter, self).__init__("/bin/kill", *args)
    def match(self, userargs):
        if userargs[0] != "kill":
            return False
        args = list(userargs)
        if len(args) == 3:
            # A specific signal is requested
            signal = args.pop(1)
            if signal not in self.args[1:]:
                # Requested signal not in accepted list
                return False
        else:
            if len(args) != 2:
                # Incorrect number of arguments
                return False
            if len(self.args) > 1:
                # No signal requested, but filter requires specific signal
                return False
        try:
            # Resolve the binary actually running as the target PID;
            # raises OSError for a dead/forbidden PID, ValueError for a
            # non-integer argument -- both mean DENY.
            command = os.readlink("/proc/%d/exe" % int(args[1]))
            # NOTE(yufang521247): /proc/PID/exe may have '\0' on the
            # end, because python doesn't stop at '\0' when reading the
            # target path.
            command = command.split('\0')[0]
            # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on
            # the end if an executable is updated or deleted
            if command.endswith(" (deleted)"):
                command = command[:command.rindex(" ")]
            if command != self.args[0]:
                # Affected executable does not match
                return False
        except (ValueError, OSError):
            # Incorrect PID
            return False
        return True
class ReadFileFilter(CommandFilter):
    """Filter allowing 'cat' of exactly one preconfigured file, as root.

    Used by the utils.read_file_as_root call.
    """
    def __init__(self, file_path, *args):
        # Remember the single file this filter is allowed to expose.
        self.file_path = file_path
        super(ReadFileFilter, self).__init__("/bin/cat", "root", *args)
    def match(self, userargs):
        # Accept only: cat <file_path> -- nothing more, nothing less.
        # (Checks are ordered as in the original filter definition.)
        return (userargs[0] == 'cat' and
                userargs[1] == self.file_path and
                len(userargs) == 2)

View File

@ -1,151 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import logging.handlers
import os
import string
from cgtsclient.openstack.common.rootwrap import filters
from six.moves import configparser
class NoFilterMatched(Exception):
    """Raised when no configured filter matched the requested command."""
class FilterMatchNotExecutable(Exception):
    """Raised when a filter matched but no executable was found for it.

    The offending filter object is carried in the ``match`` attribute.
    """
    def __init__(self, match=None, **kwargs):
        # Keep a handle on the filter that matched so callers can report it.
        self.match = match
class RootwrapConfig(object):
    """Parsed representation of a rootwrap configuration file.

    Reads the [DEFAULT] section of the supplied ConfigParser object:

    * ``filters_path`` (required): comma-separated directories holding
      filter definition files.
    * ``exec_dirs`` (optional): comma-separated directories searched for
      executables; defaults to the current $PATH.
    * ``syslog_log_facility`` (optional): syslog facility name; defaults
      to LOG_SYSLOG.
    * ``syslog_log_level`` (optional): logging level name; defaults to
      ERROR.
    * ``use_syslog`` (optional boolean): defaults to False.

    :raises ValueError: when a facility or level name is not recognized.
    """
    def __init__(self, config):
        # filters_path (the only mandatory option)
        self.filters_path = config.get("DEFAULT", "filters_path").split(",")

        # exec_dirs
        if config.has_option("DEFAULT", "exec_dirs"):
            self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",")
        else:
            # Use system PATH if exec_dirs is not specified
            self.exec_dirs = os.environ["PATH"].split(':')

        # syslog_log_facility: accept either the handler attribute name
        # (e.g. "LOG_LOCAL0") or the short facility name (e.g. "local0").
        if config.has_option("DEFAULT", "syslog_log_facility"):
            v = config.get("DEFAULT", "syslog_log_facility")
            facility_names = logging.handlers.SysLogHandler.facility_names
            self.syslog_log_facility = getattr(logging.handlers.SysLogHandler,
                                               v, None)
            if self.syslog_log_facility is None and v in facility_names:
                self.syslog_log_facility = facility_names.get(v)
            if self.syslog_log_facility is None:
                raise ValueError('Unexpected syslog_log_facility: %s' % v)
        else:
            default_facility = logging.handlers.SysLogHandler.LOG_SYSLOG
            self.syslog_log_facility = default_facility

        # syslog_log_level
        if config.has_option("DEFAULT", "syslog_log_level"):
            v = config.get("DEFAULT", "syslog_log_level")
            self.syslog_log_level = logging.getLevelName(v.upper())
            if (self.syslog_log_level == "Level %s" % v.upper()):
                # getLevelName() returns "Level <name>" for unknown names.
                # (Typo "Unexepected" fixed in the message below.)
                raise ValueError('Unexpected syslog_log_level: %s' % v)
        else:
            self.syslog_log_level = logging.ERROR

        # use_syslog
        if config.has_option("DEFAULT", "use_syslog"):
            self.use_syslog = config.getboolean("DEFAULT", "use_syslog")
        else:
            self.use_syslog = False
def setup_syslog(execname, facility, level):
    """Route root-logger records to syslog at the given facility/level."""
    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    # /dev/log is the local syslog socket on Linux.
    syslog_handler = logging.handlers.SysLogHandler(address='/dev/log',
                                                    facility=facility)
    # Prefix every message with the wrapper's program name.
    syslog_handler.setFormatter(logging.Formatter(
        os.path.basename(execname) + ': %(message)s'))
    root_logger.addHandler(syslog_handler)
def build_filter(class_name, *args):
    """Instantiate the named filter class, or return None when unknown.

    ``class_name`` must name a class in the ``filters`` module; ``args``
    are forwarded to its constructor.
    """
    if not hasattr(filters, class_name):
        # Unknown class names are skipped (with a warning), not fatal.
        logging.warning("Skipping unknown filter class (%s) specified "
                        "in filter definitions" % class_name)
        return None
    return getattr(filters, class_name)(*args)
def load_filters(filters_path):
    """Load filter definitions from a list of directories.

    Every file in each existing directory is parsed as an ini file whose
    [Filters] section maps a filter name to a comma-separated definition
    (class name followed by its constructor arguments).  Unknown filter
    classes are skipped.

    :param filters_path: list of directory paths to scan
    :returns: list of filter objects (empty when no directory exists)
    """
    filterlist = []
    for filterdir in filters_path:
        # Missing directories are silently ignored (best-effort load).
        if not os.path.isdir(filterdir):
            continue
        for filterfile in os.listdir(filterdir):
            filterconfig = configparser.RawConfigParser()
            filterconfig.read(os.path.join(filterdir, filterfile))
            for (name, value) in filterconfig.items("Filters"):
                # BUG FIX: string.strip(s) was removed in Python 3; use
                # the str method instead.
                filterdefinition = [s.strip() for s in value.split(',')]
                newfilter = build_filter(*filterdefinition)
                if newfilter is None:
                    continue
                newfilter.name = name
                filterlist.append(newfilter)
    return filterlist
def match_filter(filter_list, userargs, exec_dirs=[]):
    """Return the first filter that matches the user command.

    Checks user command and arguments through command filters and
    returns the first matching filter whose executable can be resolved.

    :raises NoFilterMatched: when no filter matched.
    :raises FilterMatchNotExecutable: when a filter matched but no
        executable was found for the best match.
    """
    first_not_executable_filter = None
    for candidate in filter_list:
        if not candidate.match(userargs):
            continue
        if candidate.get_exec(exec_dirs=exec_dirs):
            # Matching filter with a resolvable executable: use it.
            return candidate
        # Remember the first match lacking an executable; keep trying
        # other filters in case one of them resolves.
        if first_not_executable_filter is None:
            first_not_executable_filter = candidate
    if first_not_executable_filter:
        # A filter matched, but no executable was found for it
        raise FilterMatchNotExecutable(match=first_not_executable_filter)
    # No filter matched
    raise NoFilterMatched()

View File

@ -349,5 +349,6 @@ def main():
print(e, file=sys.stderr) print(e, file=sys.stderr)
sys.exit(1) sys.exit(1)
if __name__ == "__main__": if __name__ == "__main__":
main() main()

View File

@ -20,3 +20,8 @@ class HealthManager(base.Manager):
path = '/v1/health/upgrade' path = '/v1/health/upgrade'
resp, body = self.api.json_request('GET', path) resp, body = self.api.json_request('GET', path)
return body return body
def get_kube_upgrade(self):
path = '/v1/health/kube-upgrade'
resp, body = self.api.json_request('GET', path)
return body

View File

@ -17,3 +17,8 @@ def do_health_query(cc, args):
def do_health_query_upgrade(cc, args): def do_health_query_upgrade(cc, args):
"""Run the Health Check for an Upgrade.""" """Run the Health Check for an Upgrade."""
print(cc.health.get_upgrade()) print(cc.health.get_upgrade())
def do_health_query_kube_upgrade(cc, args):
"""Run the Health Check for a Kubernetes Upgrade."""
print(cc.health.get_kube_upgrade())

View File

@ -13,10 +13,10 @@ from collections import OrderedDict
import datetime import datetime
import os import os
from cgtsclient._i18n import _
from cgtsclient.common import constants from cgtsclient.common import constants
from cgtsclient.common import utils from cgtsclient.common import utils
from cgtsclient import exc from cgtsclient import exc
from cgtsclient.openstack.common.gettextutils import _
from cgtsclient.v1 import icpu as icpu_utils from cgtsclient.v1 import icpu as icpu_utils
from cgtsclient.v1 import ihost as ihost_utils from cgtsclient.v1 import ihost as ihost_utils
from cgtsclient.v1 import iinterface as iinterface_utils from cgtsclient.v1 import iinterface as iinterface_utils
@ -513,6 +513,7 @@ def _list_storage(cc, host):
fields = ['uuid', 'lvm_pv_name', 'disk_or_part_device_path', 'lvm_vg_name'] fields = ['uuid', 'lvm_pv_name', 'disk_or_part_device_path', 'lvm_vg_name']
utils.print_list(ipvs, fields, field_labels, sortby=0) utils.print_list(ipvs, fields, field_labels, sortby=0)
""" """
NOTE (neid): NOTE (neid):
all three "do_host_apply_<if|stor|cpu>profile" methods can be replaced all three "do_host_apply_<if|stor|cpu>profile" methods can be replaced

View File

@ -6,9 +6,9 @@
# #
from cgtsclient._i18n import _
from cgtsclient.common import base from cgtsclient.common import base
from cgtsclient import exc from cgtsclient import exc
from cgtsclient.openstack.common.gettextutils import _
CREATION_ATTRIBUTES = ['ihost_uuid', 'inode_uuid', 'cpu', 'core', 'thread', CREATION_ATTRIBUTES = ['ihost_uuid', 'inode_uuid', 'cpu', 'core', 'thread',

View File

@ -86,7 +86,9 @@ def do_show(cc, args):
@utils.arg('-S', '--security_feature', @utils.arg('-S', '--security_feature',
metavar='<security_feature>', metavar='<security_feature>',
choices=['spectre_meltdown_v1', 'spectre_meltdown_all'], choices=['spectre_meltdown_v1', 'spectre_meltdown_all'],
help='Use spectre_meltdown_v1 for spectre/meltdown v1 fixes, or spectre_meltdown_all to use all fixes') help='Use spectre_meltdown_v1 to add linux bootargs "nopti '
'nospectre_v2 nospectre_v1", or spectre_meltdown_all to not '
'add any mitigation disabling bootargs')
def do_modify(cc, args): def do_modify(cc, args):
"""Modify system attributes.""" """Modify system attributes."""
isystems = cc.isystem.list() isystems = cc.isystem.list()

Some files were not shown because too many files have changed in this diff Show More