diff --git a/.zuul.yaml b/.zuul.yaml
index 28cc0e0180..86ba0995b6 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -8,28 +8,28 @@
       jobs:
         - openstack-tox-linters
        - sysinv-tox-py27
-        - sysinv-tox-py35
+        - sysinv-tox-py36
         - sysinv-tox-flake8
         - sysinv-tox-pylint
         - sysinv-tox-bandit
         - controllerconfig-tox-flake8
-        - controllerconfig-tox-py27
         - controllerconfig-tox-pylint
         - cgtsclient-tox-py27
+        - cgtsclient-tox-py36
         - cgtsclient-tox-pep8
         - cgtsclient-tox-pylint
     gate:
       jobs:
         - openstack-tox-linters
         - sysinv-tox-py27
-        - sysinv-tox-py35
+        - sysinv-tox-py36
         - sysinv-tox-flake8
         - sysinv-tox-pylint
         - sysinv-tox-bandit
         - controllerconfig-tox-flake8
-        - controllerconfig-tox-py27
         - controllerconfig-tox-pylint
         - cgtsclient-tox-py27
+        - cgtsclient-tox-py36
         - cgtsclient-tox-pep8
         - cgtsclient-tox-pylint
@@ -50,11 +50,11 @@
       tox_extra_args: -c sysinv/sysinv/sysinv/tox.ini
 
 - job:
-    name: sysinv-tox-py35
+    name: sysinv-tox-py36
     parent: tox
     description: |
-      Run py35 test for sysinv
-    nodeset: ubuntu-xenial
+      Run py36 test for sysinv
+    nodeset: ubuntu-bionic
     required-projects:
       - starlingx/fault
       - starlingx/update
@@ -62,7 +62,7 @@
     files:
       - sysinv/sysinv/*
     vars:
-      tox_envlist: py35
+      tox_envlist: py36
       tox_extra_args: -c sysinv/sysinv/sysinv/tox.ini
 
 - job:
@@ -112,18 +112,6 @@
       tox_envlist: flake8
       tox_extra_args: -c controllerconfig/controllerconfig/tox.ini
 
-- job:
-    name: controllerconfig-tox-py27
-    parent: tox
-    description: Run py27 tests for controllerconfig
-    required-projects:
-      - starlingx/fault
-    files:
-      - controllerconfig/*
-    vars:
-      tox_envlist: py27
-      tox_extra_args: -c controllerconfig/controllerconfig/tox.ini
-
 - job:
     name: controllerconfig-tox-pylint
     parent: tox
@@ -171,6 +159,19 @@
       tox_envlist: py27
       tox_extra_args: -c sysinv/cgts-client/cgts-client/tox.ini
 
+- job:
+    name: cgtsclient-tox-py36
+    parent: tox
+    description: |
+      Run py36 test for cgts-client
+    nodeset: ubuntu-bionic
+    files:
+      - sysinv/cgts-client/*
+    vars:
+      tox_envlist: py36
+      tox_extra_args: -c sysinv/cgts-client/cgts-client/tox.ini
+
+
 - job:
     name: cgtsclient-tox-pep8
     parent: tox
diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py
index 84cf9db4d6..d751fec22e 100644
--- a/api-ref/source/conf.py
+++ b/api-ref/source/conf.py
@@ -51,7 +51,7 @@
 master_doc = 'index'
 
 # General information about the project.
 repository_name = 'openstack/stx-config'
-project = u'stx-config'
+project = u'StarlingX Configuration'
 bug_project = 'starlingx'
 bug_tag = 'stx.config'
diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst
index 75a39cf6c4..f2b3ad36c9 100644
--- a/api-ref/source/index.rst
+++ b/api-ref/source/index.rst
@@ -1,12 +1,13 @@
-========================
-stx-config API Reference
-========================
+===========================
+Configuration API Reference
+===========================
 
-Use the StarlingX stx-config API for system configuration management.
+Use the StarlingX Configuration API for system configuration management.
 
-stx-config API content can be searched using the :ref:`search page <search>`.
+Search Configuration API content using the :ref:`search page <search>`.
 
+-------------
 API reference
 -------------
 
 .. toctree::
diff --git a/controllerconfig/centos/build_srpm.data b/controllerconfig/centos/build_srpm.data
index 04bab29441..64925df6c3 100755
--- a/controllerconfig/centos/build_srpm.data
+++ b/controllerconfig/centos/build_srpm.data
@@ -1,2 +1,2 @@
 SRC_DIR="controllerconfig"
-TIS_PATCH_VER=151
+TIS_PATCH_VER=152
diff --git a/controllerconfig/centos/controllerconfig.spec b/controllerconfig/centos/controllerconfig.spec
index 08c6944711..8017a3d0a6 100644
--- a/controllerconfig/centos/controllerconfig.spec
+++ b/controllerconfig/centos/controllerconfig.spec
@@ -57,10 +57,7 @@ mkdir -p $RPM_BUILD_ROOT/wheels
 install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/
 
 install -d -m 755 %{buildroot}%{local_bindir}
-install -p -D -m 700 scripts/keyringstaging %{buildroot}%{local_bindir}/keyringstaging
 install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
-install -p -D -m 700 scripts/install_clone.py %{buildroot}%{local_bindir}/install_clone
-install -p -D -m 700 scripts/finish_install_clone.sh %{buildroot}%{local_bindir}/finish_install_clone.sh
 
 install -d -m 755 %{buildroot}%{local_goenabledd}
 install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh
@@ -74,13 +71,12 @@ install -p -D -m 755 upgrade-scripts/* %{buildroot}%{local_etc_upgraded}/
 
 install -d -m 755 %{buildroot}%{local_etc_systemd}
 install -p -D -m 664 scripts/controllerconfig.service %{buildroot}%{local_etc_systemd}/controllerconfig.service
-#install -p -D -m 664 scripts/config.service %{buildroot}%{local_etc_systemd}/config.service
 
 %post
 systemctl enable controllerconfig.service
 
 %clean
-rm -rf $RPM_BUILD_ROOT 
+rm -rf $RPM_BUILD_ROOT
 
 %files
 %defattr(-,root,root,-)
diff --git a/controllerconfig/controllerconfig/controllerconfig/__init__.py b/controllerconfig/controllerconfig/controllerconfig/__init__.py
index 138851db64..9fc91a45df 100644
--- a/controllerconfig/controllerconfig/controllerconfig/__init__.py
+++ b/controllerconfig/controllerconfig/controllerconfig/__init__.py
@@ -1,34 +1,10 @@
 #
-# Copyright (c) 2015-2019 Wind River Systems, Inc.
+# Copyright (c) 2015-2020 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
-from controllerconfig.common.validator import validate  # noqa: F401
-from controllerconfig.common.configobjects import Network  # noqa: F401
-from controllerconfig.common.configobjects import DEFAULT_CONFIG  # noqa: F401
-from controllerconfig.common.configobjects import REGION_CONFIG  # noqa: F401
-from controllerconfig.common.configobjects import DEFAULT_NAMES  # noqa: F401
-from controllerconfig.common.configobjects import HP_NAMES  # noqa: F401
-from controllerconfig.common.configobjects import SUBCLOUD_CONFIG  # noqa: F401
-from controllerconfig.common.configobjects import MGMT_TYPE  # noqa: F401
-from controllerconfig.common.configobjects import INFRA_TYPE  # noqa: F401
-from controllerconfig.common.configobjects import OAM_TYPE  # noqa: F401
-from controllerconfig.common.configobjects import NETWORK_PREFIX_NAMES  # noqa: F401
-from controllerconfig.common.configobjects import HOST_XML_ATTRIBUTES  # noqa: F401
-from controllerconfig.common.configobjects import DEFAULT_DOMAIN_NAME  # noqa: F401
 from controllerconfig.common.exceptions import ConfigError  # noqa: F401
-from controllerconfig.common.exceptions import ConfigFail  # noqa: F401
 from controllerconfig.common.exceptions import ValidateFail  # noqa: F401
-from controllerconfig.utils import is_valid_vlan  # noqa: F401
-from controllerconfig.utils import is_mtu_valid  # noqa: F401
 from controllerconfig.utils import validate_network_str  # noqa: F401
 from controllerconfig.utils import validate_address_str  # noqa: F401
-from controllerconfig.utils import validate_address  # noqa: F401
-from controllerconfig.utils import is_valid_url  # noqa: F401
-from controllerconfig.utils import is_valid_domain_or_ip  # noqa: F401
-from controllerconfig.utils import ip_version_to_string  # noqa: F401
-from controllerconfig.utils import lag_mode_to_str  # noqa: F401
-from controllerconfig.utils import validate_openstack_password  # noqa: F401
-from controllerconfig.utils import validate_nameserver_address_str  # noqa: F401
-from controllerconfig.utils import extract_openstack_password_rules_from_file  # noqa: F401
diff --git a/controllerconfig/controllerconfig/controllerconfig/backup_restore.py b/controllerconfig/controllerconfig/controllerconfig/backup_restore.py
deleted file mode 100644
index 5b238be9b9..0000000000
--- a/controllerconfig/controllerconfig/controllerconfig/backup_restore.py
+++ /dev/null
@@ -1,1690 +0,0 @@
-#
-# Copyright (c) 2014-2017 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-"""
-Backup & Restore
-"""
-
-from __future__ import print_function
-import copy
-import filecmp
-import fileinput
-import os
-import glob
-import shutil
-import stat
-import subprocess
-import tarfile
-import tempfile
-import textwrap
-import time
-
-from fm_api import constants as fm_constants
-from fm_api import fm_api
-from sysinv.common import constants as sysinv_constants
-
-from controllerconfig.common import log
-from controllerconfig.common import constants
-from controllerconfig.common.exceptions import BackupFail
-from controllerconfig.common.exceptions import RestoreFail
-from controllerconfig.common.exceptions import KeystoneFail
-from controllerconfig.common.exceptions import SysInvFail
-from controllerconfig import openstack
-import tsconfig.tsconfig as tsconfig
-from controllerconfig import utils
-from controllerconfig import sysinv_api as sysinv
-from six.moves import input
-from os import environ
-
-LOG = log.get_logger(__name__)
-
-DEVNULL = open(os.devnull, 'w')
-RESTORE_COMPLETE = "restore-complete"
-RESTORE_RERUN_REQUIRED = "restore-rerun-required"
-
-# Backup/restore related constants
-backup_in_progress = tsconfig.BACKUP_IN_PROGRESS_FLAG
-restore_in_progress = tsconfig.RESTORE_IN_PROGRESS_FLAG
-restore_patching_complete = '/etc/platform/.restore_patching_complete'
-node_is_patched = '/var/run/node_is_patched'
-keyring_permdir = os.path.join('/opt/platform/.keyring', tsconfig.SW_VERSION)
-ceph_permdir = os.path.join(tsconfig.CONFIG_PATH, 'ceph-config')
-ldap_permdir = '/var/lib/openldap-data'
-patching_permdir = '/opt/patching'
-patching_repo_permdir = '/www/pages/updates'
-home_permdir = '/home'
-extension_permdir = '/opt/extension'
-patch_vault_permdir = '/opt/patch-vault'
-mariadb_pod = 'mariadb-server-0'
-
-kube_config = environ.get('KUBECONFIG')
-if kube_config is None:
-    kube_config = '/etc/kubernetes/admin.conf'
-
-
-kube_cmd_prefix = 'kubectl --kubeconfig=%s ' % kube_config
-kube_cmd_prefix += 'exec -i %s -n openstack -- bash -c ' % mariadb_pod
-
-mysql_prefix = '\'exec mysql -uroot -p"$MYSQL_ROOT_PASSWORD" '
-mysqldump_prefix = '\'exec mysqldump -uroot -p"$MYSQL_ROOT_PASSWORD" '
-
-
-def get_backup_databases():
-    """
-    Retrieve database lists for backup.
-    :return: backup_databases and backup_database_skip_tables
-    """
-
-    # Databases common to all configurations
-    REGION_LOCAL_DATABASES = ('postgres', 'template1', 'sysinv',
-                              'fm', 'barbican')
-    REGION_SHARED_DATABASES = ('keystone',)
-
-    # Indicates which tables have to be dropped for a certain database.
-    DB_TABLE_SKIP_MAPPING = {
-        'fm': ('alarm',),
-        'dcorch': ('orch_job',
-                   'orch_request',
-                   'resource',
-                   'subcloud_resource'), }
-
-    if tsconfig.region_config == 'yes':
-        BACKUP_DATABASES = REGION_LOCAL_DATABASES
-    else:
-        # Add additional databases for non-region configuration and for the
-        # primary region in region deployments.
-        BACKUP_DATABASES = REGION_LOCAL_DATABASES + REGION_SHARED_DATABASES
-
-        # Add distributed cloud databases
-        if tsconfig.distributed_cloud_role == \
-                sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:
-            BACKUP_DATABASES += ('dcmanager', 'dcorch')
-
-    # We generate the tables to be skipped for each database
-    # mentioned in BACKUP_DATABASES. We explicitly list
-    # skip tables in DB_TABLE_SKIP_MAPPING
-    BACKUP_DB_SKIP_TABLES = dict(
-        [[x, DB_TABLE_SKIP_MAPPING.get(x, ())] for x in BACKUP_DATABASES])
-
-    return BACKUP_DATABASES, BACKUP_DB_SKIP_TABLES
-
-
-def get_os_backup_databases():
-    """
-    Retrieve openstack database lists from MariaDB for backup.
-    :return: os_backup_databases
-    """
-
-    skip_dbs = ("Database", "information_schema", "performance_schema",
-                "mysql", "horizon", "panko", "gnocchi")
-
-    try:
-        db_cmd = kube_cmd_prefix + mysql_prefix + '-e"show databases" \''
-
-        proc = subprocess.Popen([db_cmd], shell=True,
-                                stdout=subprocess.PIPE, stderr=DEVNULL)
-
-        os_backup_dbs = set(line[:-1] for line in proc.stdout
-                            if line[:-1] not in skip_dbs)
-
-        proc.communicate()
-
-        return os_backup_dbs
-
-    except subprocess.CalledProcessError:
-        raise BackupFail("Failed to get openstack databases from MariaDB.")
-
-
-def check_load_versions(archive, staging_dir):
-    match = False
-    try:
-        member = archive.getmember('etc/build.info')
-        archive.extract(member, path=staging_dir)
-        match = filecmp.cmp('/etc/build.info', staging_dir + '/etc/build.info')
-        shutil.rmtree(staging_dir + '/etc')
-    except Exception as e:
-        LOG.exception(e)
-        raise RestoreFail("Unable to verify load version in backup file. "
-                          "Invalid backup file.")
-
-    if not match:
-        LOG.error("Load version mismatch.")
-        raise RestoreFail("Load version of backup does not match the "
-                          "version of the installed load.")
-
-
-def get_subfunctions(filename):
-    """
-    Retrieves the subfunctions from a platform.conf file.
-    :param filename: file to retrieve subfunctions from
-    :return: a list of the subfunctions or None if no subfunctions exist
-    """
-    matchstr = 'subfunction='
-
-    with open(filename, 'r') as f:
-        for line in f:
-            if matchstr in line:
-                parsed = line.split('=')
-                return parsed[1].rstrip().split(",")
-    return
-
-
-def check_load_subfunctions(archive, staging_dir):
-    """
-    Verify that the subfunctions in the backup match the installed load.
-    :param archive: backup archive
-    :param staging_dir: staging directory
-    :return: raises exception if the subfunctions do not match
-    """
-    match = False
-    backup_subfunctions = None
-    try:
-        member = archive.getmember('etc/platform/platform.conf')
-        archive.extract(member, path=staging_dir)
-        backup_subfunctions = get_subfunctions(staging_dir +
-                                               '/etc/platform/platform.conf')
-        shutil.rmtree(staging_dir + '/etc')
-        if set(backup_subfunctions) ^ set(tsconfig.subfunctions):
-            # The set of subfunctions do not match
-            match = False
-        else:
-            match = True
-    except Exception:
-        LOG.exception("Unable to verify subfunctions in backup file")
-        raise RestoreFail("Unable to verify subfunctions in backup file. "
-                          "Invalid backup file.")
-
-    if not match:
-        LOG.error("Subfunction mismatch - backup: %s, installed: %s" %
-                  (str(backup_subfunctions), str(tsconfig.subfunctions)))
-        raise RestoreFail("Subfunctions in backup load (%s) do not match the "
-                          "subfunctions of the installed load (%s)." %
-                          (str(backup_subfunctions),
-                           str(tsconfig.subfunctions)))
-
-
-def file_exists_in_archive(archive, file_path):
-    """ Check if file exists in archive """
-    try:
-        archive.getmember(file_path)
-        return True
-
-    except KeyError:
-        LOG.info("File %s is not in archive."
-                 % file_path)
-        return False
-
-
-def filter_directory(archive, directory):
-    for tarinfo in archive:
-        if tarinfo.name.split('/')[0] == directory:
-            yield tarinfo
-
-
-def backup_etc_size():
-    """ Backup etc size estimate """
-    try:
-        total_size = utils.directory_get_size('/etc')
-        return total_size
-    except OSError:
-        LOG.error("Failed to estimate backup etc size.")
-        raise BackupFail("Failed to estimate backup etc size")
-
-
-def backup_etc(archive):
-    """ Backup etc """
-    try:
-        archive.add('/etc', arcname='etc')
-
-    except tarfile.TarError:
-        LOG.error("Failed to backup etc.")
-        raise BackupFail("Failed to backup etc")
-
-
-def restore_etc_file(archive, dest_dir, etc_file):
-    """ Restore etc file """
-    try:
-        # Change the name of this file to remove the leading path
-        member = archive.getmember('etc/' + etc_file)
-        # Copy the member to avoid changing the name for future operations on
-        # this member.
-        temp_member = copy.copy(member)
-        temp_member.name = os.path.basename(temp_member.name)
-        archive.extract(temp_member, path=dest_dir)
-
-    except tarfile.TarError:
-        LOG.error("Failed to restore etc file.")
-        raise RestoreFail("Failed to restore etc file")
-
-
-def restore_etc_ssl_dir(archive, configpath=constants.CONFIG_WORKDIR):
-    """ Restore the etc SSL dir """
-
-    def filter_etc_ssl_private(members):
-        for tarinfo in members:
-            if 'etc/ssl/private' in tarinfo.name:
-                yield tarinfo
-
-    if file_exists_in_archive(archive, 'config/server-cert.pem'):
-        restore_config_file(
-            archive, configpath, 'server-cert.pem')
-
-    if file_exists_in_archive(archive, 'etc/ssl/private'):
-        # NOTE: This will include all TPM certificate files if TPM was
-        # enabled on the backed up system. However in that case, this
-        # restoration is only done for the first controller and TPM
-        # will need to be reconfigured once duplex controller (if any)
-        # is restored.
-        archive.extractall(path='/',
-                           members=filter_etc_ssl_private(archive))
-
-
-def restore_ceph_external_config_files(archive, staging_dir):
-    # Restore ceph-config.
-    if file_exists_in_archive(archive, "config/ceph-config"):
-        restore_config_dir(archive, staging_dir, 'ceph-config', ceph_permdir)
-
-        # Copy the file to /etc/ceph.
-        # There might be no files to copy, so don't check the return code.
-        cp_command = ('cp -Rp ' + os.path.join(ceph_permdir, '*') +
-                      ' /etc/ceph/')
-        subprocess.call(cp_command, shell=True)
-
-
-def backup_config_size(config_permdir):
-    """ Backup configuration size estimate """
-    try:
-        return(utils.directory_get_size(config_permdir))
-
-    except OSError:
-        LOG.error("Failed to estimate backup configuration size.")
-        raise BackupFail("Failed to estimate backup configuration size")
-
-
-def backup_config(archive, config_permdir):
-    """ Backup configuration """
-    try:
-        # The config dir is versioned, but we're only grabbing the current
-        # release
-        archive.add(config_permdir, arcname='config')
-
-    except tarfile.TarError:
-        LOG.error("Failed to backup config.")
-        raise BackupFail("Failed to backup configuration")
-
-
-def restore_config_file(archive, dest_dir, config_file):
-    """ Restore configuration file """
-    try:
-        # Change the name of this file to remove the leading path
-        member = archive.getmember('config/' + config_file)
-        # Copy the member to avoid changing the name for future operations on
-        # this member.
-        temp_member = copy.copy(member)
-        temp_member.name = os.path.basename(temp_member.name)
-        archive.extract(temp_member, path=dest_dir)
-
-    except tarfile.TarError:
-        LOG.error("Failed to restore config file %s."
-                  % config_file)
-        raise RestoreFail("Failed to restore configuration")
-
-
-def restore_configuration(archive, staging_dir):
-    """ Restore configuration """
-    try:
-        os.makedirs(constants.CONFIG_WORKDIR, stat.S_IRWXU | stat.S_IRGRP |
-                    stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
-    except OSError:
-        LOG.error("Failed to create config directory: %s",
-                  constants.CONFIG_WORKDIR)
-        raise RestoreFail("Failed to restore configuration files")
-
-    # Restore cgcs_config file from original installation for historical
-    # purposes. Not used to restore the system as the information in this
-    # file is out of date (not updated after original installation).
-    restore_config_file(archive, constants.CONFIG_WORKDIR, 'cgcs_config')
-
-    # Restore platform.conf file and update as necessary. The file will be
-    # created in a temporary location and then moved into place when it is
-    # complete to prevent access to a partially created file.
-    restore_etc_file(archive, staging_dir, 'platform/platform.conf')
-    temp_platform_conf_file = os.path.join(tsconfig.PLATFORM_CONF_PATH,
-                                           'platform.conf.temp')
-    shutil.copyfile(os.path.join(staging_dir, 'platform.conf'),
-                    temp_platform_conf_file)
-    install_uuid = utils.get_install_uuid()
-    for line in fileinput.FileInput(temp_platform_conf_file, inplace=1):
-        if line.startswith("INSTALL_UUID="):
-            # The INSTALL_UUID must be updated to match the new INSTALL_UUID
-            # which was generated when this controller was installed prior to
-            # doing the restore.
-            print("INSTALL_UUID=%s" % install_uuid)
-        elif line.startswith("management_interface=") or \
-                line.startswith("oam_interface=") or \
-                line.startswith("cluster_host_interface=") or \
-                line.startswith("UUID="):
-            # Strip out any entries that are host specific as the backup can
-            # be done on either controller. The application of the
-            # platform_conf manifest will add these back in.
-            pass
-        else:
-            print(line, end='')
-    fileinput.close()
-    # Move updated platform.conf file into place.
-    os.rename(temp_platform_conf_file, tsconfig.PLATFORM_CONF_FILE)
-
-    # Kick tsconfig to reload the platform.conf file
-    tsconfig._load()
-
-    # Restore branding
-    restore_config_dir(archive, staging_dir, 'branding', '/opt/branding/')
-
-    # Restore banner customization
-    restore_config_dir(archive, staging_dir, 'banner/etc', '/opt/banner')
-
-    # Restore ssh configuration
-    restore_config_dir(archive, staging_dir, 'ssh_config',
-                       constants.CONFIG_WORKDIR + '/ssh_config')
-
-    # Configure hostname
-    utils.configure_hostname('controller-0')
-
-    # Restore hosts file
-    restore_etc_file(archive, '/etc', 'hosts')
-    restore_etc_file(archive, constants.CONFIG_WORKDIR, 'hosts')
-
-    # Restore certificate files
-    restore_etc_ssl_dir(archive)
-
-
-def filter_pxelinux(archive):
-    for tarinfo in archive:
-        if tarinfo.name.find('config/pxelinux.cfg') == 0:
-            yield tarinfo
-
-
-def restore_dnsmasq(archive, config_permdir):
-    """ Restore dnsmasq """
-    try:
-        etc_files = ['hosts']
-
-        perm_files = ['hosts',
-                      'dnsmasq.hosts', 'dnsmasq.leases',
-                      'dnsmasq.addn_hosts']
-
-        for etc_file in etc_files:
-            restore_config_file(archive, '/etc', etc_file)
-
-        for perm_file in perm_files:
-            restore_config_file(archive, config_permdir, perm_file)
-
-        # Extract distributed cloud addn_hosts file if present in archive.
-        if file_exists_in_archive(
-                archive, 'config/dnsmasq.addn_hosts_dc'):
-            restore_config_file(archive, config_permdir,
-                                'dnsmasq.addn_hosts_dc')
-
-        tmpdir = tempfile.mkdtemp(prefix="pxerestore_")
-
-        archive.extractall(tmpdir,
-                           members=filter_pxelinux(archive))
-
-        if os.path.exists(tmpdir + '/config/pxelinux.cfg'):
-            shutil.rmtree(config_permdir + 'pxelinux.cfg', ignore_errors=True)
-            shutil.move(tmpdir + '/config/pxelinux.cfg', config_permdir)
-
-        shutil.rmtree(tmpdir, ignore_errors=True)
-
-    except (shutil.Error, subprocess.CalledProcessError, tarfile.TarError):
-        LOG.error("Failed to restore dnsmasq config.")
-        raise RestoreFail("Failed to restore dnsmasq files")
-
-
-def backup_puppet_data_size(puppet_permdir):
-    """ Backup puppet data size estimate """
-    try:
-        return(utils.directory_get_size(puppet_permdir))
-
-    except OSError:
-        LOG.error("Failed to estimate backup puppet data size.")
-        raise BackupFail("Failed to estimate backup puppet data size")
-
-
-def backup_puppet_data(archive, puppet_permdir):
-    """ Backup puppet data """
-    try:
-        # The puppet dir is versioned, but we're only grabbing the current
-        # release
-        archive.add(puppet_permdir, arcname='hieradata')
-
-    except tarfile.TarError:
-        LOG.error("Failed to backup puppet data.")
-        raise BackupFail("Failed to backup puppet data")
-
-
-def restore_static_puppet_data(archive, puppet_workdir):
-    """ Restore static puppet data """
-    try:
-        member = archive.getmember('hieradata/static.yaml')
-        archive.extract(member, path=os.path.dirname(puppet_workdir))
-
-        member = archive.getmember('hieradata/secure_static.yaml')
-        archive.extract(member, path=os.path.dirname(puppet_workdir))
-
-    except tarfile.TarError:
-        LOG.error("Failed to restore static puppet data.")
-        raise RestoreFail("Failed to restore static puppet data")
-
-    except OSError:
-        pass
-
-
-def restore_puppet_data(archive, puppet_workdir, controller_0_address):
-    """ Restore puppet data """
-    try:
-        member = archive.getmember('hieradata/system.yaml')
-        archive.extract(member, path=os.path.dirname(puppet_workdir))
-
-        member = archive.getmember('hieradata/secure_system.yaml')
-        archive.extract(member, path=os.path.dirname(puppet_workdir))
-
-        # Only restore controller-0 hieradata
-        controller_0_hieradata = 'hieradata/%s.yaml' % controller_0_address
-        member = archive.getmember(controller_0_hieradata)
-        archive.extract(member, path=os.path.dirname(puppet_workdir))
-
-    except tarfile.TarError:
-        LOG.error("Failed to restore puppet data.")
-        raise RestoreFail("Failed to restore puppet data")
-
-    except OSError:
-        pass
-
-
-def backup_armada_manifest_size(armada_permdir):
-    """ Backup armada manifest size estimate """
-    try:
-        return(utils.directory_get_size(armada_permdir))
-
-    except OSError:
-        LOG.error("Failed to estimate backup armada manifest size.")
-        raise BackupFail("Failed to estimate backup armada manifest size")
-
-
-def backup_armada_manifest_data(archive, armada_permdir):
-    """ Backup armada manifest data """
-    try:
-        archive.add(armada_permdir, arcname='armada')
-
-    except tarfile.TarError:
-        LOG.error("Failed to backup armada manifest data.")
-        raise BackupFail("Failed to backup armada manifest data")
-
-
-def restore_armada_manifest_data(archive, armada_permdir):
-    """ Restore armada manifest data """
-    try:
-        shutil.rmtree(armada_permdir, ignore_errors=True)
-        members = filter_directory(archive, 'armada')
-        temp_members = list()
-        # remove armada and armada/ from the member path since they are
-        # extracted to armada_permdir: /opt/platform/armada/release
-        for m in members:
-            temp_member = copy.copy(m)
-            lst = temp_member.name.split('armada/')
-            if len(lst) > 1:
-                temp_member.name = lst[1]
-                temp_members.append(temp_member)
-        archive.extractall(path=armada_permdir, members=temp_members)
-
-    except (tarfile.TarError, OSError):
-        LOG.error("Failed to restore armada manifest.")
-        shutil.rmtree(armada_permdir, ignore_errors=True)
-        raise RestoreFail("Failed to restore armada manifest")
-
-
-def backup_keyring_size(keyring_permdir):
-    """ Backup keyring size estimate """
-    try:
-        return(utils.directory_get_size(keyring_permdir))
-
-    except OSError:
-        LOG.error("Failed to estimate backup keyring size.")
-        raise BackupFail("Failed to estimate backup keyring size")
-
-
-def backup_keyring(archive, keyring_permdir):
-    """ Backup keyring configuration """
-    try:
-        archive.add(keyring_permdir, arcname='.keyring')
-
-    except tarfile.TarError:
-        LOG.error("Failed to backup keyring.")
-        raise BackupFail("Failed to backup keyring configuration")
-
-
-def restore_keyring(archive, keyring_permdir):
-    """ Restore keyring configuration """
-    try:
-        shutil.rmtree(keyring_permdir, ignore_errors=False)
-        members = filter_directory(archive, '.keyring')
-        temp_members = list()
-        # remove .keyring and .keyring/ from the member path since they are
-        # extracted to keyring_permdir: /opt/platform/.keyring/release
-        for m in members:
-            temp_member = copy.copy(m)
-            lst = temp_member.name.split('.keyring/')
-            if len(lst) > 1:
-                temp_member.name = lst[1]
-                temp_members.append(temp_member)
-        archive.extractall(path=keyring_permdir, members=temp_members)
-
-    except (tarfile.TarError, shutil.Error):
-        LOG.error("Failed to restore keyring.")
-        shutil.rmtree(keyring_permdir, ignore_errors=True)
-        raise RestoreFail("Failed to restore keyring configuration")
-
-
-def prefetch_keyring(archive):
-    """ Prefetch keyring configuration for manifest use """
-    keyring_tmpdir = '/tmp/.keyring'
-    python_keyring_tmpdir = '/tmp/python_keyring'
-    try:
-        shutil.rmtree(keyring_tmpdir, ignore_errors=True)
-        shutil.rmtree(python_keyring_tmpdir, ignore_errors=True)
-        archive.extractall(
-            path=os.path.dirname(keyring_tmpdir),
-            members=filter_directory(archive,
-                                     os.path.basename(keyring_tmpdir)))
-
-        shutil.move(keyring_tmpdir + '/python_keyring', python_keyring_tmpdir)
-
-    except (tarfile.TarError, shutil.Error):
-        LOG.error("Failed to restore keyring.")
-        shutil.rmtree(keyring_tmpdir, ignore_errors=True)
-        shutil.rmtree(python_keyring_tmpdir, ignore_errors=True)
-        raise RestoreFail("Failed to restore keyring configuration")
-
-
-def cleanup_prefetched_keyring():
-    """ Cleanup fetched keyring """
-    try:
-        keyring_tmpdir = '/tmp/.keyring'
-        python_keyring_tmpdir = '/tmp/python_keyring'
-
-        shutil.rmtree(keyring_tmpdir, ignore_errors=True)
-        shutil.rmtree(python_keyring_tmpdir, ignore_errors=True)
-
-    except shutil.Error:
-        LOG.error("Failed to cleanup keyring.")
-        raise RestoreFail("Failed to cleanup fetched keyring")
-
-
-def backup_ldap_size():
-    """ Backup ldap size estimate """
-    try:
-        total_size = 0
-
-        proc = subprocess.Popen(
-            ['slapcat -d 0 -F /etc/openldap/schema | wc -c'],
-            shell=True, stdout=subprocess.PIPE)
-
-        for line in proc.stdout:
-            total_size = int(line)
-            break
-
-        proc.communicate()
-
-        return total_size
-
-    except subprocess.CalledProcessError:
-        LOG.error("Failed to estimate backup ldap size.")
-        raise BackupFail("Failed to estimate backup ldap size")
-
-
-def backup_ldap(archive, staging_dir):
-    """ Backup ldap configuration """
-    try:
-        ldap_staging_dir = staging_dir + '/ldap'
-        os.mkdir(ldap_staging_dir, 0o655)
-
-        subprocess.check_call([
-            'slapcat', '-d', '0', '-F', '/etc/openldap/schema',
-            '-l', (ldap_staging_dir + '/ldap.db')], stdout=DEVNULL)
-
-        archive.add(ldap_staging_dir + '/ldap.db', arcname='ldap.db')
-
-    except (OSError, subprocess.CalledProcessError, tarfile.TarError):
-        LOG.error("Failed to backup ldap database.")
-        raise BackupFail("Failed to backup ldap configuration")
-
-
-def restore_ldap(archive, ldap_permdir, staging_dir):
-    """ Restore ldap configuration """
-    try:
-        ldap_staging_dir = staging_dir + '/ldap'
-        archive.extract('ldap.db', path=ldap_staging_dir)
-
-        utils.stop_lsb_service('openldap')
-
-        subprocess.call(['rm', '-rf', ldap_permdir], stdout=DEVNULL)
-        os.mkdir(ldap_permdir, 0o755)
-
-        subprocess.check_call(['slapadd', '-F', '/etc/openldap/schema',
-                               '-l', ldap_staging_dir + '/ldap.db'],
-                              stdout=DEVNULL, stderr=DEVNULL)
-
-    except (subprocess.CalledProcessError, OSError, tarfile.TarError):
-        LOG.error("Failed to restore ldap database.")
-        raise RestoreFail("Failed to restore ldap configuration")
-
-    finally:
-        utils.start_lsb_service('openldap')
-
-
-def backup_mariadb_size():
-    """ Backup MariaDB size estimate """
-    try:
-        total_size = 0
-
-        os_backup_dbs = get_os_backup_databases()
-
-        # Backup data for databases.
-        for db_elem in os_backup_dbs:
-
-            db_cmd = kube_cmd_prefix + mysqldump_prefix
-            db_cmd += ' %s\' | wc -c' % db_elem
-
-            proc = subprocess.Popen([db_cmd], shell=True,
-                                    stdout=subprocess.PIPE, stderr=DEVNULL)
-
-            total_size += int(proc.stdout.readline())
-            proc.communicate()
-
-        return total_size
-
-    except subprocess.CalledProcessError:
-        LOG.error("Failed to estimate MariaDB database size.")
-        raise BackupFail("Failed to estimate MariaDB database size")
-
-
-def backup_mariadb(archive, staging_dir):
-    """ Backup MariaDB data """
-    try:
-        mariadb_staging_dir = staging_dir + '/mariadb'
-        os.mkdir(mariadb_staging_dir, 0o655)
-
-        os_backup_dbs = get_os_backup_databases()
-
-        # Backup data for databases.
-        for db_elem in os_backup_dbs:
-            db_cmd = kube_cmd_prefix + mysqldump_prefix
-            db_cmd += ' %s\' > %s/%s.sql.data' % (db_elem,
-                                                  mariadb_staging_dir, db_elem)
-
-            subprocess.check_call([db_cmd], shell=True, stderr=DEVNULL)
-
-        archive.add(mariadb_staging_dir, arcname='mariadb')
-
-    except (OSError, subprocess.CalledProcessError, tarfile.TarError):
-        LOG.error("Failed to backup MariaDB databases.")
-        raise BackupFail("Failed to backup MariaDB database.")
-
-
-def extract_mariadb_data(archive):
-    """ Extract and store MariaDB data """
-    try:
-        # We store MariaDB data in /opt/backups/mariadb for now.
-        # After MariaDB service is up, we will populate the
-        # database using these data.
-        archive.extractall(path=constants.BACKUPS_PATH,
-                           members=filter_directory(archive, 'mariadb'))
-    except (OSError, tarfile.TarError) as e:
-        LOG.error("Failed to extract and store MariaDB data. Error: %s", e)
-        raise RestoreFail("Failed to extract and store MariaDB data.")
-
-
-def create_helm_overrides_directory():
-    """
-    Create helm overrides directory
-    During restore, application-apply will be done without
-    first running application-upload where the helm overrides
-    directory is created. So we need to create the helm overrides
-    directory before running application-apply.
- """ - try: - os.mkdir(constants.HELM_OVERRIDES_PERMDIR, 0o755) - except OSError: - LOG.error("Failed to create helm overrides directory") - raise BackupFail("Failed to create helm overrides directory") - - -def restore_mariadb(): - """ - Restore MariaDB - - This function is called after MariaDB service is up - """ - try: - mariadb_staging_dir = constants.BACKUPS_PATH + '/mariadb' - # Restore data for databases. - for data in glob.glob(mariadb_staging_dir + '/*.sql.data'): - db_elem = data.split('/')[-1].split('.')[0] - create_db = "create database %s" % db_elem - - # Create the database - db_cmd = kube_cmd_prefix + mysql_prefix + '-e"%s" \'' % create_db - subprocess.check_call([db_cmd], shell=True, stderr=DEVNULL) - - # Populate data - db_cmd = 'cat %s | ' % data - db_cmd = db_cmd + kube_cmd_prefix + mysql_prefix - db_cmd += '%s\' ' % db_elem - subprocess.check_call([db_cmd], shell=True, stderr=DEVNULL) - - shutil.rmtree(mariadb_staging_dir, ignore_errors=True) - - except (OSError, subprocess.CalledProcessError) as e: - LOG.error("Failed to restore MariaDB data. Error: %s", e) - raise RestoreFail("Failed to restore MariaDB data.") - - -def backup_postgres_size(): - """ Backup postgres size estimate """ - try: - total_size = 0 - - # Backup roles, table spaces and schemas for databases. - proc = subprocess.Popen([('sudo -u postgres pg_dumpall --clean ' + - '--schema-only | wc -c')], shell=True, - stdout=subprocess.PIPE, stderr=DEVNULL) - - for line in proc.stdout: - total_size = int(line) - break - - proc.communicate() - - # get backup database - backup_databases, backup_db_skip_tables = get_backup_databases() - - # Backup data for databases. - for _, db_elem in enumerate(backup_databases): - - db_cmd = 'sudo -u postgres pg_dump --format=plain --inserts ' - db_cmd += '--disable-triggers --data-only %s ' % db_elem - - for _, table_elem in enumerate(backup_db_skip_tables[db_elem]): - db_cmd += '--exclude-table=%s ' % table_elem - - db_cmd += '| wc -c' - - proc = subprocess.Popen([db_cmd], shell=True, - stdout=subprocess.PIPE, stderr=DEVNULL) - - for line in proc.stdout: - total_size += int(line) - break - - proc.communicate() - - return total_size - - except subprocess.CalledProcessError: - LOG.error("Failed to estimate backup database size.") - raise BackupFail("Failed to estimate backup database size") - - -def backup_postgres(archive, staging_dir): - """ Backup postgres configuration """ - try: - postgres_staging_dir = staging_dir + '/postgres' - os.mkdir(postgres_staging_dir, 0o655) - - # Backup roles, table spaces and schemas for databases. - subprocess.check_call([('sudo -u postgres pg_dumpall --clean ' + - '--schema-only' + - '> %s/%s' % (postgres_staging_dir, - 'postgres.sql.config'))], - shell=True, stderr=DEVNULL) - - # get backup database - backup_databases, backup_db_skip_tables = get_backup_databases() - - # Backup data for databases. 
-        for _, db_elem in enumerate(backup_databases):
-
-            db_cmd = 'sudo -u postgres pg_dump --format=plain --inserts '
-            db_cmd += '--disable-triggers --data-only %s ' % db_elem
-
-            for _, table_elem in enumerate(backup_db_skip_tables[db_elem]):
-                db_cmd += '--exclude-table=%s ' % table_elem
-
-            db_cmd += '> %s/%s.sql.data' % (postgres_staging_dir, db_elem)
-
-            subprocess.check_call([db_cmd], shell=True, stderr=DEVNULL)
-
-        archive.add(postgres_staging_dir, arcname='postgres')
-
-    except (OSError, subprocess.CalledProcessError, tarfile.TarError):
-        LOG.error("Failed to backup postgres databases.")
-        raise BackupFail("Failed to backup database configuration")
-
-
-def restore_postgres(archive, staging_dir):
-    """ Restore postgres configuration """
-    try:
-        postgres_staging_dir = staging_dir + '/postgres'
-        archive.extractall(path=staging_dir,
-                           members=filter_directory(archive, 'postgres'))
-
-        utils.start_service("postgresql")
-
-        # Restore roles, table spaces and schemas for databases.
-        subprocess.check_call(["sudo", "-u", "postgres", "psql", "-f",
-                               postgres_staging_dir +
-                               '/postgres.sql.config', "postgres"],
-                              stdout=DEVNULL, stderr=DEVNULL)
-
-        # Restore data for databases.
-        for data in glob.glob(postgres_staging_dir + '/*.sql.data'):
-            db_elem = data.split('/')[-1].split('.')[0]
-            subprocess.check_call(["sudo", "-u", "postgres", "psql", "-f",
-                                   data, db_elem],
-                                  stdout=DEVNULL)
-
-    except (OSError, subprocess.CalledProcessError, tarfile.TarError) as e:
-        LOG.error("Failed to restore postgres databases. Error: %s", e)
-        raise RestoreFail("Failed to restore database configuration")
-
-    finally:
-        utils.stop_service('postgresql')
-
-
-def filter_config_dir(archive, directory):
-    for tarinfo in archive:
-        if tarinfo.name.find('config/' + directory) == 0:
-            yield tarinfo
-
-
-def restore_config_dir(archive, staging_dir, config_dir, dest_dir):
-    """ Restore configuration directory if it exists """
-    try:
-        archive.extractall(staging_dir,
-                           members=filter_config_dir(archive, config_dir))
-
-        # Copy files from backup to dest dir
-        if (os.path.exists(staging_dir + '/config/' + config_dir) and
-                os.listdir(staging_dir + '/config/' + config_dir)):
-            subprocess.call(["mkdir", "-p", dest_dir])
-
-            try:
-                for f in glob.glob(
-                        staging_dir + '/config/' + config_dir + '/*'):
-                    subprocess.check_call(["cp", "-p", f, dest_dir])
-            except IOError:
-                LOG.warning("Failed to copy %s files" % config_dir)
-
-    except (subprocess.CalledProcessError, tarfile.TarError):
-        LOG.info("No custom %s config was found during restore."
-                 % config_dir)
-
-
-def backup_std_dir_size(directory):
-    """ Backup standard directory size estimate """
-    try:
-        return utils.directory_get_size(directory)
-
-    except OSError:
-        LOG.error("Failed to estimate backup size for %s" % directory)
-        raise BackupFail("Failed to estimate backup size for %s" % directory)
-
-
-def backup_std_dir(archive, directory):
-    """ Backup standard directory """
-    try:
-        archive.add(directory, arcname=os.path.basename(directory))
-
-    except tarfile.TarError:
-        LOG.error("Failed to backup %s" % directory)
-        raise BackupFail("Failed to backup %s" % directory)
-
-
-def restore_std_dir(archive, directory):
-    """ Restore standard directory """
-    try:
-        shutil.rmtree(directory, ignore_errors=True)
-        # Verify that archive contains this directory
-        try:
-            archive.getmember(os.path.basename(directory))
-        except KeyError:
-            LOG.error("Archive does not contain directory %s" % directory)
-            raise RestoreFail("Invalid backup file - missing directory %s" %
-                              directory)
-        archive.extractall(
-            path=os.path.dirname(directory),
-            members=filter_directory(archive, os.path.basename(directory)))
-
-    except (shutil.Error, tarfile.TarError):
-        LOG.error("Failed to restore %s" % directory)
-        raise RestoreFail("Failed to restore %s" % directory)
-
-
-def configure_loopback_interface(archive):
-    """ Restore and apply configuration for loopback interface """
-    utils.remove_interface_config_files()
-    restore_etc_file(
-        archive, utils.NETWORK_SCRIPTS_PATH,
-        'sysconfig/network-scripts/' + utils.NETWORK_SCRIPTS_LOOPBACK)
-    utils.restart_networking()
-
-
-def backup_ceph_crush_map(archive, staging_dir):
-    """ Backup ceph crush map """
-    try:
-        ceph_staging_dir = os.path.join(staging_dir, 'ceph')
-        os.mkdir(ceph_staging_dir, 0o655)
-        crushmap_file = os.path.join(ceph_staging_dir,
-                                     sysinv_constants.CEPH_CRUSH_MAP_BACKUP)
-        subprocess.check_call(['ceph', 'osd', 'getcrushmap',
-                               '-o', crushmap_file], stdout=DEVNULL,
-                              stderr=DEVNULL)
-        archive.add(crushmap_file, arcname='ceph/' +
-                    sysinv_constants.CEPH_CRUSH_MAP_BACKUP)
-    except Exception as e:
-        LOG.error('Failed to backup ceph crush map. Reason: {}'.format(e))
-        raise BackupFail('Failed to backup ceph crush map')
-
-
-def restore_ceph_crush_map(archive):
-    """ Restore ceph crush map """
-    if not file_exists_in_archive(archive, 'ceph/' +
-                                  sysinv_constants.CEPH_CRUSH_MAP_BACKUP):
-        return
-
-    try:
-        crush_map_file = 'ceph/' + sysinv_constants.CEPH_CRUSH_MAP_BACKUP
-        if file_exists_in_archive(archive, crush_map_file):
-            member = archive.getmember(crush_map_file)
-            # Copy the member to avoid changing the name for future
-            # operations on this member.
-            temp_member = copy.copy(member)
-            temp_member.name = os.path.basename(temp_member.name)
-            archive.extract(temp_member,
-                            path=sysinv_constants.SYSINV_CONFIG_PATH)
-
-    except tarfile.TarError as e:
-        LOG.error('Failed to restore crush map file. Reason: {}'.format(e))
-        raise RestoreFail('Failed to restore crush map file')
-
-
-def check_size(archive_dir):
-    """Check if there is enough space to create backup."""
-    backup_overhead_bytes = 1024 ** 3  # extra GB for staging directory
-
-    backup_size = (backup_overhead_bytes +
-                   backup_etc_size() +
-                   backup_config_size(tsconfig.CONFIG_PATH) +
-                   backup_puppet_data_size(constants.HIERADATA_PERMDIR) +
-                   backup_keyring_size(keyring_permdir) +
-                   backup_ldap_size() +
-                   backup_postgres_size() +
-                   backup_std_dir_size(home_permdir) +
-                   backup_std_dir_size(patching_permdir) +
-                   backup_std_dir_size(patching_repo_permdir) +
-                   backup_std_dir_size(extension_permdir) +
-                   backup_std_dir_size(patch_vault_permdir) +
-                   backup_armada_manifest_size(constants.ARMADA_PERMDIR) +
-                   backup_std_dir_size(constants.HELM_CHARTS_PERMDIR) +
-                   backup_mariadb_size()
-                   )
-
-    archive_dir_free_space = \
-        utils.filesystem_get_free_space(archive_dir)
-
-    if backup_size > archive_dir_free_space:
-        print("Archive directory (%s) does not have enough free "
-              "space (%s), estimated backup size is %s." %
-              (archive_dir, utils.print_bytes(archive_dir_free_space),
-               utils.print_bytes(backup_size)))
-
-        raise BackupFail("Not enough free space for backup.")
-
-
-def backup(backup_name, archive_dir, clone=False):
-    """Backup configuration."""
-
-    if not os.path.isdir(archive_dir):
-        raise BackupFail("Archive directory (%s) not found." % archive_dir)
-
-    if not utils.is_active("management-ip"):
-        raise BackupFail(
-            "Backups can only be performed from the active controller.")
-
-    if os.path.isfile(backup_in_progress):
-        raise BackupFail("Backup already in progress.")
-    else:
-        open(backup_in_progress, 'w')
-
-    fmApi = fm_api.FaultAPIs()
-    entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST,
-                                    sysinv_constants.CONTROLLER_HOSTNAME)
-    fault = fm_api.Fault(alarm_id=fm_constants.FM_ALARM_ID_BACKUP_IN_PROGRESS,
-                         alarm_state=fm_constants.FM_ALARM_STATE_SET,
-                         entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST,
-                         entity_instance_id=entity_instance_id,
-                         severity=fm_constants.FM_ALARM_SEVERITY_MINOR,
-                         reason_text=("System Backup in progress."),
-                         # operational
-                         alarm_type=fm_constants.FM_ALARM_TYPE_7,
-                         # congestion
-                         probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_8,
-                         proposed_repair_action=("No action required."),
-                         service_affecting=False)
-
-    fmApi.set_fault(fault)
-
-    staging_dir = None
-    system_tar_path = None
-    warnings = ''
-    try:
-        os.chdir('/')
-
-        if not clone:
-            check_size(archive_dir)
-
-        print ("\nPerforming backup (this might take several minutes):")
-        staging_dir = tempfile.mkdtemp(dir=archive_dir)
-
-        system_tar_path = os.path.join(archive_dir,
-                                       backup_name + '_system.tgz')
-        system_archive = tarfile.open(system_tar_path, "w:gz")
-
-        step = 1
-        total_steps = 16
-
-        # Step 1: Backup etc
-        backup_etc(system_archive)
-        utils.progress(total_steps, step, 'backup etc', 'DONE')
-        step += 1
-
-        # Step 2: Backup configuration
-        backup_config(system_archive, tsconfig.CONFIG_PATH)
-        utils.progress(total_steps, step, 'backup configuration', 'DONE')
-        step += 1
-
-        # Step 3: Backup puppet data
-        backup_puppet_data(system_archive, constants.HIERADATA_PERMDIR)
-        utils.progress(total_steps, step, 'backup puppet data', 'DONE')
-        step += 1
-
-        # Step 4: Backup armada data
-        backup_armada_manifest_data(system_archive, constants.ARMADA_PERMDIR)
-        utils.progress(total_steps, step, 'backup armada data', 'DONE')
-        step += 1
-
-        # Step 5: Backup helm charts data
-        backup_std_dir(system_archive, constants.HELM_CHARTS_PERMDIR)
-        utils.progress(total_steps, step, 'backup helm charts', 'DONE')
-        step += 1
-
-        # Step 6: Backup keyring
-        backup_keyring(system_archive, keyring_permdir)
-        utils.progress(total_steps, step, 'backup keyring', 'DONE')
-        step += 1
-
-        # Step 7: Backup ldap
-        backup_ldap(system_archive, staging_dir)
-        utils.progress(total_steps, step, 'backup ldap', 'DONE')
-        step += 1
-
-        # Step 8: Backup postgres
-        backup_postgres(system_archive, staging_dir)
-        utils.progress(total_steps, step, 'backup postgres', 'DONE')
-        step += 1
-
-        # Step 9: Backup mariadb
-        backup_mariadb(system_archive, staging_dir)
-        utils.progress(total_steps, step, 'backup mariadb', 'DONE')
-        step += 1
-
-        # Step 10: Backup home
-        backup_std_dir(system_archive, home_permdir)
-        utils.progress(total_steps, step, 'backup home directory', 'DONE')
-        step += 1
-
-        # Step 11: Backup patching
-        if not clone:
-            backup_std_dir(system_archive, patching_permdir)
-            utils.progress(total_steps, step, 'backup patching', 'DONE')
-        step += 1
-
-        # Step 12: Backup patching repo
-        if not clone:
-            backup_std_dir(system_archive, patching_repo_permdir)
-            utils.progress(total_steps, step, 'backup patching repo', 'DONE')
-        step += 1
-
-        # Step 13: Backup extension filesystem
-        backup_std_dir(system_archive, extension_permdir)
-        utils.progress(total_steps, step, 'backup extension filesystem '
-                                          'directory', 'DONE')
-        step += 1
-
-        # Step 14: Backup patch-vault filesystem
-        if os.path.exists(patch_vault_permdir):
-            backup_std_dir(system_archive, patch_vault_permdir)
-            utils.progress(total_steps, step, 'backup patch-vault filesystem '
-                                              'directory', 'DONE')
-        step += 1
-
-        # Step 15: Backup ceph crush map
-        backup_ceph_crush_map(system_archive, staging_dir)
-        utils.progress(total_steps, step, 'backup ceph crush map', 'DONE')
-        step += 1
-
-        # Step 16: Create archive
-        system_archive.close()
-        utils.progress(total_steps, step, 'create archive', 'DONE')
-        step += 1
-
-    except Exception:
-        if system_tar_path and os.path.isfile(system_tar_path):
-            os.remove(system_tar_path)
-
-        raise
-    finally:
-        fmApi.clear_fault(fm_constants.FM_ALARM_ID_BACKUP_IN_PROGRESS,
-                          entity_instance_id)
-        os.remove(backup_in_progress)
-        if staging_dir:
-            shutil.rmtree(staging_dir, ignore_errors=True)
-
-    system_msg = "System backup file created"
-    if not clone:
-        system_msg += ": " + system_tar_path
-
-    print(system_msg)
-    if warnings != '':
-        print("WARNING: The following problems occurred:")
-        print(textwrap.fill(warnings, 80))
-
-
-def create_restore_runtime_config(filename):
-    """ Create any runtime parameters needed for Restore."""
-    config = {}
-    # We need to re-enable Openstack password rules, which
-    # were previously disabled while the controller manifests
-    # were applying during a Restore
-    config['classes'] = ['keystone::security_compliance']
-    utils.create_manifest_runtime_config(filename, config)
-
-
-def restore_system(backup_file, include_storage_reinstall=False, clone=False):
-    """Restoring system configuration."""
-
-    if (os.path.exists(constants.CGCS_CONFIG_FILE) or
-            os.path.exists(tsconfig.CONFIG_PATH) or
-            os.path.exists(constants.INITIAL_CONFIG_COMPLETE_FILE)):
-        print(textwrap.fill(
-            "Configuration has already been done. "
-            "A system restore operation can only be done "
-            "immediately after the load has been installed.", 80))
-        print('')
-        raise RestoreFail("System configuration already completed")
-
-    if not os.path.isabs(backup_file):
-        raise RestoreFail("Backup file (%s) not found. Full path is "
-                          "required." % backup_file)
-
-    if os.path.isfile(restore_in_progress):
-        raise RestoreFail("Restore already in progress.")
-    else:
-        open(restore_in_progress, 'w')
-
-    # Add newline to console log for install-clone scenario
-    newline = clone
-    staging_dir = None
-
-    try:
-        try:
-            with open(os.devnull, "w") as fnull:
-                subprocess.check_call(["vgdisplay", "cgts-vg"],
-                                      stdout=fnull,
-                                      stderr=fnull)
-        except subprocess.CalledProcessError:
-            LOG.error("The cgts-vg volume group was not found")
-            raise RestoreFail("Volume groups not configured")
-
-        print("\nRestoring system (this will take several minutes):")
-        # Use /scratch for the staging dir for now,
-        # until /opt/backups is available
-        staging_dir = tempfile.mkdtemp(dir='/scratch')
-        # Permission change required or postgres restore fails
-        subprocess.call(['chmod', 'a+rx', staging_dir], stdout=DEVNULL)
-        os.chdir('/')
-
-        step = 1
-        total_steps = 26
-
-        # Step 1: Open archive and verify installed load matches backup
-        try:
-            archive = tarfile.open(backup_file)
-        except tarfile.TarError as e:
-            LOG.exception(e)
-            raise RestoreFail("Error opening backup file. Invalid backup "
-                              "file.")
-        check_load_versions(archive, staging_dir)
-        check_load_subfunctions(archive, staging_dir)
-        utils.progress(total_steps, step, 'open archive', 'DONE', newline)
-        step += 1
-
-        # Patching is potentially a multi-phase step.
-        # If the controller is impacted by patches from the backup,
-        # it must be rebooted before continuing the restore.
-        # If this is the second pass through, we can skip over this.
-        if not os.path.isfile(restore_patching_complete) and not clone:
-            # Step 2: Restore patching
-            restore_std_dir(archive, patching_permdir)
-            utils.progress(total_steps, step, 'restore patching', 'DONE',
-                           newline)
-            step += 1
-
-            # Step 3: Restore patching repo
-            restore_std_dir(archive, patching_repo_permdir)
-            utils.progress(total_steps, step, 'restore patching repo', 'DONE',
-                           newline)
-            step += 1
-
-            # Step 4: Apply patches
-            try:
-                subprocess.check_output(["sw-patch", "install-local"])
-            except subprocess.CalledProcessError:
-                LOG.error("Failed to install patches")
-                raise RestoreFail("Failed to install patches")
-            utils.progress(total_steps, step, 'install patches', 'DONE',
-                           newline)
-            step += 1
-
-            open(restore_patching_complete, 'w')
-
-        # If the controller was impacted by patches, we need to reboot.
-        if os.path.isfile(node_is_patched):
-            if not clone:
-                print("\nThis controller has been patched. " +
-                      "A reboot is required.")
-                print("After the reboot is complete, " +
-                      "re-execute the restore command.")
-                while True:
-                    user_input = input(
-                        "Enter 'reboot' to reboot controller: ")
-                    if user_input == 'reboot':
-                        break
-            LOG.info("This controller has been patched. Rebooting now")
-            print("\nThis controller has been patched. Rebooting now\n\n")
-            time.sleep(5)
-            os.remove(restore_in_progress)
-            if staging_dir:
-                shutil.rmtree(staging_dir, ignore_errors=True)
-            subprocess.call("reboot")
-
-        else:
-            # We need to restart the patch controller and agent, since
-            # we setup the repo and patch store outside its control
-            with open(os.devnull, "w") as devnull:
-                subprocess.call(
-                    ["systemctl",
-                     "restart",
-                     "sw-patch-controller-daemon.service"],
-                    stdout=devnull, stderr=devnull)
-                subprocess.call(
-                    ["systemctl",
-                     "restart",
-                     "sw-patch-agent.service"],
-                    stdout=devnull, stderr=devnull)
-            if clone:
-                # No patches were applied, return to cloning code
-                # to run validation code.
-                return RESTORE_RERUN_REQUIRED
-            else:
-                # Add the skipped steps
-                step += 3
-
-        if os.path.isfile(node_is_patched):
-            # If we get here, it means the node was patched by the user
-            # AFTER the restore applied patches and rebooted, but didn't
-            # reboot.
-            # This means the patch lineup no longer matches what's in the
-            # backup, but we can't (and probably shouldn't) prevent that.
-            # However, since this will ultimately cause the node to fail
-            # the goenabled step, we can fail immediately and force the
-            # user to reboot.
-            print ("\nThis controller has been patched, but not rebooted.")
-            print ("Please reboot before continuing the restore process.")
-            raise RestoreFail("Controller node patched without rebooting")
-
-        # Flag can now be cleared
-        if os.path.exists(restore_patching_complete):
-            os.remove(restore_patching_complete)
-
-        # Prefetch keyring
-        prefetch_keyring(archive)
-
-        # Step 5: Restore configuration
-        restore_configuration(archive, staging_dir)
-        # In AIO SX systems, the loopback interface is used as the management
-        # interface. However, the application of the interface manifest will
-        # not configure the necessary addresses on the loopback interface (see
-        # apply_network_config.sh for details). So, we need to configure the
-        # loopback interface here.
-        if tsconfig.system_mode == sysinv_constants.SYSTEM_MODE_SIMPLEX:
-            configure_loopback_interface(archive)
-        # Write the simplex flag
-        utils.write_simplex_flag()
-        utils.progress(total_steps, step, 'restore configuration', 'DONE',
-                       newline)
-        step += 1
-
-        # Step 6: Apply restore bootstrap manifest
-        controller_0_address = utils.get_address_from_hosts_file(
-            'controller-0')
-        restore_static_puppet_data(archive, constants.HIERADATA_WORKDIR)
-        try:
-            utils.apply_manifest(controller_0_address,
-                                 sysinv_constants.CONTROLLER,
-                                 'bootstrap',
-                                 constants.HIERADATA_WORKDIR)
-        except Exception as e:
-            LOG.exception(e)
-            raise RestoreFail(
-                'Failed to apply bootstrap manifest. '
-                'See /var/log/puppet/latest/puppet.log for details.')
-
-        utils.progress(total_steps, step, 'apply bootstrap manifest', 'DONE',
-                       newline)
-        step += 1
-
-        # Step 7: Restore puppet data
-        restore_puppet_data(archive, constants.HIERADATA_WORKDIR,
-                            controller_0_address)
-        utils.progress(total_steps, step, 'restore puppet data', 'DONE',
-                       newline)
-        step += 1
-
-        # Step 8: Persist configuration
-        utils.persist_config()
-        utils.progress(total_steps, step, 'persist configuration', 'DONE',
-                       newline)
-        step += 1
-
-        # Step 9: Apply controller manifest
-        try:
-            utils.apply_manifest(controller_0_address,
-                                 sysinv_constants.CONTROLLER,
-                                 'controller',
-                                 constants.HIERADATA_PERMDIR)
-        except Exception as e:
-            LOG.exception(e)
-            raise RestoreFail(
-                'Failed to apply controller manifest. '
-                'See /var/log/puppet/latest/puppet.log for details.')
-        utils.progress(total_steps, step, 'apply controller manifest', 'DONE',
-                       newline)
-        step += 1
-
-        # Step 10: Apply runtime controller manifests
-        restore_filename = os.path.join(staging_dir, 'restore.yaml')
-        create_restore_runtime_config(restore_filename)
-        try:
-            utils.apply_manifest(controller_0_address,
-                                 sysinv_constants.CONTROLLER,
-                                 'runtime',
-                                 constants.HIERADATA_PERMDIR,
-                                 runtime_filename=restore_filename)
-        except Exception as e:
-            LOG.exception(e)
-            raise RestoreFail(
-                'Failed to apply runtime controller manifest. '
-                'See /var/log/puppet/latest/puppet.log for details.')
-        utils.progress(total_steps, step,
-                       'apply runtime controller manifest', 'DONE',
-                       newline)
-        step += 1
-
-        # Move the staging dir under /opt/backups, now that it's setup
-        shutil.rmtree(staging_dir, ignore_errors=True)
-        staging_dir = tempfile.mkdtemp(dir=constants.BACKUPS_PATH)
-        # Permission change required or postgres restore fails
-        subprocess.call(['chmod', 'a+rx', staging_dir], stdout=DEVNULL)
-
-        # Step 11: Apply banner customization
-        utils.apply_banner_customization()
-        utils.progress(total_steps, step, 'apply banner customization', 'DONE',
-                       newline)
-        step += 1
-
-        # Step 12: Restore dnsmasq and pxeboot config
-        restore_dnsmasq(archive, tsconfig.CONFIG_PATH)
-        utils.progress(total_steps, step, 'restore dnsmasq', 'DONE', newline)
-        step += 1
-
-        # Step 13: Restore keyring
-        restore_keyring(archive, keyring_permdir)
-        utils.progress(total_steps, step, 'restore keyring', 'DONE', newline)
-        step += 1
-
-        # Step 14: Restore ldap
-        restore_ldap(archive, ldap_permdir, staging_dir)
-        utils.progress(total_steps, step, 'restore ldap', 'DONE', newline)
-        step += 1
-
-        # Step 15: Restore postgres
-        restore_postgres(archive, staging_dir)
-        utils.progress(total_steps, step, 'restore postgres', 'DONE', newline)
-        step += 1
-
-        # Step 16: Extract and store mariadb data
-        extract_mariadb_data(archive)
-        utils.progress(total_steps, step, 'extract mariadb', 'DONE', newline)
-        step += 1
-
-        # Step 17: Restore ceph crush map
-        restore_ceph_crush_map(archive)
-        utils.progress(total_steps, step, 'restore ceph crush map', 'DONE',
-                       newline)
-        step += 1
-
-        # Step 18: Restore home
-        restore_std_dir(archive, home_permdir)
-        utils.progress(total_steps, step, 'restore home directory', 'DONE',
-                       newline)
-        step += 1
-
-        # Step 19: Restore extension filesystem
-        restore_std_dir(archive, extension_permdir)
-        utils.progress(total_steps, step, 'restore extension filesystem '
-                                          'directory', 'DONE', newline)
-        step += 1
-
-        # Step 20: Restore patch-vault filesystem
-        if file_exists_in_archive(archive,
-                                  os.path.basename(patch_vault_permdir)):
-            restore_std_dir(archive, patch_vault_permdir)
-            utils.progress(total_steps, step, 'restore patch-vault filesystem '
-                                              'directory', 'DONE', newline)
-
-        step += 1
-
-        # Step 21: Restore external ceph configuration files.
-        restore_ceph_external_config_files(archive, staging_dir)
-        utils.progress(total_steps, step, 'restore CEPH external config',
-                       'DONE', newline)
-        step += 1
-
-        # Step 22: Restore Armada manifest
-        restore_armada_manifest_data(archive, constants.ARMADA_PERMDIR)
-        utils.progress(total_steps, step, 'restore armada manifest',
-                       'DONE', newline)
-        step += 1
-
-        # Step 23: Restore Helm charts
-        restore_std_dir(archive, constants.HELM_CHARTS_PERMDIR)
-        utils.progress(total_steps, step, 'restore helm charts',
-                       'DONE', newline)
-        step += 1
-
-        # Step 24: Create Helm overrides directory
-        create_helm_overrides_directory()
-        utils.progress(total_steps, step, 'create helm overrides directory',
-                       'DONE', newline)
-        step += 1
-
-        # Step 25: Shutdown file systems
-        archive.close()
-        shutil.rmtree(staging_dir, ignore_errors=True)
-        utils.shutdown_file_systems()
-        utils.progress(total_steps, step, 'shutdown file systems', 'DONE',
-                       newline)
-        step += 1
-
-        # Step 26: Recover services
-        utils.mtce_restart()
-        utils.mark_config_complete()
-        time.sleep(120)
-
-        for service in ['sysinv-conductor', 'sysinv-inv']:
-            if not utils.wait_sm_service(service):
-                raise RestoreFail("Services have failed to initialize.")
-
-        utils.progress(total_steps, step, 'recover services', 'DONE', newline)
-        step += 1
-
-        if tsconfig.system_mode != sysinv_constants.SYSTEM_MODE_SIMPLEX:
-
-            print("\nRestoring node states (this will take several minutes):")
-
-            with openstack.OpenStack() as client:
-                # On ceph setups storage nodes take about 90 seconds
-                # to become locked. Setting the timeout to 120 seconds
-                # for such setups
-                lock_timeout = 60
-                storage_hosts = sysinv.get_hosts(client.admin_token,
-                                                 client.conf['region_name'],
-                                                 personality='storage')
-                if storage_hosts:
-                    lock_timeout = 120
-
-                failed_lock_host = False
-                skip_hosts = ['controller-0']
-                if not include_storage_reinstall:
-                    if storage_hosts:
-                        install_uuid = utils.get_install_uuid()
-                        for h in storage_hosts:
-                            skip_hosts.append(h.name)
-
-                            # Update install_uuid on the storage node
-                            client.sysinv.ihost.update_install_uuid(
-                                h.uuid,
-                                install_uuid)
-
-                skip_hosts_count = len(skip_hosts)
-
-                # Wait for nodes to be identified as disabled before attempting
-                # to lock hosts. Even if after 3 minute nodes are still not
-                # identified as disabled, we still continue the restore.
-                if not client.wait_for_hosts_disabled(
-                        exempt_hostnames=skip_hosts,
-                        timeout=180):
-                    LOG.info("At least one node is not in a disabling state. "
-                             "Continuing.")
-
-                print("\nLocking nodes:")
-                try:
-                    failed_hosts = client.lock_hosts(skip_hosts,
-                                                     utils.progress,
-                                                     timeout=lock_timeout)
-                    # Don't power off nodes that could not be locked
-                    if len(failed_hosts) > 0:
-                        skip_hosts.append(failed_hosts)
-
-                except (KeystoneFail, SysInvFail) as e:
-                    LOG.exception(e)
-                    failed_lock_host = True
-
-                if not failed_lock_host:
-                    print("\nPowering-off nodes:")
-                    try:
-                        client.power_off_hosts(skip_hosts,
-                                               utils.progress,
-                                               timeout=60)
-                    except (KeystoneFail, SysInvFail) as e:
-                        LOG.exception(e)
-                        # this is somehow expected
-
-                if failed_lock_host or len(skip_hosts) > skip_hosts_count:
-                    if include_storage_reinstall:
-                        print(textwrap.fill(
-                            "Failed to lock at least one node. " +
-                            "Please lock the unlocked nodes manually.", 80
-                        ))
-                    else:
-                        print(textwrap.fill(
-                            "Failed to lock at least one node. " +
" + - "Please lock the unlocked controller-1 or " + - "worker nodes manually.", 80 - )) - - if not clone: - print(textwrap.fill( - "Before continuing to the next step in the restore, " + - "please ensure all nodes other than controller-0 " + - "and storage nodes, if they are not being " + - "reinstalled, are powered off. Please refer to the " + - "system administration guide for more details.", 80 - )) - - finally: - os.remove(restore_in_progress) - if staging_dir: - shutil.rmtree(staging_dir, ignore_errors=True) - cleanup_prefetched_keyring() - - fmApi = fm_api.FaultAPIs() - entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST, - sysinv_constants.CONTROLLER_HOSTNAME) - fault = fm_api.Fault( - alarm_id=fm_constants.FM_ALARM_ID_BACKUP_IN_PROGRESS, - alarm_state=fm_constants.FM_ALARM_STATE_MSG, - entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST, - entity_instance_id=entity_instance_id, - severity=fm_constants.FM_ALARM_SEVERITY_MINOR, - reason_text=("System Restore complete."), - # other - alarm_type=fm_constants.FM_ALARM_TYPE_0, - # unknown - probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_UNKNOWN, - proposed_repair_action=(""), - service_affecting=False) - - fmApi.set_fault(fault) - - if utils.get_system_type() == sysinv_constants.TIS_AIO_BUILD: - print("\nApplying worker manifests for %s. " % - (utils.get_controller_hostname())) - print("Node will reboot on completion.") - - sysinv.do_worker_config_complete(utils.get_controller_hostname()) - - # show in-progress log on console every 30 seconds - # until self reboot or timeout - - time.sleep(30) - for i in range(1, 10): - print("worker manifest apply in progress ... ") - time.sleep(30) - - raise RestoreFail("Timeout running worker manifests, " - "reboot did not occur") - - return RESTORE_COMPLETE diff --git a/controllerconfig/controllerconfig/controllerconfig/clone.py b/controllerconfig/controllerconfig/controllerconfig/clone.py deleted file mode 100644 index 41c8fe3d65..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/clone.py +++ /dev/null @@ -1,712 +0,0 @@ -# -# Copyright (c) 2017 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -Clone a Configured System and Install the image on another -identical hardware or the same hardware. -""" - -from __future__ import print_function -import os -import re -import glob -import time -import shutil -import netaddr -import tempfile -import fileinput -import subprocess - -from controllerconfig.common import constants -from sysinv.common import constants as si_const -from controllerconfig import sysinv_api -import tsconfig.tsconfig as tsconfig -from controllerconfig.common import log -from controllerconfig.common.exceptions import CloneFail -from controllerconfig.common.exceptions import BackupFail -from controllerconfig import utils -from controllerconfig import backup_restore - -DEBUG = False -LOG = log.get_logger(__name__) -DEVNULL = open(os.devnull, 'w') -CLONE_ARCHIVE_DIR = "clone-archive" -CLONE_ISO_INI = ".cloneiso.ini" -NAME = "name" -INSTALLED = "installed_at" -RESULT = "result" -IN_PROGRESS = "in-progress" -FAIL = "failed" -OK = "ok" - - -def clone_status(): - """ Check status of last install-clone. 
""" - INI_FILE1 = os.path.join("/", CLONE_ARCHIVE_DIR, CLONE_ISO_INI) - INI_FILE2 = os.path.join(tsconfig.PLATFORM_CONF_PATH, CLONE_ISO_INI) - name = "unknown" - result = "unknown" - installed_at = "unknown time" - for ini_file in [INI_FILE1, INI_FILE2]: - if os.path.exists(ini_file): - with open(ini_file) as f: - s = f.read() - for line in s.split("\n"): - if line.startswith(NAME): - name = line.split("=")[1].strip() - elif line.startswith(RESULT): - result = line.split("=")[1].strip() - elif line.startswith(INSTALLED): - installed_at = line.split("=")[1].strip() - break # one file was found, skip the other file - if result != "unknown": - if result == OK: - print("\nInstallation of cloned image [{}] was successful at {}\n" - .format(name, installed_at)) - elif result == FAIL: - print("\nInstallation of cloned image [{}] failed at {}\n" - .format(name, installed_at)) - else: - print("\ninstall-clone is in progress.\n") - else: - print("\nCloned image is not installed on this node.\n") - - -def check_size(archive_dir): - """ Check if there is enough space to create iso. """ - overhead_bytes = 1024 ** 3 # extra GB for staging directory - # Size of the cloned iso is directly proportional to the - # installed package repository (note that patches are a part of - # the system archive size below). - # 1G overhead size added (above) will accomodate the temporary - # workspace (updating system archive etc) needed to create the iso. - feed_dir = os.path.join('/www', 'pages', 'feed', - 'rel-' + tsconfig.SW_VERSION) - overhead_bytes += backup_restore.backup_std_dir_size(feed_dir) - - clone_size = ( - overhead_bytes + - backup_restore.backup_etc_size() + - backup_restore.backup_config_size(tsconfig.CONFIG_PATH) + - backup_restore.backup_puppet_data_size(constants.HIERADATA_PERMDIR) + - backup_restore.backup_keyring_size(backup_restore.keyring_permdir) + - backup_restore.backup_ldap_size() + - backup_restore.backup_postgres_size() + - backup_restore.backup_std_dir_size(backup_restore.home_permdir) + - backup_restore.backup_std_dir_size(backup_restore.patching_permdir) + - backup_restore.backup_std_dir_size( - backup_restore.patching_repo_permdir) + - backup_restore.backup_std_dir_size(backup_restore.extension_permdir) + - backup_restore.backup_std_dir_size( - backup_restore.patch_vault_permdir) + - backup_restore.backup_armada_manifest_size( - constants.ARMADA_PERMDIR) + - backup_restore.backup_std_dir_size( - constants.HELM_CHARTS_PERMDIR) + - backup_restore.backup_mariadb_size()) - - archive_dir_free_space = \ - utils.filesystem_get_free_space(archive_dir) - - if clone_size > archive_dir_free_space: - print("\nArchive directory (%s) does not have enough free " - "space (%s), estimated size to create image is %s." 
% - (archive_dir, - utils.print_bytes(archive_dir_free_space), - utils.print_bytes(clone_size))) - raise CloneFail("Not enough free space.\n") - - -def update_bootloader_default(bl_file, host): - """ Update bootloader files for cloned image """ - if not os.path.exists(bl_file): - LOG.error("{} does not exist".format(bl_file)) - raise CloneFail("{} does not exist".format(os.path.basename(bl_file))) - - # Tags should be in sync with common-bsp/files/centos.syslinux.cfg - # and common-bsp/files/grub.cfg - STANDARD_STANDARD = '0' - STANDARD_EXTENDED = 'S0' - AIO_STANDARD = '2' - AIO_EXTENDED = 'S2' - AIO_LL_STANDARD = '4' - AIO_LL_EXTENDED = 'S4' - if "grub.cfg" in bl_file: - STANDARD_STANDARD = 'standard>serial>' + \ - si_const.SYSTEM_SECURITY_PROFILE_STANDARD - STANDARD_EXTENDED = 'standard>serial>' + \ - si_const.SYSTEM_SECURITY_PROFILE_EXTENDED - AIO_STANDARD = 'aio>serial>' + \ - si_const.SYSTEM_SECURITY_PROFILE_STANDARD - AIO_EXTENDED = 'aio>serial>' + \ - si_const.SYSTEM_SECURITY_PROFILE_EXTENDED - AIO_LL_STANDARD = 'aio-lowlat>serial>' + \ - si_const.SYSTEM_SECURITY_PROFILE_STANDARD - AIO_LL_EXTENDED = 'aio-lowlat>serial>' + \ - si_const.SYSTEM_SECURITY_PROFILE_EXTENDED - SUBMENUITEM_TBOOT = 'tboot' - SUBMENUITEM_SECUREBOOT = 'secureboot' - - timeout_line = None - default_line = None - default_label_num = STANDARD_STANDARD - if utils.get_system_type() == si_const.TIS_AIO_BUILD: - if si_const.LOWLATENCY in tsconfig.subfunctions: - default_label_num = AIO_LL_STANDARD - else: - default_label_num = AIO_STANDARD - if (tsconfig.security_profile == - si_const.SYSTEM_SECURITY_PROFILE_EXTENDED): - default_label_num = STANDARD_EXTENDED - if utils.get_system_type() == si_const.TIS_AIO_BUILD: - if si_const.LOWLATENCY in tsconfig.subfunctions: - default_label_num = AIO_LL_EXTENDED - else: - default_label_num = AIO_EXTENDED - if "grub.cfg" in bl_file: - if host.tboot is not None: - if host.tboot == "true": - default_label_num = default_label_num + '>' + \ - SUBMENUITEM_TBOOT - else: - default_label_num = default_label_num + '>' + \ - SUBMENUITEM_SECUREBOOT - - try: - with open(bl_file) as f: - s = f.read() - for line in s.split("\n"): - if line.startswith("timeout"): - timeout_line = line - elif line.startswith("default"): - default_line = line - - if "grub.cfg" in bl_file: - replace = "default='{}'\ntimeout=10".format(default_label_num) - else: # isolinux format - replace = "default {}\ntimeout 10".format(default_label_num) - - if default_line and timeout_line: - s = s.replace(default_line, "") - s = s.replace(timeout_line, replace) - elif default_line: - s = s.replace(default_line, replace) - elif timeout_line: - s = s.replace(timeout_line, replace) - else: - s = replace + s - - s = re.sub(r'boot_device=[^\s]*', - 'boot_device=%s' % host.boot_device, - s) - s = re.sub(r'rootfs_device=[^\s]*', - 'rootfs_device=%s' % host.rootfs_device, - s) - s = re.sub(r'console=[^\s]*', - 'console=%s' % host.console, - s) - - with open(bl_file, "w") as f: - LOG.info("rewriting {}: label={} find=[{}][{}] replace=[{}]" - .format(bl_file, default_label_num, timeout_line, - default_line, replace.replace('\n', ''))) - f.write(s) - - except Exception as e: - LOG.error("update_bootloader_default failed: {}".format(e)) - raise CloneFail("Failed to update bootloader files") - - -def get_online_cpus(): - """ Get max cpu id """ - with open('/sys/devices/system/cpu/online') as f: - s = f.read() - max_cpu_id = s.split('-')[-1].strip() - LOG.info("Max cpu id:{} [{}]".format(max_cpu_id, s.strip())) - return max_cpu_id - return 
"" - - -def get_total_mem(): - """ Get total memory size """ - with open('/proc/meminfo') as f: - s = f.read() - for line in s.split("\n"): - if line.startswith("MemTotal:"): - mem_total = line.split()[1] - LOG.info("MemTotal:[{}]".format(mem_total)) - return mem_total - return "" - - -def get_disk_size(disk): - """ Get the disk size """ - disk_size = "" - try: - disk_size = subprocess.check_output( - ['lsblk', '--nodeps', '--output', 'SIZE', - '--noheadings', '--bytes', disk]) - except Exception as e: - LOG.exception(e) - LOG.error("Failed to get disk size [{}]".format(disk)) - raise CloneFail("Failed to get disk size") - return disk_size.strip() - - -def create_ini_file(clone_archive_dir, iso_name): - """Create clone ini file.""" - interfaces = "" - my_hostname = utils.get_controller_hostname() - macs = sysinv_api.get_mac_addresses(my_hostname) - for intf in macs.keys(): - interfaces += intf + " " - - disk_paths = "" - for _, _, files in os.walk('/dev/disk/by-path'): - for f in files: - if f.startswith("pci-") and "part" not in f and "usb" not in f: - disk_size = get_disk_size('/dev/disk/by-path/' + f) - disk_paths += f + "#" + disk_size + " " - break # no need to go into sub-dirs. - - LOG.info("create ini: {} {}".format(macs, files)) - with open(os.path.join(clone_archive_dir, CLONE_ISO_INI), 'w') as f: - f.write('[clone_iso]\n') - f.write('name=' + iso_name + '\n') - f.write('host=' + my_hostname + '\n') - f.write('created_at=' + time.strftime("%Y-%m-%d %H:%M:%S %Z") - + '\n') - f.write('interfaces=' + interfaces + '\n') - f.write('disks=' + disk_paths + '\n') - f.write('cpus=' + get_online_cpus() + '\n') - f.write('mem=' + get_total_mem() + '\n') - LOG.info("create ini: ({}) ({})".format(interfaces, disk_paths)) - - -def create_iso(iso_name, archive_dir): - """ Create iso image. This is modelled after - the cgcs-root/build-tools/build-iso tool. """ - try: - controller_0 = sysinv_api.get_host_data('controller-0') - except Exception as e: - e_log = "Failed to retrieve controller-0 inventory details." 
- LOG.exception(e_log) - raise CloneFail(e_log) - - iso_dir = os.path.join(archive_dir, 'isolinux') - clone_archive_dir = os.path.join(iso_dir, CLONE_ARCHIVE_DIR) - output = None - tmpdir = None - total_steps = 6 - step = 1 - print ("\nCreating ISO:") - - # Add the correct kick-start file to the image - ks_file = "controller_ks.cfg" - if utils.get_system_type() == si_const.TIS_AIO_BUILD: - if si_const.LOWLATENCY in tsconfig.subfunctions: - ks_file = "smallsystem_lowlatency_ks.cfg" - else: - ks_file = "smallsystem_ks.cfg" - - try: - # prepare the iso files - images_dir = os.path.join(iso_dir, 'images') - os.mkdir(images_dir, 0o644) - pxe_dir = os.path.join('/pxeboot', - 'rel-' + tsconfig.SW_VERSION) - os.symlink(pxe_dir + '/installer-bzImage', - iso_dir + '/vmlinuz') - os.symlink(pxe_dir + '/installer-initrd', - iso_dir + '/initrd.img') - utils.progress(total_steps, step, 'preparing files', 'DONE') - step += 1 - - feed_dir = os.path.join('/www', 'pages', 'feed', - 'rel-' + tsconfig.SW_VERSION) - os.symlink(feed_dir + '/Packages', iso_dir + '/Packages') - os.symlink(feed_dir + '/repodata', iso_dir + '/repodata') - os.symlink(feed_dir + '/LiveOS', iso_dir + '/LiveOS') - shutil.copy2(feed_dir + '/isolinux.cfg', iso_dir) - update_bootloader_default(iso_dir + '/isolinux.cfg', controller_0) - shutil.copyfile('/usr/share/syslinux/isolinux.bin', - iso_dir + '/isolinux.bin') - os.symlink('/usr/share/syslinux/vesamenu.c32', - iso_dir + '/vesamenu.c32') - for filename in glob.glob(os.path.join(feed_dir, '*ks.cfg')): - shutil.copy(os.path.join(feed_dir, filename), iso_dir) - utils.progress(total_steps, step, 'preparing files', 'DONE') - step += 1 - - efiboot_dir = os.path.join(iso_dir, 'EFI', 'BOOT') - os.makedirs(efiboot_dir, 0o644) - l_efi_dir = os.path.join('/boot', 'efi', 'EFI') - shutil.copy2(l_efi_dir + '/BOOT/BOOTX64.EFI', efiboot_dir) - shutil.copy2(l_efi_dir + '/centos/MokManager.efi', efiboot_dir) - shutil.copy2(l_efi_dir + '/centos/grubx64.efi', efiboot_dir) - shutil.copy2('/pxeboot/EFI/grub.cfg', efiboot_dir) - update_bootloader_default(efiboot_dir + '/grub.cfg', controller_0) - shutil.copytree(l_efi_dir + '/centos/fonts', - efiboot_dir + '/fonts') - # copy EFI boot image and update the grub.cfg file - efi_img = images_dir + '/efiboot.img' - shutil.copy2(pxe_dir + '/efiboot.img', efi_img) - tmpdir = tempfile.mkdtemp(dir=archive_dir) - output = subprocess.check_output( - ["mount", "-t", "vfat", "-o", "loop", - efi_img, tmpdir], - stderr=subprocess.STDOUT) - # replace the grub.cfg file with the updated file - efi_grub_f = os.path.join(tmpdir, 'EFI', 'BOOT', 'grub.cfg') - os.remove(efi_grub_f) - shutil.copy2(efiboot_dir + '/grub.cfg', efi_grub_f) - subprocess.call(['umount', tmpdir]) - shutil.rmtree(tmpdir, ignore_errors=True) - tmpdir = None - - epoch_time = "%.9f" % time.time() - disc_info = [epoch_time, tsconfig.SW_VERSION, "x86_64"] - with open(iso_dir + '/.discinfo', 'w') as f: - f.write('\n'.join(disc_info)) - - # copy the latest install_clone executable - shutil.copy2('/usr/bin/install_clone', iso_dir) - subprocess.check_output("cat /pxeboot/post_clone_iso_ks.cfg >> " + - iso_dir + "/" + ks_file, shell=True) - utils.progress(total_steps, step, 'preparing files', 'DONE') - step += 1 - - # copy patches - iso_patches_dir = os.path.join(iso_dir, 'patches') - iso_patch_repo_dir = os.path.join(iso_patches_dir, 'repodata') - iso_patch_pkgs_dir = os.path.join(iso_patches_dir, 'Packages') - iso_patch_metadata_dir = os.path.join(iso_patches_dir, 'metadata') - iso_patch_applied_dir = 
os.path.join(iso_patch_metadata_dir, 'applied') - iso_patch_committed_dir = os.path.join(iso_patch_metadata_dir, - 'committed') - - os.mkdir(iso_patches_dir, 0o755) - os.mkdir(iso_patch_repo_dir, 0o755) - os.mkdir(iso_patch_pkgs_dir, 0o755) - os.mkdir(iso_patch_metadata_dir, 0o755) - os.mkdir(iso_patch_applied_dir, 0o755) - os.mkdir(iso_patch_committed_dir, 0o755) - - repodata = '/www/pages/updates/rel-%s/repodata/' % tsconfig.SW_VERSION - pkgsdir = '/www/pages/updates/rel-%s/Packages/' % tsconfig.SW_VERSION - patch_applied_dir = '/opt/patching/metadata/applied/' - patch_committed_dir = '/opt/patching/metadata/committed/' - subprocess.check_call(['rsync', '-a', repodata, - '%s/' % iso_patch_repo_dir]) - if os.path.exists(pkgsdir): - subprocess.check_call(['rsync', '-a', pkgsdir, - '%s/' % iso_patch_pkgs_dir]) - if os.path.exists(patch_applied_dir): - subprocess.check_call(['rsync', '-a', patch_applied_dir, - '%s/' % iso_patch_applied_dir]) - if os.path.exists(patch_committed_dir): - subprocess.check_call(['rsync', '-a', patch_committed_dir, - '%s/' % iso_patch_committed_dir]) - utils.progress(total_steps, step, 'preparing files', 'DONE') - step += 1 - - create_ini_file(clone_archive_dir, iso_name) - - os.chmod(iso_dir + '/isolinux.bin', 0o664) - iso_file = os.path.join(archive_dir, iso_name + ".iso") - output = subprocess.check_output( - ["nice", "mkisofs", - "-o", iso_file, "-R", "-D", - "-A", "oe_iso_boot", "-V", "oe_iso_boot", - "-f", "-quiet", - "-b", "isolinux.bin", "-c", "boot.cat", "-no-emul-boot", - "-boot-load-size", "4", "-boot-info-table", - "-eltorito-alt-boot", "-e", "images/efiboot.img", - "-no-emul-boot", - iso_dir], - stderr=subprocess.STDOUT) - LOG.info("{} created: [{}]".format(iso_file, output)) - utils.progress(total_steps, step, 'iso created', 'DONE') - step += 1 - - output = subprocess.check_output( - ["nice", "isohybrid", - "--uefi", - iso_file], - stderr=subprocess.STDOUT) - LOG.debug("isohybrid: {}".format(output)) - - output = subprocess.check_output( - ["nice", "implantisomd5", - iso_file], - stderr=subprocess.STDOUT) - LOG.debug("implantisomd5: {}".format(output)) - utils.progress(total_steps, step, 'checksum implanted', 'DONE') - print("Cloned iso image created: {}".format(iso_file)) - - except Exception as e: - LOG.exception(e) - e_log = "ISO creation ({}) failed".format(iso_name) - if output: - e_log += ' [' + output + ']' - LOG.error(e_log) - raise CloneFail("ISO creation failed.") - - finally: - if tmpdir: - subprocess.call(['umount', tmpdir], stderr=DEVNULL) - shutil.rmtree(tmpdir, ignore_errors=True) - - -def find_and_replace_in_file(target, find, replace): - """ Find and replace a string in a file. """ - found = None - try: - for line in fileinput.FileInput(target, inplace=1): - if find in line: - # look for "find" string within word boundaries - fpat = r'\b' + find + r'\b' - line = re.sub(fpat, replace, line) - found = True - print(line, end='') - - except Exception as e: - LOG.error("Failed to replace [{}] with [{}] in [{}]: {}" - .format(find, replace, target, str(e))) - found = None - finally: - fileinput.close() - return found - - -def find_and_replace(target_list, find, replace): - """ Find and replace a string in all files in a directory. 
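Note that find_and_replace_in_file() above splices the raw search string into its word-boundary pattern, so a dotted value such as an IPv4 address is treated as regex syntax (each '.' matches any character). A hardened variant with re.escape(), offered only as a sketch of the safer pattern, not as what the source does:

import re

def replace_word(line, find, replace):
    # Escape the literal before adding \b anchors so '.' stays a dot.
    return re.sub(r'\b' + re.escape(find) + r'\b', replace, line)

print(replace_word('oam_ip=10.10.10.2;', '10.10.10.2', '192.0.10.2'))
# -> oam_ip=192.0.10.2;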
""" - found = False - file_list = [] - for target in target_list: - if os.path.isfile(target): - if find_and_replace_in_file(target, find, replace): - found = True - file_list.append(target) - elif os.path.isdir(target): - try: - output = subprocess.check_output( - ['grep', '-rl', find, target]) - if output: - for line in output.split('\n'): - if line and find_and_replace_in_file( - line, find, replace): - found = True - file_list.append(line) - except Exception: - pass # nothing found in that directory - if not found: - LOG.error("[{}] not found in backup".format(find)) - else: - LOG.info("Replaced [{}] with [{}] in {}".format( - find, replace, file_list)) - - -def remove_from_archive(archive, unwanted): - """ Remove a file from the archive. """ - try: - subprocess.check_call(["tar", "--delete", - "--file=" + archive, - unwanted]) - except subprocess.CalledProcessError as e: - LOG.error("Delete of {} failed: {}".format(unwanted, e.output)) - raise CloneFail("Failed to modify backup archive") - - -def update_oamip_in_archive(tmpdir): - """ Update OAM IP in system archive file. """ - oam_list = sysinv_api.get_oam_ip() - if not oam_list: - raise CloneFail("Failed to get OAM IP") - for oamfind in [oam_list.oam_start_ip, oam_list.oam_end_ip, - oam_list.oam_subnet, oam_list.oam_floating_ip, - oam_list.oam_c0_ip, oam_list.oam_c1_ip]: - if not oamfind: - continue - ip = netaddr.IPNetwork(oamfind) - find_str = "" - if ip.version == 4: - # if ipv4, use 192.0.x.x as the temporary oam ip - find_str = str(ip.ip) - ipstr_list = find_str.split('.') - ipstr_list[0] = '192' - ipstr_list[1] = '0' - repl_ipstr = ".".join(ipstr_list) - else: - # if ipv6, use 2001:db8:x as the temporary oam ip - find_str = str(ip.ip) - ipstr_list = find_str.split(':') - ipstr_list[0] = '2001' - ipstr_list[1] = 'db8' - repl_ipstr = ":".join(ipstr_list) - if repl_ipstr: - find_and_replace( - [os.path.join(tmpdir, 'etc/hosts'), - os.path.join(tmpdir, 'etc/sysconfig/network-scripts'), - os.path.join(tmpdir, 'etc/nfv/vim/config.ini'), - os.path.join(tmpdir, 'etc/haproxy/haproxy.cfg'), - os.path.join(tmpdir, 'etc/heat/heat.conf'), - os.path.join(tmpdir, 'etc/keepalived/keepalived.conf'), - os.path.join(tmpdir, 'etc/vswitch/vswitch.ini'), - os.path.join(tmpdir, 'etc/nova/nova.conf'), - os.path.join(tmpdir, 'config/hosts'), - os.path.join(tmpdir, 'hieradata'), - os.path.join(tmpdir, 'postgres/keystone.sql.data'), - os.path.join(tmpdir, 'postgres/sysinv.sql.data')], - find_str, repl_ipstr) - else: - LOG.error("Failed to modify OAM IP:[{}]" - .format(oamfind)) - raise CloneFail("Failed to modify OAM IP") - - -def update_mac_in_archive(tmpdir): - """ Update MAC addresses in system archive file. """ - hostname = utils.get_controller_hostname() - macs = sysinv_api.get_mac_addresses(hostname) - for intf, mac in macs.items(): - find_and_replace( - [os.path.join(tmpdir, 'postgres/sysinv.sql.data')], - mac, "CLONEISOMAC_{}{}".format(hostname, intf)) - - if (tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX or - tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX_DIRECT): - hostname = utils.get_mate_controller_hostname() - macs = sysinv_api.get_mac_addresses(hostname) - for intf, mac in macs.items(): - find_and_replace( - [os.path.join(tmpdir, 'postgres/sysinv.sql.data')], - mac, "CLONEISOMAC_{}{}".format(hostname, intf)) - - -def update_disk_serial_id_in_archive(tmpdir): - """ Update disk serial id in system archive file. 
""" - hostname = utils.get_controller_hostname() - disk_sids = sysinv_api.get_disk_serial_ids(hostname) - for d_dnode, d_sid in disk_sids.items(): - find_and_replace( - [os.path.join(tmpdir, 'postgres/sysinv.sql.data')], - d_sid, "CLONEISODISKSID_{}{}".format(hostname, d_dnode)) - - if (tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX or - tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX_DIRECT): - hostname = utils.get_mate_controller_hostname() - disk_sids = sysinv_api.get_disk_serial_ids(hostname) - for d_dnode, d_sid in disk_sids.items(): - find_and_replace( - [os.path.join(tmpdir, 'postgres/sysinv.sql.data')], - d_sid, "CLONEISODISKSID_{}{}".format(hostname, d_dnode)) - - -def update_sysuuid_in_archive(tmpdir): - """ Update system uuid in system archive file. """ - sysuuid = sysinv_api.get_system_uuid() - find_and_replace( - [os.path.join(tmpdir, 'postgres/sysinv.sql.data')], - sysuuid, "CLONEISO_SYSTEM_UUID") - - -def update_backup_archive(backup_name, archive_dir): - """ Update backup archive file to be included in clone-iso """ - path_to_archive = os.path.join(archive_dir, backup_name) - tmpdir = tempfile.mkdtemp(dir=archive_dir) - try: - subprocess.check_call( - ['gunzip', path_to_archive + '.tgz'], - stdout=DEVNULL, stderr=DEVNULL) - # 70-persistent-net.rules with the correct MACs will be - # generated on the linux boot on the cloned side. Remove - # the stale file from original side. - remove_from_archive(path_to_archive + '.tar', - 'etc/udev/rules.d/70-persistent-net.rules') - # Extract only a subset of directories which have files to be - # updated for oam-ip and MAC addresses. After updating the files - # these directories are added back to the archive. - subprocess.check_call( - ['tar', '-x', - '--directory=' + tmpdir, - '-f', path_to_archive + '.tar', - 'etc', 'postgres', 'config', - 'hieradata'], - stdout=DEVNULL, stderr=DEVNULL) - update_oamip_in_archive(tmpdir) - update_mac_in_archive(tmpdir) - update_disk_serial_id_in_archive(tmpdir) - update_sysuuid_in_archive(tmpdir) - subprocess.check_call( - ['tar', '--update', - '--directory=' + tmpdir, - '-f', path_to_archive + '.tar', - 'etc', 'postgres', 'config', - 'hieradata'], - stdout=DEVNULL, stderr=DEVNULL) - subprocess.check_call(['gzip', path_to_archive + '.tar']) - shutil.move(path_to_archive + '.tar.gz', path_to_archive + '.tgz') - - except Exception as e: - LOG.error("Update of backup archive {} failed {}".format( - path_to_archive, str(e))) - raise CloneFail("Failed to update backup archive") - - finally: - if not DEBUG: - shutil.rmtree(tmpdir, ignore_errors=True) - - -def validate_controller_state(): - """ Cloning allowed now? 
""" - # Check if this Controller is enabled and provisioned - try: - if not sysinv_api.controller_enabled_provisioned( - utils.get_controller_hostname()): - raise CloneFail("Controller is not enabled/provisioned") - if (tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX or - tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX_DIRECT): - if not sysinv_api.controller_enabled_provisioned( - utils.get_mate_controller_hostname()): - raise CloneFail("Mate controller is not enabled/provisioned") - except CloneFail: - raise - except Exception: - raise CloneFail("Controller is not enabled/provisioned") - - if utils.get_system_type() != si_const.TIS_AIO_BUILD: - raise CloneFail("Cloning supported only on All-in-one systems") - - if len(sysinv_api.get_alarms()) > 0: - raise CloneFail("There are active alarms on this system!") - - -def clone(backup_name, archive_dir): - """ Do Cloning """ - validate_controller_state() - LOG.info("Cloning [{}] at [{}]".format(backup_name, archive_dir)) - check_size(archive_dir) - - isolinux_dir = os.path.join(archive_dir, 'isolinux') - clone_archive_dir = os.path.join(isolinux_dir, CLONE_ARCHIVE_DIR) - if os.path.exists(isolinux_dir): - LOG.info("deleting old iso_dir %s" % isolinux_dir) - shutil.rmtree(isolinux_dir, ignore_errors=True) - os.makedirs(clone_archive_dir, 0o644) - - try: - backup_restore.backup(backup_name, clone_archive_dir, clone=True) - LOG.info("system backup done") - update_backup_archive(backup_name + '_system', clone_archive_dir) - create_iso(backup_name, archive_dir) - except BackupFail as e: - raise CloneFail(e.message) - except CloneFail as e: - raise - finally: - if not DEBUG: - shutil.rmtree(isolinux_dir, ignore_errors=True) diff --git a/controllerconfig/controllerconfig/controllerconfig/common/configobjects.py b/controllerconfig/controllerconfig/controllerconfig/common/configobjects.py deleted file mode 100644 index 1866a7c996..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/common/configobjects.py +++ /dev/null @@ -1,371 +0,0 @@ -""" -Copyright (c) 2015-2019 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -from netaddr import IPRange -from controllerconfig.common.exceptions import ConfigFail -from controllerconfig.common.exceptions import ValidateFail -from controllerconfig.utils import is_mtu_valid -from controllerconfig.utils import is_valid_vlan -from controllerconfig.utils import validate_network_str -from controllerconfig.utils import validate_address_str - -DEFAULT_CONFIG = 0 -REGION_CONFIG = 1 -SUBCLOUD_CONFIG = 2 - -MGMT_TYPE = 0 -INFRA_TYPE = 1 -OAM_TYPE = 2 -CLUSTER_TYPE = 3 -NETWORK_PREFIX_NAMES = [ - ('MGMT', 'INFRA', 'OAM', 'CLUSTER'), - ('CLM', 'BLS', 'CAN', 'CLUSTER') -] - -HOST_XML_ATTRIBUTES = ['hostname', 'personality', 'subfunctions', - 'mgmt_mac', 'mgmt_ip', - 'bm_ip', 'bm_type', 'bm_username', - 'bm_password', 'boot_device', 'rootfs_device', - 'install_output', 'console', 'vsc_controllers', - 'power_on', 'location'] - -# Network naming types -DEFAULT_NAMES = 0 -HP_NAMES = 1 - -# well-known default domain name -DEFAULT_DOMAIN_NAME = 'Default' - - -class LogicalInterface(object): - """ Represents configuration for a logical interface. 
- """ - def __init__(self): - self.name = None - self.mtu = None - self.lag_interface = False - self.lag_mode = None - self.ports = None - - def parse_config(self, system_config, logical_interface): - # Ensure logical interface config is present - if not system_config.has_section(logical_interface): - raise ConfigFail("Missing config for logical interface %s." % - logical_interface) - self.name = logical_interface - - # Parse/validate the MTU - self.mtu = system_config.getint(logical_interface, 'INTERFACE_MTU') - if not is_mtu_valid(self.mtu): - raise ConfigFail("Invalid MTU value for %s. " - "Valid values: 576 - 9216" % logical_interface) - - # Parse the ports - self.ports = [_f for _f in - [x.strip() for x in - system_config.get(logical_interface, - 'INTERFACE_PORTS').split(',')] - if _f] - - # Parse/validate the LAG config - lag_interface = system_config.get(logical_interface, - 'LAG_INTERFACE') - if lag_interface.lower() == 'y': - self.lag_interface = True - if len(self.ports) != 2: - raise ConfigFail( - "Invalid number of ports (%d) supplied for LAG " - "interface %s" % (len(self.ports), logical_interface)) - self.lag_mode = system_config.getint(logical_interface, 'LAG_MODE') - if self.lag_mode < 1 or self.lag_mode > 6: - raise ConfigFail( - "Invalid LAG_MODE value of %d for %s. Valid values: 1-6" % - (self.lag_mode, logical_interface)) - elif lag_interface.lower() == 'n': - if len(self.ports) > 1: - raise ConfigFail( - "More than one interface supplied for non-LAG " - "interface %s" % logical_interface) - if len(self.ports) == 0: - raise ConfigFail( - "No interfaces supplied for non-LAG " - "interface %s" % logical_interface) - else: - raise ConfigFail( - "Invalid LAG_INTERFACE value of %s for %s. Valid values: " - "Y or N" % (lag_interface, logical_interface)) - - -class Network(object): - """ Represents configuration for a network. - """ - def __init__(self): - self.vlan = None - self.cidr = None - self.multicast_cidr = None - self.start_address = None - self.end_address = None - self.start_end_in_config = False - self.floating_address = None - self.address_0 = None - self.address_1 = None - self.dynamic_allocation = False - self.gateway_address = None - self.logical_interface = None - - def parse_config(self, system_config, config_type, network_type, - min_addresses=0, multicast_addresses=0, optional=False, - naming_type=DEFAULT_NAMES, - logical_interface_required=True): - network_prefix = NETWORK_PREFIX_NAMES[naming_type][network_type] - network_name = network_prefix + '_NETWORK' - - if naming_type == HP_NAMES: - attr_prefix = network_prefix + '_' - else: - attr_prefix = '' - - # Ensure network config is present - if not system_config.has_section(network_name): - if not optional: - raise ConfigFail("Missing config for network %s." % - network_name) - else: - # Optional interface - just return - return - - # Parse/validate the VLAN - if system_config.has_option(network_name, attr_prefix + 'VLAN'): - self.vlan = system_config.getint(network_name, - attr_prefix + 'VLAN') - if self.vlan: - if not is_valid_vlan(self.vlan): - raise ConfigFail( - "Invalid %s value of %d for %s. 
Valid values: 1-4094" % - (attr_prefix + 'VLAN', self.vlan, network_name)) - - # Parse/validate the cidr - cidr_str = system_config.get(network_name, attr_prefix + 'CIDR') - try: - self.cidr = validate_network_str( - cidr_str, min_addresses) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 'CIDR', cidr_str, network_name, e)) - - # Parse/validate the multicast subnet - if 0 < multicast_addresses and \ - system_config.has_option(network_name, - attr_prefix + 'MULTICAST_CIDR'): - multicast_cidr_str = system_config.get(network_name, attr_prefix + - 'MULTICAST_CIDR') - try: - self.multicast_cidr = validate_network_str( - multicast_cidr_str, multicast_addresses, multicast=True) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 'MULTICAST_CIDR', multicast_cidr_str, - network_name, e)) - - if self.cidr.version != self.multicast_cidr.version: - raise ConfigFail( - "Invalid %s value of %s for %s. Multicast " - "subnet and network IP families must be the same." % - (attr_prefix + 'MULTICAST_CIDR', multicast_cidr_str, - network_name)) - - # Parse/validate the hardwired controller addresses - floating_address_str = None - address_0_str = None - address_1_str = None - - if min_addresses == 1: - if (system_config.has_option( - network_name, attr_prefix + 'IP_FLOATING_ADDRESS') or - system_config.has_option( - network_name, attr_prefix + 'IP_UNIT_0_ADDRESS') or - system_config.has_option( - network_name, attr_prefix + 'IP_UNIT_1_ADDRESS') or - system_config.has_option( - network_name, attr_prefix + 'IP_START_ADDRESS') or - system_config.has_option( - network_name, attr_prefix + 'IP_END_ADDRESS')): - raise ConfigFail( - "Only one IP address is required for OAM " - "network, use 'IP_ADDRESS' to specify the OAM IP " - "address") - floating_address_str = system_config.get( - network_name, attr_prefix + 'IP_ADDRESS') - try: - self.floating_address = validate_address_str( - floating_address_str, self.cidr) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 'IP_ADDRESS', - floating_address_str, network_name, e)) - self.address_0 = self.floating_address - self.address_1 = self.floating_address - else: - if system_config.has_option( - network_name, attr_prefix + 'IP_FLOATING_ADDRESS'): - floating_address_str = system_config.get( - network_name, attr_prefix + 'IP_FLOATING_ADDRESS') - try: - self.floating_address = validate_address_str( - floating_address_str, self.cidr) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 'IP_FLOATING_ADDRESS', - floating_address_str, network_name, e)) - - if system_config.has_option( - network_name, attr_prefix + 'IP_UNIT_0_ADDRESS'): - address_0_str = system_config.get( - network_name, attr_prefix + 'IP_UNIT_0_ADDRESS') - try: - self.address_0 = validate_address_str( - address_0_str, self.cidr) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 'IP_UNIT_0_ADDRESS', - address_0_str, network_name, e)) - - if system_config.has_option( - network_name, attr_prefix + 'IP_UNIT_1_ADDRESS'): - address_1_str = system_config.get( - network_name, attr_prefix + 'IP_UNIT_1_ADDRESS') - try: - self.address_1 = validate_address_str( - address_1_str, self.cidr) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 
'IP_UNIT_1_ADDRESS', - address_1_str, network_name, e)) - - # Parse/validate the start/end addresses - start_address_str = None - end_address_str = None - if system_config.has_option( - network_name, attr_prefix + 'IP_START_ADDRESS'): - start_address_str = system_config.get( - network_name, attr_prefix + 'IP_START_ADDRESS') - try: - self.start_address = validate_address_str( - start_address_str, self.cidr) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 'IP_START_ADDRESS', - start_address_str, network_name, e)) - - if system_config.has_option( - network_name, attr_prefix + 'IP_END_ADDRESS'): - end_address_str = system_config.get( - network_name, attr_prefix + 'IP_END_ADDRESS') - try: - self.end_address = validate_address_str( - end_address_str, self.cidr) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s " % - (attr_prefix + 'IP_END_ADDRESS', - end_address_str, network_name, e)) - - if start_address_str or end_address_str: - if not end_address_str: - raise ConfigFail("Missing attribute %s for %s_NETWORK" % - (attr_prefix + 'IP_END_ADDRESS', - network_name)) - if not start_address_str: - raise ConfigFail("Missing attribute %s for %s_NETWORK" % - (attr_prefix + 'IP_START_ADDRESS', - network_name)) - if not self.start_address < self.end_address: - raise ConfigFail( - "Start address %s not less than end address %s for %s." - % (str(self.start_address), str(self.end_address), - network_name)) - if not IPRange(start_address_str, end_address_str).size >= \ - min_addresses: - raise ConfigFail("Address range for %s must contain at " - "least %d addresses." % - (network_name, min_addresses)) - self.start_end_in_config = True - - if floating_address_str or address_0_str or address_1_str: - if not floating_address_str: - raise ConfigFail("Missing attribute %s for %s_NETWORK" % - (attr_prefix + 'IP_FLOATING_ADDRESS', - network_name)) - if not address_0_str: - raise ConfigFail("Missing attribute %s for %s_NETWORK" % - (attr_prefix + 'IP_UNIT_0_ADDRESS', - network_name)) - if not address_1_str: - raise ConfigFail("Missing attribute %s for %s_NETWORK" % - (attr_prefix + 'IP_UNIT_1_ADDRESS', - network_name)) - - if start_address_str and floating_address_str: - raise ConfigFail("Overspecified network: Can only set %s " - "and %s OR %s, %s, and %s for " - "%s_NETWORK" % - (attr_prefix + 'IP_START_ADDRESS', - attr_prefix + 'IP_END_ADDRESS', - attr_prefix + 'IP_FLOATING_ADDRESS', - attr_prefix + 'IP_UNIT_0_ADDRESS', - attr_prefix + 'IP_UNIT_1_ADDRESS', - network_name)) - - if config_type == DEFAULT_CONFIG: - if not self.start_address: - self.start_address = self.cidr[2] - if not self.end_address: - self.end_address = self.cidr[-2] - - # Parse/validate the dynamic IP address allocation - if system_config.has_option(network_name, - 'DYNAMIC_ALLOCATION'): - dynamic_allocation = system_config.get(network_name, - 'DYNAMIC_ALLOCATION') - if dynamic_allocation.lower() == 'y': - self.dynamic_allocation = True - elif dynamic_allocation.lower() == 'n': - self.dynamic_allocation = False - else: - raise ConfigFail( - "Invalid DYNAMIC_ALLOCATION value of %s for %s. 
" - "Valid values: Y or N" % - (dynamic_allocation, network_name)) - - # Parse/validate the gateway (optional) - if system_config.has_option(network_name, attr_prefix + 'GATEWAY'): - gateway_address_str = system_config.get( - network_name, attr_prefix + 'GATEWAY') - try: - self.gateway_address = validate_address_str( - gateway_address_str, self.cidr) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 'GATEWAY', - gateway_address_str, network_name, e)) - - # Parse/validate the logical interface - if logical_interface_required or system_config.has_option( - network_name, attr_prefix + 'LOGICAL_INTERFACE'): - logical_interface_name = system_config.get( - network_name, attr_prefix + 'LOGICAL_INTERFACE') - self.logical_interface = LogicalInterface() - self.logical_interface.parse_config(system_config, - logical_interface_name) diff --git a/controllerconfig/controllerconfig/controllerconfig/common/constants.py b/controllerconfig/controllerconfig/controllerconfig/common/constants.py index 6f0059c7a8..8581c3fbdc 100644 --- a/controllerconfig/controllerconfig/controllerconfig/common/constants.py +++ b/controllerconfig/controllerconfig/controllerconfig/common/constants.py @@ -1,10 +1,9 @@ # -# Copyright (c) 2016-2019 Wind River Systems, Inc. +# Copyright (c) 2016-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -from sysinv.common import constants as sysinv_constants from tsconfig import tsconfig @@ -15,70 +14,9 @@ CONFIG_PERMDIR = tsconfig.CONFIG_PATH HIERADATA_WORKDIR = '/tmp/hieradata' HIERADATA_PERMDIR = tsconfig.PUPPET_PATH + 'hieradata' -ARMADA_PERMDIR = tsconfig.ARMADA_PATH -HELM_CHARTS_PERMDIR = tsconfig.PLATFORM_PATH + '/helm_charts' -HELM_OVERRIDES_PERMDIR = tsconfig.HELM_OVERRIDES_PATH - KEYRING_WORKDIR = '/tmp/python_keyring' KEYRING_PERMDIR = tsconfig.KEYRING_PATH INITIAL_CONFIG_COMPLETE_FILE = '/etc/platform/.initial_config_complete' -CONFIG_FAIL_FILE = '/var/run/.config_fail' -COMMON_CERT_FILE = "/etc/ssl/private/server-cert.pem" -FIREWALL_RULES_FILE = '/etc/platform/iptables.rules' -OPENSTACK_PASSWORD_RULES_FILE = '/etc/keystone/password-rules.conf' -INSTALLATION_FAILED_FILE = '/etc/platform/installation_failed' BACKUPS_PATH = '/opt/backups' - -INTERFACES_LOG_FILE = "/tmp/configure_interfaces.log" - -LINK_MTU_DEFAULT = "1500" - -CINDER_LVM_THIN = "thin" -CINDER_LVM_THICK = "thick" - -DEFAULT_DATABASE_STOR_SIZE = \ - sysinv_constants.DEFAULT_DATABASE_STOR_SIZE -DEFAULT_SMALL_DATABASE_STOR_SIZE = \ - sysinv_constants.DEFAULT_SMALL_DATABASE_STOR_SIZE -DEFAULT_SMALL_BACKUP_STOR_SIZE = \ - sysinv_constants.DEFAULT_SMALL_BACKUP_STOR_SIZE -DEFAULT_VIRTUAL_DATABASE_STOR_SIZE = \ - sysinv_constants.DEFAULT_VIRTUAL_DATABASE_STOR_SIZE -DEFAULT_VIRTUAL_BACKUP_STOR_SIZE = \ - sysinv_constants.DEFAULT_VIRTUAL_BACKUP_STOR_SIZE -DEFAULT_EXTENSION_STOR_SIZE = \ - sysinv_constants.DEFAULT_EXTENSION_STOR_SIZE -DEFAULT_PLATFORM_STOR_SIZE = \ - sysinv_constants.DEFAULT_PLATFORM_STOR_SIZE - -SYSTEM_CONFIG_TIMEOUT = 420 -SERVICE_ENABLE_TIMEOUT = 180 -MINIMUM_ROOT_DISK_SIZE = 500 -MAXIMUM_CGCS_LV_SIZE = 500 -LDAP_CONTROLLER_CONFIGURE_TIMEOUT = 30 -SYSADMIN_MAX_PASSWORD_AGE = 45 # 45 days - -LAG_MODE_ACTIVE_BACKUP = "active-backup" -LAG_MODE_BALANCE_XOR = "balance-xor" -LAG_MODE_8023AD = "802.3ad" - -LAG_TXHASH_LAYER2 = "layer2" - -LAG_MIIMON_FREQUENCY = 100 - -LOOPBACK_IFNAME = 'lo' - -DEFAULT_MULTICAST_SUBNET_IPV4 = '239.1.1.0/28' -DEFAULT_MULTICAST_SUBNET_IPV6 = 'ff08::1:1:0/124' - -DEFAULT_MGMT_ON_LOOPBACK_SUBNET_IPV4 = 
'192.168.204.0/28' - -DEFAULT_REGION_NAME = "RegionOne" -DEFAULT_SERVICE_PROJECT_NAME = "services" - -SSH_WARNING_MESSAGE = "WARNING: Command should only be run from the " \ - "console. Continuing with this terminal may cause " \ - "loss of connectivity and configuration failure." -SSH_ERROR_MESSAGE = "ERROR: Command should only be run from the console." diff --git a/controllerconfig/controllerconfig/controllerconfig/common/crypt.py b/controllerconfig/controllerconfig/controllerconfig/common/crypt.py deleted file mode 100644 index ce53d73f80..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/common/crypt.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -Routines for URL-safe encrypting/decrypting - -Cloned from git/glance/common -""" - -import base64 -import os -import random - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import algorithms -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers import modes -from oslo_utils import encodeutils -import six -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - - -def urlsafe_encrypt(key, plaintext, blocksize=16): - """Encrypts plaintext. - - Resulting ciphertext will contain URL-safe characters. - If plaintext is Unicode, encode it to UTF-8 before encryption. - - :param key: AES secret key - :param plaintext: Input text to be encrypted - :param blocksize: Non-zero integer multiple of AES blocksize in bytes (16) - :returns: Resulting ciphertext - """ - - def pad(text): - """Pads text to be encrypted""" - pad_length = (blocksize - len(text) % blocksize) - # NOTE(rosmaita): I know this looks stupid, but we can't just - # use os.urandom() to get the bytes because we use char(0) as - # a delimiter - pad = b''.join(six.int2byte(random.SystemRandom().randint(1, 0xFF)) - for i in range(pad_length - 1)) - # We use chr(0) as a delimiter between text and padding - return text + b'\0' + pad - - plaintext = encodeutils.to_utf8(plaintext) - key = encodeutils.to_utf8(key) - # random initial 16 bytes for CBC - init_vector = os.urandom(16) - backend = default_backend() - cypher = Cipher(algorithms.AES(key), modes.CBC(init_vector), - backend=backend) - encryptor = cypher.encryptor() - padded = encryptor.update( - pad(six.binary_type(plaintext))) + encryptor.finalize() - encoded = base64.urlsafe_b64encode(init_vector + padded) - if six.PY3: - encoded = encoded.decode('ascii') - return encoded - - -def urlsafe_decrypt(key, ciphertext): - """Decrypts URL-safe base64 encoded ciphertext. - - On Python 3, the result is decoded from UTF-8. 
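An illustrative round trip through urlsafe_encrypt() above and the urlsafe_decrypt() that follows, assuming both functions are in scope; the 16-byte key is an example value (AES keys must be 16, 24, or 32 bytes):

key = b'0123456789abcdef'   # example 16-byte AES key, not from the source
token = urlsafe_encrypt(key, b'admin-password')
assert urlsafe_decrypt(key, token) == 'admin-password'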
- - :param key: AES secret key - :param ciphertext: The encrypted text to decrypt - - :returns: Resulting plaintext - """ - # Cast from unicode - ciphertext = encodeutils.to_utf8(ciphertext) - key = encodeutils.to_utf8(key) - ciphertext = base64.urlsafe_b64decode(ciphertext) - backend = default_backend() - cypher = Cipher(algorithms.AES(key), modes.CBC(ciphertext[:16]), - backend=backend) - decryptor = cypher.decryptor() - padded = decryptor.update(ciphertext[16:]) + decryptor.finalize() - text = padded[:padded.rfind(b'\0')] - if six.PY3: - text = text.decode('utf-8') - return text diff --git a/controllerconfig/controllerconfig/controllerconfig/common/dcmanager.py b/controllerconfig/controllerconfig/controllerconfig/common/dcmanager.py deleted file mode 100755 index c88c69cc1e..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/common/dcmanager.py +++ /dev/null @@ -1,44 +0,0 @@ -# -# Copyright (c) 2017-2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -DC Manager Interactions -""" - -from controllerconfig.common import log - -from Crypto.Hash import MD5 -from controllerconfig.common import crypt - -import json - - -LOG = log.get_logger(__name__) - - -class UserList(object): - """ - User List - """ - def __init__(self, user_data, hash_string): - # Decrypt the data using input hash_string to generate - # the key - h = MD5.new() - h.update(hash_string) - encryption_key = h.hexdigest() - user_data_decrypted = crypt.urlsafe_decrypt(encryption_key, - user_data) - - self._data = json.loads(user_data_decrypted) - - def get_password(self, name): - """ - Search the users for the password - """ - for user in self._data: - if user['name'] == name: - return user['password'] - return None diff --git a/controllerconfig/controllerconfig/controllerconfig/common/exceptions.py b/controllerconfig/controllerconfig/controllerconfig/common/exceptions.py index 66a4b7e1c3..b42526bff3 100644 --- a/controllerconfig/controllerconfig/controllerconfig/common/exceptions.py +++ b/controllerconfig/controllerconfig/controllerconfig/common/exceptions.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2014-2019 Wind River Systems, Inc. +# Copyright (c) 2014-2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -20,56 +20,21 @@ class ConfigError(Exception): return self.message or "" -class ConfigFail(ConfigError): - """General configuration error.""" - pass - - class ValidateFail(ConfigError): """Validation of data failed.""" pass -class BackupFail(ConfigError): - """Backup error.""" - pass - - class UpgradeFail(ConfigError): """Upgrade error.""" pass -class BackupWarn(ConfigError): - """Backup warning.""" - pass - - -class RestoreFail(ConfigError): - """Backup error.""" - pass - - class KeystoneFail(ConfigError): """Keystone error.""" pass -class SysInvFail(ConfigError): - """System Inventory error.""" - pass - - -class UserQuit(ConfigError): - """User initiated quit operation.""" - pass - - -class CloneFail(ConfigError): - """Clone error.""" - pass - - class TidyStorageFail(ConfigError): """Tidy storage error.""" pass diff --git a/controllerconfig/controllerconfig/controllerconfig/common/keystone.py b/controllerconfig/controllerconfig/controllerconfig/common/keystone.py index 34e86063ec..f0ef3f408f 100755 --- a/controllerconfig/controllerconfig/controllerconfig/common/keystone.py +++ b/controllerconfig/controllerconfig/controllerconfig/common/keystone.py @@ -12,10 +12,9 @@ import datetime import iso8601 from controllerconfig.common.exceptions import KeystoneFail -from controllerconfig.common import log +from oslo_log import log - -LOG = log.get_logger(__name__) +LOG = log.getLogger(__name__) class Token(object): diff --git a/controllerconfig/controllerconfig/controllerconfig/common/log.py b/controllerconfig/controllerconfig/controllerconfig/common/log.py deleted file mode 100644 index d3844d5e72..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/common/log.py +++ /dev/null @@ -1,49 +0,0 @@ -# -# Copyright (c) 2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -Logging -""" - -import logging -import logging.handlers - -_loggers = {} - - -def get_logger(name): - """ Get a logger or create one """ - - if name not in _loggers: - _loggers[name] = logging.getLogger(name) - - return _loggers[name] - - -def setup_logger(logger): - """ Setup a logger """ - - # Send logs to /var/log/platform.log - syslog_facility = logging.handlers.SysLogHandler.LOG_LOCAL1 - - formatter = logging.Formatter("configassistant[%(process)d] " + - "%(pathname)s:%(lineno)s " + - "%(levelname)8s [%(name)s] %(message)s") - - handler = logging.handlers.SysLogHandler(address='/dev/log', - facility=syslog_facility) - handler.setLevel(logging.INFO) - handler.setFormatter(formatter) - - logger.addHandler(handler) - logger.setLevel(logging.INFO) - - -def configure(): - """ Setup logging """ - - for logger in _loggers: - setup_logger(_loggers[logger]) diff --git a/controllerconfig/controllerconfig/controllerconfig/common/rest_api_utils.py b/controllerconfig/controllerconfig/controllerconfig/common/rest_api_utils.py index 45e06f849a..8122216957 100755 --- a/controllerconfig/controllerconfig/controllerconfig/common/rest_api_utils.py +++ b/controllerconfig/controllerconfig/controllerconfig/common/rest_api_utils.py @@ -1,5 +1,5 @@ """ -Copyright (c) 2015-2017 Wind River Systems, Inc. +Copyright (c) 2015-2020 Wind River Systems, Inc. 
SPDX-License-Identifier: Apache-2.0 @@ -7,16 +7,15 @@ SPDX-License-Identifier: Apache-2.0 import json from controllerconfig.common.exceptions import KeystoneFail -from controllerconfig.common import dcmanager from controllerconfig.common import keystone -from controllerconfig.common import log from six.moves import http_client as httplib from six.moves.urllib import request as urlrequest from six.moves.urllib.error import HTTPError from six.moves.urllib.error import URLError +from oslo_log import log -LOG = log.get_logger(__name__) +LOG = log.getLogger(__name__) def rest_api_request(token, method, api_cmd, api_cmd_headers=None, @@ -324,16 +323,3 @@ def delete_project(token, api_url, id): api_cmd = api_url + "/projects/" + id response = rest_api_request(token, "DELETE", api_cmd,) return keystone.Project(response) - - -def get_subcloud_config(token, api_url, subcloud_name, - hash_string): - """ - Ask DC Manager for our subcloud configuration - """ - api_cmd = api_url + "/subclouds/" + subcloud_name + "/config" - response = rest_api_request(token, "GET", api_cmd) - config = dict() - config['users'] = dcmanager.UserList(response['users'], hash_string) - - return config diff --git a/controllerconfig/controllerconfig/controllerconfig/common/validator.py b/controllerconfig/controllerconfig/controllerconfig/common/validator.py deleted file mode 100644 index fb0d1da019..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/common/validator.py +++ /dev/null @@ -1,1189 +0,0 @@ -""" -Copyright (c) 2015-2019 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" -from controllerconfig.common.configobjects import DEFAULT_NAMES -from controllerconfig.common.configobjects import NETWORK_PREFIX_NAMES -from controllerconfig.common.configobjects import OAM_TYPE -from controllerconfig.common.configobjects import MGMT_TYPE -from controllerconfig.common.configobjects import Network -from controllerconfig.common.configobjects import REGION_CONFIG -from controllerconfig.common.configobjects import DEFAULT_DOMAIN_NAME -from controllerconfig.common.configobjects import HP_NAMES -from controllerconfig.common.configobjects import SUBCLOUD_CONFIG -from controllerconfig.common.configobjects import CLUSTER_TYPE -from netaddr import IPRange -from controllerconfig.utils import lag_mode_to_str -from controllerconfig.utils import validate_network_str -from controllerconfig.utils import check_network_overlap -from controllerconfig.utils import is_mtu_valid -from controllerconfig.utils import get_service -from controllerconfig.utils import get_optional -from controllerconfig.utils import validate_address_str -from controllerconfig.utils import validate_nameserver_address_str -from controllerconfig.utils import is_valid_url -from controllerconfig.utils import is_valid_domain_or_ip -from controllerconfig.utils import is_valid_bool_str -from controllerconfig.common.exceptions import ConfigFail -from controllerconfig.common.exceptions import ValidateFail - - -# Constants -TiS_VERSION = "xxxSW_VERSIONxxx" - -# Minimum values for partition sizes -MIN_DATABASE_STORAGE = 20 -MIN_IMAGE_STORAGE = 10 -MIN_IMAGE_CONVERSIONS_VOLUME = 20 - -SYSADMIN_PASSWD_NO_AGING = 99999 - -# System mode -SYSTEM_MODE_DUPLEX = "duplex" -SYSTEM_MODE_SIMPLEX = "simplex" -SYSTEM_MODE_DUPLEX_DIRECT = "duplex-direct" - -DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER = 'systemcontroller' -DISTRIBUTED_CLOUD_ROLE_SUBCLOUD = 'subcloud' - -# System type -SYSTEM_TYPE_AIO = "All-in-one" -SYSTEM_TYPE_STANDARD = "Standard" - - -class 
ConfigValidator(object): - - def __init__(self, system_config, cgcs_config, config_type, offboard, - naming_type=DEFAULT_NAMES): - """ - :param system_config: system configuration - :param cgcs_config: if not None config data should be returned - :param config_type: indicates whether it is system, region or subcloud - config - :param offboard: if true only perform general error checking - :return: - """ - self.conf = system_config - self.cgcs_conf = cgcs_config - self.config_type = config_type - self.naming_type = naming_type - self.offboard = offboard - self.next_lag_index = 0 - self.configured_networks = [] - self.configured_vlans = [] - self.pxeboot_network_configured = False - self.pxeboot_section_name = None - self.management_interface = None - self.cluster_interface = None - self.mgmt_network = None - self.cluster_network = None - self.oam_network = None - self.vswitch_type = None - self.system_mode = None - self.system_type = None - self.system_dc_role = None - - def is_simplex_cpe(self): - return self.system_mode == SYSTEM_MODE_SIMPLEX - - def is_subcloud(self): - return self.system_dc_role == DISTRIBUTED_CLOUD_ROLE_SUBCLOUD - - def set_system_mode(self, mode): - self.system_mode = mode - - def set_system_dc_role(self, dc_role): - self.system_dc_role = dc_role - - def set_oam_config(self, use_lag, external_oam_interface_name): - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cEXT_OAM') - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_MTU', - self.oam_network.logical_interface.mtu) - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_SUBNET', - self.oam_network.cidr) - if use_lag: - self.cgcs_conf.set('cEXT_OAM', 'LAG_EXTERNAL_OAM_INTERFACE', - 'yes') - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_BOND_MEMBER_0', - self.oam_network.logical_interface.ports[0]) - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_BOND_MEMBER_1', - self.oam_network.logical_interface.ports[1]) - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_BOND_POLICY', - lag_mode_to_str(self.oam_network. - logical_interface.lag_mode)) - else: - self.cgcs_conf.set('cEXT_OAM', 'LAG_EXTERNAL_OAM_INTERFACE', - 'no') - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_INTERFACE', - external_oam_interface_name) - if self.oam_network.vlan: - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_VLAN', - str(self.oam_network.vlan)) - external_oam_interface_name += '.' 
+ str(self.oam_network.vlan) - - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_INTERFACE_NAME', - external_oam_interface_name) - if self.oam_network.gateway_address: - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_GATEWAY_ADDRESS', - str(self.oam_network.gateway_address)) - if self.system_mode == SYSTEM_MODE_SIMPLEX: - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_FLOATING_ADDRESS', - str(self.oam_network.floating_address)) - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_0_ADDRESS', - str(self.oam_network.address_0)) - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_1_ADDRESS', - str(self.oam_network.address_1)) - else: - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_FLOATING_ADDRESS', - str(self.oam_network.floating_address or - self.oam_network.start_address)) - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_0_ADDRESS', - str(self.oam_network.address_0 or - self.oam_network.start_address + 1)) - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_1_ADDRESS', - str(self.oam_network.address_1 or - self.oam_network.start_address + 2)) - - def process_oam_on_its_own_interface(self): - use_lag = False - oam_prefix = NETWORK_PREFIX_NAMES[self.naming_type][OAM_TYPE] - # OAM on its own LAG interface - if self.oam_network.logical_interface.lag_interface: - if self.oam_network.logical_interface.lag_mode not in (1, 2, 4): - raise ConfigFail( - "Unsupported LAG mode (%d) for %s interface" - " - use LAG mode 1, 2, or 4 instead" % - (self.oam_network.logical_interface.lag_mode, oam_prefix)) - use_lag = True - external_oam_interface = 'bond' + str(self.next_lag_index) - else: - # CAN on its own non-LAG interface - external_oam_interface = ( - self.oam_network.logical_interface.ports[0]) - return use_lag, external_oam_interface - - def validate_oam_common(self): - # validate OAM network - self.oam_network = Network() - if self.is_simplex_cpe(): - min_addresses = 1 - else: - min_addresses = 3 - try: - self.oam_network.parse_config(self.conf, self.config_type, - OAM_TYPE, - min_addresses=min_addresses, - multicast_addresses=0, - naming_type=self.naming_type) - except ConfigFail: - raise - except Exception as e: - raise ConfigFail("Error parsing configuration file: %s" % e) - - def validate_aio_simplex_mgmt(self): - # AIO simplex management network configuration - mgmt_prefix = NETWORK_PREFIX_NAMES[self.naming_type][MGMT_TYPE] - self.mgmt_network = Network() - - min_addresses = 16 - - try: - self.mgmt_network.parse_config(self.conf, self.config_type, - MGMT_TYPE, - min_addresses=min_addresses, - multicast_addresses=0, - naming_type=self.naming_type, - logical_interface_required=False) - - except ConfigFail: - raise - except Exception as e: - raise ConfigFail("Error parsing configuration file: %s" % e) - - if self.mgmt_network.vlan or self.mgmt_network.multicast_cidr or \ - self.mgmt_network.start_end_in_config or \ - self.mgmt_network.floating_address or \ - self.mgmt_network.address_0 or self.mgmt_network.address_1 or \ - self.mgmt_network.dynamic_allocation or \ - self.mgmt_network.gateway_address or \ - self.mgmt_network.logical_interface: - raise ConfigFail("For AIO simplex, only the %s network CIDR can " - "be specified" % mgmt_prefix) - - if self.mgmt_network.cidr.version == 6: - raise ConfigFail("IPv6 management network not supported on " - "simplex configuration.") - - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cMGMT') - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_SUBNET', - self.mgmt_network.cidr) - - def validate_aio_network(self, subcloud=False): - if not subcloud: - # AIO-SX subcloud supports 
MGMT_NETWORK & PXEBOOT_NETWORK - if self.conf.has_section('PXEBOOT_NETWORK'): - raise ConfigFail("PXEBoot Network configuration is not " - "supported.") - if self.conf.has_section('MGMT_NETWORK'): - self.validate_aio_simplex_mgmt() - if self.conf.has_section('BOARD_MANAGEMENT_NETWORK'): - raise ConfigFail("Board Management Network configuration is not " - "supported.") - # validate OAM network - oam_prefix = NETWORK_PREFIX_NAMES[self.naming_type][OAM_TYPE] - self.validate_oam_common() - (use_lag, external_oam_interface_name) = ( - self.process_oam_on_its_own_interface()) - - # Ensure that the gateway was configured - if self.oam_network.gateway_address is None: - raise ConfigFail( - "No gateway specified - %s_GATEWAY must be specified" - % oam_prefix) - - # Check overlap with management network - if self.mgmt_network is not None: - try: - self.configured_networks.append(self.mgmt_network.cidr) - check_network_overlap(self.oam_network.cidr, - self.configured_networks) - except ValidateFail: - raise ConfigFail("%s CIDR %s overlaps with another configured " - "network" % - (oam_prefix, str(self.mgmt_network.cidr))) - - self.set_oam_config(use_lag, external_oam_interface_name) - - def validate_version(self): - if self.offboard: - version = TiS_VERSION - else: - from tsconfig.tsconfig import SW_VERSION - version = SW_VERSION - - if not self.conf.has_option('VERSION', 'RELEASE'): - raise ConfigFail( - "Version information is missing from this config file. Please" - " refer to the installation documentation for details on " - "the correct contents of the configuration file.") - ini_version = self.conf.get('VERSION', 'RELEASE') - if version != ini_version: - raise ConfigFail( - "The configuration file given is of a different version (%s) " - "than the installed software (%s). Please refer to the " - "installation documentation for details on the correct " - "contents of the configuration file and update it with " - "any changes required for this release." 
% - (ini_version, version)) - - def validate_system(self): - # timezone section - timezone = 'UTC' - if self.conf.has_option('SYSTEM', 'TIMEZONE'): - timezone = self.conf.get('SYSTEM', 'TIMEZONE') - - # system type section - if self.conf.has_option("SYSTEM", "SYSTEM_TYPE"): - self.system_type = self.conf.get("SYSTEM", "SYSTEM_TYPE") - available_system_types = [ - SYSTEM_TYPE_STANDARD, - SYSTEM_TYPE_AIO - ] - if self.system_type not in available_system_types: - raise ConfigFail("Available options for SYSTEM_TYPE are: %s" % - available_system_types) - elif not self.offboard: - from tsconfig.tsconfig import system_type - self.system_type = system_type - - # system mode section - if self.conf.has_option("SYSTEM", "SYSTEM_MODE"): - self.system_mode = self.conf.get("SYSTEM", "SYSTEM_MODE") - available_system_modes = [SYSTEM_MODE_DUPLEX] - if self.system_type != SYSTEM_TYPE_STANDARD: - available_system_modes.append(SYSTEM_MODE_SIMPLEX) - available_system_modes.append(SYSTEM_MODE_DUPLEX_DIRECT) - if self.system_mode not in available_system_modes: - raise ConfigFail("Available options for SYSTEM_MODE are: %s" % - available_system_modes) - else: - if self.system_type == SYSTEM_TYPE_STANDARD: - self.system_mode = SYSTEM_MODE_DUPLEX - else: - self.system_mode = SYSTEM_MODE_DUPLEX_DIRECT - - if self.conf.has_option("SYSTEM", "DISTRIBUTED_CLOUD_ROLE"): - self.system_dc_role = \ - self.conf.get("SYSTEM", "DISTRIBUTED_CLOUD_ROLE") - if self.config_type == SUBCLOUD_CONFIG: - available_dc_role = [DISTRIBUTED_CLOUD_ROLE_SUBCLOUD] - elif self.config_type != REGION_CONFIG: - available_dc_role = [DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER] - else: - raise ConfigFail("DISTRIBUTED_CLOUD_ROLE option is " - "not available for this configuration") - - if self.system_dc_role not in available_dc_role: - raise ConfigFail( - "Available options for DISTRIBUTED_CLOUD_ROLE are: %s" % - available_dc_role) - - if (self.system_dc_role == - DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER and - self.system_type == SYSTEM_TYPE_AIO): - raise ConfigFail("An All-in-one controller cannot be " - "configured as Distributed Cloud " - "System Controller") - elif self.config_type == SUBCLOUD_CONFIG: - self.system_dc_role = DISTRIBUTED_CLOUD_ROLE_SUBCLOUD - else: - self.system_dc_role = None - - if self.cgcs_conf is not None: - self.cgcs_conf.add_section("cSYSTEM") - self.cgcs_conf.set("cSYSTEM", "TIMEZONE", timezone) - self.cgcs_conf.set("cSYSTEM", "SYSTEM_MODE", self.system_mode) - if self.system_dc_role is not None: - self.cgcs_conf.set("cSYSTEM", "DISTRIBUTED_CLOUD_ROLE", - self.system_dc_role) - - def validate_storage(self): - if (self.conf.has_option('STORAGE', 'DATABASE_STORAGE') or - self.conf.has_option('STORAGE', 'IMAGE_STORAGE') or - self.conf.has_option('STORAGE', 'BACKUP_STORAGE') or - self.conf.has_option('STORAGE', 'IMAGE_CONVERSIONS_VOLUME') or - self.conf.has_option('STORAGE', 'SHARED_INSTANCE_STORAGE') or - self.conf.has_option('STORAGE', 'CINDER_BACKEND') or - self.conf.has_option('STORAGE', 'CINDER_DEVICE') or - self.conf.has_option('STORAGE', 'CINDER_LVM_TYPE') or - self.conf.has_option('STORAGE', 'CINDER_STORAGE')): - msg = "DATABASE_STORAGE, IMAGE_STORAGE, BACKUP_STORAGE, " + \ - "IMAGE_CONVERSIONS_VOLUME, SHARED_INSTANCE_STORAGE, " + \ - "CINDER_BACKEND, CINDER_DEVICE, CINDER_LVM_TYPE, " + \ - "CINDER_STORAGE " + \ - "are not valid entries in config file."
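As a concrete illustration (a hypothetical config, built with the same six.moves configparser this package imports elsewhere), any one of these legacy [STORAGE] options is enough to trip the failure raised just below:

    from six.moves import configparser

    conf = configparser.RawConfigParser()
    conf.add_section('STORAGE')
    conf.set('STORAGE', 'CINDER_BACKEND', 'lvm')  # any listed option trips the check
    # Running this conf through the validator ends in ConfigFail with the
    # message assembled above.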
- raise ConfigFail(msg) - - def validate_pxeboot(self): - # PXEBoot network configuration - start_end_in_config = False - - if self.config_type in [REGION_CONFIG, SUBCLOUD_CONFIG]: - self.pxeboot_section_name = 'REGION2_PXEBOOT_NETWORK' - else: - self.pxeboot_section_name = 'PXEBOOT_NETWORK' - - if self.conf.has_section(self.pxeboot_section_name): - pxeboot_cidr_str = self.conf.get(self.pxeboot_section_name, - 'PXEBOOT_CIDR') - try: - pxeboot_subnet = validate_network_str(pxeboot_cidr_str, 16) - if pxeboot_subnet.version != 4: - raise ValidateFail("Invalid PXEBOOT_NETWORK IP version - " - "only IPv4 supported") - self.configured_networks.append(pxeboot_subnet) - pxeboot_start_address = None - pxeboot_end_address = None - if self.conf.has_option(self.pxeboot_section_name, - "IP_START_ADDRESS"): - start_addr_str = self.conf.get(self.pxeboot_section_name, - "IP_START_ADDRESS") - pxeboot_start_address = validate_address_str( - start_addr_str, pxeboot_subnet - ) - - if self.conf.has_option(self.pxeboot_section_name, - "IP_END_ADDRESS"): - end_addr_str = self.conf.get(self.pxeboot_section_name, - "IP_END_ADDRESS") - pxeboot_end_address = validate_address_str( - end_addr_str, pxeboot_subnet - ) - - if pxeboot_start_address or pxeboot_end_address: - if not pxeboot_end_address: - raise ConfigFail("Missing attribute %s for %s" % - ('IP_END_ADDRESS', - self.pxeboot_section_name)) - - if not pxeboot_start_address: - raise ConfigFail("Missing attribute %s for %s" % - ('IP_START_ADDRESS', - self.pxeboot_section_name)) - - if not pxeboot_start_address < pxeboot_end_address: - raise ConfigFail("Start address %s not " - "less than end address %s for %s." - % (start_addr_str, - end_addr_str, - self.pxeboot_section_name)) - - min_addresses = 8 - if not IPRange(start_addr_str, end_addr_str).size >= \ - min_addresses: - raise ConfigFail("Address range for %s must contain " - "at least %d addresses." % - (self.pxeboot_section_name, - min_addresses)) - start_end_in_config = True - - self.pxeboot_network_configured = True - except ValidateFail as e: - raise ConfigFail("Invalid PXEBOOT_CIDR value of %s for %s." 
- "\nReason: %s" % - (pxeboot_cidr_str, - self.pxeboot_section_name, e)) - - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cPXEBOOT') - if self.pxeboot_network_configured: - self.cgcs_conf.set('cPXEBOOT', 'PXEBOOT_SUBNET', - str(pxeboot_subnet)) - if start_end_in_config: - self.cgcs_conf.set("cPXEBOOT", - "PXEBOOT_START_ADDRESS", - start_addr_str) - self.cgcs_conf.set("cPXEBOOT", - "PXEBOOT_END_ADDRESS", - end_addr_str) - - pxeboot_floating_addr = pxeboot_start_address - pxeboot_controller_addr_0 = pxeboot_start_address + 1 - pxeboot_controller_addr_1 = pxeboot_controller_addr_0 + 1 - else: - pxeboot_floating_addr = pxeboot_subnet[2] - pxeboot_controller_addr_0 = pxeboot_subnet[3] - pxeboot_controller_addr_1 = pxeboot_subnet[4] - self.cgcs_conf.set('cPXEBOOT', - 'CONTROLLER_PXEBOOT_FLOATING_ADDRESS', - str(pxeboot_floating_addr)) - self.cgcs_conf.set('cPXEBOOT', 'CONTROLLER_PXEBOOT_ADDRESS_0', - str(pxeboot_controller_addr_0)) - self.cgcs_conf.set('cPXEBOOT', 'CONTROLLER_PXEBOOT_ADDRESS_1', - str(pxeboot_controller_addr_1)) - self.cgcs_conf.set('cPXEBOOT', 'PXECONTROLLER_FLOATING_HOSTNAME', - 'pxecontroller') - - def validate_mgmt(self): - # Management network configuration - mgmt_prefix = NETWORK_PREFIX_NAMES[self.naming_type][MGMT_TYPE] - self.mgmt_network = Network() - - if self.config_type == SUBCLOUD_CONFIG: - min_addresses = 5 - else: - min_addresses = 8 - - try: - self.mgmt_network.parse_config(self.conf, self.config_type, - MGMT_TYPE, - min_addresses=min_addresses, - multicast_addresses=16, - naming_type=self.naming_type) - except ConfigFail: - raise - except Exception as e: - raise ConfigFail("Error parsing configuration file: %s" % e) - - if self.mgmt_network.floating_address: - raise ConfigFail("%s network cannot specify individual unit " - "addresses" % mgmt_prefix) - - if not self.mgmt_network.multicast_cidr: - # The MULTICAST_CIDR is optional for subclouds (default is used) - if self.config_type != SUBCLOUD_CONFIG: - raise ConfigFail("%s MULTICAST_CIDR attribute is missing." - % mgmt_prefix) - - try: - check_network_overlap(self.mgmt_network.cidr, - self.configured_networks) - self.configured_networks.append(self.mgmt_network.cidr) - except ValidateFail: - raise ConfigFail("%s CIDR %s overlaps with another configured " - "network" % - (mgmt_prefix, str(self.mgmt_network.cidr))) - - if (self.system_dc_role == - DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER): - # For Distributed Cloud SystemController, we require the setting - # of the IP_START_ADDRESS/IP_END_ADDRESS config settings so as to - # raise awareness that some space in MGMT subnet must be set aside - # for gateways to reach subclouds. - - if not self.mgmt_network.start_end_in_config: - raise ConfigFail("IP_START_ADDRESS and IP_END_ADDRESS required" - " for %s network as this configuration " - "requires address space left for gateway " - "address(es)" % mgmt_prefix) - else: - # Warn user that some space in the management subnet must - # be reserved for the system controller gateway address(es) - # used to communicate with the subclouds. - 2 because of - # subnet and broadcast addresses. - address_range = \ - IPRange(str(self.mgmt_network.start_address), - str(self.mgmt_network.end_address)).size - - if address_range >= (self.mgmt_network.cidr.size - 2): - raise ConfigFail( - "Address range for %s network too large, no addresses" - " left for gateway(s), required in this " - "configuration." 
% mgmt_prefix) - - if self.mgmt_network.logical_interface.lag_interface: - supported_lag_mode = [1, 4] - if (self.mgmt_network.logical_interface.lag_mode not in - supported_lag_mode): - raise ConfigFail("Unsupported LAG mode (%d) for %s interface" - " - use LAG mode %s instead" % - (self.mgmt_network.logical_interface.lag_mode, - mgmt_prefix, supported_lag_mode)) - - self.management_interface = 'bond' + str(self.next_lag_index) - management_interface_name = self.management_interface - self.next_lag_index += 1 - else: - self.management_interface = ( - self.mgmt_network.logical_interface.ports[0]) - management_interface_name = self.management_interface - - if self.mgmt_network.vlan: - if not self.pxeboot_network_configured: - raise ConfigFail( - "Management VLAN cannot be configured because " - "PXEBOOT_NETWORK is not configured.") - self.configured_vlans.append(self.mgmt_network.vlan) - management_interface_name += '.' + str(self.mgmt_network.vlan) - elif self.pxeboot_network_configured: - raise ConfigFail( - "Management VLAN must be configured because " - "%s is configured." % self.pxeboot_section_name) - - if not self.is_simplex_cpe() and self.mgmt_network.cidr.version == 6 \ - and not self.pxeboot_network_configured: - raise ConfigFail("IPv6 management network cannot be configured " - "because PXEBOOT_NETWORK is not configured.") - - mtu = self.mgmt_network.logical_interface.mtu - if not is_mtu_valid(mtu): - raise ConfigFail( - "Invalid MTU value of %s for %s. " - "Valid values: 576 - 9216" - % (mtu, self.mgmt_network.logical_interface.name)) - - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cMGMT') - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_MTU', - self.mgmt_network.logical_interface.mtu) - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_SUBNET', - self.mgmt_network.cidr) - if self.mgmt_network.logical_interface.lag_interface: - self.cgcs_conf.set('cMGMT', 'LAG_MANAGEMENT_INTERFACE', 'yes') - self.cgcs_conf.set( - 'cMGMT', 'MANAGEMENT_BOND_MEMBER_0', - self.mgmt_network.logical_interface.ports[0]) - self.cgcs_conf.set( - 'cMGMT', 'MANAGEMENT_BOND_MEMBER_1', - self.mgmt_network.logical_interface.ports[1]) - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_BOND_POLICY', - lag_mode_to_str(self.mgmt_network.
- logical_interface.lag_mode)) - else: - self.cgcs_conf.set('cMGMT', 'LAG_MANAGEMENT_INTERFACE', 'no') - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_INTERFACE', - self.management_interface) - - if self.mgmt_network.vlan: - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_VLAN', - str(self.mgmt_network.vlan)) - - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_INTERFACE_NAME', - management_interface_name) - - if self.mgmt_network.gateway_address: - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_GATEWAY_ADDRESS', - str(self.mgmt_network.gateway_address)) - - self.cgcs_conf.set('cMGMT', 'CONTROLLER_FLOATING_ADDRESS', - str(self.mgmt_network.start_address)) - self.cgcs_conf.set('cMGMT', 'CONTROLLER_0_ADDRESS', - str(self.mgmt_network.start_address + 1)) - self.cgcs_conf.set('cMGMT', 'CONTROLLER_1_ADDRESS', - str(self.mgmt_network.start_address + 2)) - first_nfs_ip = self.mgmt_network.start_address + 3 - self.cgcs_conf.set('cMGMT', 'NFS_MANAGEMENT_ADDRESS_1', - str(first_nfs_ip)) - self.cgcs_conf.set('cMGMT', 'NFS_MANAGEMENT_ADDRESS_2', - str(first_nfs_ip + 1)) - self.cgcs_conf.set('cMGMT', 'CONTROLLER_FLOATING_HOSTNAME', - 'controller') - self.cgcs_conf.set('cMGMT', 'CONTROLLER_HOSTNAME_PREFIX', - 'controller-') - self.cgcs_conf.set('cMGMT', 'OAMCONTROLLER_FLOATING_HOSTNAME', - 'oamcontroller') - if self.mgmt_network.dynamic_allocation: - self.cgcs_conf.set('cMGMT', 'DYNAMIC_ADDRESS_ALLOCATION', - "yes") - else: - self.cgcs_conf.set('cMGMT', 'DYNAMIC_ADDRESS_ALLOCATION', - "no") - if self.mgmt_network.start_address and \ - self.mgmt_network.end_address: - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_START_ADDRESS', - self.mgmt_network.start_address) - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_END_ADDRESS', - self.mgmt_network.end_address) - if self.mgmt_network.multicast_cidr: - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_MULTICAST_SUBNET', - self.mgmt_network.multicast_cidr) - - def validate_cluster(self): - # Kubernetes cluster network configuration - cluster_prefix = NETWORK_PREFIX_NAMES[self.naming_type][CLUSTER_TYPE] - if not self.conf.has_section(cluster_prefix + '_NETWORK'): - return - self.cluster_network = Network() - try: - self.cluster_network.parse_config(self.conf, self.config_type, - CLUSTER_TYPE, - min_addresses=8, - naming_type=self.naming_type) - except ConfigFail: - raise - except Exception as e: - raise ConfigFail("Error parsing configuration file: %s" % e) - - if self.cluster_network.floating_address: - raise ConfigFail("%s network cannot specify individual unit " - "addresses" % cluster_prefix) - - try: - check_network_overlap(self.cluster_network.cidr, - self.configured_networks) - self.configured_networks.append(self.cluster_network.cidr) - except ValidateFail: - raise ConfigFail("%s CIDR %s overlaps with another configured " - "network" % - (cluster_prefix, str(self.cluster_network.cidr))) - - if self.cluster_network.logical_interface.lag_interface: - supported_lag_mode = [1, 2, 4] - if (self.cluster_network.logical_interface.lag_mode not in - supported_lag_mode): - raise ConfigFail( - "Unsupported LAG mode (%d) for %s interface" - " - use LAG mode %s instead" % - (self.cluster_network.logical_interface.lag_mode, - cluster_prefix, supported_lag_mode)) - - self.cluster_interface = 'bond' + str(self.next_lag_index) - cluster_interface_name = self.cluster_interface - self.next_lag_index += 1 - else: - self.cluster_interface = ( - self.cluster_network.logical_interface.ports[0]) - cluster_interface_name = self.cluster_interface - - if self.cluster_network.vlan: - if any(self.cluster_network.vlan == vlan for vlan 
in - self.configured_vlans): - raise ConfigFail( - "%s_NETWORK VLAN conflicts with another configured " - "VLAN" % cluster_prefix) - self.configured_vlans.append(self.cluster_network.vlan) - cluster_interface_name += '.' + str(self.cluster_network.vlan) - - mtu = self.cluster_network.logical_interface.mtu - if not is_mtu_valid(mtu): - raise ConfigFail( - "Invalid MTU value of %s for %s. " - "Valid values: 576 - 9216" - % (mtu, self.cluster_network.logical_interface.name)) - - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cCLUSTER') - self.cgcs_conf.set('cCLUSTER', 'CLUSTER_MTU', - self.cluster_network.logical_interface.mtu) - self.cgcs_conf.set('cCLUSTER', 'CLUSTER_SUBNET', - self.cluster_network.cidr) - if self.cluster_network.logical_interface.lag_interface: - self.cgcs_conf.set('cCLUSTER', 'LAG_CLUSTER_INTERFACE', 'yes') - self.cgcs_conf.set( - 'cCLUSTER', 'CLUSTER_BOND_MEMBER_0', - self.cluster_network.logical_interface.ports[0]) - self.cgcs_conf.set( - 'cCLUSTER', 'CLUSTER_BOND_MEMBER_1', - self.cluster_network.logical_interface.ports[1]) - self.cgcs_conf.set('cCLUSTER', 'CLUSTER_BOND_POLICY', - lag_mode_to_str(self.cluster_network. - logical_interface.lag_mode)) - else: - self.cgcs_conf.set('cCLUSTER', 'LAG_CLUSTER_INTERFACE', 'no') - self.cgcs_conf.set('cCLUSTER', 'CLUSTER_INTERFACE', - self.cluster_interface) - - if self.cluster_network.vlan: - self.cgcs_conf.set('cCLUSTER', 'CLUSTER_VLAN', - str(self.cluster_network.vlan)) - - self.cgcs_conf.set('cCLUSTER', 'CLUSTER_INTERFACE_NAME', - cluster_interface_name) - - if self.cluster_network.dynamic_allocation: - self.cgcs_conf.set('cCLUSTER', 'DYNAMIC_ADDRESS_ALLOCATION', - "yes") - else: - self.cgcs_conf.set('cCLUSTER', 'DYNAMIC_ADDRESS_ALLOCATION', - "no") - - def validate_oam(self): - # OAM network configuration - oam_prefix = NETWORK_PREFIX_NAMES[self.naming_type][OAM_TYPE] - mgmt_prefix = NETWORK_PREFIX_NAMES[self.naming_type][MGMT_TYPE] - self.validate_oam_common() - try: - check_network_overlap(self.oam_network.cidr, - self.configured_networks) - self.configured_networks.append(self.oam_network.cidr) - except ValidateFail: - raise ConfigFail( - "%s CIDR %s overlaps with another configured network" % - (oam_prefix, str(self.oam_network.cidr))) - - use_lag = False - if (self.oam_network.logical_interface.name == - self.mgmt_network.logical_interface.name): - # CAN sharing CLM interface - external_oam_interface = self.management_interface - elif (self.cluster_network and - (self.oam_network.logical_interface.name == - self.cluster_network.logical_interface.name)): - # CAN sharing BLS interface - external_oam_interface = self.cluster_interface - else: - (use_lag, external_oam_interface) = ( - self.process_oam_on_its_own_interface()) - external_oam_interface_name = external_oam_interface - - if self.oam_network.vlan: - if any(self.oam_network.vlan == vlan for vlan in - self.configured_vlans): - raise ConfigFail( - "%s_NETWORK VLAN conflicts with another configured VLAN" % - oam_prefix) - self.configured_vlans.append(self.oam_network.vlan) - elif external_oam_interface in (self.management_interface, - self.cluster_interface): - raise ConfigFail( - "VLAN required for %s_NETWORK since it uses the same interface" - " as another network" % oam_prefix) - - # Ensure that exactly one gateway was configured - if (self.mgmt_network.gateway_address is None and self.oam_network. 
- gateway_address is None): - raise ConfigFail( - "No gateway specified - either the %s_GATEWAY or %s_GATEWAY " - "must be specified" % (mgmt_prefix, oam_prefix)) - elif self.mgmt_network.gateway_address and ( - self.oam_network.gateway_address): - # In subcloud configs we support both a management and OAM gateway - if self.config_type != SUBCLOUD_CONFIG: - raise ConfigFail( - "Two gateways specified - only one of the %s_GATEWAY or " - "%s_GATEWAY can be specified" % (mgmt_prefix, oam_prefix)) - self.set_oam_config(use_lag, external_oam_interface_name) - - def validate_sdn(self): - if self.conf.has_section('SDN'): - raise ConfigFail("SDN Configuration is no longer supported") - - def validate_dns(self): - # DNS config is optional - if not self.conf.has_section('DNS'): - return - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cDNS') - for x in range(0, 3): - if self.conf.has_option('DNS', 'NAMESERVER_' + str(x + 1)): - dns_address_str = self.conf.get('DNS', 'NAMESERVER_' + str( - x + 1)) - try: - dns_address = validate_nameserver_address_str( - dns_address_str) - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDNS', 'NAMESERVER_' + str(x + 1), - str(dns_address)) - except ValidateFail as e: - raise ConfigFail( - "Invalid DNS NAMESERVER value of %s.\nReason: %s" % - (dns_address_str, e)) - - def validate_docker_proxy(self): - if not self.conf.has_section('DOCKER_PROXY'): - return - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cDOCKER_PROXY') - # check http_proxy - if self.conf.has_option('DOCKER_PROXY', 'DOCKER_HTTP_PROXY'): - docker_http_proxy_str = self.conf.get( - 'DOCKER_PROXY', 'DOCKER_HTTP_PROXY') - if is_valid_url(docker_http_proxy_str): - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_PROXY', 'DOCKER_HTTP_PROXY', - docker_http_proxy_str) - else: - raise ConfigFail( - "Invalid DOCKER_HTTP_PROXY value of %s." % - docker_http_proxy_str) - # check https_proxy - if self.conf.has_option('DOCKER_PROXY', 'DOCKER_HTTPS_PROXY'): - docker_https_proxy_str = self.conf.get( - 'DOCKER_PROXY', 'DOCKER_HTTPS_PROXY') - if is_valid_url(docker_https_proxy_str): - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_PROXY', 'DOCKER_HTTPS_PROXY', - docker_https_proxy_str) - else: - raise ConfigFail( - "Invalid DOCKER_HTTPS_PROXY value of %s." % - docker_https_proxy_str) - # check no_proxy - if self.conf.has_option('DOCKER_PROXY', 'DOCKER_NO_PROXY'): - docker_no_proxy_list_str = self.conf.get( - 'DOCKER_PROXY', 'DOCKER_NO_PROXY') - docker_no_proxy_list = docker_no_proxy_list_str.split(',') - for no_proxy_str in docker_no_proxy_list: - if not is_valid_domain_or_ip(no_proxy_str): - raise ConfigFail( - "Invalid DOCKER_NO_PROXY value of %s." % - no_proxy_str) - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_PROXY', 'DOCKER_NO_PROXY', - docker_no_proxy_list_str) - - def validate_docker_registry(self): - if not self.conf.has_section('DOCKER_REGISTRY'): - return - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cDOCKER_REGISTRY') - # check k8s_registry - if self.conf.has_option('DOCKER_REGISTRY', 'DOCKER_K8S_REGISTRY'): - docker_k8s_registry_str = self.conf.get( - 'DOCKER_REGISTRY', 'DOCKER_K8S_REGISTRY') - if is_valid_domain_or_ip(docker_k8s_registry_str): - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_REGISTRY', - 'DOCKER_K8S_REGISTRY', - docker_k8s_registry_str) - else: - raise ConfigFail( - "Invalid DOCKER_K8S_REGISTRY value of %s." 
% - docker_k8s_registry_str) - # check gcr_registry - if self.conf.has_option('DOCKER_REGISTRY', 'DOCKER_GCR_REGISTRY'): - docker_gcr_registry_str = self.conf.get( - 'DOCKER_REGISTRY', 'DOCKER_GCR_REGISTRY') - if is_valid_domain_or_ip(docker_gcr_registry_str): - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_REGISTRY', - 'DOCKER_GCR_REGISTRY', - docker_gcr_registry_str) - else: - raise ConfigFail( - "Invalid DOCKER_GCR_REGISTRY value of %s." % - docker_gcr_registry_str) - # check quay_registry - if self.conf.has_option('DOCKER_REGISTRY', 'DOCKER_QUAY_REGISTRY'): - docker_quay_registry_str = self.conf.get( - 'DOCKER_REGISTRY', 'DOCKER_QUAY_REGISTRY') - if is_valid_domain_or_ip(docker_quay_registry_str): - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_REGISTRY', - 'DOCKER_QUAY_REGISTRY', - docker_quay_registry_str) - else: - raise ConfigFail( - "Invalid DOCKER_QUAY_REGISTRY value of %s." % - docker_quay_registry_str) - # check docker_registry - if self.conf.has_option('DOCKER_REGISTRY', 'DOCKER_DOCKER_REGISTRY'): - docker_docker_registry_str = self.conf.get( - 'DOCKER_REGISTRY', 'DOCKER_DOCKER_REGISTRY') - if is_valid_domain_or_ip(docker_docker_registry_str): - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_REGISTRY', - 'DOCKER_DOCKER_REGISTRY', - docker_docker_registry_str) - else: - raise ConfigFail( - "Invalid DOCKER_DOCKER_REGISTRY value of %s." % - docker_docker_registry_str) - # check is_secure_registry - if self.conf.has_option('DOCKER_REGISTRY', 'IS_SECURE_REGISTRY'): - docker_is_secure_registry_str = self.conf.get( - 'DOCKER_REGISTRY', 'IS_SECURE_REGISTRY') - if is_valid_bool_str(docker_is_secure_registry_str): - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_REGISTRY', - 'IS_SECURE_REGISTRY', - docker_is_secure_registry_str) - else: - raise ConfigFail( - "Invalid IS_SECURE_REGISTRY value of %s." % - docker_is_secure_registry_str) - - def validate_ntp(self): - if self.conf.has_section('NTP'): - raise ConfigFail("NTP Configuration is no longer supported") - - def validate_region(self, config_type=REGION_CONFIG): - region_1_name = self.conf.get('SHARED_SERVICES', 'REGION_NAME') - region_2_name = self.conf.get('REGION_2_SERVICES', 'REGION_NAME') - if region_1_name == region_2_name: - raise ConfigFail( - "The Region Names must be unique.") - - if not (self.conf.has_option('REGION_2_SERVICES', 'CREATE') and - self.conf.get('REGION_2_SERVICES', 'CREATE') == 'Y'): - password_fields = [ - 'PATCHING', 'SYSINV', 'FM', 'BARBICAN', 'NFV', 'MTCE' - ] - for pw in password_fields: - if not self.conf.has_option('REGION_2_SERVICES', - pw + '_PASSWORD'): - raise ConfigFail("User password for %s is required and " - "missing." 
% pw) - - admin_user_name = self.conf.get('SHARED_SERVICES', 'ADMIN_USER_NAME') - if self.conf.has_option('SHARED_SERVICES', - 'ADMIN_USER_DOMAIN'): - admin_user_domain = self.conf.get('SHARED_SERVICES', - 'ADMIN_USER_DOMAIN') - else: - admin_user_domain = DEFAULT_DOMAIN_NAME - - # for now support both ADMIN_PROJECT_NAME and ADMIN_TENANT_NAME - if self.conf.has_option('SHARED_SERVICES', 'ADMIN_PROJECT_NAME'): - admin_project_name = self.conf.get('SHARED_SERVICES', - 'ADMIN_PROJECT_NAME') - else: - admin_project_name = self.conf.get('SHARED_SERVICES', - 'ADMIN_TENANT_NAME') - if self.conf.has_option('SHARED_SERVICES', - 'ADMIN_PROJECT_DOMAIN'): - admin_project_domain = self.conf.get('SHARED_SERVICES', - 'ADMIN_PROJECT_DOMAIN') - else: - admin_project_domain = DEFAULT_DOMAIN_NAME - - # for now support both SERVICE_PROJECT_NAME and SERVICE_TENANT_NAME - if self.conf.has_option('SHARED_SERVICES', 'SERVICE_PROJECT_NAME'): - service_project_name = self.conf.get('SHARED_SERVICES', - 'SERVICE_PROJECT_NAME') - else: - service_project_name = self.conf.get('SHARED_SERVICES', - 'SERVICE_TENANT_NAME') - keystone_service_name = get_service(self.conf, 'SHARED_SERVICES', - 'KEYSTONE_SERVICE_NAME') - keystone_service_type = get_service(self.conf, 'SHARED_SERVICES', - 'KEYSTONE_SERVICE_TYPE') - - # validate the patch service name and type - get_service(self.conf, 'REGION_2_SERVICES', 'PATCHING_SERVICE_NAME') - get_service(self.conf, 'REGION_2_SERVICES', 'PATCHING_SERVICE_TYPE') - patch_user_name = self.conf.get('REGION_2_SERVICES', - 'PATCHING_USER_NAME') - patch_password = get_optional(self.conf, 'REGION_2_SERVICES', - 'PATCHING_PASSWORD') - sysinv_user_name = self.conf.get('REGION_2_SERVICES', - 'SYSINV_USER_NAME') - sysinv_password = get_optional(self.conf, 'REGION_2_SERVICES', - 'SYSINV_PASSWORD') - sysinv_service_name = get_service(self.conf, 'REGION_2_SERVICES', - 'SYSINV_SERVICE_NAME') - sysinv_service_type = get_service(self.conf, 'REGION_2_SERVICES', - 'SYSINV_SERVICE_TYPE') - - # validate nfv service name and type - get_service(self.conf, 'REGION_2_SERVICES', 'NFV_SERVICE_NAME') - get_service(self.conf, 'REGION_2_SERVICES', 'NFV_SERVICE_TYPE') - nfv_user_name = self.conf.get('REGION_2_SERVICES', 'NFV_USER_NAME') - nfv_password = get_optional(self.conf, 'REGION_2_SERVICES', - 'NFV_PASSWORD') - - # validate mtce user - mtce_user_name = self.conf.get('REGION_2_SERVICES', 'MTCE_USER_NAME') - mtce_password = get_optional(self.conf, 'REGION_2_SERVICES', - 'MTCE_PASSWORD') - - # validate fm service name and type - get_service(self.conf, 'REGION_2_SERVICES', 'FM_SERVICE_NAME') - get_service(self.conf, 'REGION_2_SERVICES', 'FM_SERVICE_TYPE') - fm_user_name = self.conf.get('REGION_2_SERVICES', 'FM_USER_NAME') - fm_password = get_optional(self.conf, 'REGION_2_SERVICES', - 'FM_PASSWORD') - - # validate barbican service name and type - get_service(self.conf, 'REGION_2_SERVICES', 'BARBICAN_SERVICE_NAME') - get_service(self.conf, 'REGION_2_SERVICES', 'BARBICAN_SERVICE_TYPE') - barbican_user_name = self.conf.get('REGION_2_SERVICES', - 'BARBICAN_USER_NAME') - barbican_password = get_optional(self.conf, 'REGION_2_SERVICES', - 'BARBICAN_PASSWORD') - - if self.conf.has_option('REGION_2_SERVICES', 'USER_DOMAIN_NAME'): - user_domain = self.conf.get('REGION_2_SERVICES', - 'USER_DOMAIN_NAME') - else: - user_domain = DEFAULT_DOMAIN_NAME - if self.conf.has_option('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME'): - project_domain = self.conf.get('REGION_2_SERVICES', - 'PROJECT_DOMAIN_NAME') - else: - project_domain = 
DEFAULT_DOMAIN_NAME - - system_controller_subnet = None - system_controller_floating_ip = None - if config_type == SUBCLOUD_CONFIG: - system_controller_subnet = self.conf.get( - 'SHARED_SERVICES', 'SYSTEM_CONTROLLER_SUBNET') - system_controller_floating_ip = self.conf.get( - 'SHARED_SERVICES', 'SYSTEM_CONTROLLER_FLOATING_ADDRESS') - - # Create cgcs_config file if specified - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cREGION') - self.cgcs_conf.set('cREGION', 'REGION_CONFIG', 'True') - self.cgcs_conf.set('cREGION', 'REGION_1_NAME', region_1_name) - self.cgcs_conf.set('cREGION', 'REGION_2_NAME', region_2_name) - self.cgcs_conf.set('cREGION', 'ADMIN_USER_NAME', admin_user_name) - self.cgcs_conf.set('cREGION', 'ADMIN_USER_DOMAIN', - admin_user_domain) - self.cgcs_conf.set('cREGION', 'ADMIN_PROJECT_NAME', - admin_project_name) - self.cgcs_conf.set('cREGION', 'ADMIN_PROJECT_DOMAIN', - admin_project_domain) - self.cgcs_conf.set('cREGION', 'SERVICE_PROJECT_NAME', - service_project_name) - self.cgcs_conf.set('cREGION', 'KEYSTONE_SERVICE_NAME', - keystone_service_name) - self.cgcs_conf.set('cREGION', 'KEYSTONE_SERVICE_TYPE', - keystone_service_type) - self.cgcs_conf.set('cREGION', 'PATCHING_USER_NAME', - patch_user_name) - self.cgcs_conf.set('cREGION', 'PATCHING_PASSWORD', patch_password) - self.cgcs_conf.set('cREGION', 'SYSINV_USER_NAME', sysinv_user_name) - self.cgcs_conf.set('cREGION', 'SYSINV_PASSWORD', sysinv_password) - self.cgcs_conf.set('cREGION', 'SYSINV_SERVICE_NAME', - sysinv_service_name) - self.cgcs_conf.set('cREGION', 'SYSINV_SERVICE_TYPE', - sysinv_service_type) - self.cgcs_conf.set('cREGION', 'NFV_USER_NAME', nfv_user_name) - self.cgcs_conf.set('cREGION', 'NFV_PASSWORD', nfv_password) - self.cgcs_conf.set('cREGION', 'MTCE_USER_NAME', mtce_user_name) - self.cgcs_conf.set('cREGION', 'MTCE_PASSWORD', mtce_password) - self.cgcs_conf.set('cREGION', 'FM_USER_NAME', fm_user_name) - self.cgcs_conf.set('cREGION', 'FM_PASSWORD', fm_password) - self.cgcs_conf.set('cREGION', 'BARBICAN_USER_NAME', - barbican_user_name) - self.cgcs_conf.set('cREGION', 'BARBICAN_PASSWORD', - barbican_password) - - self.cgcs_conf.set('cREGION', 'USER_DOMAIN_NAME', - user_domain) - self.cgcs_conf.set('cREGION', 'PROJECT_DOMAIN_NAME', - project_domain) - if config_type == SUBCLOUD_CONFIG: - self.cgcs_conf.set('cREGION', 'SYSTEM_CONTROLLER_SUBNET', - system_controller_subnet) - self.cgcs_conf.set('cREGION', - 'SYSTEM_CONTROLLER_FLOATING_ADDRESS', - system_controller_floating_ip) - - def validate_security(self): - if self.conf.has_section('SECURITY'): - raise ConfigFail("The section SECURITY is " - "no longer supported.") - - def validate_licensing(self): - if self.conf.has_section('LICENSING'): - raise ConfigFail("The section LICENSING is no longer supported.") - - def validate_authentication(self): - if self.config_type in [REGION_CONFIG, SUBCLOUD_CONFIG]: - password = self.conf.get('SHARED_SERVICES', 'ADMIN_PASSWORD') - else: - password = self.conf.get('AUTHENTICATION', 'ADMIN_PASSWORD') - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cAUTHENTICATION') - self.cgcs_conf.set('cAUTHENTICATION', 'ADMIN_PASSWORD', password) - - -def validate(system_config, config_type=REGION_CONFIG, cgcs_config=None, - offboard=False): - """ - Perform general error checking on a system configuration file - :param system_config: system configuration - :param config_type: indicates whether it is system, region or subcloud - configuration - :param cgcs_config: if not None config data should be returned -
:param offboard: if true only perform general error checking - :return: None - """ - if config_type == REGION_CONFIG and system_config.has_section( - 'CLM_NETWORK'): - naming_type = HP_NAMES - else: - naming_type = DEFAULT_NAMES - validator = ConfigValidator(system_config, cgcs_config, config_type, - offboard, naming_type) - # Version configuration - validator.validate_version() - # System configuration - validator.validate_system() - # Storage configuration - validator.validate_storage() - # SDN configuration - validator.validate_sdn() - - if validator.is_simplex_cpe(): - if validator.is_subcloud(): - # For AIO-SX subcloud, mgmt n/w will be on a separate physical - # interface or could be on a VLAN interface (on PXEBOOT n/w). - validator.validate_aio_network(subcloud=True) - validator.validate_pxeboot() - validator.validate_mgmt() - else: - validator.validate_aio_network() - else: - # PXEBoot network configuration - validator.validate_pxeboot() - # Management network configuration - validator.validate_mgmt() - # OAM network configuration - validator.validate_oam() - # Kubernetes Cluster network configuration - validator.validate_cluster() - # Neutron configuration - leave blank to use defaults - # DNS configuration - validator.validate_dns() - # Docker Proxy configuration - validator.validate_docker_proxy() - # Docker Registry configuration - validator.validate_docker_registry() - # NTP configuration - validator.validate_ntp() - # Region configuration - if config_type in [REGION_CONFIG, SUBCLOUD_CONFIG]: - validator.validate_region(config_type) - # Security configuration - validator.validate_security() - # Licensing configuration - validator.validate_licensing() - # Authentication configuration - validator.validate_authentication() diff --git a/controllerconfig/controllerconfig/controllerconfig/configassistant.py b/controllerconfig/controllerconfig/controllerconfig/configassistant.py deleted file mode 100644 index 5468853b06..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/configassistant.py +++ /dev/null @@ -1,4746 +0,0 @@ -""" -Copyright (c) 2014-2019 Wind River Systems, Inc. 
- -SPDX-License-Identifier: Apache-2.0 - -""" - -from six.moves import configparser -import datetime -import errno -import getpass -import hashlib -import keyring -import netifaces -import os -import re -import stat -import subprocess -import textwrap -import time - -import pyudev -from controllerconfig import ConfigFail -from controllerconfig import ValidateFail -from controllerconfig import is_valid_vlan -from controllerconfig import is_mtu_valid -from controllerconfig import validate_network_str -from controllerconfig import validate_address_str -from controllerconfig import validate_address -from controllerconfig import ip_version_to_string -from controllerconfig import validate_nameserver_address_str -from controllerconfig import is_valid_url -from controllerconfig import is_valid_domain_or_ip -from controllerconfig import validate_openstack_password -from controllerconfig import DEFAULT_DOMAIN_NAME -from netaddr import IPNetwork -from netaddr import IPAddress -from netaddr import IPRange -from netaddr import AddrFormatError -from sysinv.common import constants as sysinv_constants -from tsconfig.tsconfig import SW_VERSION - -from controllerconfig import openstack -from controllerconfig import sysinv_api as sysinv -from controllerconfig import utils -from controllerconfig import progress - -from controllerconfig.common import constants -from controllerconfig.common import log -from controllerconfig.common.exceptions import KeystoneFail -from controllerconfig.common.exceptions import SysInvFail -from controllerconfig.common.exceptions import UserQuit -from six.moves import input - -LOG = log.get_logger(__name__) - -DEVNULL = open(os.devnull, 'w') - - -def interface_exists(name): - """Check whether an interface exists.""" - return name in netifaces.interfaces() - - -def timestamped(dname, fmt='{dname}_%Y-%m-%d-%H-%M-%S'): - return datetime.datetime.now().strftime(fmt).format(dname=dname) - - -def prompt_for(prompt_text, default_input, validator): - """ - :param prompt_text: text for the prompt - :param default_input: default input if user hit enter directly - :param validator: validator function to validate user input, - validator should return error message in case - of invalid input, or None if input is valid. 
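A hypothetical prompt_for() call, mirroring how set_timezone() further below drives it; the prompt text, default, and validator shown here are illustrative and assume prompt_for is in scope:

    import os

    tz = prompt_for(
        "Please input the timezone[UTC]:",  # text shown at the prompt
        "UTC",                              # used when the user just hits enter
        lambda text: None
        if os.path.isfile("/usr/share/zoneinfo/%s" % text)
        else "Invalid timezone specified, please try again.")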
- :return: return a valid user input - """ - error_msg = None - while True: - user_input = input(prompt_text) - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = default_input - - if validator: - error_msg = validator(user_input) - - if error_msg is not None: - print(error_msg) - else: - break - - return user_input - - -def is_interface_up(interface_name): - arg = '/sys/class/net/' + interface_name + '/operstate' - try: - if (subprocess.check_output(['cat', arg]).rstrip() == - 'up'): - return True - else: - return False - except subprocess.CalledProcessError: - LOG.error("Command cat %s failed" % arg) - return False - - -def device_node_to_device_path(dev_node): - device_path = None - cmd = ["find", "-L", "/dev/disk/by-path/", "-samefile", dev_node] - - try: - out = subprocess.check_output(cmd) - except subprocess.CalledProcessError as e: - LOG.error("Could not retrieve device information: %s" % e) - return device_path - - device_path = out.rstrip() - return device_path - - -def parse_fdisk(device_node): - """Cloned/modified from sysinv""" - # Run command - fdisk_command = ('fdisk -l %s 2>/dev/null | grep "Disk %s:"' % - (device_node, device_node)) - fdisk_process = subprocess.Popen(fdisk_command, stdout=subprocess.PIPE, - shell=True) - fdisk_output = fdisk_process.stdout.read() - - # Parse output - secnd_half = fdisk_output.split(',')[1] - size_bytes = secnd_half.split()[0].strip() - - # Convert bytes to GiB (1 GiB = 1024*1024*1024 bytes) - int_size = int(size_bytes) - size_gib = int_size / 1073741824 - - return int(size_gib) - - -def get_rootfs_node(): - """Cloned from sysinv""" - cmdline_file = '/proc/cmdline' - device = None - - with open(cmdline_file, 'r') as f: - for line in f: - for param in line.split(): - params = param.split("=", 1) - if params[0] == "root": - if "UUID=" in params[1]: - key, uuid = params[1].split("=") - symlink = "/dev/disk/by-uuid/%s" % uuid - device = os.path.basename(os.readlink(symlink)) - else: - device = os.path.basename(params[1]) - - if device is not None: - if sysinv_constants.DEVICE_NAME_NVME in device: - re_line = re.compile(r'^(nvme[0-9]*n[0-9]*)') - else: - re_line = re.compile(r'^(\D*)') - match = re_line.search(device) - if match: - return os.path.join("/dev", match.group(1)) - - return - - -def find_boot_device(): - """ Determine boot device """ - boot_device = None - - context = pyudev.Context() - - # Get the boot partition - # Unfortunately, it seems we can only get it from the logfile. 
- - # We'll parse the device used from a line like the following: - # BIOSBoot.create: device: /dev/sda1 ; status: False ; type: biosboot ; - # or - # EFIFS.create: device: /dev/sda1 ; status: False ; type: efi ; - # - logfile = '/var/log/anaconda/storage.log' - - re_line = re.compile(r'(BIOSBoot|EFIFS).create: device: ([^\s;]*)') - boot_partition = None - with open(logfile, 'r') as f: - for line in f: - match = re_line.search(line) - if match: - boot_partition = match.group(2) - break - if boot_partition is None: - raise ConfigFail("Failed to determine the boot partition") - - # Find the boot partition and get its parent - for device in context.list_devices(DEVTYPE='partition'): - if device.device_node == boot_partition: - boot_device = device.find_parent('block').device_node - break - - if boot_device is None: - raise ConfigFail("Failed to determine the boot device") - - return boot_device - - -def get_device_from_function(get_disk_function): - device_node = get_disk_function() - device_path = device_node_to_device_path(device_node) - device = device_path if device_path else os.path.basename(device_node) - - return device - - -def get_console_info(): - """ Determine console info """ - cmdline_file = '/proc/cmdline' - - re_line = re.compile(r'^.*\s+console=([^\s]*)') - - with open(cmdline_file, 'r') as f: - for line in f: - match = re_line.search(line) - if match: - console_info = match.group(1) - return console_info - return '' - - -def get_orig_install_mode(): - """ Determine original install mode, text vs graphical """ - # Post-install, the only way to determine the original install mode - # will be to check the anaconda install log for the parameters passed - logfile = '/var/log/anaconda/anaconda.log' - - search_str = 'Display mode = t' - try: - subprocess.check_call(['grep', '-q', search_str, logfile]) - return 'text' - except subprocess.CalledProcessError: - return 'graphical' - - -def get_root_disk_size(): - """ Get size of the root disk """ - context = pyudev.Context() - rootfs_node = get_rootfs_node() - size_gib = 0 - - for device in context.list_devices(DEVTYPE='disk'): - # major number 259 is used for NVMe devices (/dev/nvmeXn1) - major = device['MAJOR'] - if (major == '8' or major == '3' or major == '253' or - major == '259'): - devname = device['DEVNAME'] - if devname == rootfs_node: - try: - size_gib = parse_fdisk(devname) - except Exception as e: - LOG.error("Could not retrieve disk size - %s " % e) - # Do not break config script, just return size 0 - break - break - return size_gib - - -def net_device_cmp(a, b): - # Sorting function for net devices - # Break device name "devX" into "dev" and "X", in order - # to numerically sort devices with same "dev" prefix. - # For example, this ensures a device named enp0s10 comes - # after enp0s3.
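Note that the cmp= keyword used by get_net_device_list() below exists only in Python 2; under Python 3 the same ordering can be obtained with functools.cmp_to_key. A minimal sketch, assuming net_device_cmp as defined here:

    import functools

    devices = ["enp0s10", "enp0s3", "eth1", "eth0"]
    ordered = sorted(devices, key=functools.cmp_to_key(net_device_cmp))
    print(ordered)  # ['enp0s3', 'enp0s10', 'eth0', 'eth1']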
- - pattern = re.compile("^(.*?)([0-9]*)$") - a_match = pattern.match(a) - b_match = pattern.match(b) - - if a_match.group(1) == b_match.group(1): - a_num = int(a_match.group(2)) if a_match.group(2).isdigit() else 0 - b_num = int(b_match.group(2)) if b_match.group(2).isdigit() else 0 - return a_num - b_num - elif a_match.group(1) < b_match.group(1): - return -1 - return 1 - - -def get_net_device_list(): - devlist = [] - context = pyudev.Context() - for device in context.list_devices(SUBSYSTEM='net'): - # Skip the loopback device - if device.sys_name != "lo": - devlist.append(str(device.sys_name)) - - return sorted(devlist, cmp=net_device_cmp) - - -def get_tboot_info(): - """ Determine whether we were booted with a tboot value """ - cmdline_file = '/proc/cmdline' - - # tboot=true, tboot=false, or no tboot parameter expected - re_line = re.compile(r'^.*\s+tboot=([^\s]*)') - - with open(cmdline_file, 'r') as f: - for line in f: - match = re_line.search(line) - if match: - tboot = match.group(1) - return tboot - return '' - - -class ConfigAssistant(): - """Allow user to do the initial configuration.""" - - def __init__(self, labmode=False, kubernetes=False, **kwargs): - """Constructor - - The values assigned here are used as the defaults if the user does not - supply a new value. - """ - - self.labmode = labmode - self.kubernetes = True - - self.config_uuid = "install" - - self.net_devices = get_net_device_list() - if len(self.net_devices) < 2: - raise ConfigFail("Two or more network devices are required") - - if os.path.exists(constants.INSTALLATION_FAILED_FILE): - msg = "Installation failed. For more info, see:\n" - with open(constants.INSTALLATION_FAILED_FILE, 'r') as f: - msg += f.read() - raise ConfigFail(msg) - - # system config - self.system_type = utils.get_system_type() - self.security_profile = utils.get_security_profile() - - if self.system_type == sysinv_constants.TIS_AIO_BUILD: - self.system_mode = sysinv_constants.SYSTEM_MODE_DUPLEX_DIRECT - else: - self.system_mode = sysinv_constants.SYSTEM_MODE_DUPLEX - self.system_dc_role = None - - self.rootfs_node = get_rootfs_node() - - # PXEBoot network config - self.separate_pxeboot_network = False - self.pxeboot_subnet = IPNetwork("192.168.202.0/24") - self.controller_pxeboot_floating_address = IPNetwork("192.168.202.2") - self.controller_pxeboot_address_0 = IPAddress("192.168.202.3") - self.controller_pxeboot_address_1 = IPAddress("192.168.202.4") - self.controller_pxeboot_hostname_suffix = "-pxeboot" - self.private_pxeboot_subnet = IPNetwork("169.254.202.0/24") - self.pxecontroller_floating_hostname = "pxecontroller" - self.pxeboot_start_address = None - self.pxeboot_end_address = None - self.use_entire_pxeboot_subnet = True - - # Management network config - self.management_interface_configured = False - self.management_interface_name = self.net_devices[1] - self.management_interface = self.net_devices[1] - self.management_vlan = "" - self.management_mtu = constants.LINK_MTU_DEFAULT - self.next_lag_index = 0 - self.lag_management_interface = False - self.lag_management_interface_member0 = self.net_devices[1] - self.lag_management_interface_member1 = "" - self.lag_management_interface_policy = constants.LAG_MODE_8023AD - self.lag_management_interface_txhash = constants.LAG_TXHASH_LAYER2 - self.lag_management_interface_miimon = constants.LAG_MIIMON_FREQUENCY - self.management_subnet = IPNetwork("192.168.204.0/24") - self.management_gateway_address = None - self.controller_floating_address = IPAddress("192.168.204.2") - 
self.controller_address_0 = IPAddress("192.168.204.3") - self.controller_address_1 = IPAddress("192.168.204.4") - self.nfs_management_address_1 = IPAddress("192.168.204.5") - self.nfs_management_address_2 = IPAddress("192.168.204.6") - self.storage_address_0 = "" - self.storage_address_1 = "" - self.controller_floating_hostname = "controller" - self.controller_hostname_prefix = "controller-" - self.storage_hostname_prefix = "storage-" - self.use_entire_mgmt_subnet = True - self.dynamic_address_allocation = True - self.management_start_address = IPAddress("192.168.204.2") - self.management_end_address = IPAddress("192.168.204.254") - self.management_multicast_subnet = \ - IPNetwork(constants.DEFAULT_MULTICAST_SUBNET_IPV4) - - # External OAM Network config - self.external_oam_interface_configured = False - self.external_oam_interface_name = self.net_devices[0] - self.external_oam_interface = self.net_devices[0] - self.external_oam_vlan = "" - self.external_oam_mtu = constants.LINK_MTU_DEFAULT - self.lag_external_oam_interface = False - self.lag_external_oam_interface_member0 = self.net_devices[0] - self.lag_external_oam_interface_member1 = "" - self.lag_external_oam_interface_policy = \ - constants.LAG_MODE_ACTIVE_BACKUP - self.lag_external_oam_interface_txhash = "" - self.lag_external_oam_interface_miimon = \ - constants.LAG_MIIMON_FREQUENCY - self.external_oam_subnet = IPNetwork("10.10.10.0/24") - self.external_oam_gateway_address = IPAddress("10.10.10.1") - self.external_oam_floating_address = IPAddress("10.10.10.2") - self.external_oam_address_0 = IPAddress("10.10.10.3") - self.external_oam_address_1 = IPAddress("10.10.10.4") - self.oamcontroller_floating_hostname = "oamcontroller" - - # Kubernetes cluster network config - self.cluster_host_interface_configured = False - self.cluster_host_interface_name = self.management_interface_name - self.cluster_host_interface = self.management_interface - self.cluster_host_vlan = "" - self.cluster_host_mtu = constants.LINK_MTU_DEFAULT - self.lag_cluster_host_interface = False - self.lag_cluster_host_interface_member0 = "" - self.lag_cluster_host_interface_member1 = "" - self.lag_cluster_host_interface_policy = \ - constants.LAG_MODE_ACTIVE_BACKUP - self.lag_cluster_host_interface_txhash = "" - self.lag_cluster_host_interface_miimon = \ - constants.LAG_MIIMON_FREQUENCY - self.cluster_host_subnet = IPNetwork("192.168.206.0/24") - - # Will be configurable in the future - self.cluster_pod_subnet = IPNetwork("172.16.0.0/16") - self.cluster_service_subnet = IPNetwork("10.96.0.0/12") - - # Docker Proxy config - self.enable_docker_proxy = False - self.docker_http_proxy = "" - self.docker_https_proxy = "" - self.docker_no_proxy = "" - - # Docker registry config - self.docker_use_default_registry = True - self.docker_k8s_registry = "" - self.docker_gcr_registry = "" - self.docker_quay_registry = "" - self.docker_docker_registry = "" - self.is_secure_registry = True - - # SDN config - self.enable_sdn = False - - # DNS config - self.nameserver_addresses = ["8.8.8.8", "8.8.4.4", ""] - - # HTTPS - self.enable_https = False - # Network config - self.vswitch_type = "none" - - # Authentication config - self.admin_username = "admin" - self.admin_password = "" - self.os_password_rules_file = constants.OPENSTACK_PASSWORD_RULES_FILE - self.openstack_passwords = [] - - # Region config - self.region_config = False - self.region_services_create = False - self.shared_services = [] - self.external_oam_start_address = "" - self.external_oam_end_address = "" - 
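These address defaults follow netaddr's IPNetwork indexing, where subnet[n] is the nth address of the subnet and negative indices count back from the broadcast address. A standalone illustration using the management defaults above:

    from netaddr import IPNetwork

    subnet = IPNetwork("192.168.204.0/24")  # default management subnet
    print(subnet[2])   # 192.168.204.2   -> controller floating address
    print(subnet[3])   # 192.168.204.3   -> controller-0 address
    print(subnet[4])   # 192.168.204.4   -> controller-1 address
    print(subnet[-2])  # 192.168.204.254 -> default management end address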
self.region_1_name = "" - self.region_2_name = "" - self.admin_user_domain = DEFAULT_DOMAIN_NAME - self.admin_project_name = "" - self.admin_project_domain = DEFAULT_DOMAIN_NAME - self.service_project_name = constants.DEFAULT_SERVICE_PROJECT_NAME - self.service_user_domain = DEFAULT_DOMAIN_NAME - self.service_project_domain = DEFAULT_DOMAIN_NAME - self.keystone_auth_uri = "" - self.keystone_identity_uri = "" - self.keystone_admin_uri = "" - self.keystone_internal_uri = "" - self.keystone_public_uri = "" - self.keystone_service_name = "" - self.keystone_service_type = "" - self.patching_ks_user_name = "" - self.patching_ks_password = "" - self.sysinv_ks_user_name = "" - self.sysinv_ks_password = "" - self.sysinv_service_name = "" - self.sysinv_service_type = "" - self.mtce_ks_user_name = "" - self.mtce_ks_password = "" - self.nfv_ks_user_name = "" - self.nfv_ks_password = "" - self.fm_ks_user_name = "" - self.fm_ks_password = "" - self.barbican_ks_user_name = "" - self.barbican_ks_password = "" - - self.ldap_region_name = "" - self.ldap_service_name = "" - self.ldap_service_uri = "" - - # Subcloud config (only valid when region configured) - self.system_controller_subnet = None - - # LDAP config - self.ldapadmin_password = "" - self.ldapadmin_hashed_pw = "" - - # Time Zone config - self.timezone = "UTC" - - # saved service passwords, indexed by service name - self._service_passwords = {} - - @staticmethod - def set_time(): - """Allow user to set the system date and time.""" - - print("System date and time:") - print("---------------------\n") - print(textwrap.fill( - "The system date and time must be set now. Note that UTC " - "time must be used and that the date and time must be set as " - "accurately as possible, even if NTP/PTP is to be configured " - "later.", 80)) - print('') - - now = datetime.datetime.utcnow() - date_format = '%Y-%m-%d %H:%M:%S' - print("Current system date and time (UTC): " + - now.strftime(date_format)) - - while True: - user_input = input( - "\nIs the current date and time correct? [y/n]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - print("Current system date and time will be used.") - return - elif user_input.lower() == 'n': - break - else: - print("Invalid choice") - - new_time = None - while True: - user_input = input("\nEnter new system date and time (UTC) " + - "in YYYY-MM-DD HH:MM:SS format: \n") - if user_input.lower() == 'q': - raise UserQuit - else: - try: - new_time = datetime.datetime.strptime(user_input, - date_format) - break - except ValueError: - print("Invalid date and time specified") - continue - - # Set the system clock - try: - subprocess.check_call(["date", "-s", new_time.isoformat()]) - - except subprocess.CalledProcessError: - LOG.error("Failed to set system date and time") - raise ConfigFail("Failed to set system date and time") - - # Set the hardware clock in UTC time - try: - subprocess.check_call(["hwclock", "-wu"]) - except subprocess.CalledProcessError: - LOG.error("Failed to set the hardware clock") - raise ConfigFail("Failed to set the hardware clock") - - @staticmethod - def set_timezone(self): - """Allow user to set the system timezone.""" - - print("\nSystem timezone:") - print("----------------\n") - print(textwrap.fill( - "The system timezone must be set now. The timezone " - "must be a valid timezone from /usr/share/zoneinfo " - "(e.g. 
UTC, Asia/Hong_Kong, etc...)", 80)) - print('') - - while True: - user_input = input( - "Please input the timezone[" + self.timezone + "]:") - - if user_input == 'Q' or user_input == 'q': - raise UserQuit - elif user_input == "": - break - else: - if not os.path.isfile("/usr/share/zoneinfo/%s" % user_input): - print("Invalid timezone specified, please try again.") - continue - self.timezone = user_input - break - return - - def subcloud_config(self): - return (self.system_dc_role == - sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD) - - def get_next_lag_name(self): - """Return next available name for LAG interface.""" - name = 'bond' + str(self.next_lag_index) - self.next_lag_index += 1 - return name - - def get_sysadmin_sig(self): - """ Get signature for sysadmin user. """ - - # NOTE (knasim): only compute the signature for the entries we're - # tracking and propagating {password, aging}. This is to prevent - # config-outdated alarms for shadow fields that get modified - # but that we don't track and propagate - re_line = re.compile(r'(sysadmin:.*?)\s') - with open('/etc/shadow') as shadow_file: - for line in shadow_file: - match = re_line.search(line) - if match: - # Isolate password(2nd field) and aging(5th field) - entry = match.group(1).split(':') - entrystr = entry[1] + ":" + entry[4] - self.sysadmin_sig = hashlib.md5(entrystr).hexdigest() - self.passwd_hash = entry[1] - - def input_system_mode_config(self): - """Allow user to input system mode""" - print("\nSystem Configuration:") - print("---------------------\n") - print("System mode. Available options are:\n") - print(textwrap.fill( - "1) duplex-direct - two node redundant configuration. " - "Management and cluster-host networks " - "are directly connected to peer ports", 80)) - print(textwrap.fill( - "2) duplex - two node redundant configuration. ", 80)) - - print(textwrap.fill( - "3) simplex - single node non-redundant configuration.", 80)) - - value_mapping = { - "1": sysinv_constants.SYSTEM_MODE_DUPLEX_DIRECT, - "2": sysinv_constants.SYSTEM_MODE_DUPLEX, - '3': sysinv_constants.SYSTEM_MODE_SIMPLEX - } - user_input = prompt_for( - "System mode [duplex-direct]: ", '1', - lambda text: "Invalid choice" if text not in value_mapping - else None - ) - self.system_mode = value_mapping[user_input.lower()] - - def input_dc_selection(self): - """Allow user to input dc role""" - print("\nDistributed Cloud Configuration:") - print("--------------------------------\n") - - value_mapping = { - "y": sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER, - "n": None, - } - user_input = prompt_for( - "Configure Distributed Cloud System Controller [y/N]: ", 'n', - lambda text: "Invalid choice" if text.lower() not in value_mapping - else None - ) - self.system_dc_role = value_mapping[user_input.lower()] - - def check_storage_config(self): - """Check basic storage config.""" - - if get_root_disk_size() < constants.MINIMUM_ROOT_DISK_SIZE: - print(textwrap.fill( - "Warning: Root Disk %s size is less than %d GiB. " - "Please consult the Software Installation Guide " - "for details." % - (self.rootfs_node, constants.MINIMUM_ROOT_DISK_SIZE), 80)) - print('') - - def is_interface_in_bond(self, interface_name): - """ - Determine if the supplied interface is configured as a member - in a bond.
- - :param interface_name: interface to check - :return: True or False - """ - # In the case of bond with a single member - if interface_name == "": - return False - - if ((self.management_interface_configured and - self.lag_management_interface and - (interface_name == self.lag_management_interface_member0 or - interface_name == self.lag_management_interface_member1)) - or - (self.external_oam_interface_configured and - self.lag_external_oam_interface and - (interface_name == self.lag_external_oam_interface_member0 or - interface_name == self.lag_external_oam_interface_member1)) - or - (self.cluster_host_interface_configured and - self.lag_cluster_host_interface and - (interface_name == self.lag_cluster_host_interface_member0 or - interface_name == self.lag_cluster_host_interface_member1))): - return True - else: - return False - - def is_interface_in_use(self, interface_name): - """ - Determine if the supplied interface is already configured for use - - :param interface_name: interface to check - :return: True or False - """ - if ((self.management_interface_configured and - interface_name == self.management_interface) or - (self.external_oam_interface_configured and - interface_name == self.external_oam_interface) or - (self.cluster_host_interface_configured and - interface_name == self.cluster_host_interface)): - return True - else: - return False - - def is_valid_pxeboot_address(self, ip_address): - """Determine whether a pxeboot address is valid.""" - if ip_address.version != 4: - print("Invalid IP version - only IPv4 supported") - return False - elif ip_address == self.pxeboot_subnet.network: - print("Cannot use network address") - return False - elif ip_address == self.pxeboot_subnet.broadcast: - print("Cannot use broadcast address") - return False - elif ip_address.is_multicast(): - print("Invalid network address - multicast address not allowed") - return False - elif ip_address.is_loopback(): - print("Invalid network address - loopback address not allowed") - return False - elif ip_address not in self.pxeboot_subnet: - print("Address must be in the PXEBoot subnet") - return False - else: - return True - - def default_pxeboot_config(self): - """Set pxeboot to default private network.""" - - # Use private subnet for pxe booting - self.separate_pxeboot_network = False - self.pxeboot_subnet = self.private_pxeboot_subnet - self.controller_pxeboot_floating_address = \ - IPAddress(self.pxeboot_subnet[2]) - self.controller_pxeboot_address_0 = \ - IPAddress(self.pxeboot_subnet[3]) - self.controller_pxeboot_address_1 = \ - IPAddress(self.pxeboot_subnet[4]) - - self.pxeboot_start_address = self.pxeboot_subnet[2] - self.pxeboot_end_address = self.pxeboot_subnet[-2] - - def input_pxeboot_config(self): - """Allow user to input pxeboot config and perform validation.""" - - print("\nPXEBoot Network:") - print("----------------\n") - - print(textwrap.fill( - "The PXEBoot network is used for initial booting and installation" - " of each node. IP addresses on this network are reachable only " - "within the data center.", 80)) - print('') - print(textwrap.fill( - "The default configuration combines the PXEBoot network and the " - "management network. 
-    def input_pxeboot_config(self):
-        """Allow user to input pxeboot config and perform validation."""
-
-        print("\nPXEBoot Network:")
-        print("----------------\n")
-
-        print(textwrap.fill(
-            "The PXEBoot network is used for initial booting and installation"
-            " of each node. IP addresses on this network are reachable only "
-            "within the data center.", 80))
-        print('')
-        print(textwrap.fill(
-            "The default configuration combines the PXEBoot network and the "
-            "management network. If a separate PXEBoot network is used, it "
-            "will share the management interface, which requires the "
-            "management network to be placed on a VLAN.", 80))
-
-        while True:
-            print('')
-            user_input = input(
-                "Configure a separate PXEBoot network [y/N]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input.lower() == 'y':
-                self.separate_pxeboot_network = True
-                break
-            elif user_input.lower() == 'n':
-                self.separate_pxeboot_network = False
-                break
-            elif user_input == "":
-                break
-            else:
-                print("Invalid choice")
-                continue
-
-        if self.separate_pxeboot_network:
-            while True:
-                user_input = input("PXEBoot subnet [" +
-                                   str(self.pxeboot_subnet) + "]: ")
-                if user_input.lower() == 'q':
-                    raise UserQuit
-                elif user_input == "":
-                    user_input = self.pxeboot_subnet
-
-                try:
-                    ip_input = IPNetwork(user_input)
-                    if ip_input.version != 4:
-                        print("Invalid IP version - only IPv4 supported")
-                        continue
-                    elif ip_input.ip != ip_input.network:
-                        print("Invalid network address")
-                        continue
-                    elif ip_input.size < 16:
-                        print("PXEBoot subnet too small "
-                              "- must have at least 16 addresses")
-                        continue
-
-                    if ip_input.size < 255:
-                        print("WARNING: Subnet allows only %d addresses."
-                              % ip_input.size)
-
-                    self.pxeboot_subnet = ip_input
-                    break
-                except AddrFormatError:
-                    print("Invalid subnet - please enter a valid IPv4 subnet")
-
-            value_mapping = {
-                "y": True,
-                "n": False,
-            }
-
-            user_input = prompt_for(
-                "Use entire PXEBoot subnet [Y/n]: ", 'Y',
-                lambda text: "Invalid choice"
-                if text.lower() not in value_mapping
-                else None
-            )
-            self.use_entire_pxeboot_subnet = value_mapping[user_input.lower()]
-
-            if not self.use_entire_pxeboot_subnet:
-                def validate_input_address(text, error_header):
-                    try:
-                        validate_address_str(text, self.pxeboot_subnet)
-                        return None
-                    except ValidateFail as e:
-                        return "%s\n Reason: %s" % (error_header, e)
-
-                while True:
-                    self.pxeboot_start_address = self.pxeboot_subnet[2]
-                    self.pxeboot_end_address = self.pxeboot_subnet[-2]
-                    input_str = prompt_for(
-                        "PXEBoot network start address [" +
-                        str(self.pxeboot_start_address) +
-                        "]: ", str(self.pxeboot_start_address),
-                        lambda text: validate_input_address(
-                            text, "Invalid start address.")
-                    )
-                    self.pxeboot_start_address = IPAddress(input_str)
-
-                    input_str = prompt_for(
-                        "PXEBoot network end address [" +
-                        str(self.pxeboot_end_address) +
-                        "]: ", str(self.pxeboot_end_address),
-                        lambda text: validate_input_address(
-                            text, "Invalid end address.")
-                    )
-                    self.pxeboot_end_address = IPAddress(input_str)
-
-                    if not self.pxeboot_start_address < \
-                            self.pxeboot_end_address:
-                        print("Start address not less than end address. ")
-                        continue
-
-                    address_range = IPRange(
-                        str(self.pxeboot_start_address),
-                        str(self.pxeboot_end_address))
-
-                    min_addresses = 8
-                    if not address_range.size >= min_addresses:
-                        print(
-                            "Address range must contain at least "
-                            "%d addresses." % min_addresses)
-                        continue
-
-                    print('')
-                    break
-            else:
-                self.pxeboot_start_address = self.pxeboot_subnet[2]
-                self.pxeboot_end_address = self.pxeboot_subnet[-2]
-        else:
-            # Use private subnet for pxe booting
-            self.pxeboot_subnet = self.private_pxeboot_subnet
-            self.pxeboot_start_address = self.pxeboot_subnet[2]
-            self.pxeboot_end_address = self.pxeboot_subnet[-2]
-
-        ip_input = self.pxeboot_start_address
-        if not self.is_valid_pxeboot_address(ip_input):
-            raise ConfigFail("Unable to create controller PXEBoot "
-                             "floating address")
-        self.controller_pxeboot_floating_address = ip_input
-
-        default_controller0_pxeboot_ip = \
-            self.controller_pxeboot_floating_address + 1
-        ip_input = IPAddress(default_controller0_pxeboot_ip)
-        if not self.is_valid_pxeboot_address(ip_input):
-            raise ConfigFail("Unable to create controller-0 PXEBoot "
-                             "address")
-        self.controller_pxeboot_address_0 = ip_input
-
-        default_controller1_pxeboot_ip = self.controller_pxeboot_address_0 + 1
-        ip_input = IPAddress(default_controller1_pxeboot_ip)
-        if not self.is_valid_pxeboot_address(ip_input):
-            raise ConfigFail("Unable to create controller-1 PXEBoot "
-                             "address")
-        self.controller_pxeboot_address_1 = ip_input
-
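The start/end range validation above reduces to two netaddr checks; a sketch with illustrative values:

from netaddr import IPAddress, IPRange

start = IPAddress('192.168.202.10')
end = IPAddress('192.168.202.20')
assert start < end, "Start address not less than end address"
address_range = IPRange(str(start), str(end))
assert address_range.size >= 8, "Range must contain at least 8 addresses"
print(address_range.size)  # 11 (the range is inclusive of both ends)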
-    def input_management_config(self):
-        """Allow user to input management config and perform validation."""
-
-        print("\nManagement Network:")
-        print("-------------------\n")
-
-        print(textwrap.fill(
-            "The management network is used for internal communication "
-            "between platform components. IP addresses on this network "
-            "are reachable only within the data center.", 80))
-
-        while True:
-            print('')
-            print(textwrap.fill(
-                "A management bond interface provides redundant "
-                "connections for the management network.", 80))
-            if self.system_mode == sysinv_constants.SYSTEM_MODE_DUPLEX_DIRECT:
-                print(textwrap.fill(
-                    "It is strongly recommended to configure management "
-                    "interface link aggregation for All-in-one duplex-direct."
-                ))
-            print('')
-            user_input = input(
-                "Management interface link aggregation [y/N]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input.lower() == 'y':
-                self.lag_management_interface = True
-                break
-            elif user_input.lower() == 'n':
-                self.lag_management_interface = False
-                break
-            elif user_input == "":
-                break
-            else:
-                print("Invalid choice")
-                continue
-
-        while True:
-            if self.lag_management_interface:
-                self.management_interface = self.get_next_lag_name()
-
-            user_input = input("Management interface [" +
-                               str(self.management_interface) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.management_interface
-            elif self.lag_management_interface:
-                print(textwrap.fill(
-                    "Warning: The default name for the management bond "
-                    "interface (%s) cannot be changed." %
-                    self.management_interface, 80))
-                print('')
-                user_input = self.management_interface
-
-            if self.is_interface_in_bond(user_input):
-                print(textwrap.fill(
-                    "Interface is already configured as part of an "
-                    "aggregated interface.", 80))
-                continue
-            elif self.lag_management_interface:
-                self.management_interface = user_input
-                self.management_interface_name = user_input
-                break
-            elif interface_exists(user_input):
-                self.management_interface = user_input
-                self.management_interface_name = user_input
-                break
-            else:
-                print("Interface does not exist")
-                continue
-
-        while True:
-            user_input = input("Management interface MTU [" +
-                               str(self.management_mtu) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.management_mtu
-
-            if is_mtu_valid(user_input):
-                self.management_mtu = user_input
-                break
-            else:
-                print("MTU is invalid/unsupported")
-                continue
-
-        while True:
-            if not self.lag_management_interface:
-                break
-
-            print('')
-            print("Specify one of the bonding policies. Possible values are:")
-            print("  1) 802.3ad (LACP) policy")
-            print("  2) Active-backup policy")
-
-            user_input = input(
-                "\nManagement interface bonding policy [" +
-                str(self.lag_management_interface_policy) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == '1':
-                self.lag_management_interface_policy = \
-                    constants.LAG_MODE_8023AD
-                break
-            elif user_input == '2':
-                self.lag_management_interface_policy = \
-                    constants.LAG_MODE_ACTIVE_BACKUP
-                self.lag_management_interface_txhash = None
-                break
-            elif user_input == "":
-                break
-            else:
-                print("Invalid choice")
-                continue
-
-        while True:
-            if not self.lag_management_interface:
-                break
-
-            print(textwrap.fill(
-                "A maximum of 2 physical interfaces can be attached to the "
-                "management interface.", 80))
-            print('')
-
-            user_input = input(
-                "First management interface member [" +
-                str(self.lag_management_interface_member0) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.lag_management_interface_member0
-
-            if self.is_interface_in_bond(user_input):
-                print(textwrap.fill(
-                    "Interface is already configured as part of an "
-                    "aggregated interface.", 80))
-                continue
-            elif self.is_interface_in_use(user_input):
-                print("Interface is already in use")
-                continue
-            elif interface_exists(user_input):
-                self.lag_management_interface_member0 = user_input
-            else:
-                print("Interface does not exist")
-                self.lag_management_interface_member0 = ""
-                continue
-
-            user_input = input(
-                "Second management interface member [" +
-                str(self.lag_management_interface_member1) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == self.lag_management_interface_member0:
-                print("Cannot use member 0 as member 1")
-                continue
-            elif user_input == "":
-                user_input = self.lag_management_interface_member1
-
-            if self.is_interface_in_bond(user_input):
-                print(textwrap.fill(
-                    "Interface is already configured as part of an "
-                    "aggregated interface.", 80))
-                continue
-            elif self.is_interface_in_use(user_input):
-                print("Interface is already in use")
-                continue
-            elif interface_exists(user_input):
-                self.lag_management_interface_member1 = user_input
-                break
-            else:
-                print("Interface does not exist")
-                self.lag_management_interface_member1 = ""
-                user_input = input(
-                    "Do you want a single physical member in the bond "
-                    "interface [y/n]: ")
-                if user_input.lower() == 'q':
-                    raise UserQuit
-                elif user_input.lower() == 'y':
-                    break
-                elif user_input.lower() == 'n':
-                    continue
-
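is_mtu_valid is defined elsewhere in this module; a plausible stand-in for the MTU prompt above (an assumption, not the removed implementation) would look like:

def mtu_is_valid(mtu_str, lo=576, hi=9216):
    # Accept integral MTU strings within a conventional Ethernet range.
    try:
        mtu = int(mtu_str)
    except ValueError:
        return False
    return lo <= mtu <= hi

print(mtu_is_valid("1500"), mtu_is_valid("64"), mtu_is_valid("jumbo"))
# True False False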
-        if self.separate_pxeboot_network:
-            print('')
-            print(textwrap.fill(
-                "A management VLAN is required because a separate PXEBoot "
-                "network was configured on the management interface.", 80))
-            print('')
-
-            while True:
-                user_input = input(
-                    "Management VLAN Identifier [" +
-                    str(self.management_vlan) + "]: ")
-                if user_input.lower() == 'q':
-                    raise UserQuit
-                elif is_valid_vlan(user_input):
-                    self.management_vlan = user_input
-                    self.management_interface_name = \
-                        self.management_interface + '.' + self.management_vlan
-                    break
-                else:
-                    print("VLAN is invalid/unsupported")
-                    continue
-
-        min_addresses = 8
-        while True:
-            user_input = input("Management subnet [" +
-                               str(self.management_subnet) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.management_subnet
-
-            try:
-                tmp_management_subnet = validate_network_str(user_input,
-                                                             min_addresses)
-                if (tmp_management_subnet.version == 6 and
-                        not self.separate_pxeboot_network):
-                    print("Using IPv6 management network requires " +
-                          "use of separate PXEBoot network")
-                    continue
-                self.management_subnet = tmp_management_subnet
-                self.management_start_address = self.management_subnet[2]
-                self.management_end_address = self.management_subnet[-2]
-                if self.management_subnet.size < 255:
-                    print("WARNING: Subnet allows only %d addresses.\n"
-                          "This will not allow you to provision a Cinder LVM"
-                          " or Ceph backend." % self.management_subnet.size)
-                    while True:
-                        user_input = input(
-                            "Do you want to continue with the current "
-                            "configuration? [Y/n]: ")
-                        if user_input.lower() == 'q' or \
-                                user_input.lower() == 'n':
-                            raise UserQuit
-                        elif user_input.lower() == 'y' or user_input == "":
-                            break
-                        else:
-                            print("Invalid choice")
-                            continue
-                break
-            except ValidateFail as e:
-                print("{}".format(e))
-
-        if (self.system_dc_role !=
-                sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER):
-            while True:
-                user_input = input(
-                    "Use entire management subnet [Y/n]: ")
-                if user_input.lower() == 'q':
-                    raise UserQuit
-                elif user_input.lower() == 'y':
-                    self.use_entire_mgmt_subnet = True
-                    break
-                elif user_input.lower() == 'n':
-                    self.use_entire_mgmt_subnet = False
-                    break
-                elif user_input == "":
-                    break
-                else:
-                    print("Invalid choice")
-                    continue
-        else:
-            self.use_entire_mgmt_subnet = False
-            print(textwrap.fill(
-                "Configured as Distributed Cloud System Controller,"
-                " disallowing use of entire management subnet. "
-                "Ensure management IP range does not include System"
-                " Controller gateway address(es)", 80))
-
-        if not self.use_entire_mgmt_subnet:
-            while True:
-                self.management_start_address = self.management_subnet[2]
-                self.management_end_address = self.management_subnet[-2]
-                while True:
-                    user_input = input(
-                        "Management network start address [" +
-                        str(self.management_start_address) + "]: ")
-                    if user_input.lower() == 'q':
-                        raise UserQuit
-                    elif user_input == "":
-                        user_input = self.management_start_address
-
-                    try:
-                        self.management_start_address = validate_address_str(
-                            user_input, self.management_subnet)
-                        break
-                    except ValidateFail as e:
-                        print("Invalid start address. \n Reason: %s" % e)
-
-                while True:
-                    user_input = input(
-                        "Management network end address [" +
-                        str(self.management_end_address) + "]: ")
-                    if user_input == 'Q' or user_input == 'q':
-                        raise UserQuit
-                    elif user_input == "":
-                        user_input = self.management_end_address
-
-                    try:
-                        self.management_end_address = validate_address_str(
-                            user_input, self.management_subnet)
-                        break
-                    except ValidateFail as e:
-                        print("Invalid management end address. \n"
-                              "Reason: %s" % e)
-
-                if not self.management_start_address < \
-                        self.management_end_address:
-                    print("Start address not less than end address. ")
-                    print('')
-                    continue
-
-                address_range = IPRange(str(self.management_start_address),
-                                        str(self.management_end_address))
-                if not address_range.size >= min_addresses:
-                    print(
-                        "Address range must contain at least %d addresses. " %
-                        min_addresses)
-                    continue
-
-                sc = sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER
-                if (self.system_dc_role == sc):
-                    # Warn user that some space in the management subnet must
-                    # be reserved for the system controller gateway address(es)
-                    # used to communicate with the subclouds. - 2 because of
-                    # subnet and broadcast addresses.
-                    if address_range.size >= (self.management_subnet.size - 2):
-                        print(textwrap.fill(
-                            "Address range too large, no addresses left "
-                            "for System Controller gateway(s). ", 80))
-                        continue
-                break
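The System Controller reservation test above, with illustrative values (the usable range must leave room beyond the subnet size minus the network and broadcast addresses):

from netaddr import IPNetwork, IPRange

subnet = IPNetwork('192.168.204.0/24')
mgmt_range = IPRange('192.168.204.2', '192.168.204.254')
too_large = mgmt_range.size >= subnet.size - 2
print(mgmt_range.size, subnet.size - 2, too_large)  # 253 254 False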
", 80)) - user_input = input( - "Dynamic IP address allocation [Y/n]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - self.dynamic_address_allocation = True - break - elif user_input.lower() == 'n': - self.dynamic_address_allocation = False - break - elif user_input == "": - break - else: - print("Invalid choice") - continue - - default_controller0_mgmt_float_ip = self.management_start_address - ip_input = IPAddress(default_controller0_mgmt_float_ip) - try: - validate_address(ip_input, self.management_subnet) - except ValidateFail: - raise ConfigFail("Unable to create controller-0 Management " - "floating address") - self.controller_floating_address = ip_input - - default_controller0_mgmt_ip = self.controller_floating_address + 1 - ip_input = IPAddress(default_controller0_mgmt_ip) - try: - validate_address(ip_input, self.management_subnet) - except ValidateFail: - raise ConfigFail("Unable to create controller-0 Management " - "address") - self.controller_address_0 = ip_input - - default_controller1_mgmt_ip = self.controller_address_0 + 1 - ip_input = IPAddress(default_controller1_mgmt_ip) - try: - validate_address(ip_input, self.management_subnet) - except ValidateFail: - raise ConfigFail("Unable to create controller-1 Management " - "address") - self.controller_address_1 = ip_input - - first_nfs_ip = self.controller_address_1 + 1 - - """ create default Management NFS addresses """ - default_nfs_ip = IPAddress(first_nfs_ip) - try: - validate_address(default_nfs_ip, self.management_subnet) - except ValidateFail: - raise ConfigFail("Unable to create NFS Management address 1") - self.nfs_management_address_1 = default_nfs_ip - - default_nfs_ip = IPAddress(self.nfs_management_address_1 + 1) - try: - validate_address(default_nfs_ip, self.management_subnet) - except ValidateFail: - raise ConfigFail("Unable to create NFS Management address 2") - self.nfs_management_address_2 = default_nfs_ip - - while True: - if self.management_subnet.version == 6: - # Management subnet is IPv6, so update the default value - self.management_multicast_subnet = \ - IPNetwork(constants.DEFAULT_MULTICAST_SUBNET_IPV6) - - user_input = input("Management Network Multicast subnet [" + - str(self.management_multicast_subnet) + - "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.management_multicast_subnet - - try: - ip_input = IPNetwork(user_input) - if not self.is_valid_management_multicast_subnet(ip_input): - continue - self.management_multicast_subnet = ip_input - break - except AddrFormatError: - print("Invalid subnet - " - "please enter a valid IPv4 or IPv6 subnet" - ) - - """ Management interface configuration complete""" - self.management_interface_configured = True - - def input_aio_simplex_management_config(self, management_subnet=None): - """Allow user to input AIO simplex management config and perform - validation.""" - - if management_subnet is not None: - self.management_subnet = management_subnet - else: - print("\nManagement Network:") - print("-------------------\n") - - print(textwrap.fill( - "The management network is used for internal communication " - "between platform components. 
-    def input_aio_simplex_management_config(self, management_subnet=None):
-        """Allow user to input AIO simplex management config and perform
-        validation."""
-
-        if management_subnet is not None:
-            self.management_subnet = management_subnet
-        else:
-            print("\nManagement Network:")
-            print("-------------------\n")
-
-            print(textwrap.fill(
-                "The management network is used for internal communication "
-                "between platform components. IP addresses on this network "
-                "are reachable only within the host.", 80))
-            print('')
-
-            self.management_subnet = IPNetwork(
-                constants.DEFAULT_MGMT_ON_LOOPBACK_SUBNET_IPV4)
-            min_addresses = 16
-            while True:
-                user_input = input("Management subnet [" +
-                                   str(self.management_subnet) + "]: ")
-                if user_input.lower() == 'q':
-                    raise UserQuit
-                elif user_input == "":
-                    user_input = self.management_subnet
-
-                try:
-                    tmp_management_subnet = validate_network_str(user_input,
                                                                  min_addresses)
-                    if tmp_management_subnet.version == 6:
-                        print("IPv6 management network not supported on " +
-                              "simplex configuration")
-                        continue
-                    self.management_subnet = tmp_management_subnet
-                    break
-                except ValidateFail as e:
-                    print("{}".format(e))
-
-        self.management_interface = constants.LOOPBACK_IFNAME
-        self.management_interface_name = constants.LOOPBACK_IFNAME
-        self.management_start_address = self.management_subnet[2]
-        self.management_end_address = self.management_subnet[-2]
-        self.controller_floating_address = self.management_start_address
-        self.controller_address_0 = self.management_start_address + 1
-        self.controller_address_1 = self.management_start_address + 2
-
-        # Create default Management NFS addresses
-        self.nfs_management_address_1 = self.controller_address_1 + 1
-        self.nfs_management_address_2 = self.controller_address_1 + 2
-
-        # Management interface configuration complete
-        self.management_interface_configured = True
-
-    def is_valid_external_oam_subnet(self, ip_subnet):
-        """Determine whether an OAM subnet is valid."""
-        if ip_subnet.size < 8:
-            print("Subnet too small - must have at least 8 addresses")
-            return False
-        elif ip_subnet.ip != ip_subnet.network:
-            print("Invalid network address")
-            return False
-        elif ip_subnet.version == 6 and ip_subnet.prefixlen < 64:
-            print("IPv6 minimum prefix length is 64")
-            return False
-        elif ip_subnet.is_multicast():
-            print("Invalid network address - multicast address not allowed")
-            return False
-        elif ip_subnet.is_loopback():
-            print("Invalid network address - loopback address not allowed")
-            return False
-        elif ((self.separate_pxeboot_network and
-               ip_subnet.ip in self.pxeboot_subnet) or
-              (ip_subnet.ip in self.management_subnet) or
-              (self.cluster_host_interface and
-               ip_subnet.ip in self.cluster_host_subnet)):
-            print("External OAM subnet overlaps with an already "
-                  "configured subnet")
-            return False
-        else:
-            return True
-
-    def is_valid_external_oam_address(self, ip_address):
-        """Determine whether an OAM address is valid."""
-        if ip_address == self.external_oam_subnet.network:
-            print("Cannot use network address")
-            return False
-        elif ip_address == self.external_oam_subnet.broadcast:
-            print("Cannot use broadcast address")
-            return False
-        elif ip_address.is_multicast():
-            print("Invalid network address - multicast address not allowed")
-            return False
-        elif ip_address.is_loopback():
-            print("Invalid network address - loopback address not allowed")
-            return False
-        elif ip_address not in self.external_oam_subnet:
-            print("Address must be in the external OAM subnet")
-            return False
-        else:
-            return True
-
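The overlap rule in is_valid_external_oam_subnet, isolated as a sketch (subnets illustrative): the candidate subnet's base address must not fall inside any already-configured subnet.

from netaddr import IPNetwork

oam = IPNetwork('10.10.10.0/24')
mgmt = IPNetwork('192.168.204.0/24')
pxeboot = IPNetwork('192.168.202.0/24')
overlaps = any(oam.ip in s for s in (mgmt, pxeboot))
print(overlaps)  # False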
-    def input_aio_simplex_oam_ip_address(self):
-        """Allow user to input external OAM IP and perform validation."""
-        while True:
-            user_input = input(
-                "External OAM address [" +
-                str(self.external_oam_gateway_address + 1) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.external_oam_gateway_address + 1
-
-            try:
-                ip_input = IPAddress(user_input)
-                if not self.is_valid_external_oam_address(ip_input):
-                    continue
-                self.external_oam_floating_address = ip_input
-                self.external_oam_address_0 = ip_input
-                self.external_oam_address_1 = ip_input
-                break
-            except (AddrFormatError, ValueError):
-                print("Invalid address - "
-                      "please enter a valid %s address" %
-                      ip_version_to_string(self.external_oam_subnet.version)
-                      )
-
-    def input_oam_ip_address(self):
-        """Allow user to input external OAM IP and perform validation."""
-        while True:
-            user_input = input(
-                "External OAM floating address [" +
-                str(self.external_oam_gateway_address + 1) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.external_oam_gateway_address + 1
-
-            try:
-                ip_input = IPAddress(user_input)
-                if not self.is_valid_external_oam_address(ip_input):
-                    continue
-                self.external_oam_floating_address = ip_input
-                break
-            except (AddrFormatError, ValueError):
-                print("Invalid address - "
-                      "please enter a valid %s address" %
-                      ip_version_to_string(self.external_oam_subnet.version)
-                      )
-
-        while True:
-            user_input = input("External OAM address for first "
-                               "controller node [" +
-                               str(self.external_oam_floating_address + 1)
-                               + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.external_oam_floating_address + 1
-
-            try:
-                ip_input = IPAddress(user_input)
-                if not self.is_valid_external_oam_address(ip_input):
-                    continue
-                self.external_oam_address_0 = ip_input
-                break
-            except (AddrFormatError, ValueError):
-                print("Invalid address - "
-                      "please enter a valid %s address" %
-                      ip_version_to_string(self.external_oam_subnet.version)
-                      )
-
-        while True:
-            user_input = input("External OAM address for second "
-                               "controller node [" +
-                               str(self.external_oam_address_0 + 1) +
-                               "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.external_oam_address_0 + 1
-
-            try:
-                ip_input = IPAddress(user_input)
-                if not self.is_valid_external_oam_address(ip_input):
-                    continue
-                self.external_oam_address_1 = ip_input
-                break
-            except (AddrFormatError, ValueError):
-                print("Invalid address - "
-                      "please enter a valid %s address" %
-                      ip_version_to_string(self.external_oam_subnet.version)
-                      )
-
-    def input_external_oam_config(self):
-        """Allow user to input external OAM config and perform validation."""
-
-        print("\nExternal OAM Network:")
-        print("---------------------\n")
-        print(textwrap.fill(
-            "The external OAM network is used for management of the "
-            "cloud. It also provides access to the "
-            "platform APIs. IP addresses on this network are reachable "
-            "outside the data center.", 80))
-        print('')
-
-        ext_oam_vlan_required = False
-
-        while True:
-            print(textwrap.fill(
-                "An external OAM bond interface provides redundant "
-                "connections for the OAM network.", 80))
-            print('')
-            user_input = input(
-                "External OAM interface link aggregation [y/N]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input.lower() == 'y':
-                self.lag_external_oam_interface = True
-                break
-            elif user_input.lower() == 'n':
-                self.lag_external_oam_interface = False
-                break
-            elif user_input == "":
-                break
-            else:
-                print("Invalid choice")
-                continue
-
-        while True:
-            if self.lag_external_oam_interface:
-                self.external_oam_interface = self.get_next_lag_name()
-
-            user_input = input("External OAM interface [" +
-                               str(self.external_oam_interface) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.external_oam_interface
-            elif self.lag_external_oam_interface:
-                print(textwrap.fill(
-                    "Warning: The default name for the external OAM bond "
-                    "interface (%s) cannot be changed." %
-                    self.external_oam_interface, 80))
-                print('')
-                user_input = self.external_oam_interface
-
-            if self.is_interface_in_bond(user_input):
-                print(textwrap.fill(
-                    "Interface is already configured as part of an "
-                    "aggregated interface.", 80))
-                continue
-            elif self.lag_external_oam_interface:
-                self.external_oam_interface = user_input
-                self.external_oam_interface_name = user_input
-                break
-            elif (interface_exists(user_input) or
-                  user_input == self.management_interface):
-                self.external_oam_interface = user_input
-                self.external_oam_interface_name = user_input
-                if ((self.management_interface_configured and
-                     user_input == self.management_interface)):
-                    ext_oam_vlan_required = True
-                break
-            else:
-                print("Interface does not exist")
-                continue
-
-        while True:
-            user_input = input(
-                "Configure an external OAM VLAN [y/N]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input.lower() == 'y':
-                while True:
-                    user_input = input(
-                        "External OAM VLAN Identifier [" +
-                        str(self.external_oam_vlan) + "]: ")
-                    if user_input.lower() == 'q':
-                        raise UserQuit
-                    elif is_valid_vlan(user_input):
-                        if ((user_input == self.management_vlan) or
-                                (user_input == self.cluster_host_vlan)):
-                            print(textwrap.fill(
-                                "Invalid VLAN Identifier. Configured VLAN "
-                                "Identifier is already in use by another "
-                                "network.", 80))
-                            continue
-                        self.external_oam_vlan = user_input
-                        self.external_oam_interface_name = \
-                            self.external_oam_interface + '.' + \
-                            self.external_oam_vlan
-                        break
-                    else:
-                        print("VLAN is invalid/unsupported")
-                        continue
-                break
-            elif user_input.lower() in ('n', ''):
-                if ext_oam_vlan_required:
-                    print(textwrap.fill(
-                        "An external OAM VLAN is required since the "
-                        "configured external OAM interface is the "
-                        "same as either the configured management "
-                        "or cluster-host interface.", 80))
-                    continue
-                self.external_oam_vlan = ""
-                break
-            else:
-                print("Invalid choice")
-                continue
-
-        while True:
-            user_input = input("External OAM interface MTU [" +
-                               str(self.external_oam_mtu) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.external_oam_mtu
-
-            if (self.management_interface_configured and
-                    self.external_oam_interface ==
-                    self.management_interface and
-                    self.external_oam_vlan and
-                    user_input > self.management_mtu):
-                print("External OAM VLAN MTU must not be larger than "
-                      "underlying management interface MTU")
-                continue
-            elif is_mtu_valid(user_input):
-                self.external_oam_mtu = user_input
-                break
-            else:
-                print("MTU is invalid/unsupported")
-                continue
-
-        while True:
-            if not self.lag_external_oam_interface:
-                break
-
-            print('')
-            print("Specify one of the bonding policies. Possible values are:")
-            print("  1) Active-backup policy")
-            print("  2) Balanced XOR policy")
-            print("  3) 802.3ad (LACP) policy")
-
-            user_input = input(
-                "\nExternal OAM interface bonding policy [" +
-                str(self.lag_external_oam_interface_policy) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == '1':
-                self.lag_external_oam_interface_policy = \
-                    constants.LAG_MODE_ACTIVE_BACKUP
-                break
-            elif user_input == '2':
-                self.lag_external_oam_interface_policy = \
-                    constants.LAG_MODE_BALANCE_XOR
-                self.lag_external_oam_interface_txhash = \
-                    constants.LAG_TXHASH_LAYER2
-                break
-            elif user_input == '3':
-                self.lag_external_oam_interface_policy = \
-                    constants.LAG_MODE_8023AD
-                self.lag_external_oam_interface_txhash = \
-                    constants.LAG_TXHASH_LAYER2
-                break
-            elif user_input == "":
-                break
-            else:
-                print("Invalid choice")
-                continue
-
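The bonding-policy branches above amount to a small lookup; a sketch with hypothetical stand-ins for the constants.LAG_MODE_* and constants.LAG_TXHASH_LAYER2 values:

# Hypothetical stand-ins; the real values come from controllerconfig constants.
BOND_POLICIES = {
    '1': ('active-backup', None),
    '2': ('balance-xor', 'layer2'),
    '3': ('802.3ad', 'layer2'),
}

choice = '3'
policy, txhash = BOND_POLICIES[choice]
print(policy, txhash)  # 802.3ad layer2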
-        while True:
-            if not self.lag_external_oam_interface:
-                break
-
-            print(textwrap.fill(
-                "A maximum of 2 physical interfaces can be attached to the "
-                "external OAM interface.", 80))
-            print('')
-
-            user_input = input(
-                "First external OAM interface member [" +
-                str(self.lag_external_oam_interface_member0) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.lag_external_oam_interface_member0
-
-            if self.is_interface_in_bond(user_input):
-                print(textwrap.fill(
-                    "Interface is already configured as part of an "
-                    "aggregated interface.", 80))
-                continue
-            elif self.is_interface_in_use(user_input):
-                print("Interface is already in use")
-                continue
-            elif interface_exists(user_input):
-                self.lag_external_oam_interface_member0 = user_input
-            else:
-                print("Interface does not exist")
-                self.lag_external_oam_interface_member0 = ""
-                continue
-
-            user_input = input(
-                "Second external OAM interface member [" +
-                str(self.lag_external_oam_interface_member1) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.lag_external_oam_interface_member1
-
-            if self.is_interface_in_bond(user_input):
-                print(textwrap.fill(
-                    "Interface is already configured as part of an "
-                    "aggregated interface.", 80))
-                continue
-            elif self.is_interface_in_use(user_input):
-                print("Interface is already in use")
-                continue
-            elif user_input == self.lag_external_oam_interface_member0:
-                print("Cannot use member 0 as member 1")
-                continue
-            if interface_exists(user_input):
-                self.lag_external_oam_interface_member1 = user_input
-                break
-            else:
-                print("Interface does not exist")
-                self.lag_external_oam_interface_member1 = ""
-                user_input = input(
-                    "Do you want a single physical member in the bond "
-                    "interface [y/n]: ")
-                if user_input.lower() == 'q':
-                    raise UserQuit
-                elif user_input.lower() == 'y':
-                    break
-                elif user_input.lower() == 'n':
-                    continue
-
-        while True:
-            user_input = input("External OAM subnet [" +
-                               str(self.external_oam_subnet) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.external_oam_subnet
-
-            try:
-                ip_input = IPNetwork(user_input)
-                if not self.is_valid_external_oam_subnet(ip_input):
-                    continue
-                self.external_oam_subnet = ip_input
-                break
-            except AddrFormatError:
-                print("Invalid subnet - "
-                      "please enter a valid IPv4 or IPv6 subnet"
-                      )
-
-        while True:
-            user_input = input("External OAM gateway address [" +
-                               str(self.external_oam_subnet[1]) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.external_oam_subnet[1]
-
-            try:
-                ip_input = IPAddress(user_input)
-                if not self.is_valid_external_oam_address(ip_input):
-                    continue
-                self.external_oam_gateway_address = ip_input
-                break
-            except (AddrFormatError, ValueError):
-                print("Invalid address - "
-                      "please enter a valid %s address" %
-                      ip_version_to_string(self.external_oam_subnet.version)
-                      )
-
-        if self.system_mode == sysinv_constants.SYSTEM_MODE_SIMPLEX:
-            self.input_aio_simplex_oam_ip_address()
-        else:
-            self.input_oam_ip_address()
-
-        # External OAM interface configuration complete
-        self.external_oam_interface_configured = True
-
-    def is_valid_cluster_host_address(self, ip_address):
-        """Determine whether cluster host address is valid."""
-        if ip_address == self.cluster_host_subnet.network:
-            print("Cannot use network address")
-            return False
-        elif ip_address == self.cluster_host_subnet.broadcast:
-            print("Cannot use broadcast address")
-            return False
-        elif ip_address.is_multicast():
-            print("Invalid network address - multicast address not allowed")
-            return False
-        elif ip_address.is_loopback():
-            print("Invalid network address - loopback address not allowed")
-            return False
-        elif ip_address not in self.cluster_host_subnet:
-            print("Address must be in the cluster host subnet")
-            return False
-        else:
-            return True
-
" - "IP addresses on this network are reachable only within the data " - "center.", 80))) - print('') - print(textwrap.fill( - "If a separate cluster host interface is not configured the " - "management network will be used.", 80)) - print('') - - while True: - print('') - print(textwrap.fill( - "An cluster host bond interface provides redundant " - "connections for the cluster host network.", 80)) - print('') - user_input = input( - "Cluster host interface link aggregation [y/N]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - self.lag_cluster_host_interface = True - break - elif user_input.lower() in ('n', ''): - self.lag_cluster_host_interface = False - break - else: - print("Invalid choice") - continue - - while True: - if self.lag_cluster_host_interface: - self.cluster_host_interface = self.get_next_lag_name() - - user_input = input("Cluster host interface [" + - str(self.management_interface) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == '': - user_input = self.management_interface - if user_input == '': - print("Invalid interface") - continue - elif self.lag_cluster_host_interface: - print(textwrap.fill( - "Warning: The default name for the cluster host bond " - "interface (%s) cannot be changed." % - self.cluster_host_interface, 80)) - print('') - user_input = self.cluster_host_interface - - if self.is_interface_in_bond(user_input): - print(textwrap.fill( - "Interface is already configured as part of an " - "aggregated interface.", 80)) - continue - elif self.lag_cluster_host_interface: - self.cluster_host_interface = user_input - self.cluster_host_interface_name = user_input - break - elif interface_exists(user_input): - self.cluster_host_interface = user_input - self.cluster_host_interface_name = user_input - break - else: - print("Interface does not exist") - continue - - while True: - user_input = input( - "Configure a cluster host VLAN [y/N]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - while True: - user_input = input( - "Cluster host VLAN Identifier [" + - str(self.cluster_host_vlan) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif is_valid_vlan(user_input): - self.cluster_host_vlan = user_input - self.cluster_host_interface_name = \ - self.cluster_host_interface + '.' + \ - self.cluster_host_vlan - break - else: - print("VLAN is invalid/unsupported") - continue - break - elif user_input.lower() in ('n', ''): - self.cluster_host_vlan = "" - break - else: - print("Invalid choice") - continue - - while True: - if self.cluster_host_interface == self.management_interface: - break - user_input = input("Cluster host interface MTU [" + - str(self.cluster_host_mtu) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.cluster_host_mtu - - if (self.management_interface_configured and - self.cluster_host_interface == - self.management_interface and - self.cluster_host_vlan and - user_input > self.management_mtu): - print("Cluster host VLAN MTU must not be larger than " - "underlying management interface MTU") - continue - elif is_mtu_valid(user_input): - self.cluster_host_mtu = user_input - break - else: - print("MTU is invalid/unsupported") - continue - - while True: - if self.cluster_host_interface == self.management_interface: - break - if not self.lag_cluster_host_interface: - break - print('') - print("Specify one of the bonding policies. 
-        while True:
-            if self.cluster_host_interface == self.management_interface:
-                break
-            if not self.lag_cluster_host_interface:
-                break
-            print('')
-            print("Specify one of the bonding policies. Possible values are:")
-            print("  1) Active-backup policy")
-            print("  2) Balanced XOR policy")
-            print("  3) 802.3ad (LACP) policy")
-
-            user_input = input(
-                "\nCluster host interface bonding policy [" +
-                str(self.lag_cluster_host_interface_policy) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == '1':
-                self.lag_cluster_host_interface_policy = \
-                    constants.LAG_MODE_ACTIVE_BACKUP
-                self.lag_cluster_host_interface_txhash = None
-                break
-            elif user_input == '2':
-                self.lag_cluster_host_interface_policy = \
-                    constants.LAG_MODE_BALANCE_XOR
-                self.lag_cluster_host_interface_txhash = \
-                    constants.LAG_TXHASH_LAYER2
-                break
-            elif user_input == '3':
-                self.lag_cluster_host_interface_policy = \
-                    constants.LAG_MODE_8023AD
-                self.lag_cluster_host_interface_txhash = \
-                    constants.LAG_TXHASH_LAYER2
-                break
-            elif user_input == "":
-                break
-            else:
-                print("Invalid choice")
-                continue
-
-        while True:
-            if not self.lag_cluster_host_interface:
-                break
-            if self.cluster_host_interface == self.management_interface:
-                break
-
-            print(textwrap.fill(
-                "A maximum of 2 physical interfaces can be attached to the "
-                "cluster host interface.", 80))
-            print('')
-
-            user_input = input(
-                "First cluster host interface member [" +
-                str(self.lag_cluster_host_interface_member0) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.lag_cluster_host_interface_member0
-
-            if self.is_interface_in_bond(user_input):
-                print(textwrap.fill(
-                    "Interface is already configured as part of an "
-                    "aggregated interface.", 80))
-                continue
-            elif self.is_interface_in_use(user_input):
-                print("Interface is already in use")
-                continue
-            elif interface_exists(user_input):
-                self.lag_cluster_host_interface_member0 = user_input
-            else:
-                print("Interface does not exist")
-                self.lag_cluster_host_interface_member0 = ""
-                continue
-
-            user_input = input(
-                "Second cluster host interface member [" +
-                str(self.lag_cluster_host_interface_member1) + "]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input == "":
-                user_input = self.lag_cluster_host_interface_member1
-
-            if self.is_interface_in_bond(user_input):
-                print(textwrap.fill(
-                    "Interface is already configured as part of an "
-                    "aggregated interface.", 80))
-                continue
-            elif self.is_interface_in_use(user_input):
-                print("Interface is already in use")
-                continue
-            elif interface_exists(user_input):
-                if user_input == self.lag_cluster_host_interface_member0:
-                    print("Cannot use member 0 as member 1")
-                    continue
-                else:
-                    self.lag_cluster_host_interface_member1 = user_input
-                    break
-            else:
-                print("Interface does not exist")
-                self.lag_cluster_host_interface_member1 = ""
-                user_input = input(
-                    "Do you want a single physical member in the bond "
-                    "interface [y/n]: ")
-                if user_input.lower() == 'q':
-                    raise UserQuit
-                elif user_input.lower() == 'y':
-                    break
-                elif user_input.lower() in ('n', ''):
-                    continue
-                else:
-                    print("Invalid choice")
-                    continue
-
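The cluster subnet checks that follow, condensed into a runnable netaddr sketch (values illustrative):

from netaddr import IPNetwork

min_addresses = 8
subnet = IPNetwork('192.168.206.0/28')
if subnet.size < min_addresses:
    print("Cluster subnet too small - must have at least %d addresses"
          % min_addresses)
elif subnet.version == 6 and subnet.prefixlen < 64:
    print("IPv6 minimum prefix length is 64")
print(subnet.size)  # 16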
print("IPv6 minimum prefix length is 64") - continue - elif ((self.separate_pxeboot_network and - ip_input.ip in self.pxeboot_subnet) or - ip_input.ip in self.management_subnet): - print("Cluster host subnet overlaps with an already " - "configured subnet") - continue - - if ip_input.size < 255: - print("WARNING: Subnet allows only %d addresses." - % ip_input.size) - - self.cluster_host_subnet = ip_input - break - except AddrFormatError: - print("Invalid subnet - please enter a valid IPv4 subnet") - - """ Cluster host interface configuration complete""" - self.cluster_host_interface_configured = True - - def get_dns_servers(self): - """Produce a comma separated list of DNS servers.""" - servers = [str(s) for s in self.nameserver_addresses if s] - return ",".join(servers) - - def input_dns_config(self): - """Allow user to input DNS config and perform validation.""" - - print("\nDomain Name System (DNS):") - print("-------------------------\n") - print(textwrap.fill( - "Configuring DNS servers accessible through the external " - "OAM network allows domain names to be mapped to IP " - "addresses.", 80)) - print(textwrap.fill( - "The configuration of at least one DNS server is mandatory. To " - "skip the configuration of one or more nameservers (1 to 3 are " - "allowed), enter C to continue to the next configuration item.", - 80)) - print('') - - if self.external_oam_subnet.version == 6: - self.nameserver_addresses = ["2001:4860:4860::8888", - "2001:4860:4860::8844", ""] - - for server in range(0, len(self.nameserver_addresses)): - while True: - user_input = raw_input( - "Nameserver " + str(server + 1) + " [" + - str(self.nameserver_addresses[server]) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'c': - if server == 0: - print("At least one DNS server is required.") - continue - for x in range(server, len(self.nameserver_addresses)): - self.nameserver_addresses[x] = "" - return - elif user_input == "": - user_input = self.nameserver_addresses[server] - # Pressing enter with a blank default will continue - if user_input == "": - return - - try: - try: - ip_input = validate_nameserver_address_str( - user_input, self.external_oam_subnet.version) - except ValidateFail as e: - print('{}'.format(e)) - continue - self.nameserver_addresses[server] = ip_input - break - except (AddrFormatError, ValueError): - print("Invalid address - please enter a valid IPv4 " - "address") - - def input_docker_proxy_config(self): - """Allow user to input docker proxy config.""" - - print("\nDocker Proxy:") - print("-------------\n") - print(textwrap.fill( - "Docker proxy is needed if host OAM network is behind a proxy.", - 80)) - print('') - while True: - user_input = input( - "Configure docker proxy [y/N]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - while True: - user_input = input( - "HTTP proxy (http://example.proxy:port): ") - if user_input.lower() == 'q': - raise UserQuit - if user_input: - if is_valid_url(user_input): - self.docker_http_proxy = user_input - break - else: - print("Please enter a valid url") - continue - else: - self.docker_http_proxy = "" - break - - while True: - user_input = input( - "HTTPS proxy (https://example.proxy:port): ") - if user_input.lower() == 'q': - raise UserQuit - if user_input: - if is_valid_url(user_input): - self.docker_https_proxy = user_input - break - else: - print("Please enter a valid url") - continue - else: - self.docker_https_proxy = "" - break - - if not self.docker_http_proxy and not 
-                if not self.docker_http_proxy and not \
-                        self.docker_https_proxy:
-                    print("At least one proxy required")
-                    continue
-                else:
-                    self.enable_docker_proxy = True
-
-                while True:
-                    # TODO: The current Docker version 18.03.1-ce uses the
-                    # go-lang net library for proxy settings. The go-lang
-                    # net lib doesn't support CIDR notation until this
-                    # commit:
-                    #
-                    # https://github.com/golang/net/commit/
-                    # c21de06aaf072cea07f3a65d6970e5c7d8b6cd6d
-                    #
-                    # Once Docker is upgraded to a version in which CIDR
-                    # notation is supported, pre_set_no_proxy can be
-                    # simplified to subnets.
-                    if self.system_mode == \
-                            sysinv_constants.SYSTEM_MODE_SIMPLEX:
-                        pre_set_no_proxy = "localhost,127.0.0.1," + \
-                            str(self.controller_floating_address) + "," + \
-                            str(self.controller_address_0) + "," + \
-                            str(self.controller_address_1) + "," + \
-                            str(self.external_oam_address_0)
-                    else:
-                        pre_set_no_proxy = "localhost,127.0.0.1," + \
-                            str(self.controller_floating_address) + "," + \
-                            str(self.controller_address_0) + "," + \
-                            str(self.controller_address_1) + "," + \
-                            str(self.external_oam_floating_address) + "," + \
-                            str(self.external_oam_address_0) + "," + \
-                            str(self.external_oam_address_1)
-
-                    user_input = input(
-                        "Additional NO proxy besides '" +
-                        pre_set_no_proxy +
-                        "'\n(Comma-separated addresses, " +
-                        "wildcard/subnet not allowed)\n:")
-                    if user_input.lower() == 'q':
-                        raise UserQuit
-                    if user_input:
-                        input_addr_list = user_input.split(",")
-                        valid_address = True
-                        for input_addr in input_addr_list:
-                            if not is_valid_domain_or_ip(input_addr):
-                                print("Input address '%s' is invalid" %
-                                      input_addr)
-                                valid_address = False
-                                break
-                        if valid_address:
-                            self.docker_no_proxy = pre_set_no_proxy + \
-                                "," + user_input
-                            break
-                        else:
-                            continue
-                    else:
-                        self.docker_no_proxy = pre_set_no_proxy
-                        break
-                break
-            elif user_input.lower() in ('n', ''):
-                self.enable_docker_proxy = False
-                break
-            else:
-                print("Invalid choice")
-                continue
-
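The pre-set no_proxy assembly above, as a sketch with illustrative addresses:

addresses = ['192.168.204.2', '192.168.204.3', '192.168.204.4', '10.10.10.3']
pre_set_no_proxy = ",".join(["localhost", "127.0.0.1"] + addresses)
extra = "registry.local,10.10.10.100"   # illustrative user input
no_proxy = pre_set_no_proxy + "," + extra if extra else pre_set_no_proxy
print(no_proxy)
# localhost,127.0.0.1,192.168.204.2,...,registry.local,10.10.10.100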
-    def input_docker_registry_config(self):
-        """Allow user to input docker registry config."""
-
-        print("\nDocker Registry:")
-        print("----------------\n")
-        print("Configure docker registries to pull images from.\n"
-              "Default registries are:\n"
-              "k8s.gcr.io, gcr.io, quay.io, docker.io\n"
-              )
-        while True:
-            user_input = input(
-                "Use default docker registries [Y/n]: ")
-            if user_input.lower() == 'q':
-                raise UserQuit
-            elif user_input.lower() == 'n':
-                # textwrap.fill displays unexpected newlines when the text
-                # includes '\n', so print the help text directly
-                print("\nEach registry can be specified as one of the "
-                      "following:\n"
-                      " - domain (e.g. example.domain)\n"
-                      " - domain with port (e.g. example.domain:5000)\n"
-                      " - IPv4 address (e.g. 1.2.3.4)\n"
-                      " - IPv4 address with port (e.g. 1.2.3.4:5000)\n"
-                      )
-                while True:
-                    user_input = input(
-                        "Use a unified registry replacing all "
-                        "default registries [y/n]: ")
-                    if user_input.lower() == 'q':
-                        raise UserQuit
-                    elif user_input.lower() == 'y':
-                        # Input a unified registry to avoid entering
-                        # the same registry repeatedly
-                        while True:
-                            user_input = input(
-                                "Enter a unified registry: ")
-                            if user_input.lower() == 'q':
-                                raise UserQuit
-                            if is_valid_domain_or_ip(user_input):
-                                self.docker_k8s_registry = user_input
-                                self.docker_gcr_registry = user_input
-                                self.docker_quay_registry = user_input
-                                self.docker_docker_registry = user_input
-                                self.docker_use_default_registry = False
-                                break
-                            else:
-                                print("Please enter a valid registry address")
-                                continue
-
-                        # Only a unified registry can be flagged as an
-                        # insecure registry
-                        while True:
-                            user_input = input(
-                                "Is '" + self.docker_k8s_registry +
-                                "' a secure registry (https) [Y/n]: ")
-                            if user_input.lower() == 'q':
-                                raise UserQuit
-                            elif user_input.lower() in ('y', ''):
-                                self.is_secure_registry = True
-                                break
-                            elif user_input.lower() == 'n':
-                                self.is_secure_registry = False
-                                break
-                            else:
-                                print("Invalid choice")
-                                continue
-                        break
-
-                    elif user_input.lower() == 'n':
-                        # Input alternative registries separately
-                        while True:
-                            user_input = input(
-                                "Alternative registry to k8s.gcr.io: ")
-                            if user_input.lower() == 'q':
-                                raise UserQuit
-                            if is_valid_domain_or_ip(user_input):
-                                self.docker_k8s_registry = user_input
-                                break
-                            else:
-                                print("Please enter a valid registry address")
-                                continue
-
-                        while True:
-                            user_input = input(
-                                "Alternative registry to gcr.io: ")
-                            if user_input.lower() == 'q':
-                                raise UserQuit
-                            if is_valid_domain_or_ip(user_input):
-                                self.docker_gcr_registry = user_input
-                                break
-                            else:
-                                print("Please enter a valid registry address")
-                                continue
-
-                        while True:
-                            user_input = input(
-                                "Alternative registry to quay.io: ")
-                            if user_input.lower() == 'q':
-                                raise UserQuit
-                            if is_valid_domain_or_ip(user_input):
-                                self.docker_quay_registry = user_input
-                                break
-                            else:
-                                print("Please enter a valid registry address")
-                                continue
-
-                        while True:
-                            user_input = input(
-                                "Alternative registry to docker.io: ")
-                            if user_input.lower() == 'q':
-                                raise UserQuit
-                            if is_valid_domain_or_ip(user_input):
-                                self.docker_docker_registry = user_input
-                                break
-                            else:
-                                print("Please enter a valid registry address")
-                                continue
-
-                        if (self.docker_k8s_registry or
-                                self.docker_gcr_registry or
-                                self.docker_quay_registry or
-                                self.docker_docker_registry):
-                            self.docker_use_default_registry = False
-                            break
-                        else:
-                            print("At least one registry is required")
-                            continue
-                    else:
-                        print("Invalid choice")
-                        continue
-                break
-
-            elif user_input.lower() in ('y', ''):
-                self.docker_use_default_registry = True
-                break
-            else:
-                print("Invalid choice")
-                continue
-
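The unified-registry path above simply fans one value out to all four defaults; a sketch (registry value hypothetical):

unified = 'registry.local:9001'   # illustrative unified registry
registries = dict.fromkeys(
    ('k8s.gcr.io', 'gcr.io', 'quay.io', 'docker.io'), unified)
use_default_registry = False
print(registries['docker.io'])  # registry.local:9001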
- """ - - print("\nCloud Authentication:") - print("-------------------------------\n") - print(textwrap.fill( - "Configure a password for the Cloud admin user " - "The Password must have a minimum length of 7 character, " - "and conform to password complexity rules", 80)) - - password_input = "" - while True: - user_input = getpass.getpass("Create admin user password: ") - if user_input.lower() == 'q': - raise UserQuit - - password_input = user_input - if len(password_input) < 1: - print("Password cannot be empty") - continue - - user_input = getpass.getpass("Repeat admin user password: ") - if user_input.lower() == 'q': - raise UserQuit - - if user_input != password_input: - print("Password did not match") - continue - else: - print("\n") - self.admin_password = user_input - # the admin password will be validated - self.add_password_for_validation('ADMIN_PASSWORD', - self.admin_password) - if self.process_validation_passwords(console=True): - break - - def default_config(self): - """Use default configuration suitable for testing in virtual env.""" - - self.admin_password = "Li69nux*" - self.management_interface_configured = True - self.external_oam_interface_configured = True - self.default_pxeboot_config() - if not self.kubernetes: - self.nameserver_addresses = ["", "", ""] - - if utils.is_cpe(): - self.system_mode = sysinv_constants.SYSTEM_MODE_DUPLEX - - def input_config(self): - """Allow user to input configuration.""" - print("System Configuration") - print("====================") - print("Enter Q at any prompt to abort...\n") - - self.set_time() - self.set_timezone(self) - if utils.is_cpe(): - self.input_system_mode_config() - self.check_storage_config() - if self.system_mode == sysinv_constants.SYSTEM_MODE_SIMPLEX: - self.default_pxeboot_config() - self.input_aio_simplex_management_config() - else: - # An AIO system cannot function as a Distributed Cloud System - # Controller - if utils.get_system_type() != sysinv_constants.TIS_AIO_BUILD: - self.input_dc_selection() - self.input_pxeboot_config() - self.input_management_config() - if self.kubernetes: - self.input_cluster_host_config() - self.input_external_oam_config() - if self.kubernetes: - self.input_dns_config() - # Docker proxy is only used in kubernetes config - self.input_docker_proxy_config() - self.input_docker_registry_config() - self.input_authentication_config() - - def is_valid_management_multicast_subnet(self, ip_subnet): - """Determine whether the mgmt multicast subnet is valid.""" - # The multicast subnet must belong to the same Address Family - # as the management network - if ip_subnet.version != self.management_subnet.version: - print(textwrap.fill( - "Invalid network address - Management Multicast Subnet and " - " Network IP Families must be the same.", 80)) - return False - elif ip_subnet.size < 16: - print("Subnet too small - must have at least 16 addresses") - return False - elif ip_subnet.ip != ip_subnet.network: - print("Invalid network address") - return False - elif ip_subnet.version == 6 and ip_subnet.prefixlen < 64: - print("IPv6 minimum prefix length is 64") - return False - elif not ip_subnet.is_multicast(): - print("Invalid network address - must be multicast") - return False - else: - return True - - def input_config_from_file(self, configfile, restore=False): - """Read configuration from answer or config file. - - WARNING: Any changes made here need to be reflected in the code - that translates region config to this format in regionconfig.py. 
- """ - if not os.path.isfile(configfile): - print("Specified answer or config file not found") - raise ConfigFail("Answer or Config file not found") - - config = configparser.RawConfigParser() - config_sections = [] - - try: - config.read(configfile) - config_sections = config.sections() - - self.system_mode = config.get('cSYSTEM', 'SYSTEM_MODE') - if config.has_option('cSYSTEM', 'DISTRIBUTED_CLOUD_ROLE'): - self.system_dc_role = \ - config.get('cSYSTEM', 'DISTRIBUTED_CLOUD_ROLE') - - if config.has_option('cMETA', 'CONFIG_UUID'): - self.config_uuid = config.get('cMETA', 'CONFIG_UUID') - - if config.has_option('cREGION', 'REGION_CONFIG'): - self.region_config = config.getboolean( - 'cREGION', 'REGION_CONFIG') - - if config.has_option('cREGION', 'REGION_SERVICES_CREATE'): - self.region_services_create = config.getboolean( - 'cREGION', 'REGION_SERVICES_CREATE') - - # Timezone configuration - if config.has_option('cSYSTEM', 'TIMEZONE'): - self.timezone = config.get('cSYSTEM', 'TIMEZONE') - - # Storage configuration - if (config.has_option('cSTOR', 'DATABASE_STORAGE') or - config.has_option('cSTOR', 'IMAGE_STORAGE') or - config.has_option('cSTOR', 'BACKUP_STORAGE') or - config.has_option('cSTOR', 'IMAGE_CONVERSIONS_VOLUME') or - config.has_option('cSTOR', 'SHARED_INSTANCE_STORAGE') or - config.has_option('cSTOR', 'CINDER_BACKEND') or - config.has_option('cSTOR', 'CINDER_DEVICE') or - config.has_option('cSTOR', 'CINDER_LVM_TYPE') or - config.has_option('cSTOR', 'CINDER_STORAGE')): - msg = "DATABASE_STORAGE, IMAGE_STORAGE, BACKUP_STORAGE, " + \ - "IMAGE_CONVERSIONS_VOLUME, SHARED_INSTANCE_STORAGE, " + \ - "CINDER_BACKEND, CINDER_DEVICE, CINDER_LVM_TYPE, " + \ - "CINDER_STORAGE " + \ - "are not valid entries in config file." - raise ConfigFail(msg) - - # PXEBoot network configuration - if config.has_option('cPXEBOOT', 'PXEBOOT_SUBNET'): - self.separate_pxeboot_network = True - self.pxeboot_subnet = IPNetwork(config.get( - 'cPXEBOOT', 'PXEBOOT_SUBNET')) - if config.has_option('cPXEBOOT', 'PXEBOOT_START_ADDRESS'): - self.pxeboot_start_address = IPAddress(config.get( - 'cPXEBOOT', 'PXEBOOT_START_ADDRESS')) - if config.has_option('cPXEBOOT', 'PXEBOOT_END_ADDRESS'): - self.pxeboot_end_address = IPAddress(config.get( - 'cPXEBOOT', 'PXEBOOT_END_ADDRESS')) - if not self.pxeboot_start_address and \ - not self.pxeboot_end_address: - self.pxeboot_start_address = self.pxeboot_subnet[2] - self.pxeboot_end_address = self.pxeboot_subnet[-2] - self.use_entire_pxeboot_subnet = True - else: - self.use_entire_pxeboot_subnet = False - self.controller_pxeboot_address_0 = IPAddress(config.get( - 'cPXEBOOT', 'CONTROLLER_PXEBOOT_ADDRESS_0')) - self.controller_pxeboot_address_1 = IPAddress(config.get( - 'cPXEBOOT', 'CONTROLLER_PXEBOOT_ADDRESS_1')) - self.controller_pxeboot_floating_address = IPAddress( - config.get('cPXEBOOT', - 'CONTROLLER_PXEBOOT_FLOATING_ADDRESS')) - else: - self.default_pxeboot_config() - # Allow this to be optional for backwards compatibility - if config.has_option('cPXEBOOT', - 'PXECONTROLLER_FLOATING_HOSTNAME'): - self.pxecontroller_floating_hostname = config.get( - 'cPXEBOOT', 'PXECONTROLLER_FLOATING_HOSTNAME') - - # Management network configuration - if self.system_mode == sysinv_constants.SYSTEM_MODE_SIMPLEX and \ - not self.subcloud_config(): - # For AIO-SX, only the management subnet is configurable - # (unless this is a subcloud). 
-            # Management network configuration
-            if self.system_mode == sysinv_constants.SYSTEM_MODE_SIMPLEX and \
-                    not self.subcloud_config():
-                # For AIO-SX, only the management subnet is configurable
-                # (unless this is a subcloud).
-                if config.has_option('cMGMT', 'MANAGEMENT_SUBNET'):
-                    management_subnet = IPNetwork(config.get(
-                        'cMGMT', 'MANAGEMENT_SUBNET'))
-                else:
-                    management_subnet = IPNetwork(
-                        constants.DEFAULT_MGMT_ON_LOOPBACK_SUBNET_IPV4)
-                self.input_aio_simplex_management_config(
-                    management_subnet=management_subnet)
-            else:
-                self.management_interface_name = config.get(
-                    'cMGMT', 'MANAGEMENT_INTERFACE_NAME')
-                self.management_interface = config.get(
-                    'cMGMT', 'MANAGEMENT_INTERFACE')
-                self.management_mtu = config.get(
-                    'cMGMT', 'MANAGEMENT_MTU')
-                self.management_subnet = IPNetwork(config.get(
-                    'cMGMT', 'MANAGEMENT_SUBNET'))
-                if config.has_option('cMGMT', 'MANAGEMENT_GATEWAY_ADDRESS'):
-                    self.management_gateway_address = IPAddress(config.get(
-                        'cMGMT', 'MANAGEMENT_GATEWAY_ADDRESS'))
-                else:
-                    self.management_gateway_address = None
-                self.lag_management_interface = config.getboolean(
-                    'cMGMT', 'LAG_MANAGEMENT_INTERFACE')
-                if self.separate_pxeboot_network:
-                    self.management_vlan = config.get('cMGMT',
-                                                      'MANAGEMENT_VLAN')
-                if self.lag_management_interface:
-                    self.lag_management_interface_member0 = config.get(
-                        'cMGMT', 'MANAGEMENT_BOND_MEMBER_0')
-                    self.lag_management_interface_member1 = config.get(
-                        'cMGMT', 'MANAGEMENT_BOND_MEMBER_1')
-                    self.lag_management_interface_policy = config.get(
-                        'cMGMT', 'MANAGEMENT_BOND_POLICY')
-                self.controller_address_0 = IPAddress(config.get(
-                    'cMGMT', 'CONTROLLER_0_ADDRESS'))
-                self.controller_address_1 = IPAddress(config.get(
-                    'cMGMT', 'CONTROLLER_1_ADDRESS'))
-                self.controller_floating_address = IPAddress(config.get(
-                    'cMGMT', 'CONTROLLER_FLOATING_ADDRESS'))
-                if config.has_option('cMGMT', 'NFS_MANAGEMENT_ADDRESS_1'):
-                    self.nfs_management_address_1 = IPAddress(config.get(
-                        'cMGMT', 'NFS_MANAGEMENT_ADDRESS_1'))
-                else:
-                    self.nfs_management_address_1 = ''
-                if config.has_option('cMGMT', 'NFS_MANAGEMENT_ADDRESS_2'):
-                    self.nfs_management_address_2 = IPAddress(config.get(
-                        'cMGMT', 'NFS_MANAGEMENT_ADDRESS_2'))
-                else:
-                    self.nfs_management_address_2 = ''
-                self.controller_floating_hostname = config.get(
-                    'cMGMT', 'CONTROLLER_FLOATING_HOSTNAME')
-                self.controller_hostname_prefix = config.get(
-                    'cMGMT', 'CONTROLLER_HOSTNAME_PREFIX')
-                self.oamcontroller_floating_hostname = config.get(
-                    'cMGMT', 'OAMCONTROLLER_FLOATING_HOSTNAME')
-
-                if config.has_option('cMGMT', 'MANAGEMENT_MULTICAST_SUBNET'):
-                    self.management_multicast_subnet = IPNetwork(config.get(
-                        'cMGMT', 'MANAGEMENT_MULTICAST_SUBNET'))
-                else:
-                    if self.management_subnet.version == 6:
-                        # Management subnet is IPv6, so set the default value
-                        self.management_multicast_subnet = \
-                            IPNetwork(constants.DEFAULT_MULTICAST_SUBNET_IPV6)
-                    else:
-                        self.management_multicast_subnet = \
-                            IPNetwork(constants.DEFAULT_MULTICAST_SUBNET_IPV4)
-
-                self.management_interface_configured = True
-                if config.has_option('cMGMT', 'DYNAMIC_ADDRESS_ALLOCATION'):
-                    self.dynamic_address_allocation = config.getboolean(
-                        'cMGMT', 'DYNAMIC_ADDRESS_ALLOCATION')
-                else:
-                    self.dynamic_address_allocation = True
-                if config.has_option('cMGMT', 'MANAGEMENT_START_ADDRESS'):
-                    self.management_start_address = IPAddress(config.get(
-                        'cMGMT', 'MANAGEMENT_START_ADDRESS'))
-                if config.has_option('cMGMT', 'MANAGEMENT_END_ADDRESS'):
-                    self.management_end_address = IPAddress(config.get(
-                        'cMGMT', 'MANAGEMENT_END_ADDRESS'))
-                if not self.management_start_address and \
-                        not self.management_end_address:
-                    self.management_start_address = self.management_subnet[2]
-                    self.management_end_address = self.management_subnet[-2]
-                    self.use_entire_mgmt_subnet = True
-
self.use_entire_mgmt_subnet = True - - # Cluster network configuration - if self.kubernetes: - if config.has_section('cCLUSTER'): - self.cluster_host_interface_name = config.get( - 'cCLUSTER', 'CLUSTER_INTERFACE_NAME') - self.cluster_host_interface = config.get( - 'cCLUSTER', 'CLUSTER_INTERFACE') - self.cluster_host_mtu = config.get( - 'cCLUSTER', 'CLUSTER_MTU') - self.cluster_host_vlan = '' - if config.has_option('cCLUSTER', 'CLUSTER_VLAN'): - cvalue = config.get('cCLUSTER', 'CLUSTER_VLAN') - if cvalue != 'NC': - self.cluster_host_vlan = cvalue - self.lag_cluster_host_interface = config.getboolean( - 'cCLUSTER', 'LAG_CLUSTER_INTERFACE') - if self.lag_cluster_host_interface: - self.lag_cluster_host_interface_member0 = config.get( - 'cCLUSTER', 'CLUSTER_BOND_MEMBER_0') - self.lag_cluster_host_interface_member1 = config.get( - 'cCLUSTER', 'CLUSTER_BOND_MEMBER_1') - self.lag_cluster_host_interface_policy = config.get( - 'cCLUSTER', 'CLUSTER_BOND_POLICY') - self.cluster_host_subnet = IPNetwork(config.get( - 'cCLUSTER', 'CLUSTER_SUBNET')) - else: - self.cluster_host_interface_name = \ - self.management_interface_name - self.cluster_host_interface = self.management_interface - self.cluster_host_vlan = self.management_vlan - self.cluster_host_interface_configured = True - - # External OAM network configuration - self.external_oam_interface_name = config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_INTERFACE_NAME') - self.external_oam_interface = config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_INTERFACE') - self.external_oam_mtu = config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_MTU') - self.external_oam_vlan = '' - if config.has_option('cEXT_OAM', 'EXTERNAL_OAM_VLAN'): - cvalue = config.get('cEXT_OAM', 'EXTERNAL_OAM_VLAN') - if cvalue != 'NC': - self.external_oam_vlan = cvalue - self.external_oam_subnet = IPNetwork(config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_SUBNET')) - self.lag_external_oam_interface = config.getboolean( - 'cEXT_OAM', 'LAG_EXTERNAL_OAM_INTERFACE') - if self.lag_external_oam_interface: - self.lag_external_oam_interface_member0 = config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_BOND_MEMBER_0') - self.lag_external_oam_interface_member1 = config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_BOND_MEMBER_1') - self.lag_external_oam_interface_policy = config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_BOND_POLICY') - else: - self.lag_external_oam_interface_member0 = None - self.lag_external_oam_interface_member1 = None - self.lag_external_oam_interface_policy = None - self.lag_external_oam_interface_txhash = None - - if config.has_option('cEXT_OAM', 'EXTERNAL_OAM_GATEWAY_ADDRESS'): - self.external_oam_gateway_address = IPAddress(config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_GATEWAY_ADDRESS')) - else: - self.external_oam_gateway_address = None - self.external_oam_floating_address = IPAddress(config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_FLOATING_ADDRESS')) - self.external_oam_address_0 = IPAddress(config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_0_ADDRESS')) - self.external_oam_address_1 = IPAddress(config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_1_ADDRESS')) - - self.external_oam_interface_configured = True - - # DNS configuration - if self.kubernetes: - if config.has_section('cDNS'): - self.nameserver_addresses = ["", "", ""] - for x in range(0, len(self.nameserver_addresses)): - if config.has_option('cDNS', - 'NAMESERVER_' + str(x + 1)): - cvalue = config.get('cDNS', - 'NAMESERVER_' + str(x + 1)) - if cvalue != "NC" and cvalue != "": - self.nameserver_addresses[x] = \ - IPAddress(cvalue) - - # Docker Proxy Configuration - if config.has_section('cDOCKER_PROXY'): - 
self.enable_docker_proxy = True - if config.has_option('cDOCKER_PROXY', - 'DOCKER_HTTP_PROXY'): - self.docker_http_proxy = config.get( - 'cDOCKER_PROXY', 'DOCKER_HTTP_PROXY') - if config.has_option('cDOCKER_PROXY', - 'DOCKER_HTTPS_PROXY'): - self.docker_https_proxy = config.get( - 'cDOCKER_PROXY', 'DOCKER_HTTPS_PROXY') - if config.has_option('cDOCKER_PROXY', - 'DOCKER_NO_PROXY'): - self.docker_no_proxy = config.get( - 'cDOCKER_PROXY', 'DOCKER_NO_PROXY') - - # Docker Registry Configuration - if config.has_section('cDOCKER_REGISTRY'): - self.docker_use_default_registry = False - if config.has_option('cDOCKER_REGISTRY', - 'DOCKER_K8S_REGISTRY'): - self.docker_k8s_registry = config.get( - 'cDOCKER_REGISTRY', 'DOCKER_K8S_REGISTRY') - if config.has_option('cDOCKER_REGISTRY', - 'DOCKER_GCR_REGISTRY'): - self.docker_gcr_registry = config.get( - 'cDOCKER_REGISTRY', 'DOCKER_GCR_REGISTRY') - if config.has_option('cDOCKER_REGISTRY', - 'DOCKER_QUAY_REGISTRY'): - self.docker_quay_registry = config.get( - 'cDOCKER_REGISTRY', 'DOCKER_QUAY_REGISTRY') - if config.has_option('cDOCKER_REGISTRY', - 'DOCKER_DOCKER_REGISTRY'): - self.docker_docker_registry = config.get( - 'cDOCKER_REGISTRY', 'DOCKER_DOCKER_REGISTRY') - if config.has_option('cDOCKER_REGISTRY', - 'IS_SECURE_REGISTRY'): - self.is_secure_registry = config.getboolean( - 'cDOCKER_REGISTRY', 'IS_SECURE_REGISTRY') - else: - self.is_secure_registry = True - - # SDN Network configuration - if config.has_option('cSDN', 'ENABLE_SDN'): - raise ConfigFail("The option ENABLE_SDN is no longer " - "supported.") - - # Authentication configuration - if config.has_section('cAUTHENTICATION'): - if config.has_option('cAUTHENTICATION', 'ADMIN_PASSWORD'): - self.admin_password = config.get( - 'cAUTHENTICATION', 'ADMIN_PASSWORD') - - if self.admin_password == "" and not restore: - print("Admin password must be set in answer file") - raise ConfigFail("Admin password not set in answer file") - # the admin password will be validated - self.add_password_for_validation('ADMIN_PASSWORD', - self.admin_password) - - if config.has_option('cUSERS', 'SYSADMIN_SIG'): - raise ConfigFail("The option SYSADMIN_SIG is " - "no longer supported.") - - # Licensing configuration - if config.has_option('cLICENSING', 'LICENSE_FILE'): - raise ConfigFail("The option LICENSE_FILE is " - "no longer supported") - - # Security configuration - if config.has_option('cSECURITY', 'CONFIG_SYSADMIN_PW_AGE'): - raise ConfigFail("The option CONFIG_SYSADMIN_PW_AGE is " - "no longer supported.") - if config.has_option('cSECURITY', 'ENABLE_HTTPS'): - raise ConfigFail("The option ENABLE_HTTPS is " - "no longer supported.") - if config.has_option('cSECURITY', 'FIREWALL_RULES_FILE'): - raise ConfigFail("The option FIREWALL_RULES_FILE is " - "no longer supported") - - # Region configuration - if self.region_config: - self.region_1_name = config.get( - 'cREGION', 'REGION_1_NAME') - self.region_2_name = config.get( - 'cREGION', 'REGION_2_NAME') - self.admin_username = config.get( - 'cREGION', 'ADMIN_USER_NAME') - if config.has_option('cREGION', 'ADMIN_USER_DOMAIN'): - self.admin_user_domain = config.get( - 'cREGION', 'ADMIN_USER_DOMAIN') - if config.has_option('cREGION', 'ADMIN_PROJECT_NAME'): - self.admin_project_name = config.get( - 'cREGION', 'ADMIN_PROJECT_NAME') - else: - self.admin_project_name = config.get( - 'cREGION', 'ADMIN_TENANT_NAME') - if config.has_option('cREGION', 'ADMIN_PROJECT_DOMAIN'): - self.admin_project_domain = config.get( - 'cREGION', 'ADMIN_PROJECT_DOMAIN') - if 
config.has_option('cREGION', 'SERVICE_PROJECT_NAME'): - self.service_project_name = config.get( - 'cREGION', 'SERVICE_PROJECT_NAME') - else: - self.service_project_name = config.get( - 'cREGION', 'SERVICE_TENANT_NAME') - if config.has_option('cREGION', 'USER_DOMAIN_NAME'): - self.service_user_domain = config.get( - 'cREGION', 'USER_DOMAIN_NAME') - if config.has_option('cREGION', 'PROJECT_DOMAIN_NAME'): - self.service_project_domain = config.get( - 'cREGION', 'PROJECT_DOMAIN_NAME') - self.keystone_auth_uri = config.get( - 'cREGION', 'KEYSTONE_AUTH_URI') - self.keystone_identity_uri = config.get( - 'cREGION', 'KEYSTONE_IDENTITY_URI') - self.keystone_admin_uri = config.get( - 'cREGION', 'KEYSTONE_ADMIN_URI') - self.keystone_internal_uri = config.get( - 'cREGION', 'KEYSTONE_INTERNAL_URI') - self.keystone_public_uri = config.get( - 'cREGION', 'KEYSTONE_PUBLIC_URI') - self.keystone_service_name = config.get( - 'cREGION', 'KEYSTONE_SERVICE_NAME') - self.keystone_service_type = config.get( - 'cREGION', 'KEYSTONE_SERVICE_TYPE') - if config.has_option('cREGION', 'LDAP_REGION_NAME'): - self.ldap_region_name = config.get( - 'cREGION', 'LDAP_REGION_NAME') - if config.has_option('cREGION', 'LDAP_SERVICE_NAME'): - self.ldap_service_name = config.get( - 'cREGION', 'LDAP_SERVICE_NAME') - if config.has_option('cREGION', 'LDAP_SERVICE_URI'): - self.ldap_service_uri = config.get( - 'cREGION', 'LDAP_SERVICE_URI') - self.patching_ks_user_name = config.get( - 'cREGION', 'PATCHING_USER_NAME') - self.patching_ks_password = config.get( - 'cREGION', 'PATCHING_PASSWORD') - self.add_password_for_validation('PATCHING_PASSWORD', - self.patching_ks_password) - self.sysinv_ks_user_name = config.get( - 'cREGION', 'SYSINV_USER_NAME') - self.sysinv_ks_password = config.get( - 'cREGION', 'SYSINV_PASSWORD') - self.add_password_for_validation('SYSINV_PASSWORD', - self.sysinv_ks_password) - self.sysinv_service_name = config.get( - 'cREGION', 'SYSINV_SERVICE_NAME') - self.sysinv_service_type = config.get( - 'cREGION', 'SYSINV_SERVICE_TYPE') - self.mtce_ks_user_name = config.get( - 'cREGION', 'MTCE_USER_NAME') - self.mtce_ks_password = config.get( - 'cREGION', 'MTCE_PASSWORD') - self.add_password_for_validation('MTCE_PASSWORD', - self.mtce_ks_password) - - self.nfv_ks_user_name = config.get( - 'cREGION', 'NFV_USER_NAME') - self.nfv_ks_password = config.get( - 'cREGION', 'NFV_PASSWORD') - self.add_password_for_validation('NFV_PASSWORD', - self.nfv_ks_password) - self.fm_ks_user_name = config.get( - 'cREGION', 'FM_USER_NAME') - self.fm_ks_password = config.get( - 'cREGION', 'FM_PASSWORD') - self.add_password_for_validation('FM_PASSWORD', - self.fm_ks_password) - - self.barbican_ks_user_name = config.get( - 'cREGION', 'BARBICAN_USER_NAME') - self.barbican_ks_password = config.get( - 'cREGION', 'BARBICAN_PASSWORD') - self.add_password_for_validation('BARBICAN_PASSWORD', - self.barbican_ks_password) - - self.shared_services.append(self.keystone_service_type) - - if self.subcloud_config(): - self.system_controller_subnet = IPNetwork(config.get( - 'cREGION', 'SYSTEM_CONTROLLER_SUBNET')) - self.system_controller_floating_ip = config.get( - 'cREGION', 'SYSTEM_CONTROLLER_FLOATING_ADDRESS') - - except Exception: - print("Error parsing answer file") - raise - - return config_sections - - def display_config(self): - """Display configuration that will be applied.""" - print("\nThe following configuration will be applied:") - - print("\nSystem Configuration") - print("--------------------") - print("Time Zone: " + str(self.timezone)) - 
print("System mode: %s" % self.system_mode) - if self.system_type != sysinv_constants.TIS_AIO_BUILD: - dc_role_true = "no" - if (self.system_dc_role == - sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER): - dc_role_true = "yes" - print("Distributed Cloud System Controller: %s" % dc_role_true) - - print("\nPXEBoot Network Configuration") - print("-----------------------------") - if not self.separate_pxeboot_network: - print("Separate PXEBoot network not configured") - else: - print("PXEBoot subnet: " + str(self.pxeboot_subnet.cidr)) - print("PXEBoot floating address: " + - str(self.controller_pxeboot_floating_address)) - print("Controller 0 PXEBoot address: " + - str(self.controller_pxeboot_address_0)) - print("Controller 1 PXEBoot address: " + - str(self.controller_pxeboot_address_1)) - if not self.use_entire_pxeboot_subnet: - print("PXEBoot start address: " + - str(self.pxeboot_start_address)) - print("PXEBoot end address: " + str(self.pxeboot_end_address)) - print("PXEBoot Controller floating hostname: " + - str(self.pxecontroller_floating_hostname)) - - print("\nManagement Network Configuration") - print("--------------------------------") - print("Management interface name: " + self.management_interface_name) - print("Management interface: " + self.management_interface) - if self.management_vlan: - print("Management vlan: " + self.management_vlan) - print("Management interface MTU: " + self.management_mtu) - if self.lag_management_interface: - print("Management ae member 0: " + - self.lag_management_interface_member0) - print("Management ae member 1: " + - self.lag_management_interface_member1) - print("Management ae policy : " + - self.lag_management_interface_policy) - print("Management subnet: " + str(self.management_subnet.cidr)) - if self.management_gateway_address: - print("Management gateway address: " + - str(self.management_gateway_address)) - print("Controller floating address: " + - str(self.controller_floating_address)) - print("Controller 0 address: " + str(self.controller_address_0)) - print("Controller 1 address: " + str(self.controller_address_1)) - print("NFS Management Address 1: " + - str(self.nfs_management_address_1)) - print("NFS Management Address 2: " + - str(self.nfs_management_address_2)) - print("Controller floating hostname: " + - str(self.controller_floating_hostname)) - print("Controller hostname prefix: " + self.controller_hostname_prefix) - print("OAM Controller floating hostname: " + - str(self.oamcontroller_floating_hostname)) - if not self.use_entire_mgmt_subnet: - print("Management start address: " + - str(self.management_start_address)) - print("Management end address: " + - str(self.management_end_address)) - if self.dynamic_address_allocation: - print("Dynamic IP address allocation is selected") - print("Management multicast subnet: " + - str(self.management_multicast_subnet)) - - if self.kubernetes: - print("\nKubernetes Cluster Network Configuration") - print("----------------------------------------") - print("Cluster pod network subnet: " + - str(self.cluster_pod_subnet.cidr)) - print("Cluster service network subnet: " + - str(self.cluster_service_subnet.cidr)) - print("Cluster host interface name: " + - self.cluster_host_interface_name) - print("Cluster host interface: " + self.cluster_host_interface) - if self.cluster_host_vlan: - print("Cluster host vlan: " + self.cluster_host_vlan) - print("Cluster host interface MTU: " + self.cluster_host_mtu) - if self.lag_cluster_host_interface: - print("Cluster host ae member 0: " + - 
self.lag_cluster_host_interface_member0) - print("Cluster host ae member 1: " + - self.lag_cluster_host_interface_member1) - print("Cluster host ae policy : " + - self.lag_cluster_host_interface_policy) - print("Cluster host subnet: " + - str(self.cluster_host_subnet.cidr)) - - print("\nExternal OAM Network Configuration") - print("----------------------------------") - print("External OAM interface name: " + - self.external_oam_interface_name) - print("External OAM interface: " + self.external_oam_interface) - if self.external_oam_vlan: - print("External OAM vlan: " + self.external_oam_vlan) - print("External OAM interface MTU: " + self.external_oam_mtu) - if self.lag_external_oam_interface: - print("External OAM ae member 0: " + - self.lag_external_oam_interface_member0) - print("External OAM ae member 1: " + - self.lag_external_oam_interface_member1) - print("External OAM ae policy : " + - self.lag_external_oam_interface_policy) - print("External OAM subnet: " + str(self.external_oam_subnet)) - if self.external_oam_gateway_address: - print("External OAM gateway address: " + - str(self.external_oam_gateway_address)) - if self.system_mode != sysinv_constants.SYSTEM_MODE_SIMPLEX: - print("External OAM floating address: " + - str(self.external_oam_floating_address)) - print("External OAM 0 address: " + - str(self.external_oam_address_0)) - print("External OAM 1 address: " + - str(self.external_oam_address_1)) - else: - print("External OAM address: " + str(self.external_oam_address_0)) - - if self.kubernetes: - print("\nDNS Configuration") - print("-----------------") - dns_config = False - for x in range(0, len(self.nameserver_addresses)): - if self.nameserver_addresses[x]: - print("Nameserver " + str(x + 1) + ": " + - str(self.nameserver_addresses[x])) - dns_config = True - if not dns_config: - print("External DNS servers not configured") - if self.enable_docker_proxy: - print("\nDocker Proxy Configuration") - print("--------------------------") - if self.docker_http_proxy: - print("Docker HTTP proxy: " + self.docker_http_proxy) - if self.docker_https_proxy: - print("Docker HTTPS proxy: " + self.docker_https_proxy) - if self.docker_no_proxy: - print("Docker NO proxy: " + self.docker_no_proxy) - if not self.docker_use_default_registry: - print("\nDocker Registry Configuration") - print("-----------------------------") - if self.docker_k8s_registry: - print("Alternative registry to k8s.gcr.io: " + - self.docker_k8s_registry) - if self.docker_gcr_registry: - print("Alternative registry to gcr.io: " + - self.docker_gcr_registry) - if self.docker_quay_registry: - print("Alternative registry to quay.io: " + - self.docker_quay_registry) - if self.docker_docker_registry: - print("Alternative registry to docker.io: " + - self.docker_docker_registry) - print("Is registries secure: " + - str(self.is_secure_registry)) - - if self.region_config: - print("\nRegion Configuration") - print("--------------------") - print("Region 1 name: " + self.region_1_name) - print("Region 2 name: " + self.region_2_name) - print("Admin user name: " + self.admin_username) - print("Admin user domain: " + self.admin_user_domain) - print("Admin project name: " + self.admin_project_name) - print("Admin project domain: " + self.admin_project_domain) - print("Service project name: " + self.service_project_name) - print("Service user domain: " + self.service_user_domain) - print("Service project domain: " + self.service_project_domain) - print("Keystone auth URI: " + self.keystone_auth_uri) - print("Keystone identity URI: " + 
self.keystone_identity_uri) - print("Keystone admin URI: " + self.keystone_admin_uri) - print("Keystone internal URI: " + self.keystone_internal_uri) - print("Keystone public URI: " + self.keystone_public_uri) - print("Keystone service name: " + self.keystone_service_name) - print("Keystone service type: " + self.keystone_service_type) - print("LDAP service name: " + self.ldap_service_name) - print("LDAP region: " + self.ldap_region_name) - print("LDAP service URI:" + self.ldap_service_uri) - print("Patching user name: " + self.patching_ks_user_name) - print("Sysinv user name: " + self.sysinv_ks_user_name) - print("Sysinv service name: " + self.sysinv_service_name) - print("Sysinv service type: " + self.sysinv_service_type) - - if self.subcloud_config(): - print("\nSubcloud Configuration") - print("----------------------") - print("System controller subnet: " + - str(self.system_controller_subnet.cidr)) - print("System controller floating ip: " + - str(self.system_controller_floating_ip)) - - def write_config_file(self): - """Write configuration to a text file for later reference.""" - try: - os.makedirs(constants.CONFIG_WORKDIR, stat.S_IRWXU | stat.S_IRGRP | - stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) - except OSError as exc: - if exc.errno == errno.EEXIST and os.path.isdir( - constants.CONFIG_WORKDIR): - pass - else: - LOG.error("Failed to create config directory: %s", - constants.CONFIG_WORKDIR) - raise ConfigFail("Failed to write configuration file") - - try: - with open(constants.CGCS_CONFIG_FILE, 'w') as f: - # System configuration - f.write("[cSYSTEM]\n") - f.write("# System Configuration\n") - f.write("SYSTEM_MODE=" + str(self.system_mode) + "\n") - if self.system_dc_role is not None: - f.write("DISTRIBUTED_CLOUD_ROLE=" + - str(self.system_dc_role) + "\n") - # Time Zone configuration - f.write("TIMEZONE=" + str(self.timezone) + "\n") - - # PXEBoot network configuration - f.write("\n[cPXEBOOT]") - f.write("\n# PXEBoot Network Support Configuration\n") - if self.separate_pxeboot_network: - f.write("PXEBOOT_SUBNET=" + - str(self.pxeboot_subnet.cidr) + "\n") - f.write("CONTROLLER_PXEBOOT_FLOATING_ADDRESS=" + - str(self.controller_pxeboot_floating_address) + - "\n") - f.write("CONTROLLER_PXEBOOT_ADDRESS_0=" + - str(self.controller_pxeboot_address_0) + "\n") - f.write("CONTROLLER_PXEBOOT_ADDRESS_1=" + - str(self.controller_pxeboot_address_1) + "\n") - f.write("PXECONTROLLER_FLOATING_HOSTNAME=" + - str(self.pxecontroller_floating_hostname) + "\n") - - # Management network configuration - f.write("\n[cMGMT]") - f.write("\n# Management Network Configuration\n") - f.write("MANAGEMENT_INTERFACE_NAME=" + - self.management_interface_name + "\n") - f.write("MANAGEMENT_INTERFACE=" + self.management_interface + - "\n") - if self.separate_pxeboot_network: - f.write("MANAGEMENT_VLAN=" + self.management_vlan + "\n") - f.write("MANAGEMENT_MTU=" + self.management_mtu + "\n") - f.write("MANAGEMENT_SUBNET=" + - str(self.management_subnet.cidr) + "\n") - if self.management_gateway_address: - f.write("MANAGEMENT_GATEWAY_ADDRESS=" + - str(self.management_gateway_address) + "\n") - if self.lag_management_interface: - f.write("LAG_MANAGEMENT_INTERFACE=yes\n") - f.write("MANAGEMENT_BOND_MEMBER_0=" + - str(self.lag_management_interface_member0) + "\n") - f.write("MANAGEMENT_BOND_MEMBER_1=" + - str(self.lag_management_interface_member1) + "\n") - f.write("MANAGEMENT_BOND_POLICY=" + - str(self.lag_management_interface_policy) + "\n") - else: - f.write("LAG_MANAGEMENT_INTERFACE=no\n") - 
f.write("CONTROLLER_FLOATING_ADDRESS=" + - str(self.controller_floating_address) + "\n") - f.write("CONTROLLER_0_ADDRESS=" + - str(self.controller_address_0) + "\n") - f.write("CONTROLLER_1_ADDRESS=" + - str(self.controller_address_1) + "\n") - f.write("NFS_MANAGEMENT_ADDRESS_1=" + - str(self.nfs_management_address_1) + "\n") - f.write("NFS_MANAGEMENT_ADDRESS_2=" + - str(self.nfs_management_address_2) + "\n") - f.write("CONTROLLER_FLOATING_HOSTNAME=" + - str(self.controller_floating_hostname) + "\n") - f.write("CONTROLLER_HOSTNAME_PREFIX=" + - self.controller_hostname_prefix + "\n") - f.write("OAMCONTROLLER_FLOATING_HOSTNAME=" + - str(self.oamcontroller_floating_hostname) + "\n") - if self.dynamic_address_allocation: - f.write("DYNAMIC_ADDRESS_ALLOCATION=yes\n") - else: - f.write("DYNAMIC_ADDRESS_ALLOCATION=no\n") - if self.region_config or not self.use_entire_mgmt_subnet: - f.write("MANAGEMENT_START_ADDRESS=" + - str(self.management_start_address) + "\n") - f.write("MANAGEMENT_END_ADDRESS=" + - str(self.management_end_address) + "\n") - f.write("MANAGEMENT_MULTICAST_SUBNET=" + - str(self.management_multicast_subnet) + "\n") - - # Cluster host network configuration - if self.kubernetes: - f.write("\n[cCLUSTER]") - f.write("\n# Cluster Host Network Configuration\n") - f.write("CLUSTER_INTERFACE_NAME=" - + self.cluster_host_interface_name + "\n") - f.write("CLUSTER_INTERFACE=" - + self.cluster_host_interface + "\n") - if self.cluster_host_vlan: - f.write("CLUSTER_VLAN=" - + self.cluster_host_vlan + "\n") - else: - f.write("CLUSTER_VLAN=NC\n") - f.write("CLUSTER_MTU=" - + self.cluster_host_mtu + "\n") - f.write("CLUSTER_SUBNET=" + - str(self.cluster_host_subnet.cidr) + "\n") - if self.lag_cluster_host_interface: - f.write("LAG_CLUSTER_INTERFACE=yes\n") - f.write("CLUSTER_BOND_MEMBER_0=" + - str(self.lag_cluster_host_interface_member0) - + "\n") - f.write("CLUSTER_BOND_MEMBER_1=" + - str(self.lag_cluster_host_interface_member1) - + "\n") - f.write("CLUSTER_BOND_POLICY=" + - str(self.lag_cluster_host_interface_policy) - + "\n") - else: - f.write("LAG_CLUSTER_INTERFACE=no\n") - - # External OAM network configuration - f.write("\n[cEXT_OAM]") - f.write("\n# External OAM Network Configuration\n") - f.write("EXTERNAL_OAM_INTERFACE_NAME=" + - self.external_oam_interface_name + "\n") - f.write("EXTERNAL_OAM_INTERFACE=" + - self.external_oam_interface + "\n") - if self.external_oam_vlan: - f.write("EXTERNAL_OAM_VLAN=" - + self.external_oam_vlan + "\n") - else: - f.write("EXTERNAL_OAM_VLAN=NC\n") - f.write("EXTERNAL_OAM_MTU=" + - self.external_oam_mtu + "\n") - if self.lag_external_oam_interface: - f.write("LAG_EXTERNAL_OAM_INTERFACE=yes\n") - f.write("EXTERNAL_OAM_BOND_MEMBER_0=" + - str(self.lag_external_oam_interface_member0) + - "\n") - f.write("EXTERNAL_OAM_BOND_MEMBER_1=" + - str(self.lag_external_oam_interface_member1) + - "\n") - f.write("EXTERNAL_OAM_BOND_POLICY=" + - str(self.lag_external_oam_interface_policy) + - "\n") - else: - f.write("LAG_EXTERNAL_OAM_INTERFACE=no\n") - f.write("EXTERNAL_OAM_SUBNET=" + - str(self.external_oam_subnet) + "\n") - if self.external_oam_gateway_address: - f.write("EXTERNAL_OAM_GATEWAY_ADDRESS=" + - str(self.external_oam_gateway_address) + "\n") - f.write("EXTERNAL_OAM_FLOATING_ADDRESS=" + - str(self.external_oam_floating_address) + "\n") - f.write("EXTERNAL_OAM_0_ADDRESS=" + - str(self.external_oam_address_0) + "\n") - f.write("EXTERNAL_OAM_1_ADDRESS=" + - str(self.external_oam_address_1) + "\n") - - if self.kubernetes: - # DNS configuration - 
f.write("\n[cDNS]") - f.write("\n# DNS Configuration\n") - for x in range(0, len(self.nameserver_addresses)): - if self.nameserver_addresses[x]: - f.write("NAMESERVER_" + str(x + 1) + "=" + - str(self.nameserver_addresses[x]) + "\n") - else: - f.write("NAMESERVER_" + str(x + 1) + "=NC" + "\n") - - # Docker proxy configuration - if self.enable_docker_proxy: - f.write("\n[cDOCKER_PROXY]") - f.write("\n# Docker Proxy Configuration\n") - if self.docker_http_proxy: - f.write( - "DOCKER_HTTP_PROXY=" + - str(self.docker_http_proxy) + "\n") - if self.docker_https_proxy: - f.write( - "DOCKER_HTTPS_PROXY=" + - str(self.docker_https_proxy) + "\n") - if self.docker_no_proxy: - f.write( - "DOCKER_NO_PROXY=" + - str(self.docker_no_proxy) + "\n") - - # Docker registry configuration - if not self.docker_use_default_registry: - f.write("\n[cDOCKER_REGISTRY]") - f.write("\n# Docker Registry Configuration\n") - if self.docker_k8s_registry: - f.write( - "DOCKER_K8S_REGISTRY=" + - str(self.docker_k8s_registry) + "\n") - if self.docker_gcr_registry: - f.write( - "DOCKER_GCR_REGISTRY=" + - str(self.docker_gcr_registry) + "\n") - if self.docker_quay_registry: - f.write( - "DOCKER_QUAY_REGISTRY=" + - str(self.docker_quay_registry) + "\n") - if self.docker_docker_registry: - f.write( - "DOCKER_DOCKER_REGISTRY=" + - str(self.docker_docker_registry) + "\n") - f.write( - "IS_SECURE_REGISTRY=" + - str(self.is_secure_registry) + "\n") - - # Security configuration - f.write("\n[cSECURITY]") - - # Region configuration - f.write("\n[cREGION]") - f.write("\n# Region Configuration\n") - f.write("REGION_CONFIG=" + str(self.region_config) + "\n") - if self.region_config: - f.write("REGION_1_NAME=%s\n" % - self.region_1_name) - f.write("REGION_2_NAME=%s\n" % - self.region_2_name) - f.write("ADMIN_USER_NAME=%s\n" % - self.admin_username) - f.write("ADMIN_USER_DOMAIN=%s\n" % - self.admin_user_domain) - f.write("ADMIN_PROJECT_NAME=%s\n" % - self.admin_project_name) - f.write("ADMIN_PROJECT_DOMAIN=%s\n" % - self.admin_project_domain) - f.write("SERVICE_PROJECT_NAME=%s\n" % - self.service_project_name) - f.write("SERVICE_USER_DOMAIN=%s\n" % - self.service_user_domain) - f.write("SERVICE_PROJECT_DOMAIN=%s\n" % - self.service_project_domain) - f.write("KEYSTONE_AUTH_URI=%s\n" % - self.keystone_auth_uri) - f.write("KEYSTONE_IDENTITY_URI=%s\n" % - self.keystone_identity_uri) - f.write("KEYSTONE_ADMIN_URI=%s\n" % - self.keystone_admin_uri) - f.write("KEYSTONE_INTERNAL_URI=%s\n" % - self.keystone_internal_uri) - f.write("KEYSTONE_PUBLIC_URI=%s\n" % - self.keystone_public_uri) - f.write("KEYSTONE_SERVICE_NAME=%s\n" % - self.keystone_service_name) - f.write("KEYSTONE_SERVICE_TYPE=%s\n" % - self.keystone_service_type) - if self.ldap_service_name: - f.write("LDAP_SERVICE_NAME=%s\n" % - self.ldap_service_name) - if self.ldap_region_name: - f.write("LDAP_REGION_NAME=%s\n" % - self.ldap_region_name) - if self.ldap_service_uri: - f.write("LDAP_SERVICE_URI=%s\n" % - self.ldap_service_uri) - f.write("PATCHING_USER_NAME=%s\n" % - self.patching_ks_user_name) - f.write("PATCHING_PASSWORD=%s\n" % - self.patching_ks_password) - f.write("SYSINV_USER_NAME=%s\n" % - self.sysinv_ks_user_name) - f.write("SYSINV_PASSWORD=%s\n" % - self.sysinv_ks_password) - f.write("SYSINV_SERVICE_NAME=%s\n" % - self.sysinv_service_name) - f.write("SYSINV_SERVICE_TYPE=%s\n" % - self.sysinv_service_type) - f.write("NFV_USER_NAME=%s\n" % - self.nfv_ks_user_name) - f.write("NFV_PASSWORD=%s\n" % - self.nfv_ks_password) - f.write("MTCE_USER_NAME=%s\n" % - self.mtce_ks_user_name) - 
f.write("MTCE_PASSWORD=%s\n" % - self.mtce_ks_password) - f.write("FM_USER_NAME=%s\n" % - self.fm_ks_user_name) - f.write("FM_PASSWORD=%s\n" % - self.fm_ks_password) - f.write("BARBICAN_USER_NAME=%s\n" % - self.barbican_ks_user_name) - f.write("BARBICAN_PASSWORD=%s\n" % - self.barbican_ks_password) - - # Subcloud configuration - if self.subcloud_config(): - f.write("SUBCLOUD_CONFIG=%s\n" % - str(self.subcloud_config())) - f.write("SYSTEM_CONTROLLER_SUBNET=%s\n" % - str(self.system_controller_subnet)) - f.write("SYSTEM_CONTROLLER_FLOATING_ADDRESS=%s\n" % - str(self.system_controller_floating_ip)) - - except IOError: - LOG.error("Failed to open file: %s", constants.CGCS_CONFIG_FILE) - raise ConfigFail("Failed to write configuration file") - - def setup_pxeboot_files(self): - """Create links for default pxeboot configuration files""" - try: - if self.dynamic_address_allocation: - default_pxelinux = "/pxeboot/pxelinux.cfg.files/default" - efi_grub_cfg = "/pxeboot/pxelinux.cfg.files/grub.cfg" - else: - default_pxelinux = "/pxeboot/pxelinux.cfg.files/default.static" - efi_grub_cfg = "/pxeboot/pxelinux.cfg.files/grub.cfg.static" - subprocess.check_call(["ln", "-s", - default_pxelinux, - "/pxeboot/pxelinux.cfg/default"]) - subprocess.check_call(["ln", "-s", - efi_grub_cfg, - "/pxeboot/pxelinux.cfg/grub.cfg"]) - except subprocess.CalledProcessError: - LOG.error("Failed to create pxelinux.cfg/default or " - "grub.cfg symlink") - raise ConfigFail("Failed to persist config files") - - def verify_branding(self): - """ Verify the constraints for custom branding procedure """ - found = False - for f in os.listdir('/opt/branding'): - if f == 'applied': - continue - if not f.endswith('.tgz'): - raise ConfigFail('/opt/branding/%s is not a valid branding ' - 'file name, refer to the branding section ' - 'of the documentation' % f) - else: - if found: - raise ConfigFail( - 'Only one branding tarball is permitted in /opt/' - 'branding, refer to the branding section of the ' - 'documentation') - found = True - - def persist_local_config(self): - utils.persist_config() - - if os.path.isdir('/opt/banner'): - utils.apply_banner_customization() - - def finalize_controller_config(self): - - # restart maintenance to pick up configuration changes - utils.mtce_restart() - - self.setup_pxeboot_files() - - # pass control over to service management (SM) - utils.mark_config_complete() - - def wait_service_enable(self): - # wait for the following service groups to go active - services = [ - 'oam-services', - 'controller-services', - 'cloud-services', - 'patching-services', - 'directory-services', - 'web-services', - 'vim-services', - ] - - if self.system_dc_role == \ - sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER: - services.append('distributed-cloud-services') - - count = len(services) - egrep = '"^(%s)[[:space:]]*active[[:space:]]*active"' % \ - '|'.join(services) - cmd = 'test $(sm-dump | grep -E %s | wc -l) -eq %d' % (egrep, count) - - interval = 10 - for _ in range(0, constants.SERVICE_ENABLE_TIMEOUT, interval): - try: - subprocess.check_call(cmd, shell=True, - stderr=subprocess.STDOUT) - return - except subprocess.CalledProcessError: - pass - time.sleep(interval) - else: - raise ConfigFail('Timeout waiting for service enable') - - def store_admin_password(self): - """Store the supplied admin password in the temporary keyring vault""" - os.environ["XDG_DATA_HOME"] = "/tmp" - keyring.set_password("CGCS", self.admin_username, self.admin_password) - del os.environ["XDG_DATA_HOME"] - - def 
create_bootstrap_config(self): - self.store_admin_password() - if self.region_config: - self._store_service_password() - utils.create_static_config() - - def apply_bootstrap_manifest(self): - filename = None - try: - utils.apply_manifest(self.controller_address_0, - sysinv_constants.CONTROLLER, - 'bootstrap', - constants.HIERADATA_WORKDIR, - runtime_filename=filename) - except Exception as e: - LOG.exception(e) - raise ConfigFail( - 'Failed to apply bootstrap manifest. ' - 'See /var/log/puppet/latest/puppet.log for details.') - - def apply_controller_manifest(self): - try: - utils.apply_manifest(self.controller_address_0, - sysinv_constants.CONTROLLER, - 'controller', - constants.HIERADATA_PERMDIR) - except Exception as e: - LOG.exception(e) - raise ConfigFail( - 'Failed to apply controller manifest. ' - 'See /var/log/puppet/latest/puppet.log for details.') - - def add_password_for_validation(self, key, password): - """Add the config key and the password to be validated """ - if key and password: - for idx, stanza in enumerate(self.openstack_passwords): - if key in stanza: - # this password was previously added for validation, - # simply update the password value - self.openstack_passwords[idx][key] = password - return - self.openstack_passwords.append({key: password}) - - def process_validation_passwords(self, console=False): - """Validate the list of openstack passwords """ - if (self.os_password_rules_file and - not os.path.exists(self.os_password_rules_file)): - msg = ("Password rules file could not be found(%s) " - "Password rules cannot be applied" % - self.os_password_rules_file) - LOG.error(msg) - raise ConfigFail("Failed to apply Openstack password rules") - - if len(self.openstack_passwords) == 0: - # nothing to validate - return True - for stanza in self.openstack_passwords: - try: - ret, msg = validate_openstack_password( - stanza.values()[0], self.os_password_rules_file) - if not ret: - # one of the openstack passwords failed validation! - fail_msg = ("%s: %s" % (stanza.keys()[0], msg)) - if console: - print(textwrap.fill(fail_msg, 80)) - return False - raise ConfigFail(fail_msg) - except Exception as e: - # this implies an internal issue, either with - # the parsing rules or the validator. 
In the - # interest of robustness, we will proceed without - # password rules and possibly provision them - # later using service parameters - LOG.error("Failure on validating openstack password: %s" % e) - raise ConfigFail("%s" % e) - return True - - def _wait_system_config(self, client): - for _ in range(constants.SYSTEM_CONFIG_TIMEOUT): - try: - systems = client.sysinv.isystem.list() - if systems: - # only one system (default) - return systems[0] - except Exception: - pass - time.sleep(1) - else: - raise ConfigFail('Timeout waiting for default system ' - 'configuration') - - def _wait_ethernet_port_config(self, client, host): - count = 0 - for _ in range(constants.SYSTEM_CONFIG_TIMEOUT / 10): - try: - ports = client.sysinv.ethernet_port.list(host.uuid) - if ports and count == len(ports): - return ports - count = len(ports) - except Exception: - pass - time.sleep(10) - else: - raise ConfigFail('Timeout waiting for controller port ' - 'configuration') - - def _wait_disk_config(self, client, host): - count = 0 - for _ in range(constants.SYSTEM_CONFIG_TIMEOUT / 10): - try: - disks = client.sysinv.idisk.list(host.uuid) - if disks and count == len(disks): - return disks - count = len(disks) - except Exception: - pass - if disks: - time.sleep(1) # We don't need to wait that long - else: - time.sleep(10) - else: - raise ConfigFail('Timeout waiting for controller disk ' - 'configuration') - - def _wait_pv_config(self, client, host): - count = 0 - for _ in range(constants.SYSTEM_CONFIG_TIMEOUT / 10): - try: - pvs = client.sysinv.ipv.list(host.uuid) - if pvs and count == len(pvs): - return pvs - count = len(pvs) - except Exception: - pass - if pvs: - time.sleep(1) # We don't need to wait that long - else: - time.sleep(10) - else: - raise ConfigFail('Timeout waiting for controller PV ' - 'configuration') - - def _populate_system_config(self, client): - # Wait for pre-populated system - system = self._wait_system_config(client) - - # Update system attributes - capabilities = {'region_config': self.region_config, - 'vswitch_type': str(self.vswitch_type), - 'shared_services': str(self.shared_services), - 'sdn_enabled': self.enable_sdn, - 'https_enabled': self.enable_https} - - system_type = utils.get_system_type() - - region_name = constants.DEFAULT_REGION_NAME - if self.region_config: - region_name = self.region_2_name - - values = { - 'system_type': system_type, - 'system_mode': str(self.system_mode), - 'capabilities': capabilities, - 'timezone': str(self.timezone), - 'region_name': region_name, - 'service_project_name': self.service_project_name - } - if self.system_dc_role in \ - [sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER, - sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD]: - values['distributed_cloud_role'] = self.system_dc_role - if self.system_dc_role == \ - sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD: - # Set the system name to the subcloud name for subclouds - values['name'] = region_name - - patch = sysinv.dict_to_patch(values) - client.sysinv.isystem.update(system.uuid, patch) - - if self.region_config: - self._populate_region_config(client) - - def _populate_region_config(self, client): - self._populate_service_config(client) - - def _populate_service_config(self, client): - # populate service attributes in services table - - # Strip the version from the URIs - modified_identity_uri = (re.split(r'/v[0-9]', - self.keystone_identity_uri)[0]) - modified_auth_uri = (re.split(r'/v[0-9]', - self.keystone_auth_uri)[0]) - modified_admin_uri = (re.split(r'/v[0-9]', - 
self.keystone_admin_uri)[0]) - modified_internal_uri = (re.split(r'/v[0-9]', - self.keystone_internal_uri)[0]) - modified_public_uri = (re.split(r'/v[0-9]', - self.keystone_public_uri)[0]) - - # always populates keystone config - capabilities = {'admin_user_domain': self.admin_user_domain, - 'admin_project_domain': self.admin_project_domain, - 'service_user_domain': self.service_user_domain, - 'service_project_domain': self.service_project_domain, - 'admin_user_name': self.admin_username, - 'admin_project_name': self.admin_project_name, - 'auth_uri': modified_auth_uri, - 'auth_url': modified_identity_uri, - 'service_name': self.keystone_service_name, - 'service_type': self.keystone_service_type, - 'region_services_create': self.region_services_create} - - # TODO (aning): Once we eliminate duplicated endpoints of shared - # services for non-primary region(s), we can remove the following code - # that pass over the URLs to sysinv for puppet to create these - # endpoints. - if modified_admin_uri: - capabilities.update({'admin_uri': modified_admin_uri}) - if modified_internal_uri: - capabilities.update({'internal_uri': modified_internal_uri}) - if modified_public_uri: - capabilities.update({'public_uri': modified_public_uri}) - - values = {'name': 'keystone', - 'enabled': True, - 'region_name': self.region_1_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - # fm service config - capabilities = {'user_name': self.fm_ks_user_name} - values = {'name': "fm", - 'enabled': True, - 'region_name': self.region_2_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - # if ldap is a shared service - if self.ldap_service_uri: - capabilities = {'service_name': self.ldap_service_name} - capabilities.update({'service_uri': self.ldap_service_uri}) - values = {'name': self.ldap_service_name, - 'enabled': True, - 'region_name': self.ldap_region_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - # sysinv service config - capabilities = {'service_name': self.sysinv_service_name, - 'service_type': self.sysinv_service_type, - 'user_name': self.sysinv_ks_user_name} - values = {'name': self.sysinv_service_name, - 'enabled': True, - 'region_name': self.region_2_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - # populate patching service config - capabilities = {'service_name': 'patching', - 'service_type': 'patching', - 'user_name': self.patching_ks_user_name} - values = {'name': 'patching', - 'enabled': True, - 'region_name': self.region_2_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - # mtc service config - capabilities = {'user_name': self.mtce_ks_user_name} - values = {'name': "mtce", - 'enabled': True, - 'region_name': self.region_2_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - # nfv service config - capabilities = {'user_name': self.nfv_ks_user_name} - values = {'name': "vim", - 'enabled': True, - 'region_name': self.region_2_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - # barbican service config - capabilities = {'user_name': self.barbican_ks_user_name} - values = {'name': "barbican", - 'enabled': True, - 'region_name': self.region_2_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - def _store_service_password(self): - # store service password in the temporary 
keyring vault - - os.environ["XDG_DATA_HOME"] = "/tmp" - - keyring.set_password(self.sysinv_service_name, - constants.DEFAULT_SERVICE_PROJECT_NAME, - self.sysinv_ks_password) - - keyring.set_password('patching', - constants.DEFAULT_SERVICE_PROJECT_NAME, - self.patching_ks_password) - - keyring.set_password('mtce', constants.DEFAULT_SERVICE_PROJECT_NAME, - self.mtce_ks_password) - - keyring.set_password('vim', constants.DEFAULT_SERVICE_PROJECT_NAME, - self.nfv_ks_password) - - keyring.set_password('fm', constants.DEFAULT_SERVICE_PROJECT_NAME, - self.fm_ks_password) - - keyring.set_password('barbican', - constants.DEFAULT_SERVICE_PROJECT_NAME, - self.barbican_ks_password) - - del os.environ["XDG_DATA_HOME"] - - def _populate_network_config(self, client): - self._populate_mgmt_network(client) - self._populate_pxeboot_network(client) - self._populate_oam_network(client) - self._populate_multicast_network(client) - if self.kubernetes: - self._populate_cluster_host_network(client) - self._populate_cluster_pod_network(client) - self._populate_cluster_service_network(client) - if self.subcloud_config(): - self._populate_system_controller_network(client) - - def _populate_mgmt_network(self, client): - # create the address pool - values = { - 'name': 'management', - 'network': str(self.management_subnet.network), - 'prefix': self.management_subnet.prefixlen, - 'ranges': [(str(self.management_start_address), - str(self.management_end_address))], - } - if self.management_gateway_address: - values.update({ - 'gateway_address': str(self.management_gateway_address)}) - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_MGMT, - 'name': sysinv_constants.NETWORK_TYPE_MGMT, - 'dynamic': self.dynamic_address_allocation, - 'pool_uuid': pool.uuid, - } - - client.sysinv.network.create(**values) - - def _populate_pxeboot_network(self, client): - # create the address pool - values = { - 'name': 'pxeboot', - 'network': str(self.pxeboot_subnet.network), - 'prefix': self.pxeboot_subnet.prefixlen, - 'ranges': [(str(self.pxeboot_start_address), - str(self.pxeboot_end_address))], - } - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_PXEBOOT, - 'name': sysinv_constants.NETWORK_TYPE_PXEBOOT, - 'dynamic': True, - 'pool_uuid': pool.uuid, - } - client.sysinv.network.create(**values) - - def _populate_oam_network(self, client): - - # set default range if not specified as part of configuration - self.external_oam_start_address = self.external_oam_subnet[1] - self.external_oam_end_address = self.external_oam_subnet[-2] - - # create the address pool - values = { - 'name': 'oam', - 'network': str(self.external_oam_subnet.network), - 'prefix': self.external_oam_subnet.prefixlen, - 'ranges': [(str(self.external_oam_start_address), - str(self.external_oam_end_address))], - 'floating_address': str(self.external_oam_floating_address), - } - - if self.system_mode != sysinv_constants.SYSTEM_MODE_SIMPLEX: - values.update({ - 'controller0_address': str(self.external_oam_address_0), - 'controller1_address': str(self.external_oam_address_1), - }) - if self.external_oam_gateway_address: - values.update({ - 'gateway_address': str(self.external_oam_gateway_address), - }) - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_OAM, - 'name': sysinv_constants.NETWORK_TYPE_OAM, 
- 'dynamic': False, - 'pool_uuid': pool.uuid, - } - - client.sysinv.network.create(**values) - - def _populate_multicast_network(self, client): - # create the address pool - values = { - 'name': 'multicast-subnet', - 'network': str(self.management_multicast_subnet.network), - 'prefix': self.management_multicast_subnet.prefixlen, - 'ranges': [(str(self.management_multicast_subnet[1]), - str(self.management_multicast_subnet[-2]))], - } - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_MULTICAST, - 'name': sysinv_constants.NETWORK_TYPE_MULTICAST, - 'dynamic': False, - 'pool_uuid': pool.uuid, - } - client.sysinv.network.create(**values) - - def _populate_system_controller_network(self, client): - # create the address pool - values = { - 'name': 'system-controller-subnet', - 'network': str(self.system_controller_subnet.network), - 'prefix': self.system_controller_subnet.prefixlen, - 'floating_address': str(self.system_controller_floating_ip), - } - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_SYSTEM_CONTROLLER, - 'name': sysinv_constants.NETWORK_TYPE_SYSTEM_CONTROLLER, - 'dynamic': False, - 'pool_uuid': pool.uuid, - } - client.sysinv.network.create(**values) - - def _populate_cluster_host_network(self, client): - - # set default range if not specified as part of configuration - self.cluster_host_subnet_start_address = self.cluster_host_subnet[2] - self.cluster_host_subnet_end_address = self.cluster_host_subnet[-2] - - # create the address pool - values = { - 'name': 'cluster-host-subnet', - 'network': str(self.cluster_host_subnet.network), - 'prefix': self.cluster_host_subnet.prefixlen, - 'ranges': [(str(self.cluster_host_subnet_start_address), - str(self.cluster_host_subnet_end_address))], - } - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_CLUSTER_HOST, - 'name': sysinv_constants.NETWORK_TYPE_CLUSTER_HOST, - 'dynamic': self.dynamic_address_allocation, - 'pool_uuid': pool.uuid, - } - client.sysinv.network.create(**values) - - def _populate_cluster_pod_network(self, client): - # create the address pool - values = { - 'name': 'cluster-pod-subnet', - 'network': str(self.cluster_pod_subnet.network), - 'prefix': self.cluster_pod_subnet.prefixlen, - 'ranges': [(str(self.cluster_pod_subnet[1]), - str(self.cluster_pod_subnet[-2]))], - } - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_CLUSTER_POD, - 'name': sysinv_constants.NETWORK_TYPE_CLUSTER_POD, - 'dynamic': False, - 'pool_uuid': pool.uuid, - } - client.sysinv.network.create(**values) - - def _populate_cluster_service_network(self, client): - # create the address pool - values = { - 'name': 'cluster-service-subnet', - 'network': str(self.cluster_service_subnet.network), - 'prefix': self.cluster_service_subnet.prefixlen, - 'ranges': [(str(self.cluster_service_subnet[1]), - str(self.cluster_service_subnet[-2]))], - } - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_CLUSTER_SERVICE, - 'name': sysinv_constants.NETWORK_TYPE_CLUSTER_SERVICE, - 'dynamic': False, - 'pool_uuid': pool.uuid, - } - client.sysinv.network.create(**values) - - def _populate_network_addresses(self, client, pool, 
network, addresses): - for name, address in addresses.items(): - values = { - 'pool_uuid': pool.uuid, - 'address': str(address), - 'prefix': pool.prefix, - 'name': "%s-%s" % (name, network.type), - } - client.sysinv.address.create(**values) - - def _inventory_config_complete_wait(self, client, controller): - - # This is a gate for the generation of hiera data. - - # TODO: Really need this to detect when inventory is - # TODO: .. complete at the host level rather than each - # TODO: .. individual entity being populated as it is - # TODO: .. today for storage. - - # Wait for sysinv-agent to populate disks and PVs - self._wait_disk_config(client, controller) - self._wait_pv_config(client, controller) - - def _get_management_mac_address(self): - - if self.lag_management_interface: - ifname = self.lag_management_interface_member0 - else: - ifname = self.management_interface - - try: - filename = '/sys/class/net/%s/address' % ifname - with open(filename, 'r') as f: - return f.readline().rstrip() - except Exception: - raise ConfigFail("Failed to obtain mac address of %s" % ifname) - - def _populate_controller_config(self, client): - mgmt_mac = self._get_management_mac_address() - rootfs_device = get_device_from_function(get_rootfs_node) - boot_device = get_device_from_function(find_boot_device) - console = get_console_info() - tboot = get_tboot_info() - install_output = get_orig_install_mode() - - provision_state = sysinv.HOST_PROVISIONED - if utils.is_combined_load(): - provision_state = sysinv.HOST_PROVISIONING - - values = { - 'personality': sysinv.HOST_PERSONALITY_CONTROLLER, - 'hostname': self.controller_hostname_prefix + "0", - 'mgmt_ip': str(self.controller_address_0), - 'mgmt_mac': mgmt_mac, - 'administrative': sysinv.HOST_ADMIN_STATE_LOCKED, - 'operational': sysinv.HOST_OPERATIONAL_STATE_DISABLED, - 'availability': sysinv.HOST_AVAIL_STATE_OFFLINE, - 'invprovision': provision_state, - 'rootfs_device': rootfs_device, - 'boot_device': boot_device, - 'console': console, - 'tboot': tboot, - 'install_output': install_output, - } - controller = client.sysinv.ihost.create(**values) - return controller - - def _populate_interface_config(self, client, controller): - # Wait for Ethernet port inventory - self._wait_ethernet_port_config(client, controller) - - self._populate_management_interface(client, controller) - self._populate_oam_interface(client, controller) - if self.kubernetes: - self._populate_cluster_host_interface(client, controller) - - def _update_interface_config(self, client, values): - host_uuid = values.get('ihost_uuid') - ifname = values.get('ifname') - interfaces = client.sysinv.iinterface.list(host_uuid) - for interface in interfaces: - if interface.ifname == ifname: - patch = sysinv.dict_to_patch(values) - client.sysinv.iinterface.update(interface.uuid, patch) - break - else: - raise ConfigFail("Failed to find interface %s" % ifname) - - def _get_interface(self, client, host_uuid, ifname): - interfaces = client.sysinv.iinterface.list(host_uuid) - for interface in interfaces: - if interface.ifname == ifname: - return interface - else: - raise ConfigFail("Failed to find interface %s" % ifname) - - def _get_interface_aemode(self, aemode): - """Convert the AE mode to an AE mode supported by the interface API""" - if aemode == constants.LAG_MODE_ACTIVE_BACKUP: - return 'active_standby' - elif aemode == constants.LAG_MODE_BALANCE_XOR: - return 'balanced' - elif aemode == constants.LAG_MODE_8023AD: - return '802.3ad' - else: - raise ConfigFail("Unknown interface AE mode: %s" % aemode) 
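The lookup helpers in this module (_update_interface_config above, _get_network just below) rely on Python's for/else: the else suite runs only when the loop completes without hitting break, which turns a missed match into a ConfigFail. A standalone illustration of the idiom, with hypothetical data:

class ConfigFail(Exception):
    """Stand-in for this module's ConfigFail exception."""

def find_interface(interfaces, ifname):
    # for/else: 'else' runs only if the loop was never exited via break,
    # i.e. when no interface matched.
    for interface in interfaces:
        if interface['ifname'] == ifname:
            break
    else:
        raise ConfigFail("Failed to find interface %s" % ifname)
    return interface

print(find_interface([{'ifname': 'mgmt0'}, {'ifname': 'oam0'}], 'oam0'))
# find_interface([], 'data0') would raise ConfigFail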
-
-    def _get_interface_txhashpolicy(self, aemode):
-        """Convert the AE mode to a L2 hash supported by the interface API"""
-        if aemode == constants.LAG_MODE_ACTIVE_BACKUP:
-            return None
-        elif aemode == constants.LAG_MODE_BALANCE_XOR:
-            return constants.LAG_TXHASH_LAYER2
-        elif aemode == constants.LAG_MODE_8023AD:
-            return constants.LAG_TXHASH_LAYER2
-        else:
-            raise ConfigFail("Unknown interface AE mode: %s" % aemode)
-
-    def _get_network(self, client, network_type):
-        networks = client.sysinv.network.list()
-        for net in networks:
-            if net.type == network_type:
-                return net
-        else:
-            raise ConfigFail("Failed to find network %s" % network_type)
-
-    def _get_interface_mtu(self, ifname):
-        """
-        This function determines the MTU value that must be configured on an
-        interface. It accounts for the possibility that different network
-        types share the same interface, in which case the underlying
-        interface must have an MTU equal to or greater than that of any
-        VLAN interface above it. The input semantic checks enforce specific
-        precedence rules (e.g., the cluster-host MTU must be less than or
-        equal to the mgmt MTU if cluster-host is a VLAN over mgmt), but this
-        function allows for any permutation to avoid issues if the semantic
-        checks are loosened or if the ini input method allows different
-        possibilities.
-
-        This function must not be used for VLAN interfaces. A VLAN interface
-        has no requirement to be large enough to accommodate another VLAN
-        above it, so for those interfaces we simply use the interface MTU as
-        specified by the user.
-        """
-        value = 0
-        if self.management_interface_configured:
-            if ifname == self.management_interface:
-                value = max(value, self.management_mtu)
-        if self.cluster_host_interface_configured:
-            if ifname == self.cluster_host_interface:
-                value = max(value, self.cluster_host_mtu)
-        if self.external_oam_interface_configured:
-            if ifname == self.external_oam_interface:
-                value = max(value, self.external_oam_mtu)
-        assert value != 0
-        return value
-
-    def _populate_management_interface(self, client, controller):
-        """Configure the management/pxeboot interface(s)"""
-
-        interface_class = sysinv_constants.INTERFACE_CLASS_PLATFORM
-        if self.management_vlan:
-            network = self._get_network(client,
-                                        sysinv_constants.NETWORK_TYPE_PXEBOOT)
-        else:
-            network = self._get_network(client,
-                                        sysinv_constants.NETWORK_TYPE_MGMT)
-
-        if self.lag_management_interface:
-            members = [self.lag_management_interface_member0]
-            if self.lag_management_interface_member1:
-                members.append(self.lag_management_interface_member1)
-
-            aemode = self._get_interface_aemode(
-                self.lag_management_interface_policy)
-
-            txhashpolicy = self._get_interface_txhashpolicy(
-                self.lag_management_interface_policy)
-
-            values = {
-                'ihost_uuid': controller.uuid,
-                'ifname': self.management_interface,
-                'imtu': self.management_mtu,
-                'iftype': 'ae',
-                'aemode': aemode,
-                'txhashpolicy': txhashpolicy,
-                'ifclass': interface_class,
-                'networks': [str(network.id)],
-                'uses': members,
-            }
-
-            client.sysinv.iinterface.create(**values)
-        elif self.system_mode == sysinv_constants.SYSTEM_MODE_SIMPLEX and \
-                not self.subcloud_config():
-            # Create the management interface record for the loopback interface
-            values = {
-                'ihost_uuid': controller.uuid,
-                'ifname': self.management_interface,
-                'imtu': self.management_mtu,
-                'iftype': sysinv_constants.INTERFACE_TYPE_VIRTUAL,
-                'ifclass': interface_class,
-                'networks': [str(network.id)],
-            }
-            client.sysinv.iinterface.create(**values)
-        else:
-            # update MTU or network type of interface
-            values = {
-                'ihost_uuid': controller.uuid,
-                'ifname': self.management_interface,
-                'imtu': self.management_mtu,
-                'ifclass': interface_class,
-                'networks': str(network.id),
-            }
-            self._update_interface_config(client, values)
-
-        if self.management_vlan:
-            mgmt_network = self._get_network(
-                client, sysinv_constants.NETWORK_TYPE_MGMT)
-            values = {
-                'ihost_uuid': controller.uuid,
-                'ifname': self.management_interface_name,
-                'imtu': self.management_mtu,
-                'iftype': sysinv_constants.INTERFACE_TYPE_VLAN,
-                'ifclass': interface_class,
-                'networks': [str(mgmt_network.id)],
-                'uses': [self.management_interface],
-                'vlan_id': self.management_vlan,
-            }
-            client.sysinv.iinterface.create(**values)
-        elif self.subcloud_config():
-            # Create a route to the system controller. For the management
-            # VLAN case, the route is created during interface creation
-            # when this is a subcloud.
-            management_interface = self._get_interface(
-                client, controller.uuid, self.management_interface_name)
-            values = {
-                'interface_uuid': management_interface.uuid,
-                'network': str(self.system_controller_subnet.ip),
-                'prefix': self.system_controller_subnet.prefixlen,
-                'gateway': str(self.management_gateway_address),
-                'metric': 1,
-            }
-            client.sysinv.route.create(**values)
-
-    def _populate_default_storage_backend(self, client, controller):
-        # Create the Ceph monitor for controller-0
-        values = {'ihost_uuid': controller.uuid}
-        client.sysinv.ceph_mon.create(**values)
-
-        # Create the Ceph default backend
-        values = {'confirmed': True}
-        client.sysinv.storage_ceph.create(**values)
-
-    def _populate_cluster_host_interface(self, client, controller):
-        """Configure the cluster host interface(s)"""
-        network = self._get_network(client,
-                                    sysinv_constants.NETWORK_TYPE_CLUSTER_HOST)
-
-        if (self.lag_cluster_host_interface and
-                self.cluster_host_interface_name !=
-                self.management_interface_name):
-            members = [self.lag_cluster_host_interface_member0]
-            if self.lag_cluster_host_interface_member1:
-                members.append(self.lag_cluster_host_interface_member1)
-
-            aemode = self._get_interface_aemode(
-                self.lag_cluster_host_interface_policy)
-
-            txhashpolicy = self._get_interface_txhashpolicy(
-                self.lag_cluster_host_interface_policy)
-
-            values = {
-                'ihost_uuid': controller.uuid,
-                'ifname': self.cluster_host_interface,
-                'imtu': self._get_interface_mtu(self.cluster_host_interface),
-                'iftype': sysinv_constants.INTERFACE_TYPE_AE,
-                'aemode': aemode,
-                'txhashpolicy': txhashpolicy,
-                'ifclass': sysinv_constants.INTERFACE_CLASS_PLATFORM,
-                'networks': [str(network.id)],
-                'uses': members,
-            }
-            client.sysinv.iinterface.create(**values)
-        else:
-            # update MTU or network type of interface
-            values = {
-                'ihost_uuid': controller.uuid,
-                'ifname': self.cluster_host_interface,
-            }
-            values.update({
-                'imtu': self._get_interface_mtu(self.cluster_host_interface)
-            })
-            if not self.cluster_host_vlan:
-                values.update({
-                    'ifclass': sysinv_constants.INTERFACE_CLASS_PLATFORM,
-                    'networks_to_add': str(network.id),
-                })
-            self._update_interface_config(client, values)
-
-        if self.cluster_host_vlan:
-            if (self.cluster_host_interface_name !=
-                    self.management_interface_name):
-                values = {
-                    'ihost_uuid': controller.uuid,
-                    'ifname': self.cluster_host_interface_name,
-                    'imtu': self.cluster_host_mtu,
-                    'iftype': sysinv_constants.INTERFACE_TYPE_VLAN,
-                    'ifclass': sysinv_constants.INTERFACE_CLASS_PLATFORM,
-                    'networks': [str(network.id)],
-                    'uses': [self.cluster_host_interface],
-                    'vlan_id': self.cluster_host_vlan,
-                }
- 
client.sysinv.iinterface.create(**values) - else: - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.cluster_host_interface_name, - 'ifclass': sysinv_constants.INTERFACE_CLASS_PLATFORM, - 'networks_to_add': str(network.id), - } - self._update_interface_config(client, values) - - def _populate_oam_interface(self, client, controller): - """Configure the OAM interface(s)""" - - network = self._get_network(client, - sysinv_constants.NETWORK_TYPE_OAM) - - if self.lag_external_oam_interface: - members = [self.lag_external_oam_interface_member0] - if self.lag_external_oam_interface_member1: - members.append(self.lag_external_oam_interface_member1) - - aemode = self._get_interface_aemode( - self.lag_external_oam_interface_policy) - - txhashpolicy = self._get_interface_txhashpolicy( - self.lag_external_oam_interface_policy) - - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.external_oam_interface, - 'imtu': self._get_interface_mtu(self.external_oam_interface), - 'iftype': sysinv_constants.INTERFACE_TYPE_AE, - 'aemode': aemode, - 'txhashpolicy': txhashpolicy, - 'ifclass': sysinv_constants.INTERFACE_CLASS_PLATFORM, - 'networks': [str(network.id)], - 'uses': members, - } - - client.sysinv.iinterface.create(**values) - else: - # update MTU or network type of interface - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.external_oam_interface, - 'ifclass': sysinv_constants.INTERFACE_CLASS_PLATFORM, - } - values.update({ - 'imtu': self._get_interface_mtu(self.external_oam_interface) - }) - if not self.external_oam_vlan: - values.update({ - 'networks': str(network.id), - }) - - self._update_interface_config(client, values) - - if self.external_oam_vlan: - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.external_oam_interface_name, - 'imtu': self.external_oam_mtu, - 'iftype': sysinv_constants.INTERFACE_TYPE_VLAN, - 'ifclass': sysinv_constants.INTERFACE_CLASS_PLATFORM, - 'networks': [str(network.id)], - 'uses': [self.external_oam_interface], - 'vlan_id': self.external_oam_vlan, - } - client.sysinv.iinterface.create(**values) - - def _populate_load_config(self, client): - patch = {'software_version': SW_VERSION, "compatible_version": "N/A", - "required_patches": "N/A"} - client.sysinv.load.create(**patch) - - def _populate_dns_config(self, client): - # Retrieve the list of dns servers to get the uuid - dns_list = client.sysinv.idns.list() - dns_record = dns_list[0] - values = { - 'nameservers': self.get_dns_servers(), - 'action': 'apply' - } - patch = sysinv.dict_to_patch(values) - client.sysinv.idns.update(dns_record.uuid, patch) - - def _populate_docker_config(self, client): - if self.enable_docker_proxy: - proxy_parameter = {} - if self.docker_http_proxy: - proxy_parameter['http_proxy'] = self.docker_http_proxy - if self.docker_https_proxy: - proxy_parameter['https_proxy'] = self.docker_https_proxy - if self.docker_no_proxy: - proxy_parameter['no_proxy'] = self.docker_no_proxy - - if proxy_parameter: - client.sysinv.service_parameter.create( - sysinv_constants.SERVICE_TYPE_DOCKER, - sysinv_constants.SERVICE_PARAM_SECTION_DOCKER_PROXY, - None, - None, - proxy_parameter - ) - - if not self.docker_use_default_registry: - registry_parameter = {} - if self.docker_k8s_registry: - registry_parameter['k8s'] = \ - self.docker_k8s_registry - - if self.docker_gcr_registry: - registry_parameter['gcr'] = \ - self.docker_gcr_registry - - if self.docker_quay_registry: - registry_parameter['quay'] = \ - self.docker_quay_registry - - if self.docker_docker_registry: - 
registry_parameter['docker'] = \ - self.docker_docker_registry - - if not self.is_secure_registry: - registry_parameter['insecure_registry'] = "True" - - if registry_parameter: - client.sysinv.service_parameter.create( - sysinv_constants.SERVICE_TYPE_DOCKER, - sysinv_constants. - SERVICE_PARAM_SECTION_DOCKER_REGISTRY, - None, - None, - registry_parameter - ) - - def populate_initial_config(self): - """Populate initial system inventory configuration""" - try: - with openstack.OpenStack() as client: - self._populate_system_config(client) - self._populate_load_config(client) - self._populate_network_config(client) - if self.kubernetes: - self._populate_dns_config(client) - self._populate_docker_config(client) - controller = self._populate_controller_config(client) - # ceph_mon config requires controller host to be created - self._inventory_config_complete_wait(client, controller) - self._populate_interface_config(client, controller) - self._populate_default_storage_backend(client, controller) - - except (KeystoneFail, SysInvFail) as e: - LOG.exception(e) - raise ConfigFail("Failed to provision initial system " - "configuration") - - def create_puppet_config(self): - try: - utils.create_system_config() - utils.create_host_config() - except Exception as e: - LOG.exception(e) - raise ConfigFail("Failed to update hiera configuration") - - def provision(self, configfile): - """Perform system provisioning only""" - if not self.labmode: - raise ConfigFail("System provisioning only available with " - "lab mode enabled") - if not configfile: - raise ConfigFail("Missing input configuration file") - self.input_config_from_file(configfile) - self.populate_initial_config() - - def configure(self, configfile=None, default_config=False, - display_config=True): - """Configure initial controller node.""" - if (os.path.exists(constants.CGCS_CONFIG_FILE) or - os.path.exists(constants.CONFIG_PERMDIR) or - os.path.exists(constants.INITIAL_CONFIG_COMPLETE_FILE)): - raise ConfigFail("Configuration has already been done " - "and cannot be repeated.") - - try: - with open(os.devnull, "w") as fnull: - subprocess.check_call(["vgdisplay", "cgts-vg"], stdout=fnull, - stderr=fnull) - except subprocess.CalledProcessError: - LOG.error("The cgts-vg volume group was not found") - raise ConfigFail("Volume groups not configured") - - if default_config: - self.default_config() - elif not configfile: - self.input_config() - else: - self.input_config_from_file(configfile) - - if display_config: - self.display_config() - - # Validate Openstack passwords loaded in via config - if configfile: - self.process_validation_passwords() - - if not configfile and not default_config: - while True: - user_input = input( - "\nApply the above configuration? 
[y/n]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - break - elif user_input.lower() == 'n': - raise UserQuit - else: - print("Invalid choice") - - # Verify at most one branding tarball is present - self.verify_branding() - - self.write_config_file() - utils.write_simplex_flag() - - print("\nApplying configuration (this will take several minutes):") - - runner = progress.ProgressRunner() - runner.add(self.create_bootstrap_config, - 'Creating bootstrap configuration') - runner.add(self.apply_bootstrap_manifest, - "Applying bootstrap manifest") - runner.add(self.persist_local_config, - 'Persisting local configuration') - runner.add(self.populate_initial_config, - 'Populating initial system inventory') - runner.add(self.create_puppet_config, - 'Creating system configuration') - runner.add(self.apply_controller_manifest, - 'Applying controller manifest') - runner.add(self.finalize_controller_config, - 'Finalize controller configuration') - runner.add(self.wait_service_enable, - 'Waiting for service activation') - runner.run() - - def check_required_interfaces_status(self): - if self.management_interface_configured: - if not is_interface_up(self.management_interface): - print('') - if (self.system_mode != - sysinv_constants.SYSTEM_MODE_DUPLEX_DIRECT - and self.system_mode != - sysinv_constants.SYSTEM_MODE_SIMPLEX): - print(textwrap.fill( - "Warning: The interface (%s) is not operational " - "and some platform services will not start properly. " - "Bring up the interface to enable the required " - "services." % self.management_interface, 80)) - - if self.cluster_host_interface_configured: - if not is_interface_up(self.cluster_host_interface): - if self.system_mode != \ - sysinv_constants.SYSTEM_MODE_DUPLEX_DIRECT: - print('') - print(textwrap.fill( - "Warning: The interface (%s) is not operational " - "and some platform services will not start properly. " - "Bring up the interface to enable the required " - "services." % self.cluster_host_interface, 80)) - - if self.external_oam_interface_configured: - if not is_interface_up(self.external_oam_interface): - print('') - print(textwrap.fill( - "Warning: The interface (%s) is not operational " - "and some OAM services will not start properly. " - "Bring up the interface to enable the required " - "services." % self.external_oam_interface, 80)) diff --git a/controllerconfig/controllerconfig/controllerconfig/openstack.py b/controllerconfig/controllerconfig/controllerconfig/openstack.py deleted file mode 100755 index ab25ae2779..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/openstack.py +++ /dev/null @@ -1,285 +0,0 @@ -# -# Copyright (c) 2014-2015 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -OpenStack -""" - -import os -import time -import subprocess - -from controllerconfig.common import log -from controllerconfig.common.exceptions import SysInvFail -from controllerconfig.common.rest_api_utils import get_token -from controllerconfig import sysinv_api as sysinv - - -LOG = log.get_logger(__name__) - -KEYSTONE_AUTH_SERVER_RETRY_CNT = 60 -KEYSTONE_AUTH_SERVER_WAIT = 1 # 1sec wait per retry - - -class OpenStack(object): - - def __init__(self): - self.admin_token = None - self.conf = {} - self._sysinv = None - - source_command = 'source /etc/platform/openrc && env' - - with open(os.devnull, "w") as fnull: - proc = subprocess.Popen( - ['bash', '-c', source_command], - stdout=subprocess.PIPE, stderr=fnull) - - for line in proc.stdout: - key, _, value = line.partition("=") - if key == 'OS_USERNAME': - self.conf['admin_user'] = value.strip() - elif key == 'OS_PASSWORD': - self.conf['admin_pwd'] = value.strip() - elif key == 'OS_PROJECT_NAME': - self.conf['admin_tenant'] = value.strip() - elif key == 'OS_AUTH_URL': - self.conf['auth_url'] = value.strip() - elif key == 'OS_REGION_NAME': - self.conf['region_name'] = value.strip() - elif key == 'OS_USER_DOMAIN_NAME': - self.conf['user_domain'] = value.strip() - elif key == 'OS_PROJECT_DOMAIN_NAME': - self.conf['project_domain'] = value.strip() - - proc.communicate() - - def __enter__(self): - if not self._connect(): - raise Exception('Failed to connect') - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self._disconnect() - - def __del__(self): - self._disconnect() - - def _connect(self): - """ Connect to an OpenStack instance """ - - if self.admin_token is not None: - self._disconnect() - - # Try to obtain an admin token from keystone - for _ in range(KEYSTONE_AUTH_SERVER_RETRY_CNT): - self.admin_token = get_token(self.conf['auth_url'], - self.conf['admin_tenant'], - self.conf['admin_user'], - self.conf['admin_pwd'], - self.conf['user_domain'], - self.conf['project_domain']) - if self.admin_token: - break - time.sleep(KEYSTONE_AUTH_SERVER_WAIT) - - return self.admin_token is not None - - def _disconnect(self): - """ Disconnect from an OpenStack instance """ - self.admin_token = None - - def lock_hosts(self, exempt_hostnames=None, progress_callback=None, - timeout=60): - """ Lock hosts of an OpenStack instance except for host names - in the exempt list - """ - failed_hostnames = [] - - if exempt_hostnames is None: - exempt_hostnames = [] - - hosts = sysinv.get_hosts(self.admin_token, self.conf['region_name']) - if not hosts: - if progress_callback is not None: - progress_callback(0, 0, None, None) - return - - wait = False - host_i = 0 - - for host in hosts: - if host.name in exempt_hostnames: - continue - - if host.is_unlocked(): - if not host.force_lock(self.admin_token, - self.conf['region_name']): - failed_hostnames.append(host.name) - LOG.warning("Could not lock %s" % host.name) - else: - wait = True - else: - host_i += 1 - if progress_callback is not None: - progress_callback(len(hosts), host_i, - ('locking %s' % host.name), - 'DONE') - - if wait and timeout > 5: - time.sleep(5) - timeout -= 5 - - for _ in range(0, timeout): - wait = False - - for host in hosts: - if host.name in exempt_hostnames: - continue - - if (host.name not in failed_hostnames) and host.is_unlocked(): - host.refresh_data(self.admin_token, - self.conf['region_name']) - - if host.is_locked(): - LOG.info("Locked %s" % host.name) - host_i += 1 - if progress_callback is not None: - 
progress_callback(len(hosts), host_i, - ('locking %s' % host.name), - 'DONE') - else: - LOG.info("Waiting for lock of %s" % host.name) - wait = True - - if not wait: - break - - time.sleep(1) - else: - failed_hostnames.append(host.name) - LOG.warning("Wait failed for lock of %s" % host.name) - - return failed_hostnames - - def power_off_hosts(self, exempt_hostnames=None, progress_callback=None, - timeout=60): - """ Power-off hosts of an OpenStack instance except for host names - in the exempt list - """ - - if exempt_hostnames is None: - exempt_hostnames = [] - - hosts = sysinv.get_hosts(self.admin_token, self.conf['region_name']) - - hosts[:] = [host for host in hosts if host.support_power_off()] - if not hosts: - if progress_callback is not None: - progress_callback(0, 0, None, None) - return - - wait = False - host_i = 0 - - for host in hosts: - if host.name in exempt_hostnames: - continue - - if host.is_powered_on(): - if not host.power_off(self.admin_token, - self.conf['region_name']): - raise SysInvFail("Could not power-off %s" % host.name) - wait = True - else: - host_i += 1 - if progress_callback is not None: - progress_callback(len(hosts), host_i, - ('powering off %s' % host.name), - 'DONE') - - if wait and timeout > 5: - time.sleep(5) - timeout -= 5 - - for _ in range(0, timeout): - wait = False - - for host in hosts: - if host.name in exempt_hostnames: - continue - - if host.is_powered_on(): - host.refresh_data(self.admin_token, - self.conf['region_name']) - - if host.is_powered_off(): - LOG.info("Powered-Off %s" % host.name) - host_i += 1 - if progress_callback is not None: - progress_callback(len(hosts), host_i, - ('powering off %s' % host.name), - 'DONE') - else: - LOG.info("Waiting for power-off of %s" % host.name) - wait = True - - if not wait: - break - - time.sleep(1) - else: - failed_hosts = [h.name for h in hosts if h.is_powered_on()] - msg = "Wait timeout for power-off of %s" % failed_hosts - LOG.info(msg) - raise SysInvFail(msg) - - def wait_for_hosts_disabled(self, exempt_hostnames=None, timeout=300, - interval_step=10): - """Wait for hosts to be identified as disabled. - Run check every interval_step seconds - """ - if exempt_hostnames is None: - exempt_hostnames = [] - - for _ in range(timeout / interval_step): - hosts = sysinv.get_hosts(self.admin_token, - self.conf['region_name']) - if not hosts: - time.sleep(interval_step) - continue - - for host in hosts: - if host.name in exempt_hostnames: - continue - - if host.is_enabled(): - LOG.info("host %s is still enabled" % host.name) - break - else: - LOG.info("all hosts disabled.") - return True - - time.sleep(interval_step) - - return False - - @property - def sysinv(self): - if self._sysinv is None: - # TOX cannot import cgts_client and all the dependencies therefore - # the client is being lazy loaded since TOX doesn't actually - # require the cgtsclient module. 
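The comment above explains why cgtsclient is imported inside the sysinv property (the import follows just below) rather than at module scope. A generic sketch of that lazy-initialization pattern, with illustrative names that are not part of the deleted module:

class LazyClient(object):
    # Cache the client after the first access so that importing the
    # module stays cheap and repeated property reads reuse one object.
    def __init__(self, factory):
        self._factory = factory  # zero-argument callable building the client
        self._client = None

    @property
    def client(self):
        if self._client is None:
            self._client = self._factory()
        return self._client

Constructing LazyClient(lambda: build_client(endpoint, token)) defers build_client (a hypothetical factory) until the client property is first read, which is exactly how the deleted property defers the cgtsclient import.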
- from cgtsclient import client as cgts_client - - endpoint = self.admin_token.get_service_url( - self.conf['region_name'], "sysinv", "platform", 'admin') - self._sysinv = cgts_client.Client( - sysinv.API_VERSION, - endpoint=endpoint, - token=self.admin_token.get_id()) - - return self._sysinv diff --git a/controllerconfig/controllerconfig/controllerconfig/progress.py b/controllerconfig/controllerconfig/controllerconfig/progress.py deleted file mode 100644 index 72e4e7fcc6..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/progress.py +++ /dev/null @@ -1,31 +0,0 @@ -import sys - -from controllerconfig.common import log - -LOG = log.get_logger(__name__) - - -class ProgressRunner(object): - steps = [] - - def add(self, action, message): - self.steps.append((action, message)) - - def run(self): - total = len(self.steps) - for i, step in enumerate(self.steps, start=1): - action, message = step - LOG.info("Start step: %s" % message) - sys.stdout.write( - "\n%.2u/%.2u: %s ... " % (i, total, message)) - sys.stdout.flush() - try: - action() - sys.stdout.write('DONE') - sys.stdout.flush() - except Exception: - sys.stdout.flush() - raise - LOG.info("Finish step: %s" % message) - sys.stdout.write("\n") - sys.stdout.flush() diff --git a/controllerconfig/controllerconfig/controllerconfig/regionconfig.py b/controllerconfig/controllerconfig/controllerconfig/regionconfig.py deleted file mode 100755 index eee9b66f54..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/regionconfig.py +++ /dev/null @@ -1,629 +0,0 @@ -""" -Copyright (c) 2015-2019 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -from __future__ import print_function -from six.moves import configparser -import os -import subprocess -import sys -import textwrap -import time -from controllerconfig import utils -import uuid - -from controllerconfig.common import constants -from controllerconfig.common import log -from controllerconfig.common import rest_api_utils as rutils -from controllerconfig.common.exceptions import KeystoneFail -from controllerconfig.common.configobjects import REGION_CONFIG -from controllerconfig.common.configobjects import SUBCLOUD_CONFIG -from controllerconfig import ConfigFail -from controllerconfig.configassistant import ConfigAssistant -from controllerconfig.systemconfig import parse_system_config -from controllerconfig.systemconfig import configure_management_interface -from controllerconfig.systemconfig import create_cgcs_config_file -from controllerconfig import DEFAULT_DOMAIN_NAME - -# Temporary file for building cgcs_config -TEMP_CGCS_CONFIG_FILE = "/tmp/cgcs_config" - -# For region mode, this is the list of users that we expect to find configured -# in the region config file as _USER_KEY and _PASSWORD. -# For distributed cloud, this is the list of users that we expect to find -# configured in keystone. The password for each user will be retrieved from -# the DC Manager in the system controller and added to the region config file. 
-# The format is: -# REGION_NAME = key in region config file for this user's region -# USER_KEY = key in region config file for this user's name -# USER_NAME = user name in keystone - -REGION_NAME = 0 -USER_KEY = 1 -USER_NAME = 2 - -EXPECTED_USERS = [ - ('REGION_2_SERVICES', 'SYSINV', 'sysinv'), - ('REGION_2_SERVICES', 'PATCHING', 'patching'), - ('REGION_2_SERVICES', 'NFV', 'vim'), - ('REGION_2_SERVICES', 'MTCE', 'mtce'), - ('REGION_2_SERVICES', 'FM', 'fm'), - ('REGION_2_SERVICES', 'BARBICAN', 'barbican')] - -# This a description of the region 2 endpoints that we expect to configure or -# find configured in keystone. The format is as follows: -# SERVICE_NAME = key in region config file for this service's name -# SERVICE_TYPE = key in region config file for this service's type -# PUBLIC_URL = required publicurl - {} is replaced with CAM floating IP -# INTERNAL_URL = required internalurl - {} is replaced with CLM floating IP -# ADMIN_URL = required adminurl - {} is replaced with CLM floating IP -# DESCRIPTION = Description of the service (for automatic configuration) - -SERVICE_NAME = 0 -SERVICE_TYPE = 1 -PUBLIC_URL = 2 -INTERNAL_URL = 3 -ADMIN_URL = 4 -DESCRIPTION = 5 - -EXPECTED_REGION2_ENDPOINTS = [ - ('SYSINV_SERVICE_NAME', 'SYSINV_SERVICE_TYPE', - 'http://{}:6385/v1', - 'http://{}:6385/v1', - 'http://{}:6385/v1', - 'SysInv Service'), - ('PATCHING_SERVICE_NAME', 'PATCHING_SERVICE_TYPE', - 'http://{}:15491', - 'http://{}:5491', - 'http://{}:5491', - 'Patching Service'), - ('NFV_SERVICE_NAME', 'NFV_SERVICE_TYPE', - 'http://{}:4545', - 'http://{}:4545', - 'http://{}:4545', - 'Virtual Infrastructure Manager'), - ('FM_SERVICE_NAME', 'FM_SERVICE_TYPE', - 'http://{}:18002', - 'http://{}:18002', - 'http://{}:18002', - 'Fault Management Service'), - ('BARBICAN_SERVICE_NAME', 'BARBICAN_SERVICE_TYPE', - 'http://{}:9311', - 'http://{}:9311', - 'http://{}:9311', - 'OpenStack Key Manager Service'), -] - -EXPECTED_KEYSTONE_ENDPOINT = ( - 'KEYSTONE_SERVICE_NAME', 'KEYSTONE_SERVICE_TYPE', - 'http://{}:8081/keystone/main/v2.0', - 'http://{}:8081/keystone/main/v2.0', - 'http://{}:8081/keystone/admin/v2.0', - 'OpenStack Identity') - - -LOG = log.get_logger(__name__) - - -def validate_region_one_keystone_config(region_config, token, api_url, users, - services, endpoints, create=False, - config_type=REGION_CONFIG, - user_config=None): - """ Validate that the required region one configuration are in place, - if create is True, any missing entries will be set up to be added - to keystone later on by puppet. 
- """ - - region_1_name = region_config.get('SHARED_SERVICES', 'REGION_NAME') - region_2_name = region_config.get('REGION_2_SERVICES', 'REGION_NAME') - - # Determine what keystone entries are expected - expected_users = EXPECTED_USERS - expected_region_2_endpoints = EXPECTED_REGION2_ENDPOINTS - # Keystone is always in region 1 - expected_region_1_endpoints = [EXPECTED_KEYSTONE_ENDPOINT] - - domains = rutils.get_domains(token, api_url) - # Verify service project domain, creating if necessary - if region_config.has_option('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME'): - project_domain = region_config.get('REGION_2_SERVICES', - 'PROJECT_DOMAIN_NAME') - else: - project_domain = DEFAULT_DOMAIN_NAME - project_domain_id = domains.get_domain_id(project_domain) - if not project_domain_id: - if create and config_type == REGION_CONFIG: - region_config.set('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME', - project_domain) - else: - raise ConfigFail( - "Keystone configuration error: service project domain '%s' is " - "not configured." % project_domain) - - # Verify service project, creating if necessary - if region_config.has_option('SHARED_SERVICES', - 'SERVICE_PROJECT_NAME'): - service_project = region_config.get('SHARED_SERVICES', - 'SERVICE_PROJECT_NAME') - else: - service_project = region_config.get('SHARED_SERVICES', - 'SERVICE_TENANT_NAME') - projects = rutils.get_projects(token, api_url) - project_id = projects.get_project_id(service_project) - if not project_id: - if create and config_type == REGION_CONFIG: - region_config.set('SHARED_SERVICES', 'SERVICE_TENANT_NAME', - service_project) - else: - raise ConfigFail( - "Keystone configuration error: service project '%s' is not " - "configured." % service_project) - - # Verify and retrieve the id of the admin role (only needed when creating) - roles = rutils.get_roles(token, api_url) - role_id = roles.get_role_id('admin') - if not role_id and create: - raise ConfigFail("Keystone configuration error: No admin role present") - - # verify that the service user domain is configured, creating if necessary - if region_config.has_option('REGION_2_SERVICES', 'USER_DOMAIN_NAME'): - user_domain = region_config.get('REGION_2_SERVICES', - 'USER_DOMAIN_NAME') - else: - user_domain = DEFAULT_DOMAIN_NAME - domains = rutils.get_domains(token, api_url) - user_domain_id = domains.get_domain_id(user_domain) - if not user_domain_id: - if create and config_type == REGION_CONFIG: - region_config.set('REGION_2_SERVICES', - 'USER_DOMAIN_NAME') - else: - raise ConfigFail( - "Unable to obtain id for for %s domain. Please ensure " - "keystone configuration is correct." 
% user_domain) - - auth_url = region_config.get('SHARED_SERVICES', 'KEYSTONE_ADMINURL') - if config_type == REGION_CONFIG: - # Verify that all users are configured and can retrieve a token, - # Optionally set up to create missing users + their admin role - for user in expected_users: - auth_user = region_config.get(user[REGION_NAME], - user[USER_KEY] + '_USER_NAME') - user_id = users.get_user_id(auth_user) - auth_password = None - if not user_id and create: - if not region_config.has_option( - user[REGION_NAME], user[USER_KEY] + '_PASSWORD'): - # Generate random password for new user via - # /dev/urandom if necessary - try: - region_config.set( - user[REGION_NAME], user[USER_KEY] + '_PASSWORD', - uuid.uuid4().hex[:10] + "TiC2*") - except Exception as e: - raise ConfigFail("Failed to generate random user " - "password: %s" % e) - elif user_id and user_domain_id and\ - project_id and project_domain_id: - # If there is a user_id existing then we cannot use - # a randomized password as it was either created by - # a previous run of regionconfig or was created as - # part of Titanium Cloud Primary region config - if not region_config.has_option( - user[REGION_NAME], user[USER_KEY] + '_PASSWORD'): - raise ConfigFail("Failed to find configured password " - "for pre-defined user %s" % auth_user) - auth_password = region_config.get(user[REGION_NAME], - user[USER_KEY] + '_PASSWORD') - # Verify that the existing user can seek an auth token - user_token = rutils.get_token(auth_url, service_project, - auth_user, - auth_password, user_domain, - project_domain) - if not user_token: - raise ConfigFail( - "Unable to obtain keystone token for %s user. " - "Please ensure keystone configuration is correct." - % auth_user) - else: - # For subcloud configs we re-use the users from the system controller - # (the primary region). - for user in expected_users: - auth_user = user[USER_NAME] - user_id = users.get_user_id(auth_user) - auth_password = None - - if user_id: - # Add the password to the region config so it will be used when - # configuring services. - auth_password = user_config.get_password(user[USER_NAME]) - region_config.set(user[REGION_NAME], - user[USER_KEY] + '_PASSWORD', - auth_password) - else: - raise ConfigFail( - "Unable to obtain user (%s). Please ensure " - "keystone configuration is correct." % user[USER_NAME]) - - # Verify that the existing user can seek an auth token - user_token = rutils.get_token(auth_url, service_project, auth_user, - auth_password, user_domain, - project_domain) - if not user_token: - raise ConfigFail( - "Unable to obtain keystone token for %s user. " - "Please ensure keystone configuration is correct." 
% - auth_user) - - # Verify that region two endpoints & services for shared services - # match our requirements, optionally creating missing entries - for endpoint in expected_region_1_endpoints: - service_name = region_config.get('SHARED_SERVICES', - endpoint[SERVICE_NAME]) - service_type = region_config.get('SHARED_SERVICES', - endpoint[SERVICE_TYPE]) - - try: - service_id = services.get_service_id(service_name, service_type) - except KeystoneFail as ex: - # No option to create services for region one, if those are not - # present, something is seriously wrong - raise ex - - # Extract region one url information from the existing endpoint entry: - try: - endpoints.get_service_url( - region_1_name, service_id, "public") - endpoints.get_service_url( - region_1_name, service_id, "internal") - endpoints.get_service_url( - region_1_name, service_id, "admin") - except KeystoneFail as ex: - # Fail since shared services endpoints are not found - raise ConfigFail("Endpoint for shared service %s " - "is not configured" % service_name) - - # Verify that region two endpoints & services match our requirements, - # optionally creating missing entries - public_address = utils.get_optional(region_config, 'CAN_NETWORK', - 'CAN_IP_START_ADDRESS') - if not public_address: - public_address = utils.get_optional(region_config, 'CAN_NETWORK', - 'CAN_IP_FLOATING_ADDRESS') - if not public_address: - public_address = utils.get_optional(region_config, 'OAM_NETWORK', - 'IP_START_ADDRESS') - if not public_address: - # AIO-SX configuration - public_address = utils.get_optional(region_config, 'OAM_NETWORK', - 'IP_ADDRESS') - if not public_address: - public_address = region_config.get('OAM_NETWORK', - 'IP_FLOATING_ADDRESS') - - if region_config.has_section('CLM_NETWORK'): - internal_address = region_config.get('CLM_NETWORK', - 'CLM_IP_START_ADDRESS') - else: - internal_address = region_config.get('MGMT_NETWORK', - 'IP_START_ADDRESS') - - for endpoint in expected_region_2_endpoints: - service_name = utils.get_service(region_config, 'REGION_2_SERVICES', - endpoint[SERVICE_NAME]) - service_type = utils.get_service(region_config, 'REGION_2_SERVICES', - endpoint[SERVICE_TYPE]) - service_id = services.get_service_id(service_name, service_type) - - expected_public_url = endpoint[PUBLIC_URL].format(public_address) - - expected_internal_url = endpoint[INTERNAL_URL].format(internal_address) - expected_admin_url = endpoint[ADMIN_URL].format(internal_address) - - try: - public_url = endpoints.get_service_url(region_2_name, service_id, - "public") - internal_url = endpoints.get_service_url(region_2_name, service_id, - "internal") - admin_url = endpoints.get_service_url(region_2_name, service_id, - "admin") - except KeystoneFail as ex: - # The endpoint will be created optionally - if not create: - raise ConfigFail("Keystone configuration error: Unable to " - "find endpoints for service %s" - % service_name) - continue - - # Validate the existing endpoints - for endpointtype, found, expected in [ - ('public', public_url, expected_public_url), - ('internal', internal_url, expected_internal_url), - ('admin', admin_url, expected_admin_url)]: - if found != expected: - raise ConfigFail( - "Keystone configuration error for:\nregion ({}), " - "service name ({}), service type ({})\n" - "expected {}: {}\nconfigured {}: {}".format( - region_2_name, service_name, service_type, - endpointtype, expected, endpointtype, found)) - - -def validate_region_one_ldap_config(region_config): - """Validate ldap on region one by a ldap search""" - - 
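The endpoint validation above reduces to filling the {} placeholders of EXPECTED_REGION2_ENDPOINTS with the OAM or management address and comparing the result with what keystone returns for each interface. A distilled sketch of that comparison, using simplified stand-in data rather than the real keystone helpers:

def check_endpoint_urls(expected_templates, address, found):
    # expected_templates/found are dicts keyed by 'public'/'internal'/
    # 'admin'; return an (interface, expected, found) tuple per mismatch.
    mismatches = []
    for iface, template in expected_templates.items():
        expected = template.format(address)
        if found.get(iface) != expected:
            mismatches.append((iface, expected, found.get(iface)))
    return mismatches

# e.g. the sysinv endpoint from EXPECTED_REGION2_ENDPOINTS:
# check_endpoint_urls({'public': 'http://{}:6385/v1'}, '10.10.10.2',
#                     {'public': 'http://10.10.10.2:6385/v1'}) == []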
ldapserver_uri = region_config.get('SHARED_SERVICES', 'LDAP_SERVICE_URL') - cmd = ["ldapsearch", "-xH", ldapserver_uri, - "-b", "dc=cgcs,dc=local", "(objectclass=*)"] - try: - with open(os.devnull, "w") as fnull: - subprocess.check_call(cmd, stdout=fnull, stderr=fnull) - except subprocess.CalledProcessError: - raise ConfigFail("LDAP configuration error: not accessible") - - -def set_subcloud_config_defaults(region_config): - """Set defaults in region_config for subclouds""" - - # We always create endpoints for subclouds - region_config.set('REGION_2_SERVICES', 'CREATE', 'Y') - - # We use the default service project - region_config.set('SHARED_SERVICES', 'SERVICE_PROJECT_NAME', - constants.DEFAULT_SERVICE_PROJECT_NAME) - - # Add the necessary users to the region config, which will allow the - # validation code to run and will later result in services being - # configured to use the users from the system controller. - expected_users = EXPECTED_USERS - - for user in expected_users: - # Add the user to the region config so to allow validation. - region_config.set(user[REGION_NAME], user[USER_KEY] + '_USER_NAME', - user[USER_NAME]) - - -def configure_region(config_file, config_type=REGION_CONFIG): - """Configure the region""" - - # Parse the region/subcloud config file - print("Parsing configuration file... ", end=' ') - region_config = parse_system_config(config_file) - print("DONE") - - if config_type == SUBCLOUD_CONFIG: - # Set defaults in region_config for subclouds - set_subcloud_config_defaults(region_config) - - # Validate the region/subcloud config file - print("Validating configuration file... ", end=' ') - try: - create_cgcs_config_file(None, region_config, None, None, None, - config_type=config_type, - validate_only=True) - except configparser.Error as e: - raise ConfigFail("Error parsing configuration file %s: %s" % - (config_file, e)) - print("DONE") - - # Bring up management interface to allow us to reach Region 1 - print("Configuring management interface... ", end=' ') - configure_management_interface(region_config, config_type=config_type) - print("DONE") - - # Get token from keystone - print("Retrieving keystone token...", end=' ') - sys.stdout.flush() - auth_url = region_config.get('SHARED_SERVICES', 'KEYSTONE_ADMINURL') - if region_config.has_option('SHARED_SERVICES', 'ADMIN_TENANT_NAME'): - auth_project = region_config.get('SHARED_SERVICES', - 'ADMIN_TENANT_NAME') - else: - auth_project = region_config.get('SHARED_SERVICES', - 'ADMIN_PROJECT_NAME') - auth_user = region_config.get('SHARED_SERVICES', 'ADMIN_USER_NAME') - auth_password = region_config.get('SHARED_SERVICES', 'ADMIN_PASSWORD') - if region_config.has_option('SHARED_SERVICES', 'ADMIN_USER_DOMAIN'): - admin_user_domain = region_config.get('SHARED_SERVICES', - 'ADMIN_USER_DOMAIN') - else: - admin_user_domain = DEFAULT_DOMAIN_NAME - if region_config.has_option('SHARED_SERVICES', - 'ADMIN_PROJECT_DOMAIN'): - admin_project_domain = region_config.get('SHARED_SERVICES', - 'ADMIN_PROJECT_DOMAIN') - else: - admin_project_domain = DEFAULT_DOMAIN_NAME - - attempts = 0 - token = None - # Wait for connectivity to region one. It can take some time, especially if - # we have LAG on the management network. - while not token: - token = rutils.get_token(auth_url, auth_project, auth_user, - auth_password, admin_user_domain, - admin_project_domain) - if not token: - attempts += 1 - if attempts < 10: - print("\rRetrieving keystone token...{}".format( - '.' 
* attempts), end=' ') - sys.stdout.flush() - time.sleep(10) - else: - raise ConfigFail( - "Unable to obtain keystone token. Please ensure " - "networking and keystone configuration is correct.") - print("DONE") - - # Get services, endpoints, users and domains from keystone - print("Retrieving services, endpoints and users from keystone... ", - end=' ') - region_name = region_config.get('SHARED_SERVICES', 'REGION_NAME') - service_name = region_config.get('SHARED_SERVICES', - 'KEYSTONE_SERVICE_NAME') - service_type = region_config.get('SHARED_SERVICES', - 'KEYSTONE_SERVICE_TYPE') - - api_url = token.get_service_url( - region_name, service_name, service_type, "admin").replace( - 'v2.0', 'v3') - - services = rutils.get_services(token, api_url) - endpoints = rutils.get_endpoints(token, api_url) - users = rutils.get_users(token, api_url) - domains = rutils.get_domains(token, api_url) - if not services or not endpoints or not users: - raise ConfigFail( - "Unable to retrieve services, endpoints or users from keystone. " - "Please ensure networking and keystone configuration is correct.") - print("DONE") - - user_config = None - if config_type == SUBCLOUD_CONFIG: - # Retrieve subcloud configuration from dcmanager - print("Retrieving configuration from dcmanager... ", end=' ') - dcmanager_url = token.get_service_url( - 'SystemController', 'dcmanager', 'dcmanager', "admin") - subcloud_name = region_config.get('REGION_2_SERVICES', - 'REGION_NAME') - subcloud_management_subnet = region_config.get('MGMT_NETWORK', - 'CIDR') - hash_string = subcloud_name + subcloud_management_subnet - subcloud_config = rutils.get_subcloud_config(token, dcmanager_url, - subcloud_name, - hash_string) - user_config = subcloud_config['users'] - print("DONE") - - try: - # Configure missing region one keystone entries - create = True - # Prepare region configuration for puppet to create keystone identities - if (region_config.has_option('REGION_2_SERVICES', 'CREATE') and - region_config.get('REGION_2_SERVICES', 'CREATE') == 'Y'): - print("Preparing keystone configuration... ", end=' ') - # If keystone configuration for this region already in place, - # validate it only - else: - # Validate region one keystone config - create = False - print("Validating keystone configuration... ", end=' ') - - validate_region_one_keystone_config(region_config, token, api_url, - users, services, endpoints, create, - config_type=config_type, - user_config=user_config) - print("DONE") - - # validate ldap if it is shared - if region_config.has_option('SHARED_SERVICES', 'LDAP_SERVICE_URL'): - print("Validating ldap configuration... ", end=' ') - validate_region_one_ldap_config(region_config) - print("DONE") - - # Create cgcs_config file - print("Creating config apply file... 
", end=' ') - try: - create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, region_config, - services, endpoints, domains, - config_type=config_type) - except configparser.Error as e: - raise ConfigFail("Error parsing configuration file %s: %s" % - (config_file, e)) - print("DONE") - - # Configure controller - assistant = ConfigAssistant() - assistant.configure(TEMP_CGCS_CONFIG_FILE, display_config=False) - - except ConfigFail as e: - print("A configuration failure has occurred.", end=' ') - raise e - - -def show_help_region(): - print("Usage: %s [OPTIONS] " % sys.argv[0]) - print(textwrap.fill( - "Perform region configuration using the region " - "configuration from CONFIG_FILE.", 80)) - print("--allow-ssh Allow configuration to be executed in " - "ssh\n") - - -def show_help_subcloud(): - print("Usage: %s [OPTIONS] " % sys.argv[0]) - print(textwrap.fill( - "Perform subcloud configuration using the subcloud " - "configuration from CONFIG_FILE.", 80)) - print("--allow-ssh Allow configuration to be executed in " - "ssh\n") - - -def config_main(config_type=REGION_CONFIG): - allow_ssh = False - if config_type == REGION_CONFIG: - config_file = "/home/sysadmin/region_config" - elif config_type == SUBCLOUD_CONFIG: - config_file = "/home/sysadmin/subcloud_config" - else: - raise ConfigFail("Invalid config_type: %s" % config_type) - - arg = 1 - while arg < len(sys.argv): - if sys.argv[arg] in ['--help', '-h', '-?']: - if config_type == REGION_CONFIG: - show_help_region() - else: - show_help_subcloud() - exit(1) - elif sys.argv[arg] == "--allow-ssh": - allow_ssh = True - elif arg == len(sys.argv) - 1: - config_file = sys.argv[arg] - else: - print("Invalid option. Use --help for more information.") - exit(1) - arg += 1 - - log.configure() - - # Check if that the command is being run from the console - if utils.is_ssh_parent(): - if allow_ssh: - print(textwrap.fill(constants.SSH_WARNING_MESSAGE, 80)) - print('') - else: - print(textwrap.fill(constants.SSH_ERROR_MESSAGE, 80)) - exit(1) - - if not os.path.isfile(config_file): - print("Config file %s does not exist." % config_file) - exit(1) - - try: - configure_region(config_file, config_type=config_type) - except KeyboardInterrupt: - print("\nAborting configuration") - except ConfigFail as e: - LOG.exception(e) - print("\nConfiguration failed: {}".format(e)) - except Exception as e: - LOG.exception(e) - print("\nConfiguration failed: {}".format(e)) - else: - print("\nConfiguration finished successfully.") - finally: - if os.path.isfile(TEMP_CGCS_CONFIG_FILE): - os.remove(TEMP_CGCS_CONFIG_FILE) - - -def region_main(): - config_main(REGION_CONFIG) - - -def subcloud_main(): - config_main(SUBCLOUD_CONFIG) diff --git a/controllerconfig/controllerconfig/controllerconfig/sysinv_api.py b/controllerconfig/controllerconfig/controllerconfig/sysinv_api.py deleted file mode 100644 index dd520c5b92..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/sysinv_api.py +++ /dev/null @@ -1,579 +0,0 @@ -# -# Copyright (c) 2014-2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -System Inventory Interactions -""" - -import json -import openstack - -from six.moves.urllib import request as urlrequest -from six.moves.urllib.error import URLError -from six.moves.urllib.error import HTTPError - -from controllerconfig.common import log -from controllerconfig.common.exceptions import KeystoneFail - - -LOG = log.get_logger(__name__) - -API_VERSION = 1 - -# Host Personality Constants -HOST_PERSONALITY_NOT_SET = "" -HOST_PERSONALITY_UNKNOWN = "unknown" -HOST_PERSONALITY_CONTROLLER = "controller" -HOST_PERSONALITY_WORKER = "worker" -HOST_PERSONALITY_STORAGE = "storage" - -# Host Administrative State Constants -HOST_ADMIN_STATE_NOT_SET = "" -HOST_ADMIN_STATE_UNKNOWN = "unknown" -HOST_ADMIN_STATE_LOCKED = "locked" -HOST_ADMIN_STATE_UNLOCKED = "unlocked" - -# Host Operational State Constants -HOST_OPERATIONAL_STATE_NOT_SET = "" -HOST_OPERATIONAL_STATE_UNKNOWN = "unknown" -HOST_OPERATIONAL_STATE_ENABLED = "enabled" -HOST_OPERATIONAL_STATE_DISABLED = "disabled" - -# Host Availability State Constants -HOST_AVAIL_STATE_NOT_SET = "" -HOST_AVAIL_STATE_UNKNOWN = "unknown" -HOST_AVAIL_STATE_AVAILABLE = "available" -HOST_AVAIL_STATE_ONLINE = "online" -HOST_AVAIL_STATE_OFFLINE = "offline" -HOST_AVAIL_STATE_POWERED_OFF = "powered-off" -HOST_AVAIL_STATE_POWERED_ON = "powered-on" - -# Host Board Management Constants -HOST_BM_TYPE_NOT_SET = "" -HOST_BM_TYPE_UNKNOWN = "unknown" -HOST_BM_TYPE_ILO3 = 'ilo3' -HOST_BM_TYPE_ILO4 = 'ilo4' - -# Host invprovision state -HOST_PROVISIONING = "provisioning" -HOST_PROVISIONED = "provisioned" - - -class Host(object): - def __init__(self, hostname, host_data=None): - self.name = hostname - self.personality = HOST_PERSONALITY_NOT_SET - self.admin_state = HOST_ADMIN_STATE_NOT_SET - self.operational_state = HOST_OPERATIONAL_STATE_NOT_SET - self.avail_status = [] - self.bm_type = HOST_BM_TYPE_NOT_SET - self.uuid = None - self.config_status = None - self.invprovision = None - self.boot_device = None - self.rootfs_device = None - self.console = None - self.tboot = None - - if host_data is not None: - self.__host_set_state__(host_data) - - def __host_set_state__(self, host_data): - if host_data is None: - self.admin_state = HOST_ADMIN_STATE_UNKNOWN - self.operational_state = HOST_OPERATIONAL_STATE_UNKNOWN - self.avail_status = [] - self.bm_type = HOST_BM_TYPE_NOT_SET - - # Set personality - if host_data['personality'] == "controller": - self.personality = HOST_PERSONALITY_CONTROLLER - elif host_data['personality'] == "worker": - self.personality = HOST_PERSONALITY_WORKER - elif host_data['personality'] == "storage": - self.personality = HOST_PERSONALITY_STORAGE - else: - self.personality = HOST_PERSONALITY_UNKNOWN - - # Set administrative state - if host_data['administrative'] == "locked": - self.admin_state = HOST_ADMIN_STATE_LOCKED - elif host_data['administrative'] == "unlocked": - self.admin_state = HOST_ADMIN_STATE_UNLOCKED - else: - self.admin_state = HOST_ADMIN_STATE_UNKNOWN - - # Set operational state - if host_data['operational'] == "enabled": - self.operational_state = HOST_OPERATIONAL_STATE_ENABLED - elif host_data['operational'] == "disabled": - self.operational_state = HOST_OPERATIONAL_STATE_DISABLED - else: - self.operational_state = HOST_OPERATIONAL_STATE_UNKNOWN - - # Set availability status - self.avail_status[:] = [] - if host_data['availability'] == "available": - self.avail_status.append(HOST_AVAIL_STATE_AVAILABLE) - elif host_data['availability'] == "online": - 
self.avail_status.append(HOST_AVAIL_STATE_ONLINE) - elif host_data['availability'] == "offline": - self.avail_status.append(HOST_AVAIL_STATE_OFFLINE) - elif host_data['availability'] == "power-on": - self.avail_status.append(HOST_AVAIL_STATE_POWERED_ON) - elif host_data['availability'] == "power-off": - self.avail_status.append(HOST_AVAIL_STATE_POWERED_OFF) - else: - self.avail_status.append(HOST_AVAIL_STATE_AVAILABLE) - - # Set board management type - if host_data['bm_type'] is None: - self.bm_type = HOST_BM_TYPE_NOT_SET - elif host_data['bm_type'] == 'ilo3': - self.bm_type = HOST_BM_TYPE_ILO3 - elif host_data['bm_type'] == 'ilo4': - self.bm_type = HOST_BM_TYPE_ILO4 - else: - self.bm_type = HOST_BM_TYPE_UNKNOWN - - if host_data['invprovision'] == 'provisioned': - self.invprovision = HOST_PROVISIONED - else: - self.invprovision = HOST_PROVISIONING - - self.uuid = host_data['uuid'] - self.config_status = host_data['config_status'] - self.boot_device = host_data['boot_device'] - self.rootfs_device = host_data['rootfs_device'] - self.console = host_data['console'] - self.tboot = host_data['tboot'] - - def __host_update__(self, admin_token, region_name): - try: - url = admin_token.get_service_admin_url("platform", "sysinv", - region_name) - url += "/ihosts/" + self.name - - request_info = urlrequest.Request(url) - request_info.add_header("X-Auth-Token", admin_token.get_id()) - request_info.add_header("Accept", "application/json") - - request = urlrequest.urlopen(request_info) - response = json.loads(request.read()) - request.close() - return response - - except KeystoneFail as e: - LOG.error("Keystone authentication failed:{} ".format(e)) - return None - - except HTTPError as e: - LOG.error("%s, %s" % (e.code, e.read())) - if e.code == 401: - admin_token.set_expired() - return None - - except URLError as e: - LOG.error(e) - return None - - def __host_action__(self, admin_token, action, region_name): - try: - url = admin_token.get_service_admin_url("platform", "sysinv", - region_name) - url += "/ihosts/" + self.name - - request_info = urlrequest.Request(url) - request_info.get_method = lambda: 'PATCH' - request_info.add_header("X-Auth-Token", admin_token.get_id()) - request_info.add_header("Content-type", "application/json") - request_info.add_header("Accept", "application/json") - request_info.add_data(action) - - request = urlrequest.urlopen(request_info) - request.close() - return True - - except KeystoneFail as e: - LOG.error("Keystone authentication failed:{} ".format(e)) - return False - - except HTTPError as e: - LOG.error("%s, %s" % (e.code, e.read())) - if e.code == 401: - admin_token.set_expired() - return False - - except URLError as e: - LOG.error(e) - return False - - def is_unlocked(self): - return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED) - - def is_locked(self): - return(not self.is_unlocked()) - - def is_enabled(self): - return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED and - self.operational_state == HOST_OPERATIONAL_STATE_ENABLED) - - def is_controller_enabled_provisioned(self): - return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED and - self.operational_state == HOST_OPERATIONAL_STATE_ENABLED and - self.personality == HOST_PERSONALITY_CONTROLLER and - self.invprovision == HOST_PROVISIONED) - - def is_disabled(self): - return(not self.is_enabled()) - - def support_power_off(self): - return(HOST_BM_TYPE_NOT_SET != self.bm_type) - - def is_powered_off(self): - for status in self.avail_status: - if status == HOST_AVAIL_STATE_POWERED_OFF: - return(self.admin_state == 
HOST_ADMIN_STATE_LOCKED and - self.operational_state == - HOST_OPERATIONAL_STATE_DISABLED) - return False - - def is_powered_on(self): - return not self.is_powered_off() - - def refresh_data(self, admin_token, region_name): - """ Ask the System Inventory for an update view of the host """ - - host_data = self.__host_update__(admin_token, region_name) - self.__host_set_state__(host_data) - - def lock(self, admin_token, region_name): - """ Asks the Platform to perform a lock against a host """ - - if self.is_unlocked(): - action = json.dumps([{"path": "/action", - "value": "lock", "op": "replace"}]) - - return self.__host_action__(admin_token, action, region_name) - - return True - - def force_lock(self, admin_token, region_name): - """ Asks the Platform to perform a force lock against a host """ - - if self.is_unlocked(): - action = json.dumps([{"path": "/action", - "value": "force-lock", "op": "replace"}]) - - return self.__host_action__(admin_token, action, region_name) - - return True - - def unlock(self, admin_token, region_name): - """ Asks the Platform to perform an ulock against a host """ - - if self.is_locked(): - action = json.dumps([{"path": "/action", - "value": "unlock", "op": "replace"}]) - - return self.__host_action__(admin_token, action, region_name) - - return True - - def power_off(self, admin_token, region_name): - """ Asks the Platform to perform a power-off against a host """ - - if self.is_powered_on(): - action = json.dumps([{"path": "/action", - "value": "power-off", "op": "replace"}]) - - return self.__host_action__(admin_token, action, region_name) - - return True - - def power_on(self, admin_token, region_name): - """ Asks the Platform to perform a power-on against a host """ - - if self.is_powered_off(): - action = json.dumps([{"path": "/action", - "value": "power-on", "op": "replace"}]) - - return self.__host_action__(admin_token, action, region_name) - - return True - - -def get_hosts(admin_token, region_name, personality=None, - exclude_hostnames=None): - """ Asks System Inventory for a list of hosts """ - - if exclude_hostnames is None: - exclude_hostnames = [] - - try: - url = admin_token.get_service_admin_url("platform", "sysinv", - region_name) - url += "/ihosts/" - - request_info = urlrequest.Request(url) - request_info.add_header("X-Auth-Token", admin_token.get_id()) - request_info.add_header("Accept", "application/json") - - request = urlrequest.urlopen(request_info) - response = json.loads(request.read()) - request.close() - - host_list = [] - if personality is None: - for host in response['ihosts']: - if host['hostname'] not in exclude_hostnames: - host_list.append(Host(host['hostname'], host)) - else: - for host in response['ihosts']: - if host['hostname'] not in exclude_hostnames: - if (host['personality'] == "controller" and - personality == HOST_PERSONALITY_CONTROLLER): - host_list.append(Host(host['hostname'], host)) - - elif (host['personality'] == "worker" and - personality == HOST_PERSONALITY_WORKER): - host_list.append(Host(host['hostname'], host)) - - elif (host['personality'] == "storage" and - personality == HOST_PERSONALITY_STORAGE): - host_list.append(Host(host['hostname'], host)) - - return host_list - - except KeystoneFail as e: - LOG.error("Keystone authentication failed:{} ".format(e)) - return [] - - except HTTPError as e: - LOG.error("%s, %s" % (e.code, e.read())) - if e.code == 401: - admin_token.set_expired() - return [] - - except URLError as e: - LOG.error(e) - return [] - - -def dict_to_patch(values, install_action=False): - 
# install default action - if install_action: - values.update({'action': 'install'}) - patch = [] - for key, value in values.items(): - path = '/' + key - patch.append({'op': 'replace', 'path': path, 'value': value}) - return patch - - -def get_shared_services(): - try: - services = "" - with openstack.OpenStack() as client: - systems = client.sysinv.isystem.list() - if systems: - services = systems[0].capabilities.get("shared_services", "") - except Exception as e: - LOG.exception("failed to get shared services") - raise e - - return services - - -def get_alarms(): - """ get all alarms """ - alarm_list = [] - try: - with openstack.OpenStack() as client: - alarm_list = client.sysinv.ialarm.list() - except Exception as e: - LOG.exception("failed to get alarms") - raise e - return alarm_list - - -def controller_enabled_provisioned(hostname): - """ check if host is enabled """ - try: - with openstack.OpenStack() as client: - hosts = get_hosts(client.admin_token, - client.conf['region_name']) - for host in hosts: - if (hostname == host.name and - host.is_controller_enabled_provisioned()): - LOG.info("host %s is enabled/provisioned" % host.name) - return True - except Exception as e: - LOG.exception("failed to check if host is enabled/provisioned") - raise e - return False - - -def get_system_uuid(): - """ get system uuid """ - try: - sysuuid = "" - with openstack.OpenStack() as client: - systems = client.sysinv.isystem.list() - if systems: - sysuuid = systems[0].uuid - except Exception as e: - LOG.exception("failed to get system uuid") - raise e - return sysuuid - - -def get_oam_ip(): - """ get OAM ip details """ - try: - with openstack.OpenStack() as client: - oam_list = client.sysinv.iextoam.list() - if oam_list: - return oam_list[0] - except Exception as e: - LOG.exception("failed to get OAM IP") - raise e - return None - - -def get_mac_addresses(hostname): - """ get MAC addresses for the host """ - macs = {} - try: - with openstack.OpenStack() as client: - hosts = get_hosts(client.admin_token, - client.conf['region_name']) - for host in hosts: - if hostname == host.name: - port_list = client.sysinv.ethernet_port.list(host.uuid) - macs = {port.name: port.mac for port in port_list} - except Exception as e: - LOG.exception("failed to get MAC addresses") - raise e - return macs - - -def get_disk_serial_ids(hostname): - """ get disk serial ids for the host """ - disk_serial_ids = {} - try: - with openstack.OpenStack() as client: - hosts = get_hosts(client.admin_token, - client.conf['region_name']) - for host in hosts: - if hostname == host.name: - disk_list = client.sysinv.idisk.list(host.uuid) - disk_serial_ids = { - disk.device_node: disk.serial_id for disk in disk_list} - except Exception as e: - LOG.exception("failed to get disks") - raise e - return disk_serial_ids - - -def update_clone_system(descr, hostname): - """ update system parameters on clone installation """ - try: - with openstack.OpenStack() as client: - systems = client.sysinv.isystem.list() - if not systems: - return False - values = { - 'name': "Cloned_system", - 'description': descr - } - patch = dict_to_patch(values) - LOG.info("Updating system: {} [{}]".format(systems[0].name, patch)) - client.sysinv.isystem.update(systems[0].uuid, patch) - - hosts = get_hosts(client.admin_token, - client.conf['region_name']) - for host in hosts: - if hostname == host.name: - values = { - 'location': {}, - 'serialid': "" - } - patch = dict_to_patch(values) - client.sysinv.ihost.update(host.uuid, patch) - LOG.info("Updating host: {} 
[{}]".format(host, patch)) - except Exception as e: - LOG.exception("failed to update system parameters") - raise e - return True - - -def get_config_status(hostname): - """ get config status of the host """ - try: - with openstack.OpenStack() as client: - hosts = get_hosts(client.admin_token, - client.conf['region_name']) - for host in hosts: - if hostname == host.name: - return host.config_status - except Exception as e: - LOG.exception("failed to get config status") - raise e - return None - - -def get_host_data(hostname): - """ get data for the specified host """ - try: - with openstack.OpenStack() as client: - hosts = get_hosts(client.admin_token, - client.conf['region_name']) - for host in hosts: - if hostname == host.name: - return host - except Exception as e: - LOG.exception("failed to get host data") - raise e - return None - - -def do_worker_config_complete(hostname): - """ enable worker functionality """ - try: - with openstack.OpenStack() as client: - hosts = get_hosts(client.admin_token, - client.conf['region_name']) - for host in hosts: - if hostname == host.name: - # Create/apply worker manifests - values = { - 'action': "subfunction_config" - } - patch = dict_to_patch(values) - LOG.info("Applying worker manifests: {} [{}]" - .format(host, patch)) - client.sysinv.ihost.update(host.uuid, patch) - except Exception as e: - LOG.exception("worker_config_complete failed") - raise e - - -def get_storage_backend_services(): - """ get all storage backends and their assigned services """ - backend_service_dict = {} - try: - with openstack.OpenStack() as client: - backend_list = client.sysinv.storage_backend.list() - for backend in backend_list: - backend_service_dict.update( - {backend.backend: backend.services}) - - except Exception as e: - LOG.exception("failed to get storage backend services") - raise e - - return backend_service_dict diff --git a/controllerconfig/controllerconfig/controllerconfig/systemconfig.py b/controllerconfig/controllerconfig/controllerconfig/systemconfig.py deleted file mode 100644 index 801b02d66e..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/systemconfig.py +++ /dev/null @@ -1,499 +0,0 @@ -""" -Copyright (c) 2015-2019 Wind River Systems, Inc. 
- -SPDX-License-Identifier: Apache-2.0 - -""" - -from __future__ import print_function -from six.moves import configparser -import os -import readline -import sys -import textwrap - -from controllerconfig.common import constants -from controllerconfig.common import log -from controllerconfig.common.exceptions import BackupFail -from controllerconfig.common.exceptions import RestoreFail -from controllerconfig.common.exceptions import UserQuit -from controllerconfig.common.exceptions import CloneFail -from controllerconfig import lag_mode_to_str -from controllerconfig import Network -from controllerconfig import validate -from controllerconfig import ConfigFail -from controllerconfig import DEFAULT_CONFIG -from controllerconfig import REGION_CONFIG -from controllerconfig import SUBCLOUD_CONFIG -from controllerconfig import MGMT_TYPE -from controllerconfig import HP_NAMES -from controllerconfig import DEFAULT_NAMES -from controllerconfig.configassistant import ConfigAssistant -from controllerconfig import backup_restore -from controllerconfig import utils -from controllerconfig import clone - -# Temporary file for building cgcs_config -TEMP_CGCS_CONFIG_FILE = "/tmp/cgcs_config" - -LOG = log.get_logger(__name__) - - -def parse_system_config(config_file): - """Parse system config file""" - system_config = configparser.RawConfigParser() - try: - system_config.read(config_file) - except Exception as e: - LOG.exception(e) - raise ConfigFail("Error parsing system config file") - - # Dump configuration for debugging - # for section in config.sections(): - # print "Section: %s" % section - # for (name, value) in config.items(section): - # print "name: %s, value: %s" % (name, value) - return system_config - - -def configure_management_interface(region_config, config_type=REGION_CONFIG): - """Bring up management interface - """ - mgmt_network = Network() - if region_config.has_section('CLM_NETWORK'): - naming_type = HP_NAMES - else: - naming_type = DEFAULT_NAMES - - if config_type == SUBCLOUD_CONFIG: - min_addresses = 5 - else: - min_addresses = 8 - try: - mgmt_network.parse_config(region_config, config_type, MGMT_TYPE, - min_addresses=min_addresses, - naming_type=naming_type) - except ConfigFail: - raise - except Exception as e: - LOG.exception("Error parsing configuration file") - raise ConfigFail("Error parsing configuration file: %s" % e) - - try: - # Remove interface config files currently installed - utils.remove_interface_config_files() - - # Create the management interface configuration files. 
-        # Code based on ConfigAssistant._write_interface_config_management
-        parameters = utils.get_interface_config_static(
-            mgmt_network.start_address,
-            mgmt_network.cidr,
-            mgmt_network.gateway_address)
-
-        if mgmt_network.logical_interface.lag_interface:
-            management_interface = 'bond0'
-        else:
-            management_interface = mgmt_network.logical_interface.ports[0]
-
-        if mgmt_network.vlan:
-            management_interface_name = "%s.%s" % (management_interface,
-                                                   mgmt_network.vlan)
-            utils.write_interface_config_vlan(
-                management_interface_name,
-                mgmt_network.logical_interface.mtu,
-                parameters)
-
-            # underlying interface has no additional parameters
-            parameters = None
-        else:
-            management_interface_name = management_interface
-
-        if mgmt_network.logical_interface.lag_interface:
-            utils.write_interface_config_bond(
-                management_interface,
-                mgmt_network.logical_interface.mtu,
-                lag_mode_to_str(mgmt_network.logical_interface.lag_mode),
-                None,
-                constants.LAG_MIIMON_FREQUENCY,
-                mgmt_network.logical_interface.ports[0],
-                mgmt_network.logical_interface.ports[1],
-                parameters)
-        else:
-            utils.write_interface_config_ethernet(
-                management_interface,
-                mgmt_network.logical_interface.mtu,
-                parameters)
-
-        # Restart networking with the new management interface configuration
-        utils.restart_networking()
-
-        # Send a GARP for floating address. Doing this to help in
-        # cases where we are re-installing in a lab and another node
-        # previously held the floating address.
-        if mgmt_network.cidr.version == 4:
-            utils.send_interface_garp(management_interface_name,
-                                      mgmt_network.start_address)
-    except Exception:
-        LOG.exception("Failed to configure management interface")
-        raise ConfigFail("Failed to configure management interface")
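
The branches above encode the interface-naming rule used throughout this
module: a LAG is always rendered as bond0, a non-LAG interface takes the name
of its first port, and a VLAN id is appended with a dot. As a standalone
sketch (a hypothetical helper, not part of the original module):

    def interface_name(lag_interface, ports, vlan=None):
        base = 'bond0' if lag_interface else ports[0]
        return '%s.%s' % (base, vlan) if vlan else base   # e.g. bond0.121

-
-
-def create_cgcs_config_file(output_file, system_config,
-                            services, endpoints, domains,
-                            config_type=REGION_CONFIG, validate_only=False):
-    """
-    Create cgcs_config file or just perform validation of the system_config if
-    validate_only=True.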
-    :param output_file: filename of output cgcs_config file
-    :param system_config: system configuration
-    :param services: keystone services (not used if validate_only)
-    :param endpoints: keystone endpoints (not used if validate_only)
-    :param domains: keystone domains (not used if validate_only)
-    :param config_type: specify region, subcloud or standard config
-    :param validate_only: used to validate the input system_config
-    :return:
-    """
-    cgcs_config = None
-    if not validate_only:
-        cgcs_config = configparser.RawConfigParser()
-        cgcs_config.optionxform = str
-
-    # general error checking, if not validate_only cgcs config data is returned
-    validate(system_config, config_type, cgcs_config)
-
-    # Region configuration: services, endpoints and domain
-    if config_type in [REGION_CONFIG, SUBCLOUD_CONFIG] and not validate_only:
-        # The services and endpoints are not available in the validation phase
-        region_1_name = system_config.get('SHARED_SERVICES', 'REGION_NAME')
-        keystone_service_name = system_config.get('SHARED_SERVICES',
-                                                  'KEYSTONE_SERVICE_NAME')
-        keystone_service_type = system_config.get('SHARED_SERVICES',
-                                                  'KEYSTONE_SERVICE_TYPE')
-        keystone_service_id = services.get_service_id(keystone_service_name,
-                                                      keystone_service_type)
-        keystone_admin_url = endpoints.get_service_url(region_1_name,
-                                                       keystone_service_id,
-                                                       "admin")
-        keystone_internal_url = endpoints.get_service_url(region_1_name,
-                                                          keystone_service_id,
-                                                          "internal")
-        keystone_public_url = endpoints.get_service_url(region_1_name,
-                                                        keystone_service_id,
-                                                        "public")
-
-        cgcs_config.set('cREGION', 'KEYSTONE_AUTH_URI', keystone_internal_url)
-        cgcs_config.set('cREGION', 'KEYSTONE_IDENTITY_URI', keystone_admin_url)
-        cgcs_config.set('cREGION', 'KEYSTONE_ADMIN_URI', keystone_admin_url)
-        cgcs_config.set('cREGION', 'KEYSTONE_INTERNAL_URI',
-                        keystone_internal_url)
-        cgcs_config.set('cREGION', 'KEYSTONE_PUBLIC_URI', keystone_public_url)
-
-        # if ldap is a shared service
-        if (system_config.has_option('SHARED_SERVICES', 'LDAP_SERVICE_URL')):
-            ldap_service_url = system_config.get('SHARED_SERVICES',
-                                                 'LDAP_SERVICE_URL')
-            cgcs_config.set('cREGION', 'LDAP_SERVICE_URI', ldap_service_url)
-            cgcs_config.set('cREGION', 'LDAP_SERVICE_NAME', 'open-ldap')
-            cgcs_config.set('cREGION', 'LDAP_REGION_NAME', region_1_name)
-
-        # If primary region is non-TiC and keystone entries already created,
-        # the flag will tell puppet not to create them.
-        if (system_config.has_option('REGION_2_SERVICES', 'CREATE') and
-                system_config.get('REGION_2_SERVICES', 'CREATE') == 'Y'):
-            cgcs_config.set('cREGION', 'REGION_SERVICES_CREATE', 'True')
-
-    # System Timezone configuration
-    if system_config.has_option('SYSTEM', 'TIMEZONE'):
-        timezone = system_config.get('SYSTEM', 'TIMEZONE')
-        if not os.path.isfile("/usr/share/zoneinfo/%s" % timezone):
-            raise ConfigFail(
-                "Timezone file %s does not exist" % timezone)
-
-    # Dump results for debugging
-    # for section in cgcs_config.sections():
-    #     print "[%s]" % section
-    #     for (name, value) in cgcs_config.items(section):
-    #         print "%s=%s" % (name, value)
-
-    if not validate_only:
-        # Write config file
-        with open(output_file, 'w') as config_file:
-            cgcs_config.write(config_file)
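
Note the cgcs_config.optionxform = str assignment above: RawConfigParser
lower-cases option names by default, which would mangle generated keys such as
KEYSTONE_AUTH_URI. A minimal sketch of the difference (the file path and URI
value are illustrative only):

    from six.moves import configparser

    config = configparser.RawConfigParser()
    config.optionxform = str        # keep option names exactly as given
    config.add_section('cREGION')
    config.set('cREGION', 'KEYSTONE_AUTH_URI', 'http://127.0.0.1:5000/v3')
    with open('/tmp/example_cgcs_config', 'w') as f:
        config.write(f)             # key written as-is, not lower-cased

-
-
-def configure_system(config_file):
-    """Configure the system"""
-
-    # Parse the system config file
-    print("Parsing system configuration file... ", end=' ')
-    system_config = parse_system_config(config_file)
-    print("DONE")
-
-    # Validate the system config file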
", end=' ') - try: - create_cgcs_config_file(None, system_config, None, None, None, - DEFAULT_CONFIG, validate_only=True) - except configparser.Error as e: - raise ConfigFail("Error parsing configuration file %s: %s" % - (config_file, e)) - print("DONE") - - # Create cgcs_config file - print("Creating config apply file... ", end=' ') - try: - create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, system_config, - None, None, None, DEFAULT_CONFIG) - except configparser.Error as e: - raise ConfigFail("Error parsing configuration file %s: %s" % - (config_file, e)) - print("DONE") - - -def show_help(): - print("Usage: %s\n" - "--backup Backup configuration using the given " - "name\n" - "--clone-iso Clone and create an image with " - "the given file name\n" - "--clone-status Status of the last installation of " - "cloned image\n" - "--restore-system " - " " - "\n" - " Restore system configuration from backup " - "file with\n" - " the given name, full path required\n" - % sys.argv[0]) - - -def show_help_lab_only(): - print("Usage: %s\n" - "Perform initial configuration\n" - "\nThe following options are for lab use only:\n" - "--answerfile Apply the configuration from the specified " - "file without\n" - " any validation or user interaction\n" - "--default Apply default configuration with no NTP or " - "DNS server\n" - " configuration (suitable for testing in a " - "virtual\n" - " environment)\n" - "--archive-dir Directory to store the archive in\n" - "--provision Provision initial system data only\n" - % sys.argv[0]) - - -def no_complete(text, state): - return - - -def main(): - options = {} - answerfile = None - backup_name = None - archive_dir = constants.BACKUPS_PATH - do_default_config = False - do_backup = False - do_system_restore = False - include_storage_reinstall = False - do_clone = False - do_non_interactive = False - do_provision = False - system_config_file = "/home/sysadmin/system_config" - allow_ssh = False - - # Disable completion as the default completer shows python commands - readline.set_completer(no_complete) - - # remove any previous config fail flag file - if os.path.exists(constants.CONFIG_FAIL_FILE) is True: - os.remove(constants.CONFIG_FAIL_FILE) - - if os.environ.get('CGCS_LABMODE'): - options['labmode'] = True - - arg = 1 - while arg < len(sys.argv): - if sys.argv[arg] == "--answerfile": - arg += 1 - if arg < len(sys.argv): - answerfile = sys.argv[arg] - else: - print("--answerfile option requires a file to be specified") - exit(1) - elif sys.argv[arg] == "--backup": - arg += 1 - if arg < len(sys.argv): - backup_name = sys.argv[arg] - else: - print("--backup requires the name of the backup") - exit(1) - do_backup = True - elif sys.argv[arg] == "--restore-system": - arg += 1 - if arg < len(sys.argv): - if sys.argv[arg] in ["include-storage-reinstall", - "exclude-storage-reinstall"]: - if sys.argv[arg] == "include-storage-reinstall": - include_storage_reinstall = True - arg += 1 - if arg < len(sys.argv): - backup_name = sys.argv[arg] - else: - print(textwrap.fill( - "--restore-system requires the filename " - " of the backup", 80)) - exit(1) - else: - backup_name = sys.argv[arg] - else: - print(textwrap.fill( - "--restore-system requires the filename " - "of the backup", 80)) - exit(1) - do_system_restore = True - elif sys.argv[arg] == "--archive-dir": - arg += 1 - if arg < len(sys.argv): - archive_dir = sys.argv[arg] - else: - print("--archive-dir requires a directory") - exit(1) - elif sys.argv[arg] == "--clone-iso": - arg += 1 - if arg < len(sys.argv): - backup_name = 
-            else:
-                print("--clone-iso requires the name of the image")
-                exit(1)
-            do_clone = True
-        elif sys.argv[arg] == "--clone-status":
-            clone.clone_status()
-            exit(0)
-        elif sys.argv[arg] == "--default":
-            do_default_config = True
-        elif sys.argv[arg] == "--config-file":
-            arg += 1
-            if arg < len(sys.argv):
-                system_config_file = sys.argv[arg]
-            else:
-                print("--config-file requires the filename of the config file")
-                exit(1)
-            do_non_interactive = True
-        elif sys.argv[arg] in ["--help", "-h", "-?"]:
-            show_help()
-            exit(1)
-        elif sys.argv[arg] == "--labhelp":
-            show_help_lab_only()
-            exit(1)
-        elif sys.argv[arg] == "--provision":
-            do_provision = True
-        elif sys.argv[arg] == "--allow-ssh":
-            allow_ssh = True
-        elif sys.argv[arg] == "--kubernetes":
-            # This is a temporary flag for use during development. Once things
-            # are stable, we will remove it and make kubernetes the default.
-            options['kubernetes'] = True
-        else:
-            print("Invalid option. Use --help for more information.")
-            exit(1)
-        arg += 1
-
-    if [do_backup,
-            do_system_restore,
-            do_clone,
-            do_default_config,
-            do_non_interactive].count(True) > 1:
-        print("Invalid combination of options selected")
-        exit(1)
-
-    if answerfile and [do_backup,
-                       do_system_restore,
-                       do_clone,
-                       do_default_config,
-                       do_non_interactive].count(True) > 0:
-        print("The --answerfile option cannot be used with the selected "
-              "option")
-        exit(1)
-
-    log.configure()
-
-    if not do_backup and not do_clone:
-        # Check if that the command is being run from the console
-        if utils.is_ssh_parent():
-            if allow_ssh:
-                print(textwrap.fill(constants.SSH_WARNING_MESSAGE, 80))
-                print('')
-            else:
-                print(textwrap.fill(constants.SSH_ERROR_MESSAGE, 80))
-                exit(1)
-
-    # Reduce the printk console log level to avoid noise during configuration
-    printk_levels = ''
-    with open('/proc/sys/kernel/printk', 'r') as f:
-        printk_levels = f.readline()
-
-    temp_printk_levels = '3' + printk_levels[1:]
-    with open('/proc/sys/kernel/printk', 'w') as f:
-        f.write(temp_printk_levels)
-
-    try:
-        if do_backup:
-            backup_restore.backup(backup_name, archive_dir)
-            print("\nBackup complete")
-        elif do_system_restore:
-            backup_restore.restore_system(backup_name,
-                                          include_storage_reinstall)
-            print("\nSystem restore complete")
-        elif do_clone:
-            clone.clone(backup_name, archive_dir)
-            print("\nCloning complete")
-        elif do_provision:
-            assistant = ConfigAssistant(**options)
-            assistant.provision(answerfile)
-        else:
-            print(textwrap.fill(
-                "Please use bootstrap playbook to configure the "
-                "first controller.", 80))
-            exit(1)
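
The printk handling a few lines above is a save-lower-restore pattern: the
first field of /proc/sys/kernel/printk is the console log level, and main()
writes the saved line back in its outer cleanup. A standalone sketch of the
same idea, assuming the standard procfs interface:

    def quiet_console():
        with open('/proc/sys/kernel/printk', 'r') as f:
            saved = f.readline()
        with open('/proc/sys/kernel/printk', 'w') as f:
            f.write('3' + saved[1:])    # level 3: errors and worse only
        return saved                    # caller restores this in a finally block

-
-        if do_non_interactive:
-            if not os.path.isfile(system_config_file):
-                raise ConfigFail("Config file %s does not exist." %
-                                 system_config_file)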
-            if (os.path.exists(constants.CGCS_CONFIG_FILE) or
-                    os.path.exists(constants.CONFIG_PERMDIR) or
-                    os.path.exists(
-                        constants.INITIAL_CONFIG_COMPLETE_FILE)):
-                raise ConfigFail("Configuration has already been done "
-                                 "and cannot be repeated.")
-            configure_system(system_config_file)
-            answerfile = TEMP_CGCS_CONFIG_FILE
-        assistant = ConfigAssistant(**options)
-        assistant.configure(answerfile, do_default_config)
-        print("\nConfiguration was applied\n")
-        print(textwrap.fill(
-            "Please complete any out of service commissioning steps "
-            "with system commands and unlock controller to proceed.", 80))
-        assistant.check_required_interfaces_status()
-
-    except KeyboardInterrupt:
-        print("\nAborting configuration")
-    except BackupFail as e:
-        print("\nBackup failed: {}".format(e))
-    except RestoreFail as e:
-        print("\nRestore failed: {}".format(e))
-    except ConfigFail as e:
-        print("\nConfiguration failed: {}".format(e))
-    except CloneFail as e:
-        print("\nCloning failed: {}".format(e))
-    except UserQuit:
-        print("\nAborted configuration")
-    finally:
-        if os.path.isfile(TEMP_CGCS_CONFIG_FILE):
-            os.remove(TEMP_CGCS_CONFIG_FILE)
-
-    # Restore the printk console log level
-    with open('/proc/sys/kernel/printk', 'w') as f:
-        f.write(printk_levels)
diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/__init__.py b/controllerconfig/controllerconfig/controllerconfig/tests/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.share.keystoneonly b/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.share.keystoneonly
deleted file mode 100755
index 547856f6ba..0000000000
--- a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.share.keystoneonly
+++ /dev/null
@@ -1,78 +0,0 @@
-[SYSTEM] -SYSTEM_MODE=duplex - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_3] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth2 - -[MGMT_NETWORK] -VLAN=121 -IP_START_ADDRESS=192.168.204.102 -IP_END_ADDRESS=192.168.204.199 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -;GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 -DYNAMIC_ALLOCATION=N - -[OAM_NETWORK] -;VLAN= -IP_START_ADDRESS=10.10.10.2 -IP_END_ADDRESS=10.10.10.99 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -[REGION2_PXEBOOT_NETWORK] -PXEBOOT_CIDR=192.168.203.0/24 - -[SHARED_SERVICES] -REGION_NAME=RegionOne -ADMIN_PROJECT_NAME=admin -ADMIN_USER_NAME=admin -ADMIN_USER_DOMAIN=admin_domain -ADMIN_PROJECT_DOMAIN=admin_domain -ADMIN_PASSWORD=Li69nux* -KEYSTONE_ADMINURL=http://192.168.204.12:35357/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -SERVICE_PROJECT_NAME=FULL_TEST - -[REGION_2_SERVICES] -REGION_NAME=RegionTwo -USER_DOMAIN_NAME=service_domain -PROJECT_DOMAIN_NAME=service_domain - -SYSINV_USER_NAME=sysinvTWO -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -PATCHING_USER_NAME=patchingTWO -PATCHING_PASSWORD=password2WO* -PATCHING_SERVICE_NAME=patching -PATCHING_SERVICE_TYPE=patching -NFV_USER_NAME=vimTWO -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtceTWO -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fmTWO -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican
-BARBICAN_PASSWORD=barbican2WO* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.share.keystoneonly.result b/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.share.keystoneonly.result deleted file mode 100755 index b9624b18d5..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.share.keystoneonly.result +++ /dev/null @@ -1,78 +0,0 @@ -[cSYSTEM] -TIMEZONE = UTC -SYSTEM_MODE = duplex - -[cPXEBOOT] -PXEBOOT_SUBNET = 192.168.203.0/24 -CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2 -CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3 -CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4 -PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller - -[cMGMT] -MANAGEMENT_MTU = 1500 -MANAGEMENT_SUBNET = 192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE = no -MANAGEMENT_INTERFACE = eth0 -MANAGEMENT_VLAN = 121 -MANAGEMENT_INTERFACE_NAME = eth0.121 -CONTROLLER_FLOATING_ADDRESS = 192.168.204.102 -CONTROLLER_0_ADDRESS = 192.168.204.103 -CONTROLLER_1_ADDRESS = 192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME = controller -CONTROLLER_HOSTNAME_PREFIX = controller- -OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller -DYNAMIC_ADDRESS_ALLOCATION = no -MANAGEMENT_START_ADDRESS = 192.168.204.102 -MANAGEMENT_END_ADDRESS = 192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28 - -[cEXT_OAM] -EXTERNAL_OAM_MTU = 1500 -EXTERNAL_OAM_SUBNET = 10.10.10.0/24 -LAG_EXTERNAL_OAM_INTERFACE = no -EXTERNAL_OAM_INTERFACE = eth1 -EXTERNAL_OAM_INTERFACE_NAME = eth1 -EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2 -EXTERNAL_OAM_0_ADDRESS = 10.10.10.3 -EXTERNAL_OAM_1_ADDRESS = 10.10.10.4 - -[cREGION] -REGION_CONFIG = True -REGION_1_NAME = RegionOne -REGION_2_NAME = RegionTwo -ADMIN_USER_NAME = admin -ADMIN_USER_DOMAIN = admin_domain -ADMIN_PROJECT_NAME = admin -ADMIN_PROJECT_DOMAIN = admin_domain -SERVICE_PROJECT_NAME = FULL_TEST -KEYSTONE_SERVICE_NAME = keystone -KEYSTONE_SERVICE_TYPE = identity -PATCHING_USER_NAME = patchingTWO -PATCHING_PASSWORD = password2WO* -SYSINV_USER_NAME = sysinvTWO -SYSINV_PASSWORD = password2WO* -SYSINV_SERVICE_NAME = sysinv -SYSINV_SERVICE_TYPE = platform -NFV_USER_NAME = vimTWO -NFV_PASSWORD = password2WO* -MTCE_USER_NAME = mtceTWO -MTCE_PASSWORD = password2WO* -FM_USER_NAME = fmTWO -FM_PASSWORD = password2WO* -BARBICAN_USER_NAME = barbican -BARBICAN_PASSWORD = barbican2WO* -USER_DOMAIN_NAME = service_domain -PROJECT_DOMAIN_NAME = service_domain -KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0 - -[cAUTHENTICATION] -ADMIN_PASSWORD = Li69nux* - diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.shareall b/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.shareall deleted file mode 100755 index 8c24d037df..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.shareall +++ /dev/null @@ -1,77 +0,0 @@ -[SYSTEM] -SYSTEM_MODE = duplex - -[STORAGE] - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - 
-[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_3] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth2 - -[MGMT_NETWORK] -VLAN=121 -IP_START_ADDRESS=192.168.204.102 -IP_END_ADDRESS=192.168.204.199 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -;GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 -DYNAMIC_ALLOCATION=N - -[OAM_NETWORK] -;VLAN= -IP_START_ADDRESS=10.10.10.2 -IP_END_ADDRESS=10.10.10.99 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -[REGION2_PXEBOOT_NETWORK] -PXEBOOT_CIDR=192.168.203.0/24 - -[SHARED_SERVICES] -REGION_NAME=RegionOne -ADMIN_PROJECT_NAME=admin -ADMIN_USER_NAME=admin -ADMIN_PASSWORD=Li69nux* -KEYSTONE_ADMINURL=http://192.168.204.12:35357/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -SERVICE_PROJECT_NAME=FULL_TEST - -LDAP_SERVICE_URL=ldap://192.168.204.12:389 - -[REGION_2_SERVICES] -REGION_NAME=RegionTwo -SYSINV_USER_NAME=sysinvTWO -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -PATCHING_USER_NAME=patchingTWO -PATCHING_PASSWORD=password2WO* -PATCHING_SERVICE_NAME=patching -PATCHING_SERVICE_TYPE=patching -NFV_USER_NAME=vimTWO -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtceTWO -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fmTWO -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.shareall.result b/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.shareall.result deleted file mode 100755 index 17c6982bd8..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.shareall.result +++ /dev/null @@ -1,81 +0,0 @@ -[cSYSTEM] -TIMEZONE = UTC -SYSTEM_MODE = duplex - -[cPXEBOOT] -PXEBOOT_SUBNET = 192.168.203.0/24 -CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2 -CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3 -CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4 -PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller - -[cMGMT] -MANAGEMENT_MTU = 1500 -MANAGEMENT_SUBNET = 192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE = no -MANAGEMENT_INTERFACE = eth0 -MANAGEMENT_VLAN = 121 -MANAGEMENT_INTERFACE_NAME = eth0.121 -CONTROLLER_FLOATING_ADDRESS = 192.168.204.102 -CONTROLLER_0_ADDRESS = 192.168.204.103 -CONTROLLER_1_ADDRESS = 192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME = controller -CONTROLLER_HOSTNAME_PREFIX = controller- -OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller -DYNAMIC_ADDRESS_ALLOCATION = no -MANAGEMENT_START_ADDRESS = 192.168.204.102 -MANAGEMENT_END_ADDRESS = 192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28 - -[cEXT_OAM] -EXTERNAL_OAM_MTU = 1500 -EXTERNAL_OAM_SUBNET = 10.10.10.0/24 -LAG_EXTERNAL_OAM_INTERFACE = no -EXTERNAL_OAM_INTERFACE = eth1 -EXTERNAL_OAM_INTERFACE_NAME = eth1 -EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2 -EXTERNAL_OAM_0_ADDRESS = 10.10.10.3 -EXTERNAL_OAM_1_ADDRESS = 10.10.10.4 - -[cREGION] -REGION_CONFIG = True -REGION_1_NAME = RegionOne -REGION_2_NAME = RegionTwo -ADMIN_USER_NAME = admin -ADMIN_USER_DOMAIN = Default -ADMIN_PROJECT_NAME = admin -ADMIN_PROJECT_DOMAIN = Default -SERVICE_PROJECT_NAME = FULL_TEST -KEYSTONE_SERVICE_NAME = keystone -KEYSTONE_SERVICE_TYPE = identity -PATCHING_USER_NAME = 
patchingTWO -PATCHING_PASSWORD = password2WO* -SYSINV_USER_NAME = sysinvTWO -SYSINV_PASSWORD = password2WO* -SYSINV_SERVICE_NAME = sysinv -SYSINV_SERVICE_TYPE = platform -NFV_USER_NAME = vimTWO -NFV_PASSWORD = password2WO* -MTCE_USER_NAME = mtceTWO -MTCE_PASSWORD = password2WO* -FM_USER_NAME = fmTWO -FM_PASSWORD = password2WO* -BARBICAN_USER_NAME = barbican -BARBICAN_PASSWORD = barbican2WO* -USER_DOMAIN_NAME = Default -PROJECT_DOMAIN_NAME = Default -KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0 -LDAP_SERVICE_URI = ldap://192.168.204.12:389 -LDAP_SERVICE_NAME = open-ldap -LDAP_REGION_NAME = RegionOne - -[cAUTHENTICATION] -ADMIN_PASSWORD = Li69nux* - diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/certificate.pem b/controllerconfig/controllerconfig/controllerconfig/tests/files/certificate.pem deleted file mode 100644 index d2ef173b37..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/certificate.pem +++ /dev/null @@ -1 +0,0 @@ -# Dummy certificate file diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.ceph b/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.ceph deleted file mode 100755 index b4b3a1249f..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.ceph +++ /dev/null @@ -1,62 +0,0 @@ -[cSYSTEM] -# System Configuration -SYSTEM_MODE=duplex -TIMEZONE=UTC - -[cPXEBOOT] -# PXEBoot Network Support Configuration -PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller - -[cMGMT] -# Management Network Configuration -MANAGEMENT_INTERFACE_NAME=eth1 -MANAGEMENT_INTERFACE=eth1 -MANAGEMENT_MTU=1500 -MANAGEMENT_SUBNET=192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE=no -CONTROLLER_FLOATING_ADDRESS=192.168.204.2 -CONTROLLER_0_ADDRESS=192.168.204.3 -CONTROLLER_1_ADDRESS=192.168.204.4 -NFS_MANAGEMENT_ADDRESS_1=192.168.204.7 -NFS_MANAGEMENT_ADDRESS_2=192.168.204.8 -CONTROLLER_FLOATING_HOSTNAME=controller -CONTROLLER_HOSTNAME_PREFIX=controller- -OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller -DYNAMIC_ADDRESS_ALLOCATION=yes -MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28 - -[cCLUSTER] -# Cluster Host Network Configuration -CLUSTER_INTERFACE_NAME=eth1 -CLUSTER_INTERFACE=eth1 -CLUSTER_VLAN=NC -CLUSTER_MTU=1500 -CLUSTER_SUBNET=192.168.206.0/24 -LAG_CLUSTER_INTERFACE=no - -[cEXT_OAM] -# External OAM Network Configuration -EXTERNAL_OAM_INTERFACE_NAME=eth0 -EXTERNAL_OAM_INTERFACE=eth0 -EXTERNAL_OAM_VLAN=NC -EXTERNAL_OAM_MTU=1500 -LAG_EXTERNAL_OAM_INTERFACE=no -EXTERNAL_OAM_SUBNET=10.10.10.0/24 -EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2 -EXTERNAL_OAM_0_ADDRESS=10.10.10.3 -EXTERNAL_OAM_1_ADDRESS=10.10.10.4 - -[cDNS] -# DNS Configuration -NAMESERVER_1=8.8.8.8 -NAMESERVER_2=8.8.4.4 -NAMESERVER_3=NC - -[cSECURITY] -[cREGION] -# Region Configuration -REGION_CONFIG=False - -[cAUTHENTICATION] -ADMIN_PASSWORD=Li69nux* diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.default b/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.default deleted file mode 100755 index c071de26ea..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.default +++ /dev/null @@ 
-1,62 +0,0 @@ -[cSYSTEM] -# System Configuration -SYSTEM_MODE=duplex -TIMEZONE=UTC - -[cPXEBOOT] -# PXEBoot Network Support Configuration -PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller - -[cMGMT] -# Management Network Configuration -MANAGEMENT_INTERFACE_NAME=eth1 -MANAGEMENT_INTERFACE=eth1 -MANAGEMENT_MTU=1500 -MANAGEMENT_SUBNET=192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE=no -CONTROLLER_FLOATING_ADDRESS=192.168.204.2 -CONTROLLER_0_ADDRESS=192.168.204.3 -CONTROLLER_1_ADDRESS=192.168.204.4 -NFS_MANAGEMENT_ADDRESS_1=192.168.204.5 -NFS_MANAGEMENT_ADDRESS_2=192.168.204.6 -CONTROLLER_FLOATING_HOSTNAME=controller -CONTROLLER_HOSTNAME_PREFIX=controller- -OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller -DYNAMIC_ADDRESS_ALLOCATION=yes -MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28 - -[cCLUSTER] -# Cluster Host Network Configuration -CLUSTER_INTERFACE_NAME=eth1 -CLUSTER_INTERFACE=eth1 -CLUSTER_VLAN=NC -CLUSTER_MTU=1500 -CLUSTER_SUBNET=192.168.206.0/24 -LAG_CLUSTER_INTERFACE=no - -[cEXT_OAM] -# External OAM Network Configuration -EXTERNAL_OAM_INTERFACE_NAME=eth0 -EXTERNAL_OAM_INTERFACE=eth0 -EXTERNAL_OAM_VLAN=NC -EXTERNAL_OAM_MTU=1500 -LAG_EXTERNAL_OAM_INTERFACE=no -EXTERNAL_OAM_SUBNET=10.10.10.0/24 -EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2 -EXTERNAL_OAM_0_ADDRESS=10.10.10.3 -EXTERNAL_OAM_1_ADDRESS=10.10.10.4 - -[cDNS] -# DNS Configuration -NAMESERVER_1=8.8.8.8 -NAMESERVER_2=8.8.4.4 -NAMESERVER_3=NC - -[cSECURITY] -[cREGION] -# Region Configuration -REGION_CONFIG=False - -[cAUTHENTICATION] -ADMIN_PASSWORD=Li69nux* diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.ipv6 b/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.ipv6 deleted file mode 100755 index 97b357ba70..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.ipv6 +++ /dev/null @@ -1,62 +0,0 @@ -[cSYSTEM] -# System Configuration -SYSTEM_MODE=duplex -TIMEZONE=UTC - -[cPXEBOOT] -# PXEBoot Network Support Configuration -PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller - -[cMGMT] -# Management Network Configuration -MANAGEMENT_INTERFACE_NAME=eth1 -MANAGEMENT_INTERFACE=eth1 -MANAGEMENT_MTU=1500 -MANAGEMENT_SUBNET=1234::/64 -LAG_MANAGEMENT_INTERFACE=no -CONTROLLER_FLOATING_ADDRESS=1234::2 -CONTROLLER_0_ADDRESS=1234::3 -CONTROLLER_1_ADDRESS=1234::4 -NFS_MANAGEMENT_ADDRESS_1=1234::5 -NFS_MANAGEMENT_ADDRESS_2=1234::6 -CONTROLLER_FLOATING_HOSTNAME=controller -CONTROLLER_HOSTNAME_PREFIX=controller- -OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller -DYNAMIC_ADDRESS_ALLOCATION=yes -MANAGEMENT_MULTICAST_SUBNET=ff08::1:1:0/124 - -[cCLUSTER] -# Cluster Host Network Configuration -CLUSTER_INTERFACE_NAME=eth1 -CLUSTER_INTERFACE=eth1 -CLUSTER_VLAN=NC -CLUSTER_MTU=1500 -CLUSTER_SUBNET=192.168.206.0/24 -LAG_CLUSTER_INTERFACE=no - -[cEXT_OAM] -# External OAM Network Configuration -EXTERNAL_OAM_INTERFACE_NAME=eth0 -EXTERNAL_OAM_INTERFACE=eth0 -EXTERNAL_OAM_VLAN=NC -EXTERNAL_OAM_MTU=1500 -LAG_EXTERNAL_OAM_INTERFACE=no -EXTERNAL_OAM_SUBNET=abcd::/64 -EXTERNAL_OAM_GATEWAY_ADDRESS=abcd::1 -EXTERNAL_OAM_FLOATING_ADDRESS=abcd::2 -EXTERNAL_OAM_0_ADDRESS=abcd::3 -EXTERNAL_OAM_1_ADDRESS=abcd::4 - -[cDNS] -# DNS Configuration -NAMESERVER_1=8.8.8.8 -NAMESERVER_2=8.8.4.4 -NAMESERVER_3=NC - -[cSECURITY] -[cREGION] -# Region Configuration -REGION_CONFIG=False - -[cAUTHENTICATION] -ADMIN_PASSWORD=Li69nux* diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.kubernetes 
b/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.kubernetes deleted file mode 100755 index f340017eb8..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.kubernetes +++ /dev/null @@ -1,76 +0,0 @@ -[cSYSTEM] -# System Configuration -SYSTEM_MODE=duplex -TIMEZONE=UTC - -[cPXEBOOT] -# PXEBoot Network Support Configuration -PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller - -[cMGMT] -# Management Network Configuration -MANAGEMENT_INTERFACE_NAME=eth1 -MANAGEMENT_INTERFACE=eth1 -MANAGEMENT_MTU=1500 -MANAGEMENT_SUBNET=192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE=no -CONTROLLER_FLOATING_ADDRESS=192.168.204.2 -CONTROLLER_0_ADDRESS=192.168.204.3 -CONTROLLER_1_ADDRESS=192.168.204.4 -NFS_MANAGEMENT_ADDRESS_1=192.168.204.5 -NFS_MANAGEMENT_ADDRESS_2=192.168.204.6 -CONTROLLER_FLOATING_HOSTNAME=controller -CONTROLLER_HOSTNAME_PREFIX=controller- -OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller -DYNAMIC_ADDRESS_ALLOCATION=yes -MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28 - -[cCLUSTER] -# Cluster Host Network Configuration -CLUSTER_INTERFACE_NAME=eth1 -CLUSTER_INTERFACE=eth1 -CLUSTER_VLAN=NC -CLUSTER_MTU=1500 -CLUSTER_SUBNET=192.168.206.0/24 -LAG_CLUSTER_INTERFACE=no - -[cEXT_OAM] -# External OAM Network Configuration -EXTERNAL_OAM_INTERFACE_NAME=eth0 -EXTERNAL_OAM_INTERFACE=eth0 -EXTERNAL_OAM_VLAN=NC -EXTERNAL_OAM_MTU=1500 -LAG_EXTERNAL_OAM_INTERFACE=no -EXTERNAL_OAM_SUBNET=10.10.10.0/24 -EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2 -EXTERNAL_OAM_0_ADDRESS=10.10.10.3 -EXTERNAL_OAM_1_ADDRESS=10.10.10.4 - -[cDNS] -# DNS Configuration -NAMESERVER_1=1.2.3.4 -NAMESERVER_2=5.6.7.8 -NAMESERVER_3=NC - -[cDOCKER_PROXY] -# Docker Proxy Configuration -DOCKER_HTTP_PROXY=http://proxy.com:123 -DOCKER_HTTPS_PROXY=https://proxy.com:123 -DOCKER_NO_PROXY=localhost,127.0.0.1,192.168.204.2 - -[cDOCKER_REGISTRY] -# Docker Registry Configuration -DOCKER_K8S_REGISTRY=my.registry.com:5000 -DOCKER_GCR_REGISTRY=my.registry.com -DOCKER_QUAY_REGISTRY=1.2.3.4:5000 -DOCKER_DOCKER_REGISTRY=[1:2:3:4:a:b:c:d]:5000 -IS_SECURE_REGISTRY=False - -[cSECURITY] -[cREGION] -# Region Configuration -REGION_CONFIG=False - -[cAUTHENTICATION] -ADMIN_PASSWORD=Li69nux* diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.region b/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.region deleted file mode 100755 index 7be15f8887..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.region +++ /dev/null @@ -1,94 +0,0 @@ -[cSYSTEM] -# System Configuration -SYSTEM_MODE=duplex -TIMEZONE=UTC - -[cPXEBOOT] -# PXEBoot Network Support Configuration -PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller - -[cMGMT] -# Management Network Configuration -MANAGEMENT_INTERFACE_NAME=eth1 -MANAGEMENT_INTERFACE=eth1 -MANAGEMENT_MTU=1500 -MANAGEMENT_SUBNET=192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE=no -CONTROLLER_FLOATING_ADDRESS=192.168.204.102 -CONTROLLER_0_ADDRESS=192.168.204.103 -CONTROLLER_1_ADDRESS=192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1=192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2=192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME=controller -CONTROLLER_HOSTNAME_PREFIX=controller- -OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller -DYNAMIC_ADDRESS_ALLOCATION=yes -MANAGEMENT_START_ADDRESS=192.168.204.102 -MANAGEMENT_END_ADDRESS=192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28 - -[cCLUSTER] -# Cluster Host Network Configuration -CLUSTER_INTERFACE_NAME=eth1 -CLUSTER_INTERFACE=eth1 
-CLUSTER_VLAN=NC -CLUSTER_MTU=1500 -CLUSTER_SUBNET=192.168.206.0/24 -LAG_CLUSTER_INTERFACE=no - -[cEXT_OAM] -# External OAM Network Configuration -EXTERNAL_OAM_INTERFACE_NAME=eth0 -EXTERNAL_OAM_INTERFACE=eth0 -EXTERNAL_OAM_VLAN=NC -EXTERNAL_OAM_MTU=1500 -LAG_EXTERNAL_OAM_INTERFACE=no -EXTERNAL_OAM_SUBNET=10.10.10.0/24 -EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2 -EXTERNAL_OAM_0_ADDRESS=10.10.10.3 -EXTERNAL_OAM_1_ADDRESS=10.10.10.4 - -[cDNS] -# DNS Configuration -NAMESERVER_1=8.8.8.8 -NAMESERVER_2=8.8.4.4 -NAMESERVER_3=NC - -[cSECURITY] -[cREGION] -# Region Configuration -REGION_CONFIG=True -REGION_1_NAME=RegionOne -REGION_2_NAME=RegionTwo -ADMIN_USER_NAME=admin -ADMIN_USER_DOMAIN=Default -ADMIN_PROJECT_NAME=admin -ADMIN_PROJECT_DOMAIN=Default -SERVICE_PROJECT_NAME=service -SERVICE_USER_DOMAIN=Default -SERVICE_PROJECT_DOMAIN=Default -KEYSTONE_AUTH_URI=http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI=http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI=http://10.10.10.2:8081/keystone/main/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -PATCHING_USER_NAME=patching -PATCHING_PASSWORD=password2WO* -SYSINV_USER_NAME=sysinv -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -NFV_USER_NAME=vim -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtce -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fm -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[cAUTHENTICATION] -ADMIN_PASSWORD=Li69nux* diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.region_nuage_vrs b/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.region_nuage_vrs deleted file mode 100755 index 7be15f8887..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.region_nuage_vrs +++ /dev/null @@ -1,94 +0,0 @@ -[cSYSTEM] -# System Configuration -SYSTEM_MODE=duplex -TIMEZONE=UTC - -[cPXEBOOT] -# PXEBoot Network Support Configuration -PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller - -[cMGMT] -# Management Network Configuration -MANAGEMENT_INTERFACE_NAME=eth1 -MANAGEMENT_INTERFACE=eth1 -MANAGEMENT_MTU=1500 -MANAGEMENT_SUBNET=192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE=no -CONTROLLER_FLOATING_ADDRESS=192.168.204.102 -CONTROLLER_0_ADDRESS=192.168.204.103 -CONTROLLER_1_ADDRESS=192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1=192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2=192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME=controller -CONTROLLER_HOSTNAME_PREFIX=controller- -OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller -DYNAMIC_ADDRESS_ALLOCATION=yes -MANAGEMENT_START_ADDRESS=192.168.204.102 -MANAGEMENT_END_ADDRESS=192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28 - -[cCLUSTER] -# Cluster Host Network Configuration -CLUSTER_INTERFACE_NAME=eth1 -CLUSTER_INTERFACE=eth1 -CLUSTER_VLAN=NC -CLUSTER_MTU=1500 -CLUSTER_SUBNET=192.168.206.0/24 -LAG_CLUSTER_INTERFACE=no - -[cEXT_OAM] -# External OAM Network Configuration -EXTERNAL_OAM_INTERFACE_NAME=eth0 -EXTERNAL_OAM_INTERFACE=eth0 -EXTERNAL_OAM_VLAN=NC -EXTERNAL_OAM_MTU=1500 -LAG_EXTERNAL_OAM_INTERFACE=no -EXTERNAL_OAM_SUBNET=10.10.10.0/24 -EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2 -EXTERNAL_OAM_0_ADDRESS=10.10.10.3 -EXTERNAL_OAM_1_ADDRESS=10.10.10.4 - -[cDNS] -# DNS Configuration 
-NAMESERVER_1=8.8.8.8 -NAMESERVER_2=8.8.4.4 -NAMESERVER_3=NC - -[cSECURITY] -[cREGION] -# Region Configuration -REGION_CONFIG=True -REGION_1_NAME=RegionOne -REGION_2_NAME=RegionTwo -ADMIN_USER_NAME=admin -ADMIN_USER_DOMAIN=Default -ADMIN_PROJECT_NAME=admin -ADMIN_PROJECT_DOMAIN=Default -SERVICE_PROJECT_NAME=service -SERVICE_USER_DOMAIN=Default -SERVICE_PROJECT_DOMAIN=Default -KEYSTONE_AUTH_URI=http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI=http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI=http://10.10.10.2:8081/keystone/main/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -PATCHING_USER_NAME=patching -PATCHING_PASSWORD=password2WO* -SYSINV_USER_NAME=sysinv -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -NFV_USER_NAME=vim -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtce -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fm -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[cAUTHENTICATION] -ADMIN_PASSWORD=Li69nux* diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/iptables.rules b/controllerconfig/controllerconfig/controllerconfig/tests/files/iptables.rules deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.lag.vlan b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.lag.vlan deleted file mode 100755 index 46a945c310..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.lag.vlan +++ /dev/null @@ -1,72 +0,0 @@ -[SYSTEM] -SYSTEM_MODE=duplex -TIMEZONE=UTC - -[STORAGE] - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=Y -LAG_MODE=4 -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1,eth2 - -[CLM_NETWORK] -CLM_VLAN=123 -CLM_IP_START_ADDRESS=192.168.204.102 -CLM_IP_END_ADDRESS=192.168.204.199 -CLM_CIDR=192.168.204.0/24 -CLM_MULTICAST_CIDR=239.1.1.0/28 -CLM_GATEWAY=192.168.204.12 -CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[CAN_NETWORK] -CAN_VLAN=125 -CAN_IP_START_ADDRESS=10.10.10.2 -CAN_IP_END_ADDRESS=10.10.10.4 -CAN_CIDR=10.10.10.0/24 -;CAN_GATEWAY=10.10.10.1 -CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[REGION2_PXEBOOT_NETWORK] -PXEBOOT_CIDR=192.168.203.0/24 - -[SHARED_SERVICES] -REGION_NAME=RegionOne -ADMIN_PROJECT_NAME=admin -ADMIN_USER_NAME=admin -ADMIN_PASSWORD=Li69nux* -KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -SERVICE_PROJECT_NAME=service - -[REGION_2_SERVICES] -REGION_NAME=RegionTwo -SYSINV_USER_NAME=sysinv -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -PATCHING_USER_NAME=patching -PATCHING_PASSWORD=password2WO* -PATCHING_SERVICE_NAME=patching -PATCHING_SERVICE_TYPE=patching -NFV_USER_NAME=vim -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtce -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fm -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.lag.vlan.result 
b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.lag.vlan.result deleted file mode 100755 index b0e5af3efa..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.lag.vlan.result +++ /dev/null @@ -1,82 +0,0 @@ -[cSYSTEM] -TIMEZONE = UTC -SYSTEM_MODE = duplex - -[cPXEBOOT] -PXEBOOT_SUBNET = 192.168.203.0/24 -CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2 -CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3 -CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4 -PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller - -[cMGMT] -MANAGEMENT_MTU = 1500 -MANAGEMENT_SUBNET = 192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE = yes -MANAGEMENT_BOND_MEMBER_0 = eth1 -MANAGEMENT_BOND_MEMBER_1 = eth2 -MANAGEMENT_BOND_POLICY = 802.3ad -MANAGEMENT_INTERFACE = bond0 -MANAGEMENT_VLAN = 123 -MANAGEMENT_INTERFACE_NAME = bond0.123 -MANAGEMENT_GATEWAY_ADDRESS = 192.168.204.12 -CONTROLLER_FLOATING_ADDRESS = 192.168.204.102 -CONTROLLER_0_ADDRESS = 192.168.204.103 -CONTROLLER_1_ADDRESS = 192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME = controller -CONTROLLER_HOSTNAME_PREFIX = controller- -OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller -DYNAMIC_ADDRESS_ALLOCATION = no -MANAGEMENT_START_ADDRESS = 192.168.204.102 -MANAGEMENT_END_ADDRESS = 192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28 - -[cEXT_OAM] -EXTERNAL_OAM_MTU = 1500 -EXTERNAL_OAM_SUBNET = 10.10.10.0/24 -LAG_EXTERNAL_OAM_INTERFACE = no -EXTERNAL_OAM_INTERFACE = bond0 -EXTERNAL_OAM_VLAN = 125 -EXTERNAL_OAM_INTERFACE_NAME = bond0.125 -EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2 -EXTERNAL_OAM_0_ADDRESS = 10.10.10.3 -EXTERNAL_OAM_1_ADDRESS = 10.10.10.4 - -[cREGION] -REGION_CONFIG = True -REGION_1_NAME = RegionOne -REGION_2_NAME = RegionTwo -ADMIN_USER_NAME = admin -ADMIN_USER_DOMAIN = Default -ADMIN_PROJECT_NAME = admin -ADMIN_PROJECT_DOMAIN = Default -SERVICE_PROJECT_NAME = service -KEYSTONE_SERVICE_NAME = keystone -KEYSTONE_SERVICE_TYPE = identity -PATCHING_USER_NAME = patching -PATCHING_PASSWORD = password2WO* -SYSINV_USER_NAME = sysinv -SYSINV_PASSWORD = password2WO* -SYSINV_SERVICE_NAME = sysinv -SYSINV_SERVICE_TYPE = platform -NFV_USER_NAME = vim -NFV_PASSWORD = password2WO* -MTCE_USER_NAME = mtce -MTCE_PASSWORD = password2WO* -FM_USER_NAME = fm -FM_PASSWORD = password2WO* -BARBICAN_USER_NAME = barbican -BARBICAN_PASSWORD = barbican2WO* -USER_DOMAIN_NAME = Default -PROJECT_DOMAIN_NAME = Default -KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0 - -[cAUTHENTICATION] -ADMIN_PASSWORD = Li69nux* - diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.nuage_vrs b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.nuage_vrs deleted file mode 100755 index a3b262f150..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.nuage_vrs +++ /dev/null @@ -1,81 +0,0 @@ -[SYSTEM] -SYSTEM_MODE = duplex - -[STORAGE] - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - 
-[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[CLM_NETWORK] -;CLM_VLAN=123 -CLM_IP_START_ADDRESS=192.168.204.102 -CLM_IP_END_ADDRESS=192.168.204.199 -CLM_CIDR=192.168.204.0/24 -CLM_MULTICAST_CIDR=239.1.1.0/28 -;CLM_GATEWAY=192.168.204.12 -CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[CAN_NETWORK] -;CAN_VLAN= -CAN_IP_START_ADDRESS=10.10.10.2 -CAN_IP_END_ADDRESS=10.10.10.4 -CAN_CIDR=10.10.10.0/24 -CAN_GATEWAY=10.10.10.1 -CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[REGION2_PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -[NETWORK] -VSWITCH_TYPE=nuage_vrs -METADATA_PROXY_SHARED_SECRET=NuageNetworksSharedSecret - -[SHARED_SERVICES] -REGION_NAME=RegionOne -ADMIN_PROJECT_NAME=admin -ADMIN_USER_NAME=admin -ADMIN_PASSWORD=Li69nux* -KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -SERVICE_PROJECT_NAME=service - -[REGION_2_SERVICES] -REGION_NAME=RegionTwo -SYSINV_USER_NAME=sysinv -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -PATCHING_USER_NAME=patching -PATCHING_PASSWORD=password2WO* -PATCHING_SERVICE_NAME=patching -PATCHING_SERVICE_TYPE=patching -NFV_USER_NAME=vim -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtce -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fm -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.nuage_vrs.result b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.nuage_vrs.result deleted file mode 100755 index 4d502b1701..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.nuage_vrs.result +++ /dev/null @@ -1,73 +0,0 @@ -[cSYSTEM] -TIMEZONE = UTC -SYSTEM_MODE = duplex - -[cPXEBOOT] -PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller - -[cMGMT] -MANAGEMENT_MTU = 1500 -MANAGEMENT_SUBNET = 192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE = no -MANAGEMENT_INTERFACE = eth1 -MANAGEMENT_INTERFACE_NAME = eth1 -CONTROLLER_FLOATING_ADDRESS = 192.168.204.102 -CONTROLLER_0_ADDRESS = 192.168.204.103 -CONTROLLER_1_ADDRESS = 192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME = controller -CONTROLLER_HOSTNAME_PREFIX = controller- -OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller -DYNAMIC_ADDRESS_ALLOCATION = no -MANAGEMENT_START_ADDRESS = 192.168.204.102 -MANAGEMENT_END_ADDRESS = 192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28 - -[cEXT_OAM] -EXTERNAL_OAM_MTU = 1500 -EXTERNAL_OAM_SUBNET = 10.10.10.0/24 -LAG_EXTERNAL_OAM_INTERFACE = no -EXTERNAL_OAM_INTERFACE = eth0 -EXTERNAL_OAM_INTERFACE_NAME = eth0 -EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2 -EXTERNAL_OAM_0_ADDRESS = 10.10.10.3 -EXTERNAL_OAM_1_ADDRESS = 10.10.10.4 - -[cREGION] -REGION_CONFIG = True -REGION_1_NAME = RegionOne -REGION_2_NAME = RegionTwo -ADMIN_USER_NAME = admin -ADMIN_USER_DOMAIN = Default -ADMIN_PROJECT_NAME = admin -ADMIN_PROJECT_DOMAIN = Default -SERVICE_PROJECT_NAME = service -KEYSTONE_SERVICE_NAME = keystone -KEYSTONE_SERVICE_TYPE = identity -PATCHING_USER_NAME = patching -PATCHING_PASSWORD = password2WO* -SYSINV_USER_NAME = sysinv -SYSINV_PASSWORD = password2WO* -SYSINV_SERVICE_NAME = sysinv 
-SYSINV_SERVICE_TYPE = platform -NFV_USER_NAME = vim -NFV_PASSWORD = password2WO* -MTCE_USER_NAME = mtce -MTCE_PASSWORD = password2WO* -FM_USER_NAME = fm -FM_PASSWORD = password2WO* -BARBICAN_USER_NAME = barbican -BARBICAN_PASSWORD = barbican2WO* -USER_DOMAIN_NAME = Default -PROJECT_DOMAIN_NAME = Default -KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0 - -[cAUTHENTICATION] -ADMIN_PASSWORD = Li69nux* - diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.security b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.security deleted file mode 100755 index 00779938e8..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.security +++ /dev/null @@ -1,77 +0,0 @@ -[SYSTEM] -SYSTEM_MODE = duplex - -[STORAGE] - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[CLM_NETWORK] -;CLM_VLAN=123 -CLM_IP_START_ADDRESS=192.168.204.102 -CLM_IP_END_ADDRESS=192.168.204.199 -CLM_CIDR=192.168.204.0/24 -CLM_MULTICAST_CIDR=239.1.1.0/28 -;CLM_GATEWAY=192.168.204.12 -CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[CAN_NETWORK] -;CAN_VLAN= -CAN_IP_START_ADDRESS=10.10.10.2 -CAN_IP_END_ADDRESS=10.10.10.4 -CAN_CIDR=10.10.10.0/24 -CAN_GATEWAY=10.10.10.1 -CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[REGION2_PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -[SHARED_SERVICES] -REGION_NAME=RegionOne -ADMIN_PROJECT_NAME=admin -ADMIN_USER_NAME=admin -ADMIN_PASSWORD=Li69nux* -KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -SERVICE_PROJECT_NAME=service - -[REGION_2_SERVICES] -REGION_NAME=RegionTwo -SYSINV_USER_NAME=sysinv -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -PATCHING_USER_NAME=patching -PATCHING_PASSWORD=password2WO* -PATCHING_SERVICE_NAME=patching -PATCHING_SERVICE_TYPE=patching -NFV_USER_NAME=vim -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtce -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fm -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.security.result b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.security.result deleted file mode 100755 index 4d502b1701..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.security.result +++ /dev/null @@ -1,73 +0,0 @@ -[cSYSTEM] -TIMEZONE = UTC -SYSTEM_MODE = duplex - -[cPXEBOOT] -PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller - -[cMGMT] -MANAGEMENT_MTU = 1500 -MANAGEMENT_SUBNET = 192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE = no -MANAGEMENT_INTERFACE = eth1 -MANAGEMENT_INTERFACE_NAME = eth1 -CONTROLLER_FLOATING_ADDRESS = 192.168.204.102 -CONTROLLER_0_ADDRESS = 192.168.204.103 
-CONTROLLER_1_ADDRESS = 192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME = controller -CONTROLLER_HOSTNAME_PREFIX = controller- -OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller -DYNAMIC_ADDRESS_ALLOCATION = no -MANAGEMENT_START_ADDRESS = 192.168.204.102 -MANAGEMENT_END_ADDRESS = 192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28 - -[cEXT_OAM] -EXTERNAL_OAM_MTU = 1500 -EXTERNAL_OAM_SUBNET = 10.10.10.0/24 -LAG_EXTERNAL_OAM_INTERFACE = no -EXTERNAL_OAM_INTERFACE = eth0 -EXTERNAL_OAM_INTERFACE_NAME = eth0 -EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2 -EXTERNAL_OAM_0_ADDRESS = 10.10.10.3 -EXTERNAL_OAM_1_ADDRESS = 10.10.10.4 - -[cREGION] -REGION_CONFIG = True -REGION_1_NAME = RegionOne -REGION_2_NAME = RegionTwo -ADMIN_USER_NAME = admin -ADMIN_USER_DOMAIN = Default -ADMIN_PROJECT_NAME = admin -ADMIN_PROJECT_DOMAIN = Default -SERVICE_PROJECT_NAME = service -KEYSTONE_SERVICE_NAME = keystone -KEYSTONE_SERVICE_TYPE = identity -PATCHING_USER_NAME = patching -PATCHING_PASSWORD = password2WO* -SYSINV_USER_NAME = sysinv -SYSINV_PASSWORD = password2WO* -SYSINV_SERVICE_NAME = sysinv -SYSINV_SERVICE_TYPE = platform -NFV_USER_NAME = vim -NFV_PASSWORD = password2WO* -MTCE_USER_NAME = mtce -MTCE_PASSWORD = password2WO* -FM_USER_NAME = fm -FM_PASSWORD = password2WO* -BARBICAN_USER_NAME = barbican -BARBICAN_PASSWORD = barbican2WO* -USER_DOMAIN_NAME = Default -PROJECT_DOMAIN_NAME = Default -KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0 - -[cAUTHENTICATION] -ADMIN_PASSWORD = Li69nux* - diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple deleted file mode 100755 index 00779938e8..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple +++ /dev/null @@ -1,77 +0,0 @@ -[SYSTEM] -SYSTEM_MODE = duplex - -[STORAGE] - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[CLM_NETWORK] -;CLM_VLAN=123 -CLM_IP_START_ADDRESS=192.168.204.102 -CLM_IP_END_ADDRESS=192.168.204.199 -CLM_CIDR=192.168.204.0/24 -CLM_MULTICAST_CIDR=239.1.1.0/28 -;CLM_GATEWAY=192.168.204.12 -CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[CAN_NETWORK] -;CAN_VLAN= -CAN_IP_START_ADDRESS=10.10.10.2 -CAN_IP_END_ADDRESS=10.10.10.4 -CAN_CIDR=10.10.10.0/24 -CAN_GATEWAY=10.10.10.1 -CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[REGION2_PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -[SHARED_SERVICES] -REGION_NAME=RegionOne -ADMIN_PROJECT_NAME=admin -ADMIN_USER_NAME=admin -ADMIN_PASSWORD=Li69nux* -KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -SERVICE_PROJECT_NAME=service - -[REGION_2_SERVICES] -REGION_NAME=RegionTwo 
-SYSINV_USER_NAME=sysinv -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -PATCHING_USER_NAME=patching -PATCHING_PASSWORD=password2WO* -PATCHING_SERVICE_NAME=patching -PATCHING_SERVICE_TYPE=patching -NFV_USER_NAME=vim -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtce -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fm -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple.can_ips b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple.can_ips deleted file mode 100755 index f18359bb5d..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple.can_ips +++ /dev/null @@ -1,78 +0,0 @@ -[SYSTEM] -SYSTEM_MODE = duplex - -[STORAGE] - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[CLM_NETWORK] -;CLM_VLAN=123 -CLM_IP_START_ADDRESS=192.168.204.102 -CLM_IP_END_ADDRESS=192.168.204.199 -CLM_CIDR=192.168.204.0/24 -CLM_MULTICAST_CIDR=239.1.1.0/28 -;CLM_GATEWAY=192.168.204.12 -CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[CAN_NETWORK] -;CAN_VLAN= -CAN_IP_FLOATING_ADDRESS=10.10.10.2 -CAN_IP_UNIT_0_ADDRESS=10.10.10.3 -CAN_IP_UNIT_1_ADDRESS=10.10.10.4 -CAN_CIDR=10.10.10.0/24 -CAN_GATEWAY=10.10.10.1 -CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[REGION2_PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -[SHARED_SERVICES] -REGION_NAME=RegionOne -ADMIN_PROJECT_NAME=admin -ADMIN_USER_NAME=admin -ADMIN_PASSWORD=Li69nux* -KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -SERVICE_PROJECT_NAME=service - -[REGION_2_SERVICES] -REGION_NAME=RegionTwo -SYSINV_USER_NAME=sysinv -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -PATCHING_USER_NAME=patching -PATCHING_PASSWORD=password2WO* -PATCHING_SERVICE_NAME=patching -PATCHING_SERVICE_TYPE=patching -NFV_USER_NAME=vim -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtce -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fm -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple.result b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple.result deleted file mode 100755 index 4d502b1701..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple.result +++ /dev/null @@ -1,73 +0,0 @@ -[cSYSTEM] -TIMEZONE = UTC -SYSTEM_MODE = duplex - -[cPXEBOOT] -PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller - -[cMGMT] -MANAGEMENT_MTU = 1500 -MANAGEMENT_SUBNET = 192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE = no -MANAGEMENT_INTERFACE = eth1 -MANAGEMENT_INTERFACE_NAME = eth1 -CONTROLLER_FLOATING_ADDRESS = 192.168.204.102 -CONTROLLER_0_ADDRESS = 192.168.204.103 -CONTROLLER_1_ADDRESS = 192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME = controller 
-CONTROLLER_HOSTNAME_PREFIX = controller- -OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller -DYNAMIC_ADDRESS_ALLOCATION = no -MANAGEMENT_START_ADDRESS = 192.168.204.102 -MANAGEMENT_END_ADDRESS = 192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28 - -[cEXT_OAM] -EXTERNAL_OAM_MTU = 1500 -EXTERNAL_OAM_SUBNET = 10.10.10.0/24 -LAG_EXTERNAL_OAM_INTERFACE = no -EXTERNAL_OAM_INTERFACE = eth0 -EXTERNAL_OAM_INTERFACE_NAME = eth0 -EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2 -EXTERNAL_OAM_0_ADDRESS = 10.10.10.3 -EXTERNAL_OAM_1_ADDRESS = 10.10.10.4 - -[cREGION] -REGION_CONFIG = True -REGION_1_NAME = RegionOne -REGION_2_NAME = RegionTwo -ADMIN_USER_NAME = admin -ADMIN_USER_DOMAIN = Default -ADMIN_PROJECT_NAME = admin -ADMIN_PROJECT_DOMAIN = Default -SERVICE_PROJECT_NAME = service -KEYSTONE_SERVICE_NAME = keystone -KEYSTONE_SERVICE_TYPE = identity -PATCHING_USER_NAME = patching -PATCHING_PASSWORD = password2WO* -SYSINV_USER_NAME = sysinv -SYSINV_PASSWORD = password2WO* -SYSINV_SERVICE_NAME = sysinv -SYSINV_SERVICE_TYPE = platform -NFV_USER_NAME = vim -NFV_PASSWORD = password2WO* -MTCE_USER_NAME = mtce -MTCE_PASSWORD = password2WO* -FM_USER_NAME = fm -FM_PASSWORD = password2WO* -BARBICAN_USER_NAME = barbican -BARBICAN_PASSWORD = barbican2WO* -USER_DOMAIN_NAME = Default -PROJECT_DOMAIN_NAME = Default -KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0 - -[cAUTHENTICATION] -ADMIN_PASSWORD = Li69nux* - diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.ceph b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.ceph deleted file mode 100755 index c82f87dfe0..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.ceph +++ /dev/null @@ -1,55 +0,0 @@ -[SYSTEM] -SYSTEM_MODE = duplex - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[MGMT_NETWORK] -;VLAN=123 -IP_START_ADDRESS=192.168.204.2 -IP_END_ADDRESS=192.168.204.99 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -DYNAMIC_ALLOCATION=Y -;GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -;VLAN= -IP_START_ADDRESS=10.10.10.2 -IP_END_ADDRESS=10.10.10.4 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -;[BOARD_MANAGEMENT_NETWORK] -;VLAN=1 -;MTU=1496 -;SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.ipv6 b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.ipv6 deleted file mode 100755 index a53219de18..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.ipv6 +++ /dev/null @@ -1,53 +0,0 @@ -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One 
of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[MGMT_NETWORK] -VLAN=123 -CIDR=1234::/64 -MULTICAST_CIDR=ff08::1:1:0/124 -DYNAMIC_ALLOCATION=Y -;GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -;VLAN= -;IP_START_ADDRESS=abcd::2 -;IP_END_ADDRESS=abcd::4 -IP_FLOATING_ADDRESS=abcd::2 -IP_UNIT_0_ADDRESS=abcd::3 -IP_UNIT_1_ADDRESS=abcd::4 -CIDR=abcd::/64 -GATEWAY=abcd::1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -[PXEBOOT_NETWORK] -PXEBOOT_CIDR=192.168.203.0/24 - -;[BOARD_MANAGEMENT_NETWORK] -;VLAN=1 -;MTU=1496 -;SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.kubernetes b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.kubernetes deleted file mode 100755 index 10e8d54691..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.kubernetes +++ /dev/null @@ -1,70 +0,0 @@ -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -INTERFACE_MTU=1500 -INTERFACE_LINK_CAPACITY=1000 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -;INTERFACE_LINK_CAPACITY= -INTERFACE_PORTS=eth0 - -[MGMT_NETWORK] -;VLAN=123 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -DYNAMIC_ALLOCATION=Y -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[CLUSTER_NETWORK] -CIDR=192.168.206.0/24 -DYNAMIC_ALLOCATION=Y -IP_START_ADDRESS=192.168.206.2 -IP_END_ADDRESS=192.168.206.245 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -;VLAN= -;IP_START_ADDRESS=10.10.10.2 -;IP_END_ADDRESS=10.10.10.4 -IP_FLOATING_ADDRESS=10.10.10.20 -IP_UNIT_0_ADDRESS=10.10.10.30 -IP_UNIT_1_ADDRESS=10.10.10.40 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -[DNS] -# DNS Configuration -NAMESERVER_1=1.2.3.4 -NAMESERVER_2=5.6.7.8 - -[DOCKER_PROXY] -# Docker Proxy Configuration -DOCKER_HTTP_PROXY=http://proxy.com:123 -DOCKER_HTTPS_PROXY=https://proxy.com:123 -DOCKER_NO_PROXY=localhost,127.0.0.1,192.168.204.2 - -[DOCKER_REGISTRY] -# Docker Registry Configuration -DOCKER_K8S_REGISTRY=my.registry.com:5000 -DOCKER_GCR_REGISTRY=my.registry.com -DOCKER_QUAY_REGISTRY=1.2.3.4:5000 -DOCKER_DOCKER_REGISTRY=[1:2:3:4:a:b:c:d]:5000 -IS_SECURE_REGISTRY=False - -;[PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -;[BOARD_MANAGEMENT_NETWORK] -;VLAN=1 -;MTU=1496 -;SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.lag.vlan b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.lag.vlan deleted file mode 100755 index 8bcd0b7e70..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.lag.vlan +++ /dev/null @@ -1,55 +0,0 @@ -[SYSTEM] -SYSTEM_MODE=duplex - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=Y -LAG_MODE=4 -INTERFACE_MTU=1500 
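The comment block repeated at the top of these fixtures encodes the bonding rules that the removed validation tests exercise: LAG_MODE 1 (active-backup), 2 (balanced XOR), and 4 (802.3ad/LACP) are recognized, and an interface used for pxebooting may only use LACP (the tests expect LAG_MODE '2' on the management interface to be rejected). A minimal sketch of that rule over a parsed fixture; `check_lag_mode` and the plain ValueError are illustrative stand-ins, not the project's validator:

    from six.moves import configparser  # same import style as the deleted tests

    VALID_LAG_MODES = {'1', '2', '4'}   # active-backup, balanced XOR, 802.3ad
    PXEBOOT_LAG_MODES = {'4'}           # pxebooting over a bond requires LACP


    def check_lag_mode(config, section, pxeboot=False):
        """Check the LAG settings of one LOGICAL_INTERFACE_<n> section."""
        if config.get(section, 'LAG_INTERFACE').upper() != 'Y':
            return                      # not bonded; LAG_MODE is irrelevant
        mode = config.get(section, 'LAG_MODE')  # NoOptionError if missing
        allowed = PXEBOOT_LAG_MODES if pxeboot else VALID_LAG_MODES
        if mode not in allowed:
            raise ValueError('%s: LAG_MODE=%s is not permitted'
                             % (section, mode))

Usage against a fixture like the one being deleted here:

    config = configparser.RawConfigParser()
    config.read('system_config.lag.vlan')
    check_lag_mode(config, 'LOGICAL_INTERFACE_1', pxeboot=True)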
-INTERFACE_PORTS=eth1,eth2 - -[MGMT_NETWORK] -VLAN=123 -IP_START_ADDRESS=192.168.204.102 -IP_END_ADDRESS=192.168.204.199 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[CLUSTER_NETWORK] -VLAN=126 -IP_START_ADDRESS=192.168.206.102 -IP_END_ADDRESS=192.168.206.199 -CIDR=192.168.206.0/24 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -VLAN=125 -IP_START_ADDRESS=10.10.10.2 -IP_END_ADDRESS=10.10.10.4 -CIDR=10.10.10.0/24 -;GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[PXEBOOT_NETWORK] -PXEBOOT_CIDR=192.168.203.0/24 - -;[BOARD_MANAGEMENT_NETWORK] -;VLAN=1 -;MTU=1496 -;SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.pxeboot b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.pxeboot deleted file mode 100755 index b5d7e708e2..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.pxeboot +++ /dev/null @@ -1,49 +0,0 @@ -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -INTERFACE_MTU=1500 -INTERFACE_LINK_CAPACITY=1000 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -;INTERFACE_LINK_CAPACITY= -INTERFACE_PORTS=eth0 - -[PXEBOOT_NETWORK] -PXEBOOT_CIDR=192.168.102.0/24 -IP_START_ADDRESS=192.168.102.32 -IP_END_ADDRESS=192.168.102.54 - -[MGMT_NETWORK] -VLAN=123 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -DYNAMIC_ALLOCATION=Y -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -;VLAN= -;IP_START_ADDRESS=10.10.10.2 -;IP_END_ADDRESS=10.10.10.4 -IP_FLOATING_ADDRESS=10.10.10.20 -IP_UNIT_0_ADDRESS=10.10.10.30 -IP_UNIT_1_ADDRESS=10.10.10.40 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -;[BOARD_MANAGEMENT_NETWORK] -;VLAN=1 -;MTU=1496 -;SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.security b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.security deleted file mode 100755 index 2aded723c4..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.security +++ /dev/null @@ -1,51 +0,0 @@ -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[MGMT_NETWORK] -;VLAN=123 -IP_START_ADDRESS=192.168.204.102 -IP_END_ADDRESS=192.168.204.199 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -;GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -;VLAN= -IP_START_ADDRESS=10.10.10.2 -IP_END_ADDRESS=10.10.10.4 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -[BOARD_MANAGEMENT_NETWORK] -VLAN=1 -MTU=1496 -SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simple 
b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simple deleted file mode 100755 index 3c69db1f06..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simple +++ /dev/null @@ -1,63 +0,0 @@ -;[DNS] -;NAMESERVER_1=8.8.8.8 -;NAMESERVER_2=8.8.4.4 -;NAMESERVER_3= - -;[NTP] -;NTP_SERVER_1=0.pool.ntp.org -;NTP_SERVER_2=1.pool.ntp.org -;NTP_SERVER_3=2.pool.ntp.org - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[MGMT_NETWORK] -;VLAN=123 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -DYNAMIC_ALLOCATION=Y -;GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -;VLAN= -;IP_START_ADDRESS=10.10.10.2 -;IP_END_ADDRESS=10.10.10.4 -IP_FLOATING_ADDRESS=10.10.10.20 -IP_UNIT_0_ADDRESS=10.10.10.30 -IP_UNIT_1_ADDRESS=10.10.10.40 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -;[BOARD_MANAGEMENT_NETWORK] -;VLAN=1 -;MTU=1496 -;SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simplex b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simplex deleted file mode 100644 index 050e007c14..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simplex +++ /dev/null @@ -1,46 +0,0 @@ -;[DNS] -;NAMESERVER_1=8.8.8.8 -;NAMESERVER_2=8.8.4.4 -;NAMESERVER_3= - -;[NTP] -;NTP_SERVER_1=0.pool.ntp.org -;NTP_SERVER_2=1.pool.ntp.org -;NTP_SERVER_3=2.pool.ntp.org - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[OAM_NETWORK] -IP_ADDRESS=10.10.10.20 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION - -[SYSTEM] -SYSTEM_TYPE=All-in-one -SYSTEM_MODE=simplex diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simplex_mgmt b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simplex_mgmt deleted file mode 100644 index c555f037fa..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simplex_mgmt +++ /dev/null @@ -1,24 +0,0 @@ -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[MGMT_NETWORK] -CIDR=192.168.42.0/28 - -[OAM_NETWORK] -IP_ADDRESS=10.10.10.20 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION - -[SYSTEM] -SYSTEM_TYPE=All-in-one -SYSTEM_MODE=simplex diff --git 
a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.static_addr b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.static_addr deleted file mode 100755 index d368cd446e..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.static_addr +++ /dev/null @@ -1,52 +0,0 @@ -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[MGMT_NETWORK] -;VLAN=123 -IP_START_ADDRESS=192.168.204.20 -IP_END_ADDRESS=192.168.204.99 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -DYNAMIC_ALLOCATION=N -;GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -;VLAN= -IP_START_ADDRESS=10.10.10.2 -IP_END_ADDRESS=10.10.10.4 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -;[BOARD_MANAGEMENT_NETWORK] -;VLAN=1 -;MTU=1496 -;SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/test_answerfile.py b/controllerconfig/controllerconfig/controllerconfig/tests/test_answerfile.py deleted file mode 100644 index d87c735caa..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/test_answerfile.py +++ /dev/null @@ -1,103 +0,0 @@ -""" -Copyright (c) 2014 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -import difflib -import filecmp -import os -from mock import patch - -import controllerconfig.configassistant as ca -import controllerconfig.common.constants as constants - - -@patch('controllerconfig.configassistant.get_rootfs_node') -@patch('controllerconfig.configassistant.get_net_device_list') -def _test_answerfile(tmpdir, filename, - mock_get_net_device_list, - mock_get_rootfs_node, - compare_results=True, - ca_options={}): - """ Test import and generation of answerfile """ - mock_get_net_device_list.return_value = \ - ['eth0', 'eth1', 'eth2'] - mock_get_rootfs_node.return_value = '/dev/sda' - - assistant = ca.ConfigAssistant(**ca_options) - - # Create the path to the answerfile - answerfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", filename) - - # Input the config from the answerfile - assistant.input_config_from_file(answerfile) - - # Test the display method - print("Output from display_config:") - assistant.display_config() - - # Ensure we can write the configuration - constants.CONFIG_WORKDIR = os.path.join(str(tmpdir), 'config_workdir') - constants.CGCS_CONFIG_FILE = os.path.join(constants.CONFIG_WORKDIR, - 'cgcs_config') - assistant.write_config_file() - - # Add the password to the generated file so it can be compared with the - # answerfile - with open(constants.CGCS_CONFIG_FILE, 'a') as f: - f.write("\n[cAUTHENTICATION]\nADMIN_PASSWORD=Li69nux*\n") - - # Do a diff between the answerfile and the generated config file - print("\n\nDiff of answerfile vs. 
generated config file:\n") - with open(answerfile) as a, open(constants.CGCS_CONFIG_FILE) as b: - a_lines = a.readlines() - b_lines = b.readlines() - - differ = difflib.Differ() - diff = differ.compare(a_lines, b_lines) - print(''.join(diff)) - - if compare_results: - # Fail the testcase if the answerfile and generated config file don't - # match. - assert filecmp.cmp(answerfile, constants.CGCS_CONFIG_FILE) - - -def test_answerfile_default(tmpdir): - """ Test import of answerfile with default values """ - - _test_answerfile(tmpdir, "cgcs_config.default") - - -def test_answerfile_ipv6(tmpdir): - """ Test import of answerfile with ipv6 oam values """ - - _test_answerfile(tmpdir, "cgcs_config.ipv6") - - -def test_answerfile_ceph(tmpdir): - """ Test import of answerfile with ceph backend values """ - - _test_answerfile(tmpdir, "cgcs_config.ceph") - - -def test_answerfile_region(tmpdir): - """ Test import of answerfile with region values """ - - _test_answerfile(tmpdir, "cgcs_config.region") - - -def test_answerfile_region_nuage_vrs(tmpdir): - """ Test import of answerfile with region values for nuage_vrs""" - - _test_answerfile(tmpdir, "cgcs_config.region_nuage_vrs") - - -def test_answerfile_kubernetes(tmpdir): - """ Test import of answerfile with kubernetes values """ - - _test_answerfile(tmpdir, "cgcs_config.kubernetes", - ca_options={"kubernetes": True}) diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/test_region_config.py b/controllerconfig/controllerconfig/controllerconfig/tests/test_region_config.py deleted file mode 100644 index 0808b3b439..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/test_region_config.py +++ /dev/null @@ -1,759 +0,0 @@ -""" -Copyright (c) 2014-2019 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -from __future__ import print_function -from six.moves import configparser -import difflib -import filecmp -import fileinput -import mock -from mock import patch -import os -import pytest -import shutil -import sys - -import controllerconfig.common.exceptions as exceptions -from controllerconfig import REGION_CONFIG -from controllerconfig import validate -import controllerconfig.common.keystone as keystone -from controllerconfig.tests import test_answerfile - -sys.modules['fm_core'] = mock.Mock() - -import controllerconfig.systemconfig as cr # noqa: E402 - -FAKE_SERVICE_DATA = {u'services': [ - {u'type': u'keystore', u'description': u'Barbican Key Management Service', - u'enabled': True, u'id': u'9029af23540f4eecb0b7f70ac5e00152', - u'name': u'barbican'}, - {u'type': u'network', u'description': u'OpenStack Networking service', - u'enabled': True, u'id': u'85a8a3342a644df193af4b68d5b65ce5', - u'name': u'neutron'}, {u'type': u'cloudformation', - u'description': - u'OpenStack Cloudformation Service', - u'enabled': True, - u'id': u'abbf431acb6d45919cfbefe55a0f27fa', - u'name': u'heat-cfn'}, - {u'type': u'object-store', u'description': u'OpenStack object-store', - u'enabled': True, u'id': u'd588956f759f4bbda9e65a1019902b9c', - u'name': u'swift'}, - {u'type': u'volumev2', - u'description': u'OpenStack Volume Service v2.0 API', - u'enabled': True, u'id': u'e6e356112daa4af588d9b9dadcf98bc4', - u'name': u'cinderv2'}, - {u'type': u'volume', u'description': u'OpenStack Volume Service', - u'enabled': True, u'id': u'505aa37457774e55b545654aa8630822', - u'name': u'cinder'}, {u'type': u'orchestration', - u'description': u'OpenStack Orchestration Service', - u'enabled': True, - u'id': 
u'5765bee52eec43bb8e0632ecb225d0e3', - u'name': u'heat'}, - {u'type': u'compute', u'description': u'OpenStack Compute Service', - u'enabled': True, u'id': u'9c46a6ea929f4c52bc92dd9bb9f852ac', - u'name': u'nova'}, - {u'type': u'identity', u'description': u'OpenStack Identity', - u'enabled': True, u'id': u'1fe7b1de187b47228fe853fbbd149664', - u'name': u'keystone'}, - {u'type': u'image', u'description': u'OpenStack Image Service', - u'enabled': True, u'id': u'd41750c98a864fdfb25c751b4ad84996', - u'name': u'glance'}, - {u'type': u'database', u'description': u'Trove Database As A Service', - u'enabled': True, u'id': u'82265e39a77b4097bd8aee4f78e13867', - u'name': u'trove'}, - {u'type': u'patching', u'description': u'Patching Service', - u'enabled': True, u'id': u'8515c4f28f9346199eb8704bca4f5db4', - u'name': u'patching'}, - {u'type': u'platform', u'description': u'SysInv Service', u'enabled': True, - u'id': u'08758bed8d894ddaae744a97db1080b3', u'name': u'sysinv'}, - {u'type': u'computev3', u'description': u'Openstack Compute Service v3', - u'enabled': True, u'id': u'959f2214543a47549ffd8c66f98d27d4', - u'name': u'novav3'}]} - -FAKE_ENDPOINT_DATA = {u'endpoints': [ - {u'url': u'http://192.168.204.12:8776/v1/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'505aa37457774e55b545654aa8630822', - u'id': u'de19beb4a4924aa1ba25af3ee64e80a0', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8776/v1/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'505aa37457774e55b545654aa8630822', - u'id': u'de19beb4a4924aa1ba25af3ee64e80a1', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8776/v1/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'505aa37457774e55b545654aa8630822', - u'id': u'de19beb4a4924aa1ba25af3ee64e80a2', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:8774/v2/%(tenant_id)s', - u'region': u'RegionTwo', u'enabled': True, - u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac', - u'id': u'373259a6bbcf493b86c9f9530e86d323', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:8774/v2/%(tenant_id)s', - u'region': u'RegionTwo', u'enabled': True, - u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac', - u'id': u'373259a6bbcf493b86c9f9530e86d324', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8774/v2/%(tenant_id)s', - u'region': u'RegionTwo', u'enabled': True, - u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac', - u'id': u'373259a6bbcf493b86c9f9530e86d324', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:8004/v1/%(tenant_id)s', - u'region': u'RegionTwo', u'enabled': True, - u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3', - u'id': u'c51dc9354b5a41c9883ec3871b9fd271', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:8004/v1/%(tenant_id)s', - u'region': u'RegionTwo', u'enabled': True, - u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3', - u'id': u'c51dc9354b5a41c9883ec3871b9fd272', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8004/v1/%(tenant_id)s', - u'region': u'RegionTwo', u'enabled': True, - u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3', - u'id': u'c51dc9354b5a41c9883ec3871b9fd273', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:8000/v1', u'region': u'RegionOne', - u'enabled': True, u'interface': u'admin', - u'id': u'e132bb9dd0fe459687c3b04074bcb1ac', - u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'}, - {u'url': u'http://192.168.204.12:8000/v1', u'region': u'RegionOne', - 
u'enabled': True, u'interface': u'internal', - u'id': u'e132bb9dd0fe459687c3b04074bcb1ad', - u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'}, - {u'url': u'http://10.10.10.2:8000/v1', u'region': u'RegionOne', - u'enabled': True, u'interface': u'public', - u'id': u'e132bb9dd0fe459687c3b04074bcb1ae', - u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'}, - - {u'url': u'http://192.168.204.102:8774/v3', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'959f2214543a47549ffd8c66f98d27d4', - u'id': u'031bfbfd581f4a42b361f93fdc4fe266', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:8774/v3', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'959f2214543a47549ffd8c66f98d27d4', - u'id': u'031bfbfd581f4a42b361f93fdc4fe267', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8774/v3', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'959f2214543a47549ffd8c66f98d27d4', - u'id': u'031bfbfd581f4a42b361f93fdc4fe268', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:8081/keystone/admin/v2.0', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'1fe7b1de187b47228fe853fbbd149664', - u'id': u'6fa36df1cc4f4e97a1c12767c8a1159f', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8081/keystone/main/v2.0', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'1fe7b1de187b47228fe853fbbd149664', - u'id': u'6fa36df1cc4f4e97a1c12767c8a11510', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8081/keystone/main/v2.0', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'1fe7b1de187b47228fe853fbbd149664', - u'id': u'6fa36df1cc4f4e97a1c12767c8a11512', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:9696/', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'85a8a3342a644df193af4b68d5b65ce5', - u'id': u'74a7a918dd854b66bb33f1e4e0e768bc', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:9696/', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'85a8a3342a644df193af4b68d5b65ce5', - u'id': u'74a7a918dd854b66bb33f1e4e0e768bd', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:9696/', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'85a8a3342a644df193af4b68d5b65ce5', - u'id': u'74a7a918dd854b66bb33f1e4e0e768be', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:6385/v1', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'08758bed8d894ddaae744a97db1080b3', - u'id': u'd8ae3a69f08046d1a8f031bbd65381a3', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:6385/v1', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'08758bed8d894ddaae744a97db1080b3', - u'id': u'd8ae3a69f08046d1a8f031bbd65381a4', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:6385/v1', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'08758bed8d894ddaae744a97db1080b5', - u'id': u'd8ae3a69f08046d1a8f031bbd65381a3', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:8004/v1/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3', - u'id': u'61ad227efa3b4cdd867618041a7064dc', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8004/v1/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3', - u'id': u'61ad227efa3b4cdd867618041a7064dd', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8004/v1/$(tenant_id)s', - u'region': 
u'RegionOne', u'enabled': True, - u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3', - u'id': u'61ad227efa3b4cdd867618041a7064de', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:8888/v1', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'd588956f759f4bbda9e65a1019902b9c', - u'id': u'be557ddb742e46328159749a21e6e286', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8888/v1/AUTH_$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'd588956f759f4bbda9e65a1019902b9c', - u'id': u'be557ddb742e46328159749a21e6e287', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.12:8888/v1/AUTH_$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'd588956f759f4bbda9e65a1019902b9c', - u'id': u'be557ddb742e46328159749a21e6e288', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:8777', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba', - u'id': u'050d07db8c5041288f29020079177f0b', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:8777', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba', - u'id': u'050d07db8c5041288f29020079177f0c', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8777', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba', - u'id': u'050d07db8c5041288f29020079177f0d', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:5491', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'8515c4f28f9346199eb8704bca4f5db4', - u'id': u'53af565e4d7245929df7af2ba0ff46db', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:5491', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'8515c4f28f9346199eb8704bca4f5db4', - u'id': u'53af565e4d7245929df7af2ba0ff46dc', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:5491', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'8515c4f28f9346199eb8704bca4f5db4', - u'id': u'53af565e4d7245929df7af2ba0ff46dd', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:8779/v1.0/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'82265e39a77b4097bd8aee4f78e13867', - u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8779/v1.0/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'82265e39a77b4097bd8aee4f78e13867', - u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8779/v1.0/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'82265e39a77b4097bd8aee4f78e13867', - u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:9292/v2', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'd41750c98a864fdfb25c751b4ad84996', - u'id': u'06fdb367cb63414987ee1653a016d10a', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:9292/v2', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'd41750c98a864fdfb25c751b4ad84996', - u'id': u'06fdb367cb63414987ee1653a016d10b', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:9292/v2', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'd41750c98a864fdfb25c751b4ad84996', - u'id': u'06fdb367cb63414987ee1653a016d10c', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:9292/v2', 
u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'd41750c98a864fdfb25c751b4ad84996', - u'id': u'06fdb367cb63414987ee1653a016d10a', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:9292/v2', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'd41750c98a864fdfb25c751b4ad84996', - u'id': u'06fdb367cb63414987ee1653a016d10b', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.12:9292/v2', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'd41750c98a864fdfb25c751b4ad84996', - u'id': u'06fdb367cb63414987ee1653a016d10c', - u'interface': u'public'}, - - - {u'url': u'http://192.168.204.12:8777/', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba', - u'id': u'f15d22a9526648ff8833460e2dce1431', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8777/', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba', - u'id': u'f15d22a9526648ff8833460e2dce1432', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.12:8777/', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba', - u'id': u'f15d22a9526648ff8833460e2dce1433', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:8000/v1/', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa', - u'id': u'5e6c6ffdbcd544f8838430937a0d81a7', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:8000/v1/', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa', - u'id': u'5e6c6ffdbcd544f8838430937a0d81a8', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8000/v1/', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa', - u'id': u'5e6c6ffdbcd544f8838430937a0d81a9', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:8774/v2/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac', - u'id': u'87dc648502ee49fb86a4ca87d8d6028d', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8774/v2/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac', - u'id': u'87dc648502ee49fb86a4ca87d8d6028e', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8774/v2/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac', - u'id': u'87dc648502ee49fb86a4ca87d8d6028f', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:9696/', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'85a8a3342a644df193af4b68d5b65ce5', - u'id': u'd326bf63f6f94b12924b03ff42ba63bd', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:9696/', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'85a8a3342a644df193af4b68d5b65ce5', - u'id': u'd326bf63f6f94b12924b03ff42ba63be', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.12:9696/', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'85a8a3342a644df193af4b68d5b65ce5', - u'id': u'd326bf63f6f94b12924b03ff42ba63bf', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:8776/v2/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4', - u'id': u'61b8bb77edf644f1ad4edf9b953d44c7', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8776/v2/$(tenant_id)s', - 
u'region': u'RegionOne', u'enabled': True, - u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4', - u'id': u'61b8bb77edf644f1ad4edf9b953d44c8', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.12:8776/v2/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4', - u'id': u'61b8bb77edf644f1ad4edf9b953d44c9', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:9312/v1', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'9029af23540f4eecb0b7f70ac5e00152', - u'id': u'a1aa2af22caf460eb421d75ab1ce6125', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:9312/v1', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'9029af23540f4eecb0b7f70ac5e00152', - u'id': u'a1aa2af22caf460eb421d75ab1ce6126', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.12:9312/v1', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'9029af23540f4eecb0b7f70ac5e00152', - u'id': u'a1aa2af22caf460eb421d75ab1ce6127', - u'interface': u'public'}]} - -FAKE_DOMAIN_DATA = {u'domains': [ - {u'id': u'default', u'enabled': True, - u'description': - u'Owns users and tenants (i.e. projects) available on Identity API ' - u'v2.', - u'links': { - u'self': - u'http://192.168.204.12:8081/keystone/main/v3/domains/default'}, - u'name': u'Default'}, - {u'id': u'05d847889e9a4cb9aa94f541eb6b9e2e', - u'enabled': True, - u'description': u'Contains users and projects created by heat', - u'links': { - u'self': - u'http://192.168.204.12:8081/keystone/main/v3/domains/' - u'05d847889e9a4cb9aa94f541eb6b9e2e'}, - u'name': u'heat'}], - u'links': { - u'self': u'http://192.168.204.12:8081/keystone/main/v3/domains', - u'next': None, - u'previous': None}} - - -def _dump_config(config): - """ Prints contents of config object """ - for section in config.sections(): - print("[%s]" % section) - for (name, value) in config.items(section): - print("%s=%s" % (name, value)) - - -def _replace_in_file(filename, old, new): - """ Replaces old with new in file filename. """ - for line in fileinput.FileInput(filename, inplace=1): - line = line.replace(old, new) - print(line, end='') - fileinput.close() - - -@patch('controllerconfig.configassistant.ConfigAssistant.get_sysadmin_sig') -def _test_region_config(tmpdir, inputfile, resultfile, - mock_get_sysadmin_sig): - """ Test import and generation of answerfile """ - - mock_get_sysadmin_sig.return_value = None - - # Create the path to the output file - outputfile = os.path.join(str(tmpdir), 'output') - - # Parse the region_config file - region_config = cr.parse_system_config(inputfile) - - # Dump results for debugging - print("Parsed region_config:\n") - _dump_config(region_config) - - # Validate the region config file - cr.create_cgcs_config_file(outputfile, region_config, - keystone.ServiceList(FAKE_SERVICE_DATA), - keystone.EndpointList(FAKE_ENDPOINT_DATA), - keystone.DomainList(FAKE_DOMAIN_DATA)) - - # Make a local copy of the results file - local_resultfile = os.path.join(str(tmpdir), 'result') - shutil.copyfile(resultfile, local_resultfile) - - # Do a diff between the output and the expected results - print("\n\nDiff of output file vs. 
expected results file:\n") - with open(outputfile) as a, open(local_resultfile) as b: - a_lines = a.readlines() - b_lines = b.readlines() - - differ = difflib.Differ() - diff = differ.compare(a_lines, b_lines) - print(''.join(diff)) - # Fail the testcase if the output doesn't match the expected results - assert filecmp.cmp(outputfile, local_resultfile) - - # Now test that configassistant can parse this answerfile. We can't - # compare the resulting cgcs_config file because the ordering, spacing - # and comments are different between the answerfile generated by - # systemconfig and ConfigAssistant. - test_answerfile._test_answerfile(tmpdir, outputfile, compare_results=False) - - # Validate the region config file. - # Using onboard validation since the validator's reference version number - # is only set at build-time when validating offboard - validate(region_config, REGION_CONFIG, None, False) - - -def test_region_config_simple(tmpdir): - """ Test import of simple region_config file """ - - regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.simple") - resultfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.simple.result") - - _test_region_config(tmpdir, regionfile, resultfile) - - -def test_region_config_simple_can_ips(tmpdir): - """ Test import of simple region_config file with unit ips for CAN """ - print("IN TEST ################################################") - regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.simple.can_ips") - resultfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.simple.result") - - _test_region_config(tmpdir, regionfile, resultfile) - - -def test_region_config_lag_vlan(tmpdir): - """ Test import of region_config file with lag and vlan """ - - regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.lag.vlan") - resultfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.lag.vlan.result") - - _test_region_config(tmpdir, regionfile, resultfile) - - -def test_region_config_security(tmpdir): - """ Test import of region_config file with security config """ - - regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.security") - resultfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.security.result") - _test_region_config(tmpdir, regionfile, resultfile) - - -def test_region_config_nuage_vrs(tmpdir): - """ Test import of region_config file with nuage vrs config """ - - regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.nuage_vrs") - resultfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.nuage_vrs.result") - _test_region_config(tmpdir, regionfile, resultfile) - - -def test_region_config_share_keystone_only(tmpdir): - """ Test import of Titanium Cloud region_config file with - shared keystone """ - - regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "TiS_region_config.share.keystoneonly") - resultfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "TiS_region_config.share.keystoneonly.result") - _test_region_config(tmpdir, regionfile, resultfile) - - -def test_region_config_share_keystone_glance_cinder(tmpdir): - """ Test import of Titanium Cloud region_config file with shared keystone, - glance and cinder """ - - regionfile = os.path.join( - os.getcwd(), 
"controllerconfig/tests/files/", - "TiS_region_config.shareall") - resultfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "TiS_region_config.shareall.result") - _test_region_config(tmpdir, regionfile, resultfile) - - -def test_region_config_validation(): - """ Test detection of various errors in region_config file """ - - # Create the path to the region_config files - simple_regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "region_config.simple") - lag_vlan_regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "region_config.lag.vlan") - - # Test detection of non-required CINDER_* parameters - region_config = cr.parse_system_config(simple_regionfile) - region_config.set('STORAGE', 'CINDER_BACKEND', 'lvm') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, True) - - region_config = cr.parse_system_config(simple_regionfile) - region_config.set('STORAGE', 'CINDER_DEVICE', - '/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - region_config = cr.parse_system_config(simple_regionfile) - region_config.set('STORAGE', 'CINDER_STORAGE', '10') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test detection of an invalid PXEBOOT_CIDR - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.set('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR', - 'FD00::0000/64') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - region_config.set('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR', - '192.168.1.0/29') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - region_config.remove_option('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR') - with pytest.raises(configparser.NoOptionError): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(configparser.NoOptionError): - validate(region_config, REGION_CONFIG, None, False) - - # Test overlap of CLM_CIDR - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.set('CLM_NETWORK', 'CLM_CIDR', '192.168.203.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test invalid CLM LAG_MODE - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.set('LOGICAL_INTERFACE_1', 'LAG_MODE', '2') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - 
validate(region_config, REGION_CONFIG, None, False) - - # Test CLM_VLAN not allowed - region_config = cr.parse_system_config(simple_regionfile) - region_config.set('CLM_NETWORK', 'CLM_VLAN', '123') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test CLM_VLAN missing - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.remove_option('CLM_NETWORK', 'CLM_VLAN') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test overlap of CAN_CIDR - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.set('CAN_NETWORK', 'CAN_CIDR', '192.168.203.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - region_config.set('CAN_NETWORK', 'CAN_CIDR', '192.168.204.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - region_config.set('CAN_NETWORK', 'CAN_CIDR', '192.168.205.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test invalid CAN LAG_MODE - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.add_section('LOGICAL_INTERFACE_2') - region_config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y') - region_config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3') - region_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500') - region_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4') - region_config.set('CAN_NETWORK', 'CAN_LOGICAL_INTERFACE', - 'LOGICAL_INTERFACE_2') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test CAN_VLAN overlap - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.set('CAN_NETWORK', 'CAN_VLAN', '123') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test CAN_VLAN missing - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.remove_option('CAN_NETWORK', 'CAN_VLAN') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test missing gateway - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.remove_option('CLM_NETWORK', 'CLM_GATEWAY') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, 
None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test two gateways - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.set('CAN_NETWORK', 'CAN_GATEWAY', '10.10.10.1') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/test_system_config.py b/controllerconfig/controllerconfig/controllerconfig/tests/test_system_config.py deleted file mode 100644 index 9976d460fa..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/test_system_config.py +++ /dev/null @@ -1,601 +0,0 @@ -""" -Copyright (c) 2014-2019 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -from six.moves import configparser -import mock -import os -import pytest -import sys - -import controllerconfig.common.exceptions as exceptions -from controllerconfig import validate -from controllerconfig import DEFAULT_CONFIG - -sys.modules['fm_core'] = mock.Mock() - -import controllerconfig.systemconfig as cr # noqa: E402 - - -def _dump_config(config): - """ Prints contents of config object """ - for section in config.sections(): - print("[%s]" % section) - for (name, value) in config.items(section): - print("%s=%s" % (name, value)) - - -def _test_system_config(filename): - """ Test import and generation of answerfile """ - - # Parse the system_config file - system_config = cr.parse_system_config(filename) - - # Dump results for debugging - print("Parsed system_config:\n") - _dump_config(system_config) - - # Validate the system config file - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - - # Validate the system config file. 
- # Using onboard validation since the validator's reference version number - # is only set at build-time when validating offboard - validate(system_config, DEFAULT_CONFIG, None, False) - - -def test_system_config_simple(): - """ Test import of simple system_config file """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.simple") - - _test_system_config(systemfile) - - -def test_system_config_ipv6(): - """ Test import of system_config file with ipv6 oam """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.ipv6") - - _test_system_config(systemfile) - - -def test_system_config_lag_vlan(): - """ Test import of system_config file with lag and vlan """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.lag.vlan") - - _test_system_config(systemfile) - - -def test_system_config_security(): - """ Test import of system_config file with security config """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.security") - - _test_system_config(systemfile) - - -def test_system_config_ceph(): - """ Test import of system_config file with ceph config """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.ceph") - - _test_system_config(systemfile) - - -def test_system_config_simplex(): - """ Test import of system_config file for AIO-simplex """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.simplex") - - _test_system_config(systemfile) - - -def test_system_config_simplex_mgmt(): - """ Test import of system_config file for AIO-simplex with management - configuration""" - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "system_config.simplex_mgmt") - - _test_system_config(systemfile) - - # Test MGMT_NETWORK parameters that are not allowed - system_config = cr.parse_system_config(systemfile) - system_config.set('MGMT_NETWORK', 'GATEWAY', '192.168.42.1') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - system_config = cr.parse_system_config(systemfile) - system_config.set('MGMT_NETWORK', 'LOGICAL_INTERFACE', - 'LOGICAL_INTERFACE_1') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test overlap with OAM network - system_config = cr.parse_system_config(systemfile) - system_config.set('MGMT_NETWORK', 'CIDR', '10.10.10.0/24') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test IPv6 management CIDR (not supported) - system_config = cr.parse_system_config(systemfile) - system_config.set('MGMT_NETWORK', 'CIDR', 'FD01::0000/64') - with 
pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test management CIDR that is too small - system_config = cr.parse_system_config(systemfile) - system_config.set('MGMT_NETWORK', 'CIDR', '192.168.42.0/29') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - -def test_system_config_validation(): - """ Test detection of various errors in system_config file """ - - # Create the path to the system_config files - simple_systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.simple") - ipv6_systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.ipv6") - lag_vlan_systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.lag.vlan") - ceph_systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.ceph") - static_addr_systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "system_config.static_addr") - - # Test floating outside of OAM_NETWORK CIDR - system_config = cr.parse_system_config(ipv6_systemfile) - system_config.set('OAM_NETWORK', 'IP_FLOATING_ADDRESS', '5555::5') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test non-ipv6 unit address - system_config = cr.parse_system_config(ipv6_systemfile) - system_config.set('OAM_NETWORK', 'IP_UNIT_0_ADDRESS', '10.10.10.3') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test missing pxeboot network when using IPv6 management network - system_config = cr.parse_system_config(ipv6_systemfile) - system_config.remove_section('PXEBOOT_NETWORK') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test ridiculously sized management network - system_config = cr.parse_system_config(ipv6_systemfile) - system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '1234::b:0:0:0') - system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', - '1234::b:ffff:ffff:ffff') - system_config.remove_option('MGMT_NETWORK', 'IP_FLOATING_ADDRESS') - system_config.remove_option('MGMT_NETWORK', 'IP_UNIT_0_ADDRESS') - system_config.remove_option('MGMT_NETWORK', 'IP_UNIT_1_ADDRESS') - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test using start/end addresses - system_config = cr.parse_system_config(ipv6_systemfile) - system_config.set('OAM_NETWORK', 'IP_START_ADDRESS', 'abcd::2') - system_config.set('OAM_NETWORK', 'IP_END_ADDRESS', 'abcd::4') - system_config.remove_option('OAM_NETWORK', 'IP_FLOATING_ADDRESS') - system_config.remove_option('OAM_NETWORK', 'IP_UNIT_0_ADDRESS') - 
system_config.remove_option('OAM_NETWORK', 'IP_UNIT_1_ADDRESS') - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of an invalid PXEBOOT_CIDR - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('PXEBOOT_NETWORK', 'PXEBOOT_CIDR', - 'FD00::0000/64') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - system_config.set('PXEBOOT_NETWORK', 'PXEBOOT_CIDR', - '192.168.1.0/29') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - system_config.remove_option('PXEBOOT_NETWORK', 'PXEBOOT_CIDR') - with pytest.raises(configparser.NoOptionError): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(configparser.NoOptionError): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test overlap of MGMT_NETWORK CIDR - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('MGMT_NETWORK', 'CIDR', '192.168.203.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test invalid MGMT_NETWORK LAG_MODE - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('LOGICAL_INTERFACE_1', 'LAG_MODE', '2') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test MGMT_NETWORK VLAN not allowed - system_config = cr.parse_system_config(simple_systemfile) - system_config.set('MGMT_NETWORK', 'VLAN', '123') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test MGMT_NETWORK VLAN missing - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.remove_option('MGMT_NETWORK', 'VLAN') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test MGMT_NETWORK start address specified without end address - system_config = cr.parse_system_config(simple_systemfile) - system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test MGMT_NETWORK end address specified without start address - system_config = cr.parse_system_config(simple_systemfile) - system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.200') - with pytest.raises(exceptions.ConfigFail): - 
cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test MGMT_NETWORK start and end range does not have enough addresses - system_config = cr.parse_system_config(static_addr_systemfile) - system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2') - system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.8') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test MGMT_NETWORK start address not in subnet - system_config = cr.parse_system_config(simple_systemfile) - system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.200.2') - system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.254') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test MGMT_NETWORK end address not in subnet - system_config = cr.parse_system_config(simple_systemfile) - system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2') - system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.214.254') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test overlap of CLUSTER_NETWORK CIDR - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('CLUSTER_NETWORK', 'CIDR', '192.168.203.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - system_config.set('CLUSTER_NETWORK', 'CIDR', '192.168.204.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test invalid CLUSTER_NETWORK LAG_MODE - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.add_section('LOGICAL_INTERFACE_2') - system_config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y') - system_config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3') - system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500') - system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4') - system_config.set('CLUSTER_NETWORK', 'LOGICAL_INTERFACE', - 'LOGICAL_INTERFACE_2') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test CLUSTER_NETWORK VLAN overlap - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('CLUSTER_NETWORK', 'VLAN', '123') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, 
False) - - # Test overlap of OAM_NETWORK CIDR - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('OAM_NETWORK', 'CIDR', '192.168.203.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - system_config.set('OAM_NETWORK', 'CIDR', '192.168.204.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - system_config.set('OAM_NETWORK', 'CIDR', '192.168.205.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test invalid OAM_NETWORK LAG_MODE - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.add_section('LOGICAL_INTERFACE_2') - system_config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y') - system_config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3') - system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500') - system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4') - system_config.set('OAM_NETWORK', 'LOGICAL_INTERFACE', - 'LOGICAL_INTERFACE_2') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test OAM_NETWORK VLAN overlap - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('OAM_NETWORK', 'VLAN', '123') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - system_config.set('OAM_NETWORK', 'VLAN', '126') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test OAM_NETWORK VLAN missing - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.remove_option('OAM_NETWORK', 'VLAN') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test missing gateway - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.remove_option('MGMT_NETWORK', 'GATEWAY') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test two gateways - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('OAM_NETWORK', 'GATEWAY', '10.10.10.1') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - 
validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of unsupported NTP NTP_SERVER - system_config = cr.parse_system_config(simple_systemfile) - system_config.add_section('NTP') - system_config.set('NTP', 'NTP_SERVER_1', '0.pool.ntp.org') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - - # Test detection of overspecification of MGMT network addresses - system_config = cr.parse_system_config(ceph_systemfile) - system_config.set('MGMT_NETWORK', 'IP_FLOATING_ADDRESS', '192.168.204.3') - system_config.set('MGMT_NETWORK', 'IP_IP_UNIT_0_ADDRESS', '192.168.204.6') - system_config.set('MGMT_NETWORK', 'IP_IP_UNIT_1_ADDRESS', '192.168.204.9') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of overspecification of OAM network addresses - system_config = cr.parse_system_config(ceph_systemfile) - system_config.set('MGMT_NETWORK', 'IP_FLOATING_ADDRESS', '10.10.10.2') - system_config.set('MGMT_NETWORK', 'IP_IP_UNIT_0_ADDRESS', '10.10.10.3') - system_config.set('MGMT_NETWORK', 'IP_IP_UNIT_1_ADDRESS', '10.10.10.4') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of invalid release version - system_config = cr.parse_system_config(ceph_systemfile) - system_config.set('VERSION', 'RELEASE', '15.12') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - -def test_pxeboot_range(): - """ Test import of system_config file for PXEBoot network address """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.pxeboot") - - # Test import and generation of answer file - _test_system_config(systemfile) - - # Test detection of invalid PXEBoot network start address - system_config = cr.parse_system_config(systemfile) - system_config.set('PXEBOOT_NETWORK', 'IP_START_ADDRESS', '8.123.122.345') - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of invalid PXEBoot network end address - system_config = cr.parse_system_config(systemfile) - system_config.set('PXEBOOT_NETWORK', 'IP_END_ADDRESS', '128.123.122.345') - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of smaller PXEBoot network end address - system_config = cr.parse_system_config(systemfile) - system_config.set('PXEBOOT_NETWORK', 'IP_END_ADDRESS', '192.168.102.30') - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of PXEBoot network range less than min required (8) - system_config = cr.parse_system_config(systemfile) - system_config.set('PXEBOOT_NETWORK', 'IP_END_ADDRESS', '128.123.122.34') - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - -def test_kubernetes(): - """ Test import of system_config file for 
kubernetes """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "system_config.kubernetes") - - # Test import and generation of answer file - _test_system_config(systemfile) - - # Test CLUSTER_NETWORK start address specified without end address - system_config = cr.parse_system_config(systemfile) - system_config.set('CLUSTER_NETWORK', 'IP_START_ADDRESS', '192.168.204.2') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test CLUSTER_NETWORK end address specified without start address - system_config = cr.parse_system_config(systemfile) - system_config.set('CLUSTER_NETWORK', 'IP_END_ADDRESS', '192.168.204.200') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of overspecification of CLUSTER network addresses - system_config = cr.parse_system_config(systemfile) - system_config.set('CLUSTER_NETWORK', 'IP_FLOATING_ADDRESS', - '192.168.206.103') - system_config.set('CLUSTER_NETWORK', 'IP_IP_UNIT_0_ADDRESS', - '192.168.206.106') - system_config.set('CLUSTER_NETWORK', 'IP_IP_UNIT_1_ADDRESS', - '192.168.206.109') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test absence of optional DNS configuration - system_config = cr.parse_system_config(systemfile) - system_config.remove_section('DNS') - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test absence of optional docker proxy configuration - system_config = cr.parse_system_config(systemfile) - system_config.remove_section('DOCKER_PROXY') - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test absence of optional docker registry configuration - system_config = cr.parse_system_config(systemfile) - system_config.remove_section('DOCKER_REGISTRY') - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - validate(system_config, DEFAULT_CONFIG, None, False) diff --git a/controllerconfig/controllerconfig/controllerconfig/tidy_storage.py b/controllerconfig/controllerconfig/controllerconfig/tidy_storage.py index 0aa8baeb09..cfce88d4ba 100644 --- a/controllerconfig/controllerconfig/controllerconfig/tidy_storage.py +++ b/controllerconfig/controllerconfig/controllerconfig/tidy_storage.py @@ -19,11 +19,12 @@ from cinderclient.v3 import client as cinder_client_v3 from glanceclient import Client from cinderclient import utils as c_utils -from controllerconfig.common import log from controllerconfig.common.rest_api_utils import get_token from controllerconfig.common.exceptions import TidyStorageFail -LOG = log.get_logger(__name__) +from oslo_log import log + +LOG = log.getLogger(__name__) KEYSTONE_AUTH_SERVER_RETRY_CNT = 60 KEYSTONE_AUTH_SERVER_WAIT = 1 # 1sec wait per retry @@ -565,8 +566,6 @@ def main(): show_help() exit(1) - log.configure() - result_file 
= sys.argv[1] try: diff --git a/controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py b/controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py index b46833616a..225cad97ef 100644 --- a/controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py +++ b/controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016-2019 Wind River Systems, Inc. +# Copyright (c) 2016-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -42,13 +42,12 @@ from tsconfig.tsconfig import CONTROLLER_UPGRADE_STARTED_FLAG from tsconfig.tsconfig import RESTORE_IN_PROGRESS_FLAG from controllerconfig.common import constants -from controllerconfig.common import log from controllerconfig import utils as cutils -from controllerconfig import backup_restore - from controllerconfig.upgrades import utils -LOG = log.get_logger(__name__) +from oslo_log import log + +LOG = log.getLogger(__name__) POSTGRES_MOUNT_PATH = '/mnt/postgresql' POSTGRES_DUMP_MOUNT_PATH = '/mnt/db_dump' @@ -865,8 +864,6 @@ def main(): exit(1) arg += 1 - log.configure() - if not from_release or not to_release: print("Both the FROM_RELEASE and TO_RELEASE must be specified") exit(1) @@ -955,9 +952,10 @@ def extract_data_from_archive(archive, staging_dir, from_release, to_release): extract_relative_directory(archive, 'config/ssh_config', tmp_config_path + '/ssh_config') + # TODO: Switch this over to use Ansible # Restore certificate files if they are in the archive - backup_restore.restore_etc_ssl_dir(archive, - configpath=tmp_config_path) + # backup_restore.restore_etc_ssl_dir(archive, + # configpath=tmp_config_path) # Extract etc files archive.extract('etc/hostname', '/') @@ -975,11 +973,12 @@ def extract_data_from_archive(archive, staging_dir, from_release, to_release): path = 'config/' + file extract_relative_file(archive, path, tmp_config_path) + # TODO: Switch this over to use Ansible # Extract distributed cloud addn_hosts file if present in archive. - if backup_restore.file_exists_in_archive( - archive, 'config/dnsmasq.addn_hosts_dc'): - extract_relative_file( - archive, 'config/dnsmasq.addn_hosts_dc', tmp_config_path) + # if backup_restore.file_exists_in_archive( + # archive, 'config/dnsmasq.addn_hosts_dc'): + # extract_relative_file( + # archive, 'config/dnsmasq.addn_hosts_dc', tmp_config_path) def extract_postgres_data(archive): @@ -1114,7 +1113,8 @@ def upgrade_controller_simplex(backup_file): to_release = metadata['upgrade']['to_release'] check_load_version(to_release) - backup_restore.check_load_subfunctions(archive, staging_dir) + # TODO: Switch this over to use Ansible + # backup_restore.check_load_subfunctions(archive, staging_dir) # Patching is potentially a multi-phase step. 
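(An aside on the test module removed above: nearly every deleted case follows one shape, parse a known-good system_config file, break exactly one option, then assert that both validation entry points raise ConfigFail. A condensed sketch of that shape; the import paths for cr, validate, DEFAULT_CONFIG and exceptions are assumptions here, since only their call sites survive in this patch.)

import pytest

from controllerconfig.common import exceptions  # assumed path
from controllerconfig.common.configobjects import DEFAULT_CONFIG  # assumed
from controllerconfig.common.validator import validate  # assumed path
import controllerconfig.systemconfig as cr  # assumed home of parse_system_config


def assert_option_rejected(systemfile, section, option, value):
    # Parse a known-good config, corrupt one option, and expect ConfigFail
    # from both the answer-file generator and the standalone validator.
    system_config = cr.parse_system_config(systemfile)
    system_config.set(section, option, value)
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)
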
# If the controller is impacted by patches from the backup, @@ -1271,7 +1271,8 @@ def upgrade_controller_simplex(backup_file): LOG.info("Generating manifests for %s" % sysinv_constants.CONTROLLER_0_HOSTNAME) - backup_restore.configure_loopback_interface(archive) + # TODO: Switch this over to use Ansible + # backup_restore.configure_loopback_interface(archive) print_log_info("Creating configs...") cutils.create_system_config() @@ -1301,10 +1302,10 @@ def upgrade_controller_simplex(backup_file): cutils.apply_banner_customization() - backup_restore.restore_ldap(archive, backup_restore.ldap_permdir, - staging_dir) - - backup_restore.restore_std_dir(archive, backup_restore.home_permdir) + # TODO: Switch this over to use Ansible + # backup_restore.restore_ldap(archive, backup_restore.ldap_permdir, + # staging_dir) + # backup_restore.restore_std_dir(archive, backup_restore.home_permdir) archive.close() shutil.rmtree(staging_dir, ignore_errors=True) @@ -1352,8 +1353,6 @@ def simplex_main(): exit(1) arg += 1 - log.configure() - # Enforce that the command is being run from the console if cutils.is_ssh_parent(): print ( diff --git a/controllerconfig/controllerconfig/controllerconfig/upgrades/management.py b/controllerconfig/controllerconfig/controllerconfig/upgrades/management.py index 2a73d1ef78..fecba26025 100644 --- a/controllerconfig/controllerconfig/controllerconfig/upgrades/management.py +++ b/controllerconfig/controllerconfig/controllerconfig/upgrades/management.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2019 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -15,13 +15,13 @@ import subprocess import tsconfig.tsconfig as tsc -from controllerconfig import backup_restore -from controllerconfig.common import log from controllerconfig.common import constants from sysinv.common import constants as sysinv_constants from controllerconfig.upgrades import utils -LOG = log.get_logger(__name__) +from oslo_log import log + +LOG = log.getLogger(__name__) def get_upgrade_databases(shared_services): @@ -197,8 +197,9 @@ def create_simplex_backup(software_upgrade): with open(metadata_filename, 'w') as metadata_file: metadata_file.write(json_data) - backup_filename = get_upgrade_backup_filename(software_upgrade) - backup_restore.backup(backup_filename, constants.BACKUPS_PATH) + # TODO: Switch this over to use Ansible + # backup_filename = get_upgrade_backup_filename(software_upgrade) + # backup_restore.backup(backup_filename, constants.BACKUPS_PATH) LOG.info("Create simplex backup complete") diff --git a/controllerconfig/controllerconfig/controllerconfig/upgrades/utils.py b/controllerconfig/controllerconfig/controllerconfig/upgrades/utils.py index 1b32454f82..230e70e7f8 100644 --- a/controllerconfig/controllerconfig/controllerconfig/upgrades/utils.py +++ b/controllerconfig/controllerconfig/controllerconfig/upgrades/utils.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016-2019 Wind River Systems, Inc. +# Copyright (c) 2016-2020 Wind River Systems, Inc. 
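(The logger swap repeated through this patch, in tidy_storage.py, upgrades/controller.py, upgrades/management.py and, just below, upgrades/utils.py, drops the in-house controllerconfig.common.log wrapper together with its log.configure() bootstrap in favour of oslo_log. A minimal sketch of the setup a standalone script needs in its place; the CONF registration and the 'controllerconfig' domain string are illustrative assumptions, not code from this patch.)

from oslo_config import cfg
from oslo_log import log

CONF = cfg.CONF
LOG = log.getLogger(__name__)


def main():
    # register_options()/setup() roughly take over what the removed
    # log.configure() call did for these scripts.
    log.register_options(CONF)
    log.setup(CONF, 'controllerconfig')
    LOG.info("logging initialized")
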
# # SPDX-License-Identifier: Apache-2.0 # @@ -21,14 +21,13 @@ import yaml from tsconfig.tsconfig import SW_VERSION from tsconfig.tsconfig import PLATFORM_PATH -from controllerconfig import DEFAULT_DOMAIN_NAME from controllerconfig import utils as cutils -from controllerconfig.common import log from controllerconfig.common import constants from sysinv.common import constants as sysinv_constants +from oslo_log import log -LOG = log.get_logger(__name__) +LOG = log.getLogger(__name__) POSTGRES_PATH = '/var/lib/postgresql' POSTGRES_DATA_DIR = os.path.join(POSTGRES_PATH, SW_VERSION) @@ -36,6 +35,9 @@ RABBIT_PATH = '/var/lib/rabbitmq' CONTROLLER_1_HOSTNAME = "controller-1" DB_CONNECTION = "postgresql://%s:%s@127.0.0.1/%s\n" +# well-known default domain name +DEFAULT_DOMAIN_NAME = 'Default' + # Migration script actions ACTION_START = "start" ACTION_MIGRATE = "migrate" diff --git a/controllerconfig/controllerconfig/controllerconfig/utils.py b/controllerconfig/controllerconfig/controllerconfig/utils.py index fbd21decb1..59fec88575 100644 --- a/controllerconfig/controllerconfig/controllerconfig/utils.py +++ b/controllerconfig/controllerconfig/controllerconfig/utils.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2014-2019 Wind River Systems, Inc. +# Copyright (c) 2014-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -8,151 +8,27 @@ Utilities """ -import collections -import errno import glob import os import shutil -import socket import subprocess import time -import sys import yaml -from six.moves import configparser import re import six import netaddr from tsconfig import tsconfig -from sysinv.common import constants as sysinv_constants from controllerconfig.common import constants -from controllerconfig.common import log from controllerconfig.common.exceptions import ValidateFail +from oslo_log import log -LOOPBACK_IFNAME = 'lo' - -NETWORK_SCRIPTS_PATH = '/etc/sysconfig/network-scripts' -NETWORK_SCRIPTS_PREFIX = 'ifcfg' -NETWORK_SCRIPTS_LOOPBACK = '%s-%s' % (NETWORK_SCRIPTS_PREFIX, LOOPBACK_IFNAME) - -BOND_MIIMON_DEFAULT = 100 - - -LOG = log.get_logger(__name__) +LOG = log.getLogger(__name__) DEVNULL = open(os.devnull, 'w') -EXPECTED_SERVICE_NAME_AND_TYPE = ( - {"KEYSTONE_SERVICE_NAME": "keystone", - "KEYSTONE_SERVICE_TYPE": "identity", - "SYSINV_SERVICE_NAME": "sysinv", - "SYSINV_SERVICE_TYPE": "platform", - "PATCHING_SERVICE_NAME": "patching", - "PATCHING_SERVICE_TYPE": "patching", - "NFV_SERVICE_NAME": "vim", - "NFV_SERVICE_TYPE": "nfv", - "FM_SERVICE_NAME": "fm", - "FM_SERVICE_TYPE": "faultmanagement", - "BARBICAN_SERVICE_NAME": "barbican", - "BARBICAN_SERVICE_TYPE": "key-manager", - }) - - -def filesystem_get_free_space(path): - """ Get Free space of directory """ - statvfs = os.statvfs(path) - return (statvfs.f_frsize * statvfs.f_bavail) - - -def directory_get_size(start_dir, regex=None): - """ - Get total size of a directory tree in bytes - :param start_dir: top of tree - :param regex: only include files matching this regex (if provided) - :return: size in bytes - """ - total_size = 0 - for dirpath, _, filenames in os.walk(start_dir): - for filename in filenames: - if regex is None or regex.match(filename): - filep = os.path.join(dirpath, filename) - try: - total_size += os.path.getsize(filep) - except OSError as e: - if e.errno != errno.ENOENT: - raise e - return total_size - - -def print_bytes(sizeof): - """ Pretty print bytes """ - for size in ['Bytes', 'KB', 'MB', 'GB', 'TB']: - if abs(sizeof) < 1024.0: - return "%3.1f %s" % (sizeof, size) - sizeof /= 1024.0 - - -def 
modprobe_drbd(): - """Load DRBD module""" - try: - mod_parms = subprocess.check_output(['drbdadm', 'sh-mod-parms'], - close_fds=True).rstrip() - subprocess.call(["modprobe", "-s", "drbd", mod_parms], stdout=DEVNULL) - - except subprocess.CalledProcessError: - LOG.error("Failed to load drbd module") - raise - - -def drbd_start(resource): - """Start drbd resource""" - try: - subprocess.check_call(["drbdadm", "up", resource], - stdout=DEVNULL) - - subprocess.check_call(["drbdadm", "primary", resource], - stdout=DEVNULL) - - except subprocess.CalledProcessError: - LOG.error("Failed to start drbd %s" % resource) - raise - - -def drbd_stop(resource): - """Stop drbd resource""" - try: - subprocess.check_call(["drbdadm", "secondary", resource], - stdout=DEVNULL) - # Allow time for demotion to be processed - time.sleep(1) - subprocess.check_call(["drbdadm", "down", resource], stdout=DEVNULL) - - except subprocess.CalledProcessError: - LOG.error("Failed to stop drbd %s" % resource) - raise - - -def mount(device, directory): - """Mount a directory""" - try: - subprocess.check_call(["mount", device, directory], stdout=DEVNULL) - - except subprocess.CalledProcessError: - LOG.error("Failed to mount %s filesystem" % directory) - raise - - -def umount(directory): - """Unmount a directory""" - try: - subprocess.check_call(["umount", directory], stdout=DEVNULL) - - except subprocess.CalledProcessError: - LOG.error("Failed to umount %s filesystem" % directory) - raise - def start_service(name): """ Start a systemd service """ @@ -181,48 +57,6 @@ def restart_service(name): raise -def start_lsb_service(name): - """ Start a Linux Standard Base service """ - try: - script = os.path.join("/etc/init.d", name) - # Call the script with SYSTEMCTL_SKIP_REDIRECT=1 in the environment - subprocess.check_call([script, "start"], - env=dict(os.environ, - **{"SYSTEMCTL_SKIP_REDIRECT": "1"}), - stdout=DEVNULL) - except subprocess.CalledProcessError: - LOG.error("Failed to start %s service" % name) - raise - - -def stop_lsb_service(name): - """ Stop a Linux Standard Base service """ - try: - script = os.path.join("/etc/init.d", name) - # Call the script with SYSTEMCTL_SKIP_REDIRECT=1 in the environment - subprocess.check_call([script, "stop"], - env=dict(os.environ, - **{"SYSTEMCTL_SKIP_REDIRECT": "1"}), - stdout=DEVNULL) - except subprocess.CalledProcessError: - LOG.error("Failed to stop %s service" % name) - raise - - -def restart_lsb_service(name): - """ Restart a Linux Standard Base service """ - try: - script = os.path.join("/etc/init.d", name) - # Call the script with SYSTEMCTL_SKIP_REDIRECT=1 in the environment - subprocess.check_call([script, "restart"], - env=dict(os.environ, - **{"SYSTEMCTL_SKIP_REDIRECT": "1"}), - stdout=DEVNULL) - except subprocess.CalledProcessError: - LOG.error("Failed to restart %s service" % name) - raise - - def check_sm_service(service, state): """ Check whether an SM service has the supplied state """ try: @@ -245,34 +79,6 @@ def wait_sm_service(service, timeout=180): return False -def is_active(service): - """ Check whether an SM service is active """ - return check_sm_service(service, 'enabled-active') - - -def get_controller_hostname(): - """ - Get the hostname for this controller - :return: controller hostname - """ - return socket.gethostname() - - -def get_mate_controller_hostname(): - """ - Get the hostname for the mate controller - :return: mate controller hostname - """ - my_hostname = socket.gethostname() - if my_hostname.endswith('-0'): - postfix = '-1' - elif 
my_hostname.endswith('-1'): - postfix = '-0' - else: - raise Exception("Invalid controller hostname") - return my_hostname.rsplit('-', 1)[0] + postfix - - def get_address_from_hosts_file(hostname): """ Get the IP address of a host from the /etc/hosts file @@ -286,303 +92,6 @@ def get_address_from_hosts_file(hostname): raise Exception("Hostname %s not found in /etc/hosts" % hostname) -def validate_and_normalize_mac(address): - """Validate a MAC address and return normalized form. - - Checks whether the supplied MAC address is formally correct and - normalize it to all lower case. - - :param address: MAC address to be validated and normalized. - :returns: Normalized and validated MAC address. - :raises: InvalidMAC If the MAC address is not valid. - - """ - if not is_valid_mac(address): - raise Exception("InvalidMAC %s" % address) - return address.lower() - - -def is_valid_ip(address): - if not is_valid_ipv4(address): - return is_valid_ipv6(address) - return True - - -def lag_mode_to_str(lag_mode): - if lag_mode == 0: - return "balance-rr" - if lag_mode == 1: - return "active-backup" - elif lag_mode == 2: - return "balance-xor" - elif lag_mode == 3: - return "broadcast" - elif lag_mode == 4: - return "802.3ad" - elif lag_mode == 5: - return "balance-tlb" - elif lag_mode == 6: - return "balance-alb" - else: - raise Exception( - "Invalid LAG_MODE value of %d. Valid values: 0-6" % lag_mode) - - -def is_combined_load(): - return 'worker' in tsconfig.subfunctions - - -def get_system_type(): - if is_combined_load(): - return sysinv_constants.TIS_AIO_BUILD - return sysinv_constants.TIS_STD_BUILD - - -def get_security_profile(): - eprofile = sysinv_constants.SYSTEM_SECURITY_PROFILE_EXTENDED - if tsconfig.security_profile == eprofile: - return eprofile - return sysinv_constants.SYSTEM_SECURITY_PROFILE_STANDARD - - -def is_cpe(): - return get_system_type() == sysinv_constants.TIS_AIO_BUILD - - -def get_interface_config_common(device, mtu=None): - """ - Return the interface configuration parameters that is common to all - device types. - """ - parameters = collections.OrderedDict() - parameters['BOOTPROTO'] = 'none' - parameters['ONBOOT'] = 'yes' - parameters['DEVICE'] = device - # Increased to accommodate devices that require more time to - # complete link auto-negotiation - parameters['LINKDELAY'] = '20' - if mtu: - parameters['MTU'] = mtu - return parameters - - -def get_interface_config_ipv4(ip_address, ip_subnet, ip_gateway): - """ - Return the interface configuration parameters for all IPv4 static - addressing. - """ - parameters = collections.OrderedDict() - parameters['IPADDR'] = ip_address - parameters['NETMASK'] = ip_subnet.netmask - parameters['BROADCAST'] = ip_subnet.broadcast - if ip_gateway: - parameters['GATEWAY'] = ip_gateway - return parameters - - -def get_interface_config_ipv6(ip_address, ip_subnet, ip_gateway): - """ - Return the interface configuration parameters for all IPv6 static - addressing. - """ - parameters = collections.OrderedDict() - parameters['IPV6INIT'] = 'yes' - parameters['IPV6ADDR'] = netaddr.IPNetwork('%s/%u' % (ip_address, - ip_subnet.prefixlen)) - if ip_gateway: - parameters['IPV6_DEFAULTGW'] = ip_gateway - return parameters - - -def get_interface_config_static(ip_address, ip_subnet, ip_gateway=None): - """ - Return the interface configuration parameters for all IP static - addressing. 
- """ - if netaddr.IPAddress(ip_address).version == 4: - return get_interface_config_ipv4(ip_address, ip_subnet, ip_gateway) - else: - return get_interface_config_ipv6(ip_address, ip_subnet, ip_gateway) - - -def write_interface_config_file(device, parameters): - """ - Write interface configuration parameters to the network scripts - directory named after the supplied device. - - :param device device name as str - :param parameters dict of parameters - """ - filename = os.path.join(NETWORK_SCRIPTS_PATH, "%s-%s" % - (NETWORK_SCRIPTS_PREFIX, device)) - try: - with open(filename, 'w') as f: - for parameter, value in parameters.items(): - f.write("%s=%s\n" % (parameter, str(value))) - except IOError: - LOG.error("Failed to create file: %s" % filename) - raise - - -def write_interface_config_ethernet(device, mtu=None, parameters=None): - """Write the interface configuration for an Ethernet device.""" - config = get_interface_config_common(device, mtu) - if parameters: - config.update(parameters) - write_interface_config_file(device, config) - - -def write_interface_config_vlan(device, mtu, parameters=None): - """Write the interface configuration for a VLAN device.""" - config = get_interface_config_vlan() - if parameters: - config.update(parameters) - write_interface_config_ethernet(device, mtu, parameters=config) - - -def write_interface_config_slave(device, master, parameters=None): - """Write the interface configuration for a bond slave device.""" - config = get_interface_config_slave(master) - if parameters: - config.update(parameters) - write_interface_config_ethernet(device, parameters=config) - - -def write_interface_config_bond(device, mtu, mode, txhash, miimon, - member1, member2, parameters=None): - """Write the interface configuration for a bond master device.""" - config = get_interface_config_bond(mode, txhash, miimon) - if parameters: - config.update(parameters) - write_interface_config_ethernet(device, mtu, parameters=config) - - # create slave device configuration files - if member1: - write_interface_config_slave(member1, device) - if member2: - write_interface_config_slave(member2, device) - - -def get_interface_config_vlan(): - """ - Return the interface configuration parameters for all IP static - addressing. - """ - parameters = collections.OrderedDict() - parameters['VLAN'] = 'yes' - return parameters - - -def get_interface_config_slave(master): - """ - Return the interface configuration parameters for bond interface - slave devices. - """ - parameters = collections.OrderedDict() - parameters['MASTER'] = master - parameters['SLAVE'] = 'yes' - parameters['PROMISC'] = 'yes' - return parameters - - -def get_interface_config_bond(mode, txhash, miimon): - """ - Return the interface configuration parameters for bond interface - master devices. - """ - options = "mode=%s miimon=%s" % (mode, miimon) - - if txhash: - options += " xmit_hash_policy=%s" % txhash - - if mode == constants.LAG_MODE_8023AD: - options += " lacp_rate=fast" - - parameters = collections.OrderedDict() - parameters['BONDING_OPTS'] = "\"%s\"" % options - return parameters - - -def remove_interface_config_files(stdout=None, stderr=None): - """ - Remove all existing interface configuration files. 
- """ - files = glob.glob1(NETWORK_SCRIPTS_PATH, "%s-*" % NETWORK_SCRIPTS_PREFIX) - for file in [f for f in files if f != NETWORK_SCRIPTS_LOOPBACK]: - ifname = file[len(NETWORK_SCRIPTS_PREFIX) + 1:] # remove prefix - subprocess.check_call(["ifdown", ifname], - stdout=stdout, stderr=stderr) - os.remove(os.path.join(NETWORK_SCRIPTS_PATH, file)) - - -def remove_interface_ip_address(device, ip_address, ip_subnet, - stdout=None, stderr=None): - """Remove an IP address from an interface""" - subprocess.check_call( - ["ip", "addr", "del", - str(ip_address) + "/" + str(ip_subnet.prefixlen), - "dev", device], - stdout=stdout, stderr=stderr) - - -def send_interface_garp(device, ip_address, stdout=None, stderr=None): - """Send a GARP message for the supplied address""" - subprocess.call( - ["arping", "-c", "3", "-A", "-q", "-I", - device, str(ip_address)], - stdout=stdout, stderr=stderr) - - -def restart_networking(stdout=None, stderr=None): - """ - Restart networking services. - """ - # Kill any leftover dhclient process from the boot - subprocess.call(["pkill", "dhclient"]) - - # remove any existing IP addresses - ifs = glob.glob1('/sys/class/net', "*") - for i in [i for i in ifs if i != LOOPBACK_IFNAME]: - subprocess.call( - ["ip", "link", "set", "dev", i, "down"]) - subprocess.call( - ["ip", "addr", "flush", "dev", i]) - subprocess.call( - ["ip", "-6", "addr", "flush", "dev", i]) - - subprocess.check_call(["systemctl", "restart", "network"], - stdout=stdout, stderr=stderr) - - -def output_to_dict(output): - dict = {} - output = [_f for _f in output.split('\n') if _f] - - for row in output: - values = row.split() - if len(values) != 2: - raise Exception("The following output does not respect the " - "format: %s" % row) - dict[values[1]] = values[0] - - return dict - - -def get_install_uuid(): - """ Get the install uuid from the feed directory. """ - uuid_fname = None - try: - uuid_dir = '/www/pages/feed/rel-' + tsconfig.SW_VERSION - uuid_fname = os.path.join(uuid_dir, 'install_uuid') - with open(uuid_fname, 'r') as uuid_file: - install_uuid = uuid_file.readline().rstrip() - except IOError: - LOG.error("Failed to open file: %s", uuid_fname) - raise Exception("Failed to retrieve install UUID") - - return install_uuid - - def write_simplex_flag(): """ Write simplex flag. 
""" simplex_flag = "/etc/platform/simplex" @@ -634,37 +143,6 @@ def apply_manifest(controller_address_0, personality, manifest, hieradata, raise Exception(msg) -def create_system_controller_config(filename): - """ Create any additional parameters needed for system controller""" - # set keystone endpoint region name and sysinv keystone authtoken - # region name - config = { - 'keystone::endpoint::region': - sysinv_constants.SYSTEM_CONTROLLER_REGION, - 'sysinv::region_name': - sysinv_constants.SYSTEM_CONTROLLER_REGION, - } - try: - with open(filename, 'w') as f: - yaml.dump(config, f, default_flow_style=False) - except Exception: - LOG.exception("failed to write config file: %s" % filename) - raise - - -def create_static_config(): - cmd = ["/usr/bin/sysinv-puppet", - "create-static-config", - constants.HIERADATA_WORKDIR] - try: - os.makedirs(constants.HIERADATA_WORKDIR) - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - msg = "Failed to create puppet hiera static config" - print(msg) - raise Exception(msg) - - def create_system_config(): cmd = ["/usr/bin/sysinv-puppet", "create-system-config", @@ -692,34 +170,6 @@ def create_host_config(hostname=None): raise Exception(msg) -def shutdown_file_systems(): - """ Shutdown filesystems """ - - umount("/var/lib/postgresql") - drbd_stop("drbd-pgsql") - - stop_service("www-pages-helm_charts.mount") - umount("/opt/platform") - drbd_stop("drbd-platform") - - umount("/opt/extension") - drbd_stop("drbd-extension") - - if os.path.exists("/opt/patch-vault"): - umount("/opt/patch-vault") - drbd_stop("drbd-patch-vault") - - umount("/var/lib/rabbitmq") - drbd_stop("drbd-rabbit") - - stop_service("etcd.service") - stop_service("opt-etcd.mount") - drbd_stop("drbd-etcd") - - umount("/var/lib/docker-distribution") - drbd_stop("drbd-dockerdistribution") - - def persist_config(): """Copy temporary config files into new DRBD filesystem""" @@ -862,24 +312,6 @@ def configure_hostname(hostname): raise Exception("Failed to configure hostname") -def progress(steps, step, action, result, newline=False): - """Display progress.""" - if steps == 0: - hashes = 45 - percentage = 100 - else: - hashes = (step * 45) / steps - percentage = (step * 100) / steps - - sys.stdout.write("\rStep {0:{width}d} of {1:d} [{2:45s}] " - "[{3:d}%]".format(min(step, steps), steps, - '#' * hashes, percentage, - width=len(str(steps)))) - if step == steps or newline: - sys.stdout.write("\n") - sys.stdout.flush() - - def touch(fname): with open(fname, 'a'): os.utime(fname, None) @@ -898,47 +330,6 @@ def is_ssh_parent(): return False -def is_valid_vlan(vlan): - """Determine whether vlan is valid.""" - try: - if 0 < int(vlan) < 4095: - return True - else: - return False - except (ValueError, TypeError): - return False - - -def is_mtu_valid(mtu): - """Determine whether a mtu is valid.""" - try: - if int(mtu) < 576: - return False - elif int(mtu) > 9216: - return False - else: - return True - except (ValueError, TypeError): - return False - - -def is_valid_hostname(hostname): - """Determine whether a hostname is valid as per RFC 1123.""" - - # Maximum length of 255 - if not hostname or len(hostname) > 255: - return False - # Allow a single dot on the right hand side - if hostname[-1] == ".": - hostname = hostname[:-1] - # Create a regex to ensure: - # - hostname does not begin or end with a dash - # - each segment is 1 to 63 characters long - # - valid characters are A-Z (any case) and 0-9 - valid_re = re.compile("(?!-)[A-Z\d-]{1,63}(?/dev/null -else - echo "Admin credentials not 
found" - exit -fi - -# Delete all the servers -echo "Deleting all servers [`openstack server list --all`]" -found=false -for i in $(openstack server list --all -c ID -f value); do - `openstack server delete $i &> /dev/null` - echo $i deleted - found=true -done -if $found; then - sleep 30 -fi -echo "Deleted all servers [`openstack server list --all`]" -# Delete all the volumes -echo "Deleting all volumes [`openstack volume list --all`]" -found=false -for i in $(openstack volume list --all -c ID -f value); do - `openstack volume delete $i &> /dev/null` - echo $i deleted - found=true -done -if $found; then - sleep 30 -fi -echo "Deleted all volumes [`openstack volume list --all`]" - diff --git a/controllerconfig/controllerconfig/scripts/install_clone.py b/controllerconfig/controllerconfig/scripts/install_clone.py deleted file mode 100755 index f9a639a59d..0000000000 --- a/controllerconfig/controllerconfig/scripts/install_clone.py +++ /dev/null @@ -1,321 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright (c) 2017 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import os -import time -import uuid -import shutil -import tempfile -import subprocess -from six.moves import configparser - -import tsconfig.tsconfig as tsconfig -from controllerconfig.common import log -import controllerconfig.utils as utils -import controllerconfig.sysinv_api as sysinv -import controllerconfig.backup_restore as backup_restore -import controllerconfig.clone as clone -from controllerconfig.common.exceptions import CloneFail -from sysinv.common import constants as si_const - -LOG = log.get_logger("cloning") -DEVNULL = open(os.devnull, 'w') -INI_FILE = os.path.join("/", clone.CLONE_ARCHIVE_DIR, clone.CLONE_ISO_INI) -SECTION = "clone_iso" -parser = configparser.SafeConfigParser() -clone_name = "" - - -def console_log(str, err=False): - """ Log onto console also """ - if err: - str = "Failed to install clone-image. " + str - LOG.error(str) - else: - LOG.info(str) - print("\n" + str) - - -def persist(key, value): - """ Write into ini file """ - parser.set(SECTION, key, value) - with open(INI_FILE, 'w') as f: - parser.write(f) - - -def set_result(value): - """ Set the result of installation of clone image """ - persist(clone.RESULT, value) - persist(clone.INSTALLED, time.strftime("%Y-%m-%d %H:%M:%S %Z")) - - -def validate_hardware_compatibility(): - """ validate if cloned-image can be installed on this h/w """ - valid = True - disk_paths = "" - if parser.has_option(SECTION, "disks"): - disk_paths = parser.get(SECTION, "disks") - if not disk_paths: - console_log("Missing value [disks] in ini file") - valid = False - for d in disk_paths.split(): - disk_path, size = d.split('#') - if os.path.exists('/dev/disk/by-path/' + disk_path): - LOG.info("Disk [{}] exists".format(disk_path)) - disk_size = clone.get_disk_size('/dev/disk/by-path/' + - disk_path) - if int(disk_size) >= int(size): - LOG.info("Disk size is good: {} >= {}" - .format(utils.print_bytes(int(disk_size)), - utils.print_bytes(int(size)))) - else: - console_log("Not enough disk size[{}], " - "found:{} looking_for:{}".format( - disk_path, utils.print_bytes(int(disk_size)), - utils.print_bytes(int(size))), err=True) - valid = False - else: - console_log("Disk [{}] does not exist!" 
- .format(disk_path), err=True) - valid = False - - interfaces = "" - if parser.has_option(SECTION, "interfaces"): - interfaces = parser.get(SECTION, "interfaces") - if not interfaces: - console_log("Missing value [interfaces] in ini file") - valid = False - for f in interfaces.split(): - if os.path.exists('/sys/class/net/' + f): - LOG.info("Interface [{}] exists".format(f)) - else: - console_log("Interface [{}] does not exist!" - .format(f), err=True) - valid = False - - maxcpuid = "" - if parser.has_option(SECTION, "cpus"): - maxcpuid = parser.get(SECTION, "cpus") - if not maxcpuid: - console_log("Missing value [cpus] in ini file") - valid = False - else: - my_maxcpuid = clone.get_online_cpus() - if int(maxcpuid) <= int(my_maxcpuid): - LOG.info("Got enough cpus {},{}".format( - maxcpuid, my_maxcpuid)) - else: - console_log("Not enough CPUs, found:{} looking_for:{}" - .format(my_maxcpuid, maxcpuid), err=True) - valid = False - - mem_total = "" - if parser.has_option(SECTION, "mem"): - mem_total = parser.get(SECTION, "mem") - if not mem_total: - console_log("Missing value [mem] in ini file") - valid = False - else: - my_mem_total = clone.get_total_mem() - # relaxed RAM check: within 1 GiB - if (int(mem_total) - (1024 * 1024)) <= int(my_mem_total): - LOG.info("Got enough memory {},{}".format( - mem_total, my_mem_total)) - else: - console_log("Not enough memory; found:{} kB, " - "looking for a minimum of {} kB" - .format(my_mem_total, mem_total), err=True) - valid = False - - if not valid: - console_log("Validation failure!") - set_result(clone.FAIL) - time.sleep(20) - exit(1) - - console_log("Successful validation") - - -def update_sysuuid_in_archive(tmpdir): - """Update system uuid in system archive file.""" - sysuuid = str(uuid.uuid4()) - clone.find_and_replace( - [os.path.join(tmpdir, 'postgres/sysinv.sql.data')], - "CLONEISO_SYSTEM_UUID", sysuuid) - LOG.info("System uuid updated [%s]" % sysuuid) - - -def update_db(archive_dir, backup_name): - """ Update DB before restore """ - path_to_archive = os.path.join(archive_dir, backup_name) - LOG.info("Updating system archive [%s] DB." % path_to_archive) - tmpdir = tempfile.mkdtemp(dir=archive_dir) - try: - subprocess.check_call( - ['gunzip', path_to_archive + '.tgz'], - stdout=DEVNULL, stderr=DEVNULL) - # Extract only postgres dir to update system uuid - subprocess.check_call( - ['tar', '-x', - '--directory=' + tmpdir, - '-f', path_to_archive + '.tar', - 'postgres'], - stdout=DEVNULL, stderr=DEVNULL) - update_sysuuid_in_archive(tmpdir) - subprocess.check_call( - ['tar', '--update', - '--directory=' + tmpdir, - '-f', path_to_archive + '.tar', - 'postgres'], - stdout=DEVNULL, stderr=DEVNULL) - subprocess.check_call(['gzip', path_to_archive + '.tar']) - shutil.move(path_to_archive + '.tar.gz', path_to_archive + '.tgz') - - except Exception as e: - LOG.error("Update of system archive {} failed {}".format( - path_to_archive, str(e))) - raise CloneFail("Failed to update system archive") - - finally: - shutil.rmtree(tmpdir, ignore_errors=True) - - -def config_worker(): - """ - Enable worker functionality for AIO system. - :return: True if worker-config-complete is executed - """ - if utils.get_system_type() == si_const.TIS_AIO_BUILD: - console_log("Applying worker manifests for {}. " - "Node will reboot on completion." - .format(utils.get_controller_hostname())) - sysinv.do_worker_config_complete(utils.get_controller_hostname()) - time.sleep(30) - # worker-config-complete has no logs to console. 
So, wait - # for some time before showing the login prompt. - for i in range(1, 10): - console_log("worker-config in progress..") - time.sleep(30) - console_log("Timed out on do_worker_config_complete") - raise CloneFail("Timed out on do_worker_config_complete") - return True - else: - # worker_config_complete is not needed. - return False - - -def finalize_install(): - """ Complete the installation """ - subprocess.call(["rm", "-f", tsconfig.CONFIG_PATH + '/dnsmasq.leases']) - console_log("Updating system parameters...") - i = 1 - system_update = False - # Retries if sysinv is not yet ready - while i < 10: - time.sleep(20) - LOG.info("Attempt %d to update system parameters..." % i) - try: - if sysinv.update_clone_system('Cloned_from_' + clone_name, - utils.get_controller_hostname()): - system_update = True - break - except Exception: - # Sysinv might not be ready yet - pass - i += 1 - if not system_update: - LOG.error("System update failed") - raise CloneFail("System update failed") - - try: - output = subprocess.check_output(["finish_install_clone.sh"], - stderr=subprocess.STDOUT) - LOG.info("finish_install_clone out: {}".format(output)) - except Exception: - console_log("Failed to cleanup stale OpenStack resources. " - "Manually delete the Volumes and Instances.") - - -def cleanup(): - """ Cleanup after installation """ - LOG.info("Cleaning up...") - subprocess.call(['systemctl', 'disable', 'install-clone'], stderr=DEVNULL) - OLD_FILE = os.path.join(tsconfig.PLATFORM_CONF_PATH, clone.CLONE_ISO_INI) - if os.path.exists(OLD_FILE): - os.remove(OLD_FILE) - if os.path.exists(INI_FILE): - os.chmod(INI_FILE, 0o400) - shutil.move(INI_FILE, tsconfig.PLATFORM_CONF_PATH) - shutil.rmtree(os.path.join("/", clone.CLONE_ARCHIVE_DIR), - ignore_errors=True) - - -log.configure() -if os.path.exists(INI_FILE): - try: - parser.read(INI_FILE) - if parser.has_section(SECTION): - clone_name = parser.get(SECTION, clone.NAME) - LOG.info("System archive [%s] to be installed." % clone_name) - - first_boot = False - last_result = clone.IN_PROGRESS - if not parser.has_option(SECTION, clone.RESULT): - # first boot after cloning - first_boot = True - else: - last_result = parser.get(SECTION, clone.RESULT) - LOG.info("Last attempt to install clone was [{}]" - .format(last_result)) - - if last_result == clone.IN_PROGRESS: - if first_boot: - update_db(os.path.join("/", clone.CLONE_ARCHIVE_DIR), - clone_name + '_system') - else: - # Booting up after patch application, do validation - validate_hardware_compatibility() - - console_log("+++++ Starting to install clone-image [{}] +++++" - .format(clone_name)) - set_result(clone.IN_PROGRESS) - clone_arch_path = os.path.join("/", clone.CLONE_ARCHIVE_DIR, - clone_name) - if (backup_restore.RESTORE_RERUN_REQUIRED == - backup_restore.restore_system( - clone_arch_path + "_system.tgz", - clone=True)): - # If there are no patches to be applied, run validation - # code and resume restore. If patches were applied, node - # will be rebooted and validate will after reboot. 
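(The validate_hardware_compatibility() routine deleted above gates a clone install on disk, interface, CPU and memory checks. Its disk leg reduces to a presence-plus-size test; a condensed sketch, with get_disk_size() standing in for the clone module helper, assumed to return a byte count.)

import os


def disk_is_compatible(disk_path, required_size, get_disk_size):
    # The clone ini records each disk as "by-path-id#min-bytes"; a disk
    # passes when it exists and is at least as large as the recorded size.
    device = '/dev/disk/by-path/' + disk_path
    if not os.path.exists(device):
        return False
    return int(get_disk_size(device)) >= int(required_size)
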
- validate_hardware_compatibility() - LOG.info("validate passed, resuming restore...") - backup_restore.restore_system( - clone_arch_path + "_system.tgz", clone=True) - console_log("System archive installed from [%s]" % clone_name) - backup_restore.restore_images(clone_arch_path + "_images.tgz", - clone=True) - console_log("Images archive installed from [%s]" % clone_name) - finalize_install() - set_result(clone.OK) - if not config_worker(): - # do cleanup if worker_config_complete is not required - cleanup() - elif last_result == clone.OK: - # Installation completed successfully before last reboot - cleanup() - else: - LOG.error("Bad file: {}".format(INI_FILE)) - set_result(clone.FAIL) - exit(1) - except Exception as e: - console_log("Clone [%s] installation failed" % clone_name) - LOG.exception("install failed") - set_result(clone.FAIL) - exit(1) -else: - console_log("nothing to do, Not installing clone?") diff --git a/controllerconfig/controllerconfig/scripts/keyringstaging b/controllerconfig/controllerconfig/scripts/keyringstaging deleted file mode 100755 index 221bdd94bf..0000000000 --- a/controllerconfig/controllerconfig/scripts/keyringstaging +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/python3 - -# -# Copyright (c) 2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import keyring -import os -import sys - -def get_stealth_password(): - """Get the stealth password vault for manifest to run""" - orig_root = os.environ.get('XDG_DATA_HOME', None) - os.environ["XDG_DATA_HOME"] = "/tmp" - - stealth_pw = keyring.get_password("CGCS", "admin") - - if orig_root is not None: - os.environ("XDG_DATA_HOME",orig_root) - else: - del os.environ["XDG_DATA_HOME"] - return stealth_pw - -if __name__ == "__main__": - sys.stdout.write(get_stealth_password()) - sys.stdout.flush() - sys.exit(0) - diff --git a/controllerconfig/controllerconfig/setup.py b/controllerconfig/controllerconfig/setup.py index b8ddf8a94c..c7e3a6b4a6 100644 --- a/controllerconfig/controllerconfig/setup.py +++ b/controllerconfig/controllerconfig/setup.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2017 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -18,9 +18,6 @@ setup( include_package_data=False, entry_points={ 'console_scripts': [ - 'config_controller = controllerconfig.systemconfig:main', - 'config_region = controllerconfig.regionconfig:region_main', - 'config_subcloud = controllerconfig.regionconfig:subcloud_main', 'config_management = controllerconfig.config_management:main', 'upgrade_controller = controllerconfig.upgrades.controller:main', 'upgrade_controller_simplex = ' diff --git a/controllerconfig/controllerconfig/tox.ini b/controllerconfig/controllerconfig/tox.ini index c8dafd41cd..9a834423c8 100644 --- a/controllerconfig/controllerconfig/tox.ini +++ b/controllerconfig/controllerconfig/tox.ini @@ -4,7 +4,7 @@ # and then run "tox" from this directory. [tox] -envlist = flake8, py27, pylint +envlist = flake8, pylint # Tox does not work if the path to the workdir is too long, so move it to /tmp toxworkdir = /tmp/{env:USER}_cctox stxdir = {toxinidir}/../../.. 
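(One genuine bug is visible in the keyringstaging script deleted above: it restores the environment with os.environ("XDG_DATA_HOME", orig_root), calling the environ mapping as if it were a function, which raises TypeError. A corrected sketch of the same staging trick:)

import os

import keyring


def get_stealth_password():
    # Point keyring's file backend at /tmp, read the password, then restore
    # XDG_DATA_HOME by item assignment rather than by calling os.environ.
    orig_root = os.environ.get('XDG_DATA_HOME')
    os.environ['XDG_DATA_HOME'] = '/tmp'
    try:
        return keyring.get_password('CGCS', 'admin')
    finally:
        if orig_root is not None:
            os.environ['XDG_DATA_HOME'] = orig_root
        else:
            del os.environ['XDG_DATA_HOME']
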
@@ -41,21 +41,13 @@ commands = flake8 {posargs} # H101: Use TODO(NAME) # H102: Apache 2.0 license header not found # H104: File contains nothing but comments -# H238: old style class declaration, use new style (inherit from `object`) # H306: imports not in alphabetical order # H401: docstring should not start with a space -# H403: multi line docstrings should end on a new line # H404: multi line docstring should start without a leading new line # H405: multi line docstring summary not separated with an empty line -ignore = H101,H102,H104,H238,H306,H401,H403,H404,H405 +ignore = H101,H102,H104,H306,H401,H404,H405 exclude = build -[testenv:py27] -basepython = python2.7 -commands = - find . -type f -name "*.pyc" -delete - py.test {posargs} - [testenv:cover] basepython = python2.7 deps = {[testenv]deps} diff --git a/controllerconfig/controllerconfig/upgrade-scripts/16-neutron-move-bindings-off-controller-1.py b/controllerconfig/controllerconfig/upgrade-scripts/16-neutron-move-bindings-off-controller-1.py index 390e26a6cc..514687d170 100755 --- a/controllerconfig/controllerconfig/upgrade-scripts/16-neutron-move-bindings-off-controller-1.py +++ b/controllerconfig/controllerconfig/upgrade-scripts/16-neutron-move-bindings-off-controller-1.py @@ -14,9 +14,9 @@ import psycopg2 import sys from psycopg2.extras import RealDictCursor -from controllerconfig.common import log +from oslo_log import log -LOG = log.get_logger(__name__) +LOG = log.getLogger(__name__) def main(): diff --git a/controllerconfig/controllerconfig/upgrade-scripts/20-sysinv-retire-ceph-cache-tier-sp.py b/controllerconfig/controllerconfig/upgrade-scripts/20-sysinv-retire-ceph-cache-tier-sp.py index 5edf7f5ab0..afcbbdbecd 100644 --- a/controllerconfig/controllerconfig/upgrade-scripts/20-sysinv-retire-ceph-cache-tier-sp.py +++ b/controllerconfig/controllerconfig/upgrade-scripts/20-sysinv-retire-ceph-cache-tier-sp.py @@ -13,9 +13,9 @@ import sys from sysinv.common import constants from psycopg2.extras import RealDictCursor -from controllerconfig.common import log +from oslo_log import log -LOG = log.get_logger(__name__) +LOG = log.getLogger(__name__) # Sections that need to be removed from retired Ceph cache tiering feature SERVICE_PARAM_SECTION_CEPH_CACHE_TIER = 'cache_tiering' diff --git a/controllerconfig/opensuse/controllerconfig.spec b/controllerconfig/opensuse/controllerconfig.spec index eb9e0219c8..b578a9dc53 100644 --- a/controllerconfig/opensuse/controllerconfig.spec +++ b/controllerconfig/opensuse/controllerconfig.spec @@ -55,10 +55,7 @@ Configuration for the Controller node. 
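(With H238 and H403 dropped from the ignore list in the tox.ini hunk above, those hacking checks are enforced again. In miniature, what each one flags:)

# H238: old-style class declaration; only meaningful under python2, where a
# class must inherit from object explicitly to be new-style.
class OldStyle:              # flagged by H238
    pass


class NewStyle(object):      # compliant
    pass


# H403: a multi-line docstring should end on a new line.
def compliant():
    """Summary line.

    Detail text, with the closing quotes on their own line.
    """
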
#install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/ install -d -m 755 %{buildroot}%{local_bindir} -install -p -D -m 700 scripts/keyringstaging %{buildroot}%{local_bindir}/keyringstaging install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password -install -p -D -m 700 scripts/install_clone.py %{buildroot}%{local_bindir}/install_clone -install -p -D -m 700 scripts/finish_install_clone.sh %{buildroot}%{local_bindir}/finish_install_clone.sh install -d -m 755 %{buildroot}%{local_goenabledd} install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh diff --git a/doc/source/conf.py b/doc/source/conf.py index 2efaeefba1..e272002e86 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -21,7 +21,7 @@ # -- Project information ----------------------------------------------------- -project = u'stx-config' +project = u'StarlingX Configuration' copyright = u'2018, StarlingX' author = u'StarlingX' diff --git a/doc/source/index.rst b/doc/source/index.rst index dc4c43f426..eb7020e1a0 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,32 +1,35 @@ -======================== -stx-config Documentation -======================== +=============================== +StarlingX Configuration Project +=============================== -This is the documentation for StarlingX system configuration management. +The starlingx/config project provides system configuration management. -Release Notes +------------- +Release notes ------------- .. toctree:: :maxdepth: 1 - Release Notes + Release notes -API Reference +------------- +API reference ------------- .. toctree:: :maxdepth: 1 - API Reference + API reference +----- Links ----- -* Source: `stx-config`_ -* Code Review: `Gerrit`_ -* Bugs: `Storyboard`_ +* Source: `starlingx/config`_ +* Code review: `Gerrit`_ +* Project tracking: `Storyboard`_ -.. _stx-config: https://opendev.org/starlingx/config/ +.. _starlingx/config: https://opendev.org/starlingx/config/ .. _Gerrit: https://review.opendev.org/#/q/project:starlingx/config .. _Storyboard: https://storyboard.openstack.org/#!/project/starlingx/config diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py index 5d45135ea4..7e81f4d4fb 100644 --- a/releasenotes/source/conf.py +++ b/releasenotes/source/conf.py @@ -57,7 +57,7 @@ source_suffix = '.rst' master_doc = 'index' # General information about the project. -project = 'stx-config' +project = u'StarlingX Configuration' # Release notes are version independent, no need to set version and release release = '' diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst index ad0b35bc85..45a8567a00 100644 --- a/releasenotes/source/index.rst +++ b/releasenotes/source/index.rst @@ -1,6 +1,6 @@ -======================== -stx-config Release Notes -======================== +===================================== +StarlingX Configuration Release Notes +===================================== .. 
toctree:: :maxdepth: 2 diff --git a/sysinv/cgts-client/centos/build_srpm.data b/sysinv/cgts-client/centos/build_srpm.data index 4e6fbe9ae4..d47350caef 100644 --- a/sysinv/cgts-client/centos/build_srpm.data +++ b/sysinv/cgts-client/centos/build_srpm.data @@ -1,2 +1,2 @@ SRC_DIR="cgts-client" -TIS_PATCH_VER=73 +TIS_PATCH_VER=74 diff --git a/sysinv/cgts-client/centos/cgts-client.spec b/sysinv/cgts-client/centos/cgts-client.spec index 57ea0e1e0b..f74a8ecd0c 100644 --- a/sysinv/cgts-client/centos/cgts-client.spec +++ b/sysinv/cgts-client/centos/cgts-client.spec @@ -18,6 +18,9 @@ Requires: python3-prettytable Requires: bash-completion Requires: python3-neutronclient Requires: python3-keystoneclient +Requires: python3-oslo-i18n +Requires: python3-oslo-serialization +Requires: python3-oslo-utils # Needed for python2 and python3 compatible Requires: python3-six diff --git a/sysinv/cgts-client/cgts-client/.coveragerc b/sysinv/cgts-client/cgts-client/.coveragerc new file mode 100644 index 0000000000..b8f4e06728 --- /dev/null +++ b/sysinv/cgts-client/cgts-client/.coveragerc @@ -0,0 +1,8 @@ +[run] +branch = True +source = cgtsclient +omit = cgtsclient/tests/* + +[report] +ignore_errors = True + diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/_i18n.py b/sysinv/cgts-client/cgts-client/cgtsclient/_i18n.py new file mode 100644 index 0000000000..fe3dba1d19 --- /dev/null +++ b/sysinv/cgts-client/cgts-client/cgtsclient/_i18n.py @@ -0,0 +1,32 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +"""oslo.i18n integration module. + +See https://docs.openstack.org/oslo.i18n/latest/user/usage.html + +""" + +import oslo_i18n + +DOMAIN = 'python-cgtsclient' + +_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) + +# The primary translation function using the well-known name "_" +_ = _translators.primary + +# The contextual translation function using the name "_C" +# requires oslo.i18n >=2.1.0 +_C = _translators.contextual_form + +# The plural translation function using the name "_P" +# requires oslo.i18n >=2.1.0 +_P = _translators.plural_form + + +def get_available_languages(): + return oslo_i18n.get_available_languages(DOMAIN) diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/client.py b/sysinv/cgts-client/cgts-client/cgtsclient/client.py index f093b396ea..d1241a0afa 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/client.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/client.py @@ -4,9 +4,9 @@ # SPDX-License-Identifier: Apache-2.0 # +from cgtsclient._i18n import _ from cgtsclient.common import utils from cgtsclient import exc -from cgtsclient.openstack.common.gettextutils import _ def _get_ksclient(**kwargs): diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/common/base.py b/sysinv/cgts-client/cgts-client/cgtsclient/common/base.py index df638d5fce..bdd9e8c55f 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/common/base.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/common/base.py @@ -119,6 +119,22 @@ class Resource(object): else: return self.__dict__[k] + # deepcopy is invoked on this object which causes infinite recursion in python3 + # unless the copy and deepcopy methods are overridden + def __copy__(self): + cls = self.__class__ + result = cls.__new__(cls) + result.__dict__.update(self.__dict__) + return result + + def __deepcopy__(self, memo): + cls = self.__class__ + result = cls.__new__(cls) + memo[id(self)] = result + for k, v in self.__dict__.items(): + setattr(result, k, copy.deepcopy(v, memo)) + return result + def 
__repr__(self): reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and k != 'manager') diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/common/http.py b/sysinv/cgts-client/cgts-client/cgtsclient/common/http.py index 48f3037363..9235df91b1 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/common/http.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/common/http.py @@ -528,7 +528,7 @@ class ResponseBodyIterator(object): def __iter__(self): while True: - yield self.next() + yield six.next(self) def next(self): chunk = self.resp.read(CHUNKSIZE) diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/common/utils.py b/sysinv/cgts-client/cgts-client/cgtsclient/common/utils.py index f214efdbda..1fa3f6dc12 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/common/utils.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/common/utils.py @@ -41,13 +41,10 @@ from prettytable import NONE from datetime import datetime from dateutil import parser - -from cgtsclient import exc -from cgtsclient.openstack.common import importutils from functools import wraps -# noinspection PyProtectedMember -from wrapping_formatters import _get_width +from cgtsclient import exc +from oslo_utils import importutils from cgtsclient.common import wrapping_formatters from six.moves import input @@ -300,7 +297,7 @@ def pt_builder(field_labels, fields, formatters, paging, printer=default_printer output = self.pt.get_string() if wrapping_formatters.is_nowrap_set(): return output - output_width = _get_width(output) + output_width = wrapping_formatters._get_width(output) if output_width <= self.terminal_width: return output # At this point pretty Table (self.pt) does not fit the terminal width so let's @@ -476,7 +473,7 @@ def print_dict_with_format(data, wrap=0, output_format=None): def print_dict_value(d): # Print values on a single line separated by spaces # e.g. 'available ntp' - print (' '.join(map(str, d.values()))) + print(' '.join(map(str, d.values()))) def print_dict(d, dict_property="Property", wrap=0): diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/common/wrapping_formatters.py b/sysinv/cgts-client/cgts-client/cgtsclient/common/wrapping_formatters.py index e1a5981e18..2f53265c91 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/common/wrapping_formatters.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/common/wrapping_formatters.py @@ -801,6 +801,7 @@ def _simpleTestHarness(no_wrap): print("nowrap = {}".format(is_nowrap_set())) + if __name__ == "__main__": _simpleTestHarness(True) _simpleTestHarness(False) diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/__init__.py b/sysinv/cgts-client/cgts-client/cgtsclient/openstack/__init__.py deleted file mode 100644 index 265c2d9f65..0000000000 --- a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
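Note that `six.next` maps to the builtin `next()` and must be handed the iterator (`six.next(self)` in the `ResponseBodyIterator` hunk above); a bare `six.next()` raises `TypeError`. A self-contained sketch of the equivalent `six.Iterator` form, which avoids the generator indirection altogether (an alternative shape for illustration, not the upstream code; the `read(n)` response object and chunk size are stand-ins):

```python
import io

import six


class ResponseBodyIterator(six.Iterator):
    """Yield an HTTP response body in fixed-size chunks."""

    def __init__(self, resp, chunksize=65536):
        self.resp = resp          # any object exposing read(n)
        self.chunksize = chunksize

    def __iter__(self):
        return self

    def __next__(self):
        # six.Iterator aliases python2's next() to __next__, so this
        # single definition satisfies the protocol on both interpreters.
        chunk = self.resp.read(self.chunksize)
        if chunk:
            return chunk
        raise StopIteration()


if __name__ == '__main__':
    body = io.BytesIO(b'x' * 150000)
    print([len(c) for c in ResponseBodyIterator(body)])
    # [65536, 65536, 18928]
```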
diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/__init__.py b/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/__init__.py deleted file mode 100644 index 265c2d9f65..0000000000 --- a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/config/generator.py b/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/config/generator.py deleted file mode 100644 index 526f71ef01..0000000000 --- a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/config/generator.py +++ /dev/null @@ -1,254 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 SINA Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Zhongyue Luo, SINA Corporation. 
-# -"""Extracts OpenStack config option info from module(s).""" - -import imp -import os -import re -import six -import socket -import sys -import textwrap - -from oslo_config import cfg - -from cgtsclient.openstack.common import gettextutils -from cgtsclient.openstack.common import importutils - -gettextutils.install('python-cgtsclient') - -STROPT = "StrOpt" -BOOLOPT = "BoolOpt" -INTOPT = "IntOpt" -FLOATOPT = "FloatOpt" -LISTOPT = "ListOpt" -MULTISTROPT = "MultiStrOpt" - -OPT_TYPES = { - STROPT: 'string value', - BOOLOPT: 'boolean value', - INTOPT: 'integer value', - FLOATOPT: 'floating point value', - LISTOPT: 'list value', - MULTISTROPT: 'multi valued', -} - -OPTION_COUNT = 0 -OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT, - FLOATOPT, LISTOPT, - MULTISTROPT])) - -PY_EXT = ".py" -BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), - "../../../../")) -WORDWRAP_WIDTH = 60 - - -def generate(srcfiles): - mods_by_pkg = dict() - for filepath in srcfiles: - pkg_name = filepath.split(os.sep)[1] - mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]), - os.path.basename(filepath).split('.')[0]]) - mods_by_pkg.setdefault(pkg_name, list()).append(mod_str) - # NOTE(lzyeval): place top level modules before packages - pkg_names = [x for x in mods_by_pkg.keys() if x.endswith(PY_EXT)] - pkg_names.sort() - ext_names = [x for x in mods_by_pkg.keys() if x not in pkg_names] - ext_names.sort() - pkg_names.extend(ext_names) - - # opts_by_group is a mapping of group name to an options list - # The options list is a list of (module, options) tuples - opts_by_group = {'DEFAULT': []} - - for pkg_name in pkg_names: - mods = mods_by_pkg.get(pkg_name) - mods.sort() - for mod_str in mods: - if mod_str.endswith('.__init__'): - mod_str = mod_str[:mod_str.rfind(".")] - - mod_obj = _import_module(mod_str) - if not mod_obj: - continue - - for group, opts in _list_opts(mod_obj): - opts_by_group.setdefault(group, []).append((mod_str, opts)) - - print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', [])) - for group, opts in opts_by_group.items(): - print_group_opts(group, opts) - - print("# Total option count: %d" % OPTION_COUNT) - - -def _import_module(mod_str): - try: - if mod_str.startswith('bin.'): - imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:])) - return sys.modules[mod_str[4:]] - else: - return importutils.import_module(mod_str) - except ImportError as ie: - sys.stderr.write("%s\n" % str(ie)) - return None - except Exception: - return None - - -def _is_in_group(opt, group): - "Check if opt is in group." - for key, value in group._opts.items(): - if value['opt'] == opt: - return True - return False - - -def _guess_groups(opt, mod_obj): - # is it in the DEFAULT group? - if _is_in_group(opt, cfg.CONF): - return 'DEFAULT' - - # what other groups is it in? - for key, value in cfg.CONF.items(): - if isinstance(value, cfg.CONF.GroupAttr): - if _is_in_group(opt, value._group): - return value._group.name - - raise RuntimeError( - "Unable to find group for option %s, " - "maybe it's defined twice in the same group?" 
- % opt.name - ) - - -def _list_opts(obj): - def is_opt(o): - return (isinstance(o, cfg.Opt) and - not isinstance(o, cfg.SubCommandOpt)) - - opts = list() - for attr_str in dir(obj): - attr_obj = getattr(obj, attr_str) - if is_opt(attr_obj): - opts.append(attr_obj) - elif (isinstance(attr_obj, list) and - all([is_opt(x) for x in attr_obj])): - opts.extend(attr_obj) - - ret = {} - for opt in opts: - ret.setdefault(_guess_groups(opt, obj), []).append(opt) - return ret.items() - - -def print_group_opts(group, opts_by_module): - print("[%s]" % group) - print('') - global OPTION_COUNT - for mod, opts in opts_by_module: - OPTION_COUNT += len(opts) - print('#') - print('# Options defined in %s' % mod) - print('#') - print('') - for opt in opts: - _print_opt(opt) - print('') - - -def _get_my_ip(): - try: - csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - csock.connect(('8.8.8.8', 80)) - (addr, port) = csock.getsockname() - csock.close() - return addr - except socket.error: - return None - - -def _sanitize_default(s): - """Set up a reasonably sensible default for pybasedir, my_ip and host.""" - if s.startswith(BASEDIR): - return s.replace(BASEDIR, '/usr/lib/python/site-packages') - elif BASEDIR in s: - return s.replace(BASEDIR, '') - elif s == _get_my_ip(): - return '10.0.0.1' - elif s == socket.gethostname(): - return 'python-cgtsclient' - elif s.strip() != s: - return '"%s"' % s - return s - - -def _print_opt(opt): - opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help - if not opt_help: - sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name) - opt_type = None - try: - opt_type = OPTION_REGEX.search(str(type(opt))).group(0) - except (ValueError, AttributeError) as err: - sys.stderr.write("%s\n" % str(err)) - sys.exit(1) - opt_help += ' (' + OPT_TYPES[opt_type] + ')' - print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))) - try: - if opt_default is None: - print('#%s=' % opt_name) - elif opt_type == STROPT: - assert(isinstance(opt_default, six.string_types)) - print('#%s=%s' % (opt_name, _sanitize_default(opt_default))) - elif opt_type == BOOLOPT: - assert(isinstance(opt_default, bool)) - print('#%s=%s' % (opt_name, str(opt_default).lower())) - elif opt_type == INTOPT: - assert(isinstance(opt_default, int) and - not isinstance(opt_default, bool)) - print('#%s=%s' % (opt_name, opt_default)) - elif opt_type == FLOATOPT: - assert(isinstance(opt_default, float)) - print('#%s=%s' % (opt_name, opt_default)) - elif opt_type == LISTOPT: - assert(isinstance(opt_default, list)) - print('#%s=%s' % (opt_name, ','.join(opt_default))) - elif opt_type == MULTISTROPT: - assert(isinstance(opt_default, list)) - if not opt_default: - opt_default = [''] - for default in opt_default: - print('#%s=%s' % (opt_name, default)) - print('') - except Exception: - sys.stderr.write('Error in option "%s"\n' % opt_name) - sys.exit(1) - - -def main(): - if len(sys.argv) < 2: - print("usage: %s [srcfile]...\n" % sys.argv[0]) - sys.exit(0) - generate(sys.argv[1:]) - -if __name__ == '__main__': - main() diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/gettextutils.py b/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/gettextutils.py deleted file mode 100644 index 15962e6979..0000000000 --- a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/gettextutils.py +++ /dev/null @@ -1,50 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -gettext for openstack-common modules. - -Usual usage in an openstack.common module: - - from cgts.openstack.common.gettextutils import _ -""" - -import gettext -import os - -_localedir = os.environ.get('cgtsclient'.upper() + '_LOCALEDIR') -_t = gettext.translation('cgtsclient', localedir=_localedir, fallback=True) - - -def _(msg): - return _t.ugettext(msg) - - -def install(domain): - """Install a _() function using the given translation domain. - - Given a translation domain, install a _() function using gettext's - install() function. - - The main difference from gettext.install() is that we allow - overriding the default localedir (e.g. /usr/share/locale) using - a translation-domain-specific environment variable (e.g. - NOVA_LOCALEDIR). - """ - gettext.install(domain, - localedir=os.environ.get(domain.upper() + '_LOCALEDIR'), - unicode=True) diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/importutils.py b/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/importutils.py deleted file mode 100644 index f19403bcc8..0000000000 --- a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/importutils.py +++ /dev/null @@ -1,66 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Import related utilities and helper functions. -""" - -import sys -import traceback - - -def import_class(import_str): - """Returns a class from a string including module and class""" - mod_str, _sep, class_str = import_str.rpartition('.') - try: - __import__(mod_str) - return getattr(sys.modules[mod_str], class_str) - except (ValueError, AttributeError): - raise ImportError('Class %s cannot be found (%s)' % - (class_str, - traceback.format_exception(*sys.exc_info()))) - - -def import_object(import_str, *args, **kwargs): - """Import a class and return an instance of it.""" - return import_class(import_str)(*args, **kwargs) - - -def import_object_ns(name_space, import_str, *args, **kwargs): - """Import a class and return an instance of it, first by trying - to find the class in a default namespace, then failing back to - a full path if not found in the default namespace. 
- """ - import_value = "%s.%s" % (name_space, import_str) - try: - return import_class(import_value)(*args, **kwargs) - except ImportError: - return import_class(import_str)(*args, **kwargs) - - -def import_module(import_str): - """Import a module.""" - __import__(import_str) - return sys.modules[import_str] - - -def try_import(import_str, default=None): - """Try to import a module and if it fails return default.""" - try: - return import_module(import_str) - except ImportError: - return default diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/__init__.py b/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/__init__.py deleted file mode 100644 index 2d32e4ef31..0000000000 --- a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/cmd.py b/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/cmd.py deleted file mode 100644 index dcc373efc1..0000000000 --- a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/cmd.py +++ /dev/null @@ -1,118 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Root wrapper for OpenStack services - -""" - -from __future__ import print_function - -import logging -import os -import pwd -import signal -import subprocess -import sys - -from six.moves import configparser - -RC_UNAUTHORIZED = 99 -RC_NOCOMMAND = 98 -RC_BADCONFIG = 97 -RC_NOEXECFOUND = 96 - - -def _subprocess_setup(): - # Python installs a SIGPIPE handler by default. This is usually not what - # non-Python subprocesses expect. 
- signal.signal(signal.SIGPIPE, signal.SIG_DFL) - - -def _exit_error(execname, message, errorcode, log=True): - print("%s: %s" % (execname, message)) - if log: - logging.error(message) - sys.exit(errorcode) - - -def main(): - # Split arguments, require at least a command - execname = sys.argv.pop(0) - if len(sys.argv) < 2: - _exit_error(execname, "No command specified", RC_NOCOMMAND, log=False) - - configfile = sys.argv.pop(0) - userargs = sys.argv[:] - - # Add ../ to sys.path to allow running from branch - possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname), - os.pardir, os.pardir)) - if os.path.exists(os.path.join(possible_topdir, "cgtsclient", - "__init__.py")): - sys.path.insert(0, possible_topdir) - - from cgtsclient.openstack.common.rootwrap import wrapper - - # Load configuration - try: - rawconfig = configparser.RawConfigParser() - rawconfig.read(configfile) - config = wrapper.RootwrapConfig(rawconfig) - except ValueError as exc: - msg = "Incorrect value in %s: %s" % (configfile, exc.message) - _exit_error(execname, msg, RC_BADCONFIG, log=False) - except configparser.Error: - _exit_error(execname, "Incorrect configuration file: %s" % configfile, - RC_BADCONFIG, log=False) - - if config.use_syslog: - wrapper.setup_syslog(execname, - config.syslog_log_facility, - config.syslog_log_level) - - # Execute command if it matches any of the loaded filters - filters = wrapper.load_filters(config.filters_path) - try: - filtermatch = wrapper.match_filter(filters, userargs, - exec_dirs=config.exec_dirs) - if filtermatch: - command = filtermatch.get_command(userargs, - exec_dirs=config.exec_dirs) - if config.use_syslog: - logging.info("(%s > %s) Executing %s (filter match = %s)" % ( - os.getlogin(), pwd.getpwuid(os.getuid())[0], - command, filtermatch.name)) - - obj = subprocess.Popen(command, - stdin=sys.stdin, - stdout=sys.stdout, - stderr=sys.stderr, - preexec_fn=_subprocess_setup, - env=filtermatch.get_environment(userargs)) - obj.wait() - sys.exit(obj.returncode) - - except wrapper.FilterMatchNotExecutable as exc: - msg = ("Executable not found: %s (filter match = %s)" - % (exc.match.exec_path, exc.match.name)) - _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog) - - except wrapper.NoFilterMatched: - msg = ("Unauthorized command: %s (no filter matched)" - % ' '.join(userargs)) - _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog) diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/filters.py b/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/filters.py deleted file mode 100644 index ae7c62cada..0000000000 --- a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/filters.py +++ /dev/null @@ -1,228 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import re - - -class CommandFilter(object): - """Command filter only checking that the 1st argument matches exec_path.""" - - def __init__(self, exec_path, run_as, *args): - self.name = '' - self.exec_path = exec_path - self.run_as = run_as - self.args = args - self.real_exec = None - - def get_exec(self, exec_dirs=[]): - """Returns existing executable, or empty string if none found.""" - if self.real_exec is not None: - return self.real_exec - self.real_exec = "" - if self.exec_path.startswith('/'): - if os.access(self.exec_path, os.X_OK): - self.real_exec = self.exec_path - else: - for binary_path in exec_dirs: - expanded_path = os.path.join(binary_path, self.exec_path) - if os.access(expanded_path, os.X_OK): - self.real_exec = expanded_path - break - return self.real_exec - - def match(self, userargs): - """Only check that the first argument (command) matches exec_path.""" - return os.path.basename(self.exec_path) == userargs[0] - - def get_command(self, userargs, exec_dirs=[]): - """Returns command to execute (with sudo -u if run_as != root).""" - to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path - if (self.run_as != 'root'): - # Used to run commands at lesser privileges - return ['sudo', '-u', self.run_as, to_exec] + userargs[1:] - return [to_exec] + userargs[1:] - - def get_environment(self, userargs): - """Returns specific environment to set, None if none.""" - return None - - -class RegExpFilter(CommandFilter): - """Command filter doing regexp matching for every argument.""" - - def match(self, userargs): - # Early skip if command or number of args don't match - if (len(self.args) != len(userargs)): - # DENY: argument numbers don't match - return False - # Compare each arg (anchoring pattern explicitly at end of string) - for (pattern, arg) in zip(self.args, userargs): - try: - if not re.match(pattern + '$', arg): - break - except re.error: - # DENY: Badly-formed filter - return False - else: - # ALLOW: All arguments matched - return True - - # DENY: Some arguments did not match - return False - - -class PathFilter(CommandFilter): - """Command filter checking that path arguments are within given dirs - - One can specify the following constraints for command arguments: - 1) pass - pass an argument as is to the resulting command - 2) some_str - check if an argument is equal to the given string - 3) abs path - check if a path argument is within the given base dir - - A typical rootwrapper filter entry looks like this: - # cmdname: filter name, raw command, user, arg_i_constraint [, ...] 
- chown: PathFilter, /bin/chown, root, nova, /var/lib/images - - """ - - def match(self, userargs): - command, arguments = userargs[0], userargs[1:] - - equal_args_num = len(self.args) == len(arguments) - exec_is_valid = super(PathFilter, self).match(userargs) - args_equal_or_pass = all( - arg == 'pass' or arg == value - for arg, value in zip(self.args, arguments) - if not os.path.isabs(arg) # arguments not specifying abs paths - ) - paths_are_within_base_dirs = all( - os.path.commonprefix([arg, os.path.realpath(value)]) == arg - for arg, value in zip(self.args, arguments) - if os.path.isabs(arg) # arguments specifying abs paths - ) - - return (equal_args_num and - exec_is_valid and - args_equal_or_pass and - paths_are_within_base_dirs) - - def get_command(self, userargs, exec_dirs=[]): - command, arguments = userargs[0], userargs[1:] - - # convert path values to canonical ones; copy other args as is - args = [os.path.realpath(value) if os.path.isabs(arg) else value - for arg, value in zip(self.args, arguments)] - - return super(PathFilter, self).get_command([command] + args, - exec_dirs) - - -class DnsmasqFilter(CommandFilter): - """Specific filter for the dnsmasq call (which includes env).""" - - CONFIG_FILE_ARG = 'CONFIG_FILE' - - def match(self, userargs): - if (userargs[0] == 'env' and - userargs[1].startswith(self.CONFIG_FILE_ARG) and - userargs[2].startswith('NETWORK_ID=') and - userargs[3] == 'dnsmasq'): - return True - return False - - def get_command(self, userargs, exec_dirs=[]): - to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path - dnsmasq_pos = userargs.index('dnsmasq') - return [to_exec] + userargs[dnsmasq_pos + 1:] - - def get_environment(self, userargs): - env = os.environ.copy() - env[self.CONFIG_FILE_ARG] = userargs[1].split('=')[-1] - env['NETWORK_ID'] = userargs[2].split('=')[-1] - return env - - -class DeprecatedDnsmasqFilter(DnsmasqFilter): - """Variant of dnsmasq filter to support old-style FLAGFILE.""" - CONFIG_FILE_ARG = 'FLAGFILE' - - -class KillFilter(CommandFilter): - """Specific filter for the kill calls. - 1st argument is the user to run /bin/kill under - 2nd argument is the location of the affected executable - Subsequent arguments list the accepted signals (if any) - - This filter relies on /proc to accurately determine affected - executable, so it will only work on procfs-capable systems (not OSX). - """ - - def __init__(self, *args): - super(KillFilter, self).__init__("/bin/kill", *args) - - def match(self, userargs): - if userargs[0] != "kill": - return False - args = list(userargs) - if len(args) == 3: - # A specific signal is requested - signal = args.pop(1) - if signal not in self.args[1:]: - # Requested signal not in accepted list - return False - else: - if len(args) != 2: - # Incorrect number of arguments - return False - if len(self.args) > 1: - # No signal requested, but filter requires specific signal - return False - try: - command = os.readlink("/proc/%d/exe" % int(args[1])) - # NOTE(yufang521247): /proc/PID/exe may have '\0' on the - # end, because python doen't stop at '\0' when read the - # target path. 
- command = command.split('\0')[0] - # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on - # the end if an executable is updated or deleted - if command.endswith(" (deleted)"): - command = command[:command.rindex(" ")] - if command != self.args[0]: - # Affected executable does not match - return False - except (ValueError, OSError): - # Incorrect PID - return False - return True - - -class ReadFileFilter(CommandFilter): - """Specific filter for the utils.read_file_as_root call.""" - - def __init__(self, file_path, *args): - self.file_path = file_path - super(ReadFileFilter, self).__init__("/bin/cat", "root", *args) - - def match(self, userargs): - if userargs[0] != 'cat': - return False - if userargs[1] != self.file_path: - return False - if len(userargs) != 2: - return False - return True diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/wrapper.py b/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/wrapper.py deleted file mode 100644 index 0117b308ee..0000000000 --- a/sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/wrapper.py +++ /dev/null @@ -1,151 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import logging -import logging.handlers -import os -import string - -from cgtsclient.openstack.common.rootwrap import filters -from six.moves import configparser - - -class NoFilterMatched(Exception): - """This exception is raised when no filter matched.""" - pass - - -class FilterMatchNotExecutable(Exception): - """raise if filter matche but not executable - - This exception is raised when a filter matched but no executable was - found. 
- """ - def __init__(self, match=None, **kwargs): - self.match = match - - -class RootwrapConfig(object): - - def __init__(self, config): - # filters_path - self.filters_path = config.get("DEFAULT", "filters_path").split(",") - - # exec_dirs - if config.has_option("DEFAULT", "exec_dirs"): - self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",") - else: - # Use system PATH if exec_dirs is not specified - self.exec_dirs = os.environ["PATH"].split(':') - - # syslog_log_facility - if config.has_option("DEFAULT", "syslog_log_facility"): - v = config.get("DEFAULT", "syslog_log_facility") - facility_names = logging.handlers.SysLogHandler.facility_names - self.syslog_log_facility = getattr(logging.handlers.SysLogHandler, - v, None) - if self.syslog_log_facility is None and v in facility_names: - self.syslog_log_facility = facility_names.get(v) - if self.syslog_log_facility is None: - raise ValueError('Unexpected syslog_log_facility: %s' % v) - else: - default_facility = logging.handlers.SysLogHandler.LOG_SYSLOG - self.syslog_log_facility = default_facility - - # syslog_log_level - if config.has_option("DEFAULT", "syslog_log_level"): - v = config.get("DEFAULT", "syslog_log_level") - self.syslog_log_level = logging.getLevelName(v.upper()) - if (self.syslog_log_level == "Level %s" % v.upper()): - raise ValueError('Unexepected syslog_log_level: %s' % v) - else: - self.syslog_log_level = logging.ERROR - - # use_syslog - if config.has_option("DEFAULT", "use_syslog"): - self.use_syslog = config.getboolean("DEFAULT", "use_syslog") - else: - self.use_syslog = False - - -def setup_syslog(execname, facility, level): - rootwrap_logger = logging.getLogger() - rootwrap_logger.setLevel(level) - handler = logging.handlers.SysLogHandler(address='/dev/log', - facility=facility) - handler.setFormatter(logging.Formatter( - os.path.basename(execname) + ': %(message)s')) - rootwrap_logger.addHandler(handler) - - -def build_filter(class_name, *args): - """Returns a filter object of class class_name.""" - if not hasattr(filters, class_name): - logging.warning("Skipping unknown filter class (%s) specified " - "in filter definitions" % class_name) - return None - filterclass = getattr(filters, class_name) - return filterclass(*args) - - -def load_filters(filters_path): - """Load filters from a list of directories.""" - filterlist = [] - for filterdir in filters_path: - if not os.path.isdir(filterdir): - continue - for filterfile in os.listdir(filterdir): - filterconfig = configparser.RawConfigParser() - filterconfig.read(os.path.join(filterdir, filterfile)) - for (name, value) in filterconfig.items("Filters"): - filterdefinition = [string.strip(s) for s in value.split(',')] - newfilter = build_filter(*filterdefinition) - if newfilter is None: - continue - newfilter.name = name - filterlist.append(newfilter) - return filterlist - - -def match_filter(filter_list, userargs, exec_dirs=[]): - """check user command and args - - Checks user command and arguments through command filters and - returns the first matching filter. - Raises NoFilterMatched if no filter matched. - Raises FilterMatchNotExecutable if no executable was found for the - best filter match. 
- """ - first_not_executable_filter = None - - for f in filter_list: - if f.match(userargs): - # Try other filters if executable is absent - if not f.get_exec(exec_dirs=exec_dirs): - if not first_not_executable_filter: - first_not_executable_filter = f - continue - # Otherwise return matching filter for execution - return f - - if first_not_executable_filter: - # A filter matched, but no executable was found for it - raise FilterMatchNotExecutable(match=first_not_executable_filter) - - # No filter matched - raise NoFilterMatched() diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/shell.py index 3b249ac605..75ccba814f 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/shell.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/shell.py @@ -349,5 +349,6 @@ def main(): print(e, file=sys.stderr) sys.exit(1) + if __name__ == "__main__": main() diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/health.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/health.py index 47c889fef8..18c4a10a8a 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/health.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/health.py @@ -20,3 +20,8 @@ class HealthManager(base.Manager): path = '/v1/health/upgrade' resp, body = self.api.json_request('GET', path) return body + + def get_kube_upgrade(self): + path = '/v1/health/kube-upgrade' + resp, body = self.api.json_request('GET', path) + return body diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/health_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/health_shell.py index 5ece9c4807..f13e283cec 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/health_shell.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/health_shell.py @@ -17,3 +17,8 @@ def do_health_query(cc, args): def do_health_query_upgrade(cc, args): """Run the Health Check for an Upgrade.""" print(cc.health.get_upgrade()) + + +def do_health_query_kube_upgrade(cc, args): + """Run the Health Check for a Kubernetes Upgrade.""" + print(cc.health.get_kube_upgrade()) diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py index f3f29071de..6421885907 100755 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py @@ -13,10 +13,10 @@ from collections import OrderedDict import datetime import os +from cgtsclient._i18n import _ from cgtsclient.common import constants from cgtsclient.common import utils from cgtsclient import exc -from cgtsclient.openstack.common.gettextutils import _ from cgtsclient.v1 import icpu as icpu_utils from cgtsclient.v1 import ihost as ihost_utils from cgtsclient.v1 import iinterface as iinterface_utils @@ -513,6 +513,7 @@ def _list_storage(cc, host): fields = ['uuid', 'lvm_pv_name', 'disk_or_part_device_path', 'lvm_vg_name'] utils.print_list(ipvs, fields, field_labels, sortby=0) + """ NOTE (neid): all three "do_host_apply_profile" methods can be replaced diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/icpu.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/icpu.py index ff4bbd5694..60fe5fdb90 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/icpu.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/icpu.py @@ -6,9 +6,9 @@ # +from cgtsclient._i18n import _ from cgtsclient.common import base from cgtsclient import exc -from cgtsclient.openstack.common.gettextutils import _ CREATION_ATTRIBUTES = ['ihost_uuid', 'inode_uuid', 
'cpu', 'core', 'thread', diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/isystem_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/isystem_shell.py index c702b601bc..76fced8dec 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/isystem_shell.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/isystem_shell.py @@ -86,7 +86,9 @@ def do_show(cc, args): @utils.arg('-S', '--security_feature', metavar='', choices=['spectre_meltdown_v1', 'spectre_meltdown_all'], - help='Use spectre_meltdown_v1 for spectre/meltdown v1 fixes, or spectre_meltdown_all to use all fixes') + help='Use spectre_meltdown_v1 to add linux bootargs "nopti ' + 'nospectre_v2 nospectre_v1", or spectre_meltdown_all to not ' + 'add any mitigation disabling bootargs') def do_modify(cc, args): """Modify system attributes.""" isystems = cc.isystem.list() diff --git a/sysinv/cgts-client/cgts-client/requirements.txt b/sysinv/cgts-client/cgts-client/requirements.txt index fae039f467..be7aee85cf 100644 --- a/sysinv/cgts-client/cgts-client/requirements.txt +++ b/sysinv/cgts-client/cgts-client/requirements.txt @@ -1,2 +1,5 @@ python-neutronclient keyring +oslo.i18n # Apache-2.0 +oslo.serialization>=1.10.0,!=2.19.1 # Apache-2.0 +oslo.utils>=3.5.0 # Apache-2.0 diff --git a/sysinv/cgts-client/cgts-client/test-requirements.txt b/sysinv/cgts-client/cgts-client/test-requirements.txt index ad0a4024f7..9beb4450d2 100644 --- a/sysinv/cgts-client/cgts-client/test-requirements.txt +++ b/sysinv/cgts-client/cgts-client/test-requirements.txt @@ -3,7 +3,8 @@ # process, which may cause wedges in the gate later. # Hacking already pins down pep8, pyflakes and flake8 -hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 +hacking>=1.1.0,<=2.0.0 # Apache-2.0 +pycodestyle>=2.0.0 # MIT License bandit>=1.1.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD diff --git a/sysinv/cgts-client/cgts-client/tox.ini b/sysinv/cgts-client/cgts-client/tox.ini index a32c792bbe..a3c44394d3 100644 --- a/sysinv/cgts-client/cgts-client/tox.ini +++ b/sysinv/cgts-client/cgts-client/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py27,pep8,pylint +envlist = py27,py36,pep8,pylint minversion = 1.6 #skipsdist = True @@ -47,9 +47,11 @@ commands = stestr slowest [testenv:pep8] +basepython = python3 deps = -r{toxinidir}/test-requirements.txt + flake8-bugbear commands = - flake8 cgtsclient + flake8 cgtsclient [testenv:venv] commands = {posargs} @@ -75,16 +77,32 @@ commands = show-source = true exclude=.*,dist,*lib/python*,*egg,build max-complexity=25 -ignore = H102,H104,H105,H238,H404,H405,E501,F841 -#H102 Apache 2.0 license header not found -#H104 File contains nothing but comments -#H105 Don't use author tags -#H238 old style class declaration, use new style (inherit from `object`) -#H404 multi line docstring should start without a leading new line -#H405 multi line docstring summary not separated with an empty line -#E501 line too long -#F841 local variable 'X' is assigned to but never used - +# H102 Apache 2.0 license header not found +# H104 File contains nothing but comments +# H105 Don't use author tags +# H238 old style class declaration, use new style (inherit from `object`) +# H404 multi line docstring should start without a leading new line +# H405 multi line docstring summary not separated with an empty line +# -B- codes are bugbear +# B004 Using `hasattr(x, '__call__')` to test if `x` is callable is unreliable. +# B005 Using .strip() with multi-character strings is misleading the reader. 
+# B006 Do not use mutable data structures for argument defaults +# B009 Do not call getattr with a constant attribute value +# B010 Do not call setattr with a constant attribute value +# -W- codes are warnings +# W503 line break before binary operator +# W504 line break after binary operator +# W605 invalid escape sequence +# -E- codes are errors +# E501 line too long +# E731 do not assign a lambda expression, use a def +# -F- codes are errors +# F841 local variable 'X' is assigned to but never used +ignore = H102,H104,H105,H238,H404,H405, + B004,B005,B006,B009,B010, + W503,W504,W605, + E501,E731, + F841 [testenv:pylint] basepython = python2.7 diff --git a/sysinv/sysinv/centos/build_srpm.data b/sysinv/sysinv/centos/build_srpm.data index 7daffe0850..847aaa23d1 100644 --- a/sysinv/sysinv/centos/build_srpm.data +++ b/sysinv/sysinv/centos/build_srpm.data @@ -1,2 +1,2 @@ SRC_DIR="sysinv" -TIS_PATCH_VER=342 +TIS_PATCH_VER=344 diff --git a/sysinv/sysinv/centos/sysinv.spec b/sysinv/sysinv/centos/sysinv.spec index 84c6ecd9bd..24ec182d30 100644 --- a/sysinv/sysinv/centos/sysinv.spec +++ b/sysinv/sysinv/centos/sysinv.spec @@ -19,11 +19,21 @@ Requires: python3-botocore >= 1.13.21 Requires: python3-docker Requires: python3-eventlet Requires: python3-ipaddr +Requires: python3-jsonpatch Requires: python3-keyring +Requires: python3-keystoneauth1 +Requires: python3-keystonemiddleware Requires: python3-kubernetes Requires: python3-netaddr -Requires: python3-pyudev +Requires: python3-paste Requires: python3-pbr +Requires: python3-psutil +Requires: python3-pyudev +Requires: python3-requests +Requires: python3-retrying +Requires: python3-sqlalchemy +Requires: python3-stevedore +Requires: python3-webob Requires: python3-webtest Requires: python3-wsme Requires: python3-six @@ -34,6 +44,8 @@ Requires: python3-oslo-config Requires: python3-oslo-concurrency Requires: python3-oslo-db Requires: python3-oslo-log +Requires: python3-oslo-serialization +Requires: python3-oslo-service Requires: python3-oslo-utils Requires: python3-pecan Requires: tsconfig diff --git a/sysinv/sysinv/debian/control b/sysinv/sysinv/debian/control index 5e5ee0b201..a2ddeb6773 100644 --- a/sysinv/sysinv/debian/control +++ b/sysinv/sysinv/debian/control @@ -23,6 +23,7 @@ Depends: ${misc:Depends}, python-pyudev, python-pbr, python-ipaddr, + python-jsonpatch, python-kubernetes, python-eventlet, python-oslo-utils, @@ -30,12 +31,23 @@ Depends: ${misc:Depends}, python-oslo-concurrency, python-oslo-db, python-oslo-log, + python-oslo-serialization, + python-oslo-service, python-netaddr, + python-webob, python-webtest, python-wsme, python-mock, python-keyring, + python-keystoneauth1, + python-keystonemiddleware, + python-paste, python-pecan, + python-psutil, + python-requests, + python-retrying, + python-sqlalchemy, + python-stevedore, python-mox3, python-pytest, python-testtools, diff --git a/sysinv/sysinv/opensuse/sysinv.spec b/sysinv/sysinv/opensuse/sysinv.spec index db22d81baf..99e5089432 100644 --- a/sysinv/sysinv/opensuse/sysinv.spec +++ b/sysinv/sysinv/opensuse/sysinv.spec @@ -23,21 +23,32 @@ Requires: python2-coverage Requires: python2-docker Requires: python2-eventlet Requires: python2-ipaddr +Requires: python2-jsonpatch Requires: python2-keyring +Requires: python2-keystoneauth1 Requires: python2-keystonemiddleware Requires: python2-kubernetes Requires: python2-netaddr +Requires: python2-paste Requires: python2-pyudev Requires: python2-pbr +Requires: python2-psutil +Requires: python2-requests +Requires: python2-retrying +Requires: 
python2-webob Requires: python2-WebTest Requires: python2-WSME Requires: python2-six +Requires: python2-sqlalchemy +Requires: python2-stevedore Requires: python2-mox3 Requires: python2-oslo.i18n Requires: python2-oslo.config Requires: python2-oslo.concurrency Requires: python2-oslo.db Requires: python2-oslo.log +Requires: python2-oslo.serialization +Requires: python2-oslo.service Requires: python2-oslo.utils Requires: python2-pecan Requires: tsconfig diff --git a/sysinv/sysinv/sysinv/etc/sysinv/sysinv.conf.sample b/sysinv/sysinv/sysinv/etc/sysinv/sysinv.conf.sample index bcdd5fabcc..41ce2e3f9c 100644 --- a/sysinv/sysinv/sysinv/etc/sysinv/sysinv.conf.sample +++ b/sysinv/sysinv/sysinv/etc/sysinv/sysinv.conf.sample @@ -116,7 +116,7 @@ # -# Options defined in sysinv.openstack.common.lockutils +# Options defined in oslo_concurrency.lockutils # # Whether to disable inter-process locks (boolean value) diff --git a/sysinv/sysinv/sysinv/openstack-common.conf b/sysinv/sysinv/sysinv/openstack-common.conf index d7efd9b375..35ea154d64 100644 --- a/sysinv/sysinv/sysinv/openstack-common.conf +++ b/sysinv/sysinv/sysinv/openstack-common.conf @@ -3,11 +3,9 @@ module=config.generator module=context module=db module=db.sqlalchemy -module=fixture module=flakes module=install_venv_common module=local -module=lockutils module=loopingcall module=notifier module=patch_tox_venv diff --git a/sysinv/sysinv/sysinv/pylint.rc b/sysinv/sysinv/sysinv/pylint.rc index b64edd8c43..8ee8c16d76 100755 --- a/sysinv/sysinv/sysinv/pylint.rc +++ b/sysinv/sysinv/sysinv/pylint.rc @@ -7,7 +7,7 @@ rcfile=pylint.rc #init-hook= # Add files or directories to the blacklist. Should be base names, not paths. -ignore=tests +ignore= # Pickle collected data for later comparisons. persistent=yes @@ -84,19 +84,16 @@ extension-pkg-whitelist=lxml.etree,greenlet # E0213: no-self-argument # E0401: import-error # E0604: invalid-all-object -# E0611: no-name-in-module # E0633: unpacking-non-sequence # E0701: bad-except-order # E1102: not-callable # E1120: no-value-for-parameter # E1121: too-many-function-args -# E1124: redundant-keyword-arg disable=C, R, fixme, W0101, W0105, W0106, W0107, W0108, W0110, W0123, W0150, W0201, W0211, W0212, W0221, W0223, W0231, W0235, W0311, W0402, W0403, W0404, W0603, W0612, W0613, W0621, W0622, W0631, W0632, W0701, W0703, W1113, W1201, W1401, W1505, - E0213, E0401, E0604, E0611, E0633, E0701, - E1102, E1120, E1121, E1124 + E0213, E0401, E0604, E0633, E0701, E1102, E1120, E1121 [REPORTS] # Set the output format. Available formats are text, parseable, colorized, msvs @@ -109,7 +106,7 @@ output-format=text files-output=no # Tells whether to display a full report or only the messages -reports=no +reports=yes # Python expression which should return a note less than 10 (10 is the highest # note). 
You have access to the variables errors warning, statement which diff --git a/sysinv/sysinv/sysinv/setup.cfg b/sysinv/sysinv/sysinv/setup.cfg index 264737707d..2bc0f75781 100644 --- a/sysinv/sysinv/sysinv/setup.cfg +++ b/sysinv/sysinv/sysinv/setup.cfg @@ -69,6 +69,11 @@ systemconfig.helm_applications = stx-openstack = systemconfig.helm_plugins.stx_openstack platform-integ-apps = systemconfig.helm_plugins.platform_integ_apps stx-monitor = systemconfig.helm_plugins.stx_monitor + oidc-auth-apps = systemconfig.helm_plugins.oidc_auth_apps + +systemconfig.helm_plugins.oidc_auth_apps = + 001_dex = sysinv.helm.dex:Dex + 002_oidc-client = sysinv.helm.oidc_client:OidcClientHelm systemconfig.helm_plugins.platform_integ_apps = 001_helm-toolkit = sysinv.helm.helm_toolkit:HelmToolkitHelm diff --git a/sysinv/sysinv/sysinv/sysinv/agent/disk.py b/sysinv/sysinv/sysinv/sysinv/agent/disk.py index 6591188585..8f728129d8 100644 --- a/sysinv/sysinv/sysinv/sysinv/agent/disk.py +++ b/sysinv/sysinv/sysinv/sysinv/agent/disk.py @@ -338,11 +338,11 @@ class DiskOperator(object): device_type = device.device_type rotation_rate = constants.DEVICE_TYPE_UNDETERMINED - if rotational is '1': + if rotational == '1': device_type = constants.DEVICE_TYPE_HDD if 'ID_ATA_ROTATION_RATE_RPM' in device: rotation_rate = device['ID_ATA_ROTATION_RATE_RPM'] - elif rotational is '0': + elif rotational == '0': if constants.DEVICE_NAME_NVME in device.device_node: device_type = constants.DEVICE_TYPE_NVME else: diff --git a/sysinv/sysinv/sysinv/sysinv/agent/lldp/plugin.py b/sysinv/sysinv/sysinv/sysinv/agent/lldp/plugin.py index 2e7244969f..d1b84a8521 100644 --- a/sysinv/sysinv/sysinv/sysinv/agent/lldp/plugin.py +++ b/sysinv/sysinv/sysinv/sysinv/agent/lldp/plugin.py @@ -29,9 +29,9 @@ class Key(object): return hash((self.chassisid, self.portid, self.portname)) def __cmp__(self, rhs): - return (cmp(self.chassisid, rhs.chassisid) or - cmp(self.portid, rhs.portid) or - cmp(self.portname, rhs.portname)) + return ((self.chassisid < rhs.chassisid) or + (self.portid < rhs.portid) or + (self.portname < rhs.portname)) def __eq__(self, rhs): return (self.chassisid == rhs.chassisid and diff --git a/sysinv/sysinv/sysinv/sysinv/agent/manager.py b/sysinv/sysinv/sysinv/sysinv/agent/manager.py index d0733d1b40..f029d766a2 100644 --- a/sysinv/sysinv/sysinv/sysinv/agent/manager.py +++ b/sysinv/sysinv/sysinv/sysinv/agent/manager.py @@ -760,7 +760,7 @@ class AgentManager(service.PeriodicService): except Timeout: LOG.info("get_ihost_by_macs rpc Timeout.") return # wait for next audit cycle - except Exception as ex: + except Exception: LOG.warn("Conductor RPC get_ihost_by_macs exception " "response") diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/address.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/address.py index 49b5246f94..0e4ef4e1e1 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/address.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/address.py @@ -224,8 +224,8 @@ class AddressController(rest.RestController): sort_key=sort_key, sort_dir=sort_dir) else: addresses = pecan.request.dbapi.addresses_get_all( - family=0, limit=limit, marker=marker_obj, - sort_key=sort_key, sort_dir=sort_dir) + limit=limit, marker=marker_obj, sort_key=sort_key, + sort_dir=sort_dir) return AddressCollection.convert_with_links( addresses, limit, url=resource_url, expand=expand, @@ -461,8 +461,10 @@ class AddressController(rest.RestController): def delete(self, address_uuid): """Delete an IP address.""" address = self._get_one(address_uuid) - 
interface_uuid = getattr(address, 'interface_uuid') - self._check_orphaned_routes(interface_uuid, address.as_dict()) - self._check_host_state(getattr(address, 'forihostid')) + if address.interface_uuid: + self._check_orphaned_routes(address.interface_uuid, + address.as_dict()) + if address.forihostid: + self._check_host_state(address.forihostid) self._check_from_pool(getattr(address, 'pool_uuid')) pecan.request.dbapi.address_destroy(address_uuid) diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/health.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/health.py index 14d6fd6d7c..1ca188cc77 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/health.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/health.py @@ -36,11 +36,24 @@ class HealthController(rest.RestController): @wsme_pecan.wsexpose(wtypes.text, wtypes.text) def get_one(self, upgrade): """Validates the health of the system for an upgrade""" - try: - success, output = pecan.request.rpcapi.get_system_health( - pecan.request.context, upgrade=True) - except Exception as e: - LOG.exception(e) + if upgrade == 'upgrade': + try: + success, output = pecan.request.rpcapi.get_system_health( + pecan.request.context, upgrade=True) + except Exception as e: + LOG.exception(e) + raise wsme.exc.ClientSideError(_( + "Unable to perform health upgrade query.")) + return output + elif upgrade == 'kube-upgrade': + try: + success, output = pecan.request.rpcapi.get_system_health( + pecan.request.context, kube_upgrade=True) + except Exception as e: + LOG.exception(e) + raise wsme.exc.ClientSideError(_( + "Unable to perform kubernetes health upgrade query.")) + return output + else: raise wsme.exc.ClientSideError(_( - "Unable to perform health upgrade query.")) - return output + "Unsupported upgrade type %s." % upgrade)) diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py index 650614d652..f56f11dbae 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py @@ -16,7 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. # -# Copyright (c) 2013-2019 Wind River Systems, Inc. +# Copyright (c) 2013-2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 @@ -41,7 +41,6 @@ import wsme import wsmeext.pecan as wsme_pecan from wsme import types as wtypes -from controllerconfig import HOST_XML_ATTRIBUTES from fm_api import constants as fm_constants from fm_api import fm_api from pecan import expose @@ -98,6 +97,12 @@ from sysinv.common import health LOG = log.getLogger(__name__) KEYRING_BM_SERVICE = "BM" ERR_CODE_LOCK_SOLE_SERVICE_PROVIDER = "-1003" +HOST_XML_ATTRIBUTES = ['hostname', 'personality', 'subfunctions', + 'mgmt_mac', 'mgmt_ip', + 'bm_ip', 'bm_type', 'bm_username', + 'bm_password', 'boot_device', 'rootfs_device', + 'install_output', 'console', 'vsc_controllers', + 'power_on', 'location'] def _get_controller_address(hostname): @@ -6581,8 +6586,7 @@ class HostController(rest.RestController): # Verify the upgrade is in the correct state if kube_upgrade_obj.state in [ kubernetes.KUBE_UPGRADE_DOWNLOADED_IMAGES, - kubernetes.KUBE_UPGRADED_NETWORKING, - kubernetes.KUBE_UPGRADED_FIRST_MASTER]: + kubernetes.KUBE_UPGRADED_NETWORKING]: # We are upgrading a control plane pass elif kube_upgrade_obj.state in [ diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py index a9d0f082d1..af70d0b69f 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface.py @@ -758,12 +758,26 @@ def _check_interface_mtu(interface, ihost, from_profile=False): return interface +def _get_host_mgmt_interface(ihost): + for iface in pecan.request.dbapi.iinterface_get_by_ihost(ihost['id']): + for ni in pecan.request.dbapi.interface_network_get_by_interface(iface['id']): + network = pecan.request.dbapi.network_get(ni.network_id) + if network.type == constants.NETWORK_TYPE_MGMT: + return iface + return None + + def _check_interface_sriov(interface, ihost, from_profile=False): sriov_update = False if 'ifclass' in interface.keys() and not interface['ifclass']: return sriov_update + if (interface['ifclass'] == constants.INTERFACE_CLASS_PCI_SRIOV and + _get_host_mgmt_interface(ihost) is None): + raise wsme.exc.ClientSideError(_("Unable to provision pci-sriov interface " + "without configured mgmt interface.")) + if (interface['ifclass'] == constants.INTERFACE_CLASS_PCI_SRIOV and 'sriov_numvfs' not in interface.keys()): raise wsme.exc.ClientSideError(_("A network type of pci-sriov must specify " diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_app.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_app.py index 40edd05f23..1b60b71d4a 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_app.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_app.py @@ -310,12 +310,14 @@ class KubeAppController(rest.RestController): # Examine all the required labels on the given hosts # and build up our actual and good label counts. 
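For orientation before the tallying code below: the audit works with three dictionaries keyed by label name. A sketch of their assumed shapes (the label keys and counts here are illustrative, not taken from this patch):

# required_label_counts: how many enabled hosts must carry each label.
# label_counts / good_label_counts tally what the audit actually finds,
# the latter restricted to unlocked/enabled hosts.
required_label_counts = {'elastic-controller': 2, 'elastic-master': 3}
label_counts = dict.fromkeys(required_label_counts, 0)
good_label_counts = dict.fromkeys(required_label_counts, 0)
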
+ host_info = {} for host in hosts: labels = pecan.request.dbapi.label_get_by_host(host.uuid) host_good = (host.administrative == constants.ADMIN_UNLOCKED and host.operational == constants.OPERATIONAL_ENABLED) + host_labels_dict = {} for label in labels: if label.label_key in required_label_counts: if label.label_value == helm_common.LABEL_VALUE_ENABLED: @@ -323,6 +325,10 @@ class KubeAppController(rest.RestController): if host_good: good_label_counts[label.label_key] += 1 + host_labels_dict[label.label_key] = label.label_value + + host_info[host.hostname] = {"personality": host.personality, "labels": host_labels_dict} + # If we are short of labels on unlocked and enabled hosts # inform the user with a detailed message. msg = "" @@ -333,6 +339,11 @@ class KubeAppController(rest.RestController): (k, helm_common.LABEL_VALUE_ENABLED, v, label_counts[k], good_label_counts[k])) + if msg: + app_helper = KubeAppHelper(pecan.request.dbapi) + msg += "\n" + msg += app_helper._extract_missing_labels_message(host_info, required_label_counts) + if msg: raise wsme.exc.ClientSideError( _("Operation rejected: application stx-monitor " @@ -724,3 +735,45 @@ class KubeAppHelper(object): p, os.path.join(p, f), demote_user): raise exception.SysinvException(_( "failed to extract tar file {}.".format(os.path.basename(f)))) + + def _extract_missing_labels_message(self, host_info_dict, required_label_counts): + msg = "" + have_workers = False + one_worker_have_required_label = False + for host_name, host_info in host_info_dict.items(): + host_personality = host_info.get("personality") + labels = host_info.get("labels") + host_msg = ("%s " % host_name) + missing_labels = False + invalid_labels = False + if (host_personality == constants.CONTROLLER): + for required_label_name, required_label_value in required_label_counts.items(): + if required_label_name not in labels: + missing_labels = True + host_msg += ("%s=%s " % (required_label_name, helm_common.LABEL_VALUE_ENABLED)) + elif labels.get(required_label_name) != helm_common.LABEL_VALUE_ENABLED: + invalid_labels = True + host_msg += (" %s=%s " % (required_label_name, labels.get(required_label_name))) + + if missing_labels: + msg += (", Please use [system host-label-assign %s] " + "to set the missing label\n" % + host_msg) + if invalid_labels: + msg += (", Please correct host labels values to be enabled [%s]\n" % + host_msg) + elif (host_personality == constants.WORKER): + have_workers = True + if (labels.get(helm_common.LABEL_MONITOR_MASTER) == helm_common.LABEL_VALUE_ENABLED): + one_worker_have_required_label = True + elif (labels.get(helm_common.LABEL_MONITOR_MASTER) != helm_common.LABEL_VALUE_ENABLED): + msg += (", Please correct host labels values to be enabled [%s %s=%s]\n" % + (host_name, helm_common.LABEL_MONITOR_MASTER, + labels.get(helm_common.LABEL_MONITOR_MASTER))) + + if (have_workers and not one_worker_have_required_label): + msg += (", Please use [system host-label-assign %s=%s] to" + " set the missing label on one of the worker(s)" % + (helm_common.LABEL_MONITOR_MASTER, helm_common.LABEL_VALUE_ENABLED)) + + return msg diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_upgrade.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_upgrade.py index 8d16a5d31b..968f25e636 100755 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_upgrade.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_upgrade.py @@ -170,6 +170,16 @@ class KubeUpgradeController(rest.RestController): force = body.get('force', False) is True + # There 
must not be a platform upgrade in progress + try: + pecan.request.dbapi.software_upgrade_get_one() + except exception.NotFound: + pass + else: + raise wsme.exc.ClientSideError(_( + "A kubernetes upgrade cannot be done while a platform upgrade " + "is in progress")) + # There must not already be a kubernetes upgrade in progress try: pecan.request.dbapi.kube_upgrade_get_one() @@ -214,9 +224,9 @@ class KubeUpgradeController(rest.RestController): # TODO: check that all installed applications support new k8s version # TODO: check that tiller/armada support new k8s version - # The system must be healthy from the platform perspective + # The system must be healthy success, output = pecan.request.rpcapi.get_system_health( - pecan.request.context, force=force) + pecan.request.context, force=force, kube_upgrade=True) if not success: LOG.info("Health query failure during kubernetes upgrade start: %s" % output) @@ -225,9 +235,7 @@ class KubeUpgradeController(rest.RestController): else: raise wsme.exc.ClientSideError(_( "System is not in a valid state for kubernetes upgrade. " - "Run system health-query-upgrade for more details.")) - - # TODO: kubernetes related health checks... + "Run system health-query for more details.")) # Create upgrade record. create_values = {'from_version': current_kube_version, @@ -327,6 +335,15 @@ class KubeUpgradeController(rest.RestController): "Kubernetes upgrade must be in %s state to complete" % kubernetes.KUBE_UPGRADING_KUBELETS)) + # Make sure no hosts are in a transitory or failed state + kube_host_upgrades = \ + pecan.request.dbapi.kube_host_upgrade_get_list() + for kube_host_upgrade in kube_host_upgrades: + if kube_host_upgrade.status is not None: + raise wsme.exc.ClientSideError(_( + "At least one host has not completed the kubernetes " + "upgrade")) + # Make sure the target version is active version_states = self._kube_operator.kube_get_version_states() if version_states.get(kube_upgrade_obj.to_version, None) != \ diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/network.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/network.py index 100118dff0..451e10ca10 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/network.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/network.py @@ -366,6 +366,7 @@ class NetworkController(rest.RestController): network['type'] in [constants.NETWORK_TYPE_MGMT, constants.NETWORK_TYPE_OAM, constants.NETWORK_TYPE_CLUSTER_HOST, + constants.NETWORK_TYPE_PXEBOOT, constants.NETWORK_TYPE_CLUSTER_POD, constants.NETWORK_TYPE_CLUSTER_SERVICE]: msg = _("Cannot delete type {} network {} after initial " diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py index 0c3994a9a1..6870ae8508 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/profile.py @@ -1024,7 +1024,7 @@ class ProfileController(rest.RestController): @cutils.synchronized(LOCK_NAME) @expose('json') def import_profile(self, file): - class ProfileObj(object): + class ProfileObj(object): # noqa: F823 display = "" proc = None @@ -1470,7 +1470,7 @@ def _create_if_profile(profile_name, profile_node): try: pecan.request.dbapi.iinterface_update(i.uuid, idict) - except Exception as e: + except Exception: raise wsme.exc.ClientSideError(_("Failed to link interface uses.")) except Exception as exc: ihost.ethernet_ports = \ diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/route.py 
b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/route.py index 69e409956f..dcddc99964 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/route.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/route.py @@ -347,7 +347,8 @@ class RouteController(rest.RestController): self._check_reachable_gateway(interface_id, route) # Attempt to create the new route record result = pecan.request.dbapi.route_create(interface_id, route) - pecan.request.rpcapi.update_route_config(pecan.request.context) + pecan.request.rpcapi.update_route_config(pecan.request.context, + result.forihostid) return Route.convert_with_links(result) @@ -378,6 +379,9 @@ class RouteController(rest.RestController): @wsme_pecan.wsexpose(None, types.uuid, status_code=204) def delete(self, route_uuid): """Delete an IP route.""" - self._get_one(route_uuid) + try: + route = objects.route.get_by_uuid(pecan.request.context, route_uuid) + except exception.RouteNotFound: + raise pecan.request.dbapi.route_destroy(route_uuid) - pecan.request.rpcapi.update_route_config(pecan.request.context) + pecan.request.rpcapi.update_route_config(pecan.request.context, route.forihostid) diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/upgrade.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/upgrade.py index bafb92aebc..afa2152b9f 100755 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/upgrade.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/upgrade.py @@ -198,6 +198,16 @@ class UpgradeController(rest.RestController): "upgrade-start rejected: An upgrade can only be started " "when %s is active." % constants.CONTROLLER_0_HOSTNAME)) + # There must not be a kubernetes upgrade in progress + try: + pecan.request.dbapi.kube_upgrade_get_one() + except exception.NotFound: + pass + else: + raise wsme.exc.ClientSideError(_( + "upgrade-start rejected: A platform upgrade cannot be done " + "while a kubernetes upgrade is in progress.")) + # There must not already be an upgrade in progress try: pecan.request.dbapi.software_upgrade_get_one() diff --git a/sysinv/sysinv/sysinv/sysinv/common/ceph.py b/sysinv/sysinv/sysinv/sysinv/common/ceph.py index 9abe2f3d26..bce9427fcf 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/ceph.py +++ b/sysinv/sysinv/sysinv/sysinv/common/ceph.py @@ -562,7 +562,7 @@ class CephApiOperator(object): try: resp, body = self._ceph_api.osd_stat(body='json', timeout=timeout) - except ReadTimeout as e: + except ReadTimeout: resp = type('Response', (), dict(ok=False, reason=('Ceph API osd_stat() timeout ' @@ -695,7 +695,7 @@ class CephApiOperator(object): if rc: break - except Exception as e: + except Exception: pass return rc diff --git a/sysinv/sysinv/sysinv/sysinv/common/constants.py b/sysinv/sysinv/sysinv/sysinv/common/constants.py index ad4abc7022..960449e542 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/constants.py +++ b/sysinv/sysinv/sysinv/sysinv/common/constants.py @@ -1355,7 +1355,7 @@ GLANCE_REGISTRY_DATA_API = 'glance.db.registry.api' # kernel options for various security feature selections SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_V1 = 'spectre_meltdown_v1' -SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_V1_OPTS = 'nopti nospectre_v2' +SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_V1_OPTS = 'nopti nospectre_v2 nospectre_v1' SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_ALL = 'spectre_meltdown_all' SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_ALL_OPTS = '' SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS = { @@ -1370,6 +1370,7 @@ SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_DEFAULT_OPTS = SYSTEM_SECURITY_FEATURE_ HELM_APP_OPENSTACK 
= 'stx-openstack' HELM_APP_PLATFORM = 'platform-integ-apps' HELM_APP_MONITOR = 'stx-monitor' +HELM_APP_OIDC_AUTH = 'oidc-auth-apps' # Apply mode for openstack app OPENSTACK_RESTORE_DB = 'restore_db' @@ -1389,6 +1390,7 @@ HELM_APP_APPLY_MODES = { HELM_APPS_PLATFORM_MANAGED = [ HELM_APP_PLATFORM, + HELM_APP_OIDC_AUTH, ] # The order in which apps are listed here is important. diff --git a/sysinv/sysinv/sysinv/sysinv/common/health.py b/sysinv/sysinv/sysinv/sysinv/common/health.py index 98fb2c5d66..9f9fe8022a 100755 --- a/sysinv/sysinv/sysinv/sysinv/common/health.py +++ b/sysinv/sysinv/sysinv/sysinv/common/health.py @@ -1,12 +1,11 @@ # -# Copyright (c) 2018-2019 Wind River Systems, Inc. +# Copyright (c) 2018-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # from eventlet.green import subprocess import os -from controllerconfig import backup_restore from fm_api import fm_api @@ -14,6 +13,7 @@ from oslo_log import log from sysinv._i18n import _ from sysinv.common import ceph from sysinv.common import constants +from sysinv.common import kubernetes from sysinv.common import utils from sysinv.common.fm import fmclient from sysinv.common.storage_backend_conf import StorageBackendConfig @@ -33,6 +33,7 @@ class Health(object): def __init__(self, dbapi): self._dbapi = dbapi self._ceph = ceph.CephApiOperator() + self._kube_operator = kubernetes.KubeOperator() def _check_hosts_provisioned(self, hosts): """Checks that each host is provisioned""" @@ -206,22 +207,73 @@ class Health(object): def _check_simplex_available_space(self): """Ensures there is free space for the backup""" - try: - backup_restore.check_size("/opt/backups", True) - except backup_restore.BackupFail: - return False - return True + # TODO: Switch this over to use Ansible + # try: + # backup_restore.check_size("/opt/backups", True) + # except backup_restore.BackupFail: + # return False + # return True + LOG.info("Skip the check of the enough free space.") + + def _check_kube_nodes_ready(self): + """Checks that each kubernetes node is ready""" + fail_node_list = [] + + nodes = self._kube_operator.kube_get_nodes() + for node in nodes: + for condition in node.status.conditions: + if condition.type == "Ready" and condition.status != "True": + # This node is not ready + fail_node_list.append(node.metadata.name) + + success = not fail_node_list + return success, fail_node_list + + def _check_kube_control_plane_pods(self): + """Checks that each kubernetes control plane pod is ready""" + fail_pod_list = [] + + pod_ready_status = self._kube_operator.\ + kube_get_control_plane_pod_ready_status() + + for pod_name, ready_status in pod_ready_status.items(): + if ready_status != "True": + # This pod is not ready + fail_pod_list.append(pod_name) + + success = not fail_pod_list + return success, fail_pod_list + + def _check_kube_applications(self): + """Checks that each kubernetes application is in a valid state""" + + fail_app_list = [] + apps = self._dbapi.kube_app_get_all() + + for app in apps: + # The following states are valid during kubernetes upgrade + if app.status not in [constants.APP_UPLOAD_SUCCESS, + constants.APP_APPLY_SUCCESS, + constants.APP_INACTIVE_STATE]: + fail_app_list.append(app.name) + + success = not fail_app_list + return success, fail_app_list def get_system_health(self, context, force=False): - """Returns the general health of the system""" - # Checks the following: - # All hosts are provisioned - # All hosts are patch current - # All hosts are unlocked/enabled - # All hosts having matching configs - # No 
management affecting alarms - # For ceph systems: The storage cluster is healthy + """Returns the general health of the system + + Checks the following: + - All hosts are provisioned + - All hosts are patch current + - All hosts are unlocked/enabled + - All hosts having matching configs + - No management affecting alarms + - For ceph systems: The storage cluster is healthy + - All kubernetes nodes are ready + - All kubernetes control plane pods are ready + """ hosts = self._dbapi.ihost_get_list() output = _('System Health:\n') @@ -289,6 +341,24 @@ class Health(object): health_ok = health_ok and success + success, error_nodes = self._check_kube_nodes_ready() + output += _('All kubernetes nodes are ready: [%s]\n') \ + % (Health.SUCCESS_MSG if success else Health.FAIL_MSG) + if not success: + output += _('Kubernetes nodes not ready: %s\n') \ + % ', '.join(error_nodes) + + health_ok = health_ok and success + + success, error_nodes = self._check_kube_control_plane_pods() + output += _('All kubernetes control plane pods are ready: [%s]\n') \ + % (Health.SUCCESS_MSG if success else Health.FAIL_MSG) + if not success: + output += _('Kubernetes control plane pods not ready: %s\n') \ + % ', '.join(error_nodes) + + health_ok = health_ok and success + return health_ok, output def get_system_health_upgrade(self, context, force=False): @@ -358,3 +428,24 @@ class Health(object): health_ok = health_ok and success return health_ok, output + + def get_system_health_kube_upgrade(self, context, force=False): + """Ensures the system is in a valid state for a kubernetes upgrade + + Does a general health check then does the following: + - All kubernetes applications are in a stable state + """ + + health_ok, output = self.get_system_health(context, force) + + success, apps_not_valid = self._check_kube_applications() + output += _( + 'All kubernetes applications are in a valid state: [%s]\n') \ + % (Health.SUCCESS_MSG if success else Health.FAIL_MSG) + if not success: + output += _('Kubernetes applications not in a valid state: %s\n') \ + % ', '.join(apps_not_valid) + + health_ok = health_ok and success + + return health_ok, output diff --git a/sysinv/sysinv/sysinv/sysinv/common/kubernetes.py b/sysinv/sysinv/sysinv/sysinv/common/kubernetes.py index ad12f4e4ff..eff23fca07 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/kubernetes.py +++ b/sysinv/sysinv/sysinv/sysinv/common/kubernetes.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2013-2019 Wind River Systems, Inc. +# Copyright (c) 2013-2020 Wind River Systems, Inc. 
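A short usage sketch of the kubernetes-upgrade health check added above, assuming a dbapi handle and request context as the conductor already holds:

from sysinv.common import health

def kube_upgrade_precheck(dbapi, context):
    checker = health.Health(dbapi)
    ok, report = checker.get_system_health_kube_upgrade(context, force=False)
    if not ok:
        # report ends with lines such as
        # "All kubernetes applications are in a valid state: [Fail]"
        print(report)
    return ok
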
# # SPDX-License-Identifier: Apache-2.0 # @@ -82,6 +82,22 @@ def get_kube_versions(): ] +def get_kube_networking_upgrade_version(kube_upgrade): + """Determine the version that kubernetes networking + should be upgraded to.""" + if kube_upgrade.state in [ + KUBE_UPGRADE_STARTED, + KUBE_UPGRADE_DOWNLOADING_IMAGES, + KUBE_UPGRADE_DOWNLOADING_IMAGES_FAILED, + KUBE_UPGRADE_DOWNLOADED_IMAGES, + KUBE_UPGRADING_FIRST_MASTER, + KUBE_UPGRADING_FIRST_MASTER_FAILED, + KUBE_UPGRADED_FIRST_MASTER]: + return kube_upgrade.from_version + else: + return kube_upgrade.to_version + + class KubeOperator(object): def __init__(self): @@ -387,6 +403,42 @@ class KubeOperator(object): % (namespace, e)) raise + def kube_get_control_plane_pod_ready_status(self): + """Returns the ready status of the control plane pods.""" + c = self._get_kubernetesclient_core() + + # First get a list of master nodes + master_nodes = list() + api_response = c.list_node( + label_selector="node-role.kubernetes.io/master") + for node in api_response.items: + master_nodes.append(node.metadata.name) + + # Populate status dictionary + ready_status = dict() + for node_name in master_nodes: + for component in [KUBE_APISERVER, + KUBE_CONTROLLER_MANAGER, + KUBE_SCHEDULER]: + # Control plane pods are named by component and node. + # E.g. kube-apiserver-controller-0 + pod_name = component + '-' + node_name + ready_status[pod_name] = None + + # Retrieve the control plane pods + api_response = c.list_pod_for_all_namespaces( + label_selector="component in (%s,%s,%s)" % ( + KUBE_APISERVER, KUBE_CONTROLLER_MANAGER, KUBE_SCHEDULER) + ) + pods = api_response.items + for pod in pods: + if pod.status.conditions is not None: + for condition in pod.status.conditions: + if condition.type == "Ready": + ready_status[pod.metadata.name] = condition.status + + return ready_status + def kube_get_control_plane_versions(self): """Returns the lowest control plane component version on each master node.""" diff --git a/sysinv/sysinv/sysinv/sysinv/common/service_parameter.py b/sysinv/sysinv/sysinv/sysinv/common/service_parameter.py index 1fa2af2e6e..5543b49e12 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/service_parameter.py +++ b/sysinv/sysinv/sysinv/sysinv/common/service_parameter.py @@ -63,7 +63,7 @@ def _validate_float(name, value): def _validate_not_empty(name, value): - if not value or value is '': + if not value or value == '': raise wsme.exc.ClientSideError(_( "Parameter '%s' must not be an empty value." 
% name)) diff --git a/sysinv/sysinv/sysinv/sysinv/common/utils.py b/sysinv/sysinv/sysinv/sysinv/common/utils.py index 378a2cbd27..cee2f342b7 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/utils.py +++ b/sysinv/sysinv/sysinv/sysinv/common/utils.py @@ -609,7 +609,7 @@ def file_open(*args, **kwargs): be able to provide a stub module that doesn't alter system state at all (for unit tests) """ - return file(*args, **kwargs) + return open(*args, **kwargs) def get_file_content(filename): diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/ceph.py b/sysinv/sysinv/sysinv/sysinv/conductor/ceph.py index 5ef010a806..68208d6427 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/ceph.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/ceph.py @@ -845,7 +845,7 @@ class CephOperator(object): try: resp, body = self._ceph_api.osd_stat(body='json', timeout=timeout) - except ReadTimeout as e: + except ReadTimeout: resp = type('Response', (), dict(ok=False, reason=('Ceph API osd_stat() timeout ' diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/kube_app.py b/sysinv/sysinv/sysinv/sysinv/conductor/kube_app.py index 1f0f786b44..3ca2fe0496 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/kube_app.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/kube_app.py @@ -855,7 +855,7 @@ class AppOperator(object): db_app.id, chart, namespace, {'system_overrides': system_overrides}) except exception.HelmOverrideNotFound: - LOG.exception(e) + LOG.exception("Helm Override Not Found") def _validate_labels(self, labels): expr = re.compile(r'[a-z0-9]([-a-z0-9]*[a-z0-9])') @@ -913,6 +913,19 @@ class AppOperator(object): if null_labels: self._update_kubernetes_labels(host.hostname, null_labels) + def _storage_provisioner_required(self, app_name): + check_storage_provisioner_apps = [constants.HELM_APP_MONITOR] + + if app_name not in check_storage_provisioner_apps: + return True + + system = self._dbapi.isystem_get_one() + if system.distributed_cloud_role == \ + constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD: + return False + else: + return True + def _create_storage_provisioner_secrets(self, app_name): """ Provide access to the system persistent storage provisioner. 
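The subcloud special case above reduces to a single predicate; an equivalent standalone sketch (constants as in the patch):

from sysinv.common import constants

def needs_storage_provisioner(app_name, distributed_cloud_role):
    # Only stx-monitor is exempted, and only on subcloud systems.
    exempt_apps = [constants.HELM_APP_MONITOR]
    return (app_name not in exempt_apps or
            distributed_cloud_role !=
            constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD)
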
@@ -1949,7 +1962,8 @@ class AppOperator(object): if AppOperator.is_app_aborted(app.name): raise exception.KubeAppAbort() - self._create_storage_provisioner_secrets(app.name) + if self._storage_provisioner_required(app.name): + self._create_storage_provisioner_secrets(app.name) self._create_app_specific_resources(app.name) self._update_app_status( @@ -2224,7 +2238,8 @@ class AppOperator(object): try: self._delete_local_registry_secrets(app.name) if app.system_app: - self._delete_storage_provisioner_secrets(app.name) + if self._storage_provisioner_required(app.name): + self._delete_storage_provisioner_secrets(app.name) self._delete_app_specific_resources(app.name) except Exception as e: self._abort_operation(app, constants.APP_REMOVE_OP) @@ -2720,8 +2735,8 @@ class DockerHelper(object): elif request == constants.APP_APPLY_OP: cmd = ("/bin/bash -c 'set -o pipefail; armada apply " "--enable-chart-cleanup --debug {m} {o} {t} | " - "tee {l}'".format(m=manifest_file, o=overrides_str, - t=tiller_host, l=logfile)) + "tee {lf}'".format(m=manifest_file, o=overrides_str, + t=tiller_host, lf=logfile)) LOG.info("Armada apply command = %s" % cmd) (exit_code, exec_logs) = armada_svc.exec_run(cmd) if exit_code == 0: @@ -2835,8 +2850,10 @@ class DockerHelper(object): LOG.info("Image %s download started from local registry" % img_tag) client = docker.APIClient(timeout=INSTALLATION_TIMEOUT) - client.pull(img_tag, auth_config=local_registry_auth) - except docker.errors.NotFound: + auth = '{0}:{1}'.format(local_registry_auth['username'], + local_registry_auth['password']) + subprocess.check_call(["crictl", "pull", "--creds", auth, img_tag]) + except subprocess.CalledProcessError: try: # Pull the image from the public/private registry LOG.info("Image %s is not available in local registry, " @@ -2859,6 +2876,16 @@ class DockerHelper(object): except Exception as e: rc = False LOG.error("Image %s push failed to local registry: %s" % (img_tag, e)) + return img_tag, rc + + try: + # remove docker container image after it is pushed to local registry. + LOG.info("Remove image %s after push to local registry." % (target_img_tag)) + client.remove_image(target_img_tag) + client.remove_image(img_tag) + except Exception as e: + LOG.warning("Image %s remove failed: %s" % (target_img_tag, e)) + except Exception as e: rc = False LOG.error("Image %s download failed from local registry: %s" % (img_tag, e)) diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py index c13f7e91c4..56b36e1bab 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py @@ -16,7 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. # -# Copyright (c) 2013-2019 Wind River Systems, Inc. +# Copyright (c) 2013-2020 Wind River Systems, Inc. # """Conduct all activity related system inventory. 
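The registry change above swaps the docker API pull for crictl; condensed, the new flow looks like this (error handling simplified to the fallback decision):

import subprocess

def pull_from_local_registry(img_tag, local_registry_auth):
    creds = '{0}:{1}'.format(local_registry_auth['username'],
                             local_registry_auth['password'])
    try:
        # Mirrors the subprocess.check_call added in DockerHelper above.
        subprocess.check_call(["crictl", "pull", "--creds", creds, img_tag])
        return True
    except subprocess.CalledProcessError:
        return False  # caller falls back to the public/private registry
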
@@ -192,6 +192,9 @@ class ConductorManager(service.PeriodicService): # Timeouts for adding & removing operations self._pv_op_timeouts = {} self._stor_bck_op_timeouts = {} + # struct {'host_uuid':[config_uuid_0,config_uuid_1]} + # this will track the config w/ reboot request to apply + self._host_reboot_config_uuid = {} def start(self): self._start() @@ -5277,12 +5280,27 @@ class ConductorManager(service.PeriodicService): wait_fixed=(CONF.conductor.kube_upgrade_downgrade_retry_interval * 1000)) def _upgrade_downgrade_kube_networking(self): try: - LOG.info( - "_upgrade_downgrade_kube_networking executing playbook: %s " % - constants.ANSIBLE_KUBE_NETWORKING_PLAYBOOK) + # Get the kubernetes version from the upgrade table + # if an upgrade exists + kube_upgrade = self.dbapi.kube_upgrade_get_one() + kube_version = \ + kubernetes.get_kube_networking_upgrade_version(kube_upgrade) + except exception.NotFound: + # Not upgrading kubernetes, get the kubernetes version + # from the kubeadm config map + kube_version = self._kube.kube_get_kubernetes_version() + + if not kube_version: + LOG.error("Unable to get the current kubernetes version.") + return False + + try: + LOG.info("_upgrade_downgrade_kube_networking executing" + " playbook: %s for version %s" % + (constants.ANSIBLE_KUBE_NETWORKING_PLAYBOOK, kube_version)) proc = subprocess.Popen( - ['ansible-playbook', + ['ansible-playbook', '-e', 'kubernetes_version=%s' % kube_version, constants.ANSIBLE_KUBE_NETWORKING_PLAYBOOK], stdout=subprocess.PIPE) out, _ = proc.communicate() @@ -5632,21 +5650,25 @@ class ConductorManager(service.PeriodicService): % flag_file) pass - def update_route_config(self, context): + def update_route_config(self, context, host_id): """add or remove a static route :param context: an admin context. 
+ :param host_id: the host id """ # update manifest files and notifiy agents to apply them personalities = [constants.CONTROLLER, constants.WORKER, constants.STORAGE] - config_uuid = self._config_update_hosts(context, personalities) + host = self.dbapi.ihost_get(host_id) + config_uuid = self._config_update_hosts(context, personalities, + host_uuids=[host.uuid]) config_dict = { "personalities": personalities, - "classes": 'platform::network::runtime' + 'host_uuids': [host.uuid], + "classes": 'platform::network::routes::runtime' } self._config_apply_runtime_manifest(context, config_uuid, config_dict) @@ -7271,18 +7293,6 @@ class ConductorManager(service.PeriodicService): data['name'], data['logical_volume'], data['size'])) self.dbapi.controller_fs_create(data) - else: - values = { - 'services': constants.SB_SVC_GLANCE, - 'name': constants.SB_DEFAULT_NAMES[constants.SB_TYPE_EXTERNAL], - 'state': constants.SB_STATE_CONFIGURED, - 'backend': constants.SB_TYPE_EXTERNAL, - 'task': constants.SB_TASK_NONE, - 'capabilities': {}, - 'forsystemid': system.id - } - self.dbapi.storage_external_create(values) - def update_service_config(self, context, service=None, do_apply=False): """Update the service parameter configuration""" @@ -7904,9 +7914,38 @@ class ConductorManager(service.PeriodicService): # We avoid re-raising this as it may brake critical operations after this one return constants.CINDER_RESIZE_FAILURE + def _remove_config_from_reboot_config_list(self, ihost_uuid, config_uuid): + LOG.info("_remove_config_from_reboot_config_list host: %s,config_uuid: %s" % + (ihost_uuid, config_uuid)) + if ihost_uuid in self._host_reboot_config_uuid: + try: + self._host_reboot_config_uuid[ihost_uuid].remove(config_uuid) + except ValueError: + LOG.info("_remove_config_from_reboot_config_list fail" + " host:%s", ihost_uuid) + pass + + def _clear_config_from_reboot_config_list(self, ihost_uuid): + LOG.info("_clear_config_from_reboot_config_list host:%s", ihost_uuid) + if ihost_uuid in self._host_reboot_config_uuid: + try: + del self._host_reboot_config_uuid[ihost_uuid][:] + except ValueError: + LOG.info("_clear_config_from_reboot_config_list fail" + " host: %s", ihost_uuid) + pass + def _config_out_of_date(self, ihost_obj): target = ihost_obj.config_target applied = ihost_obj.config_applied + applied_reboot = None + if applied is not None: + try: + applied_reboot = self._config_set_reboot_required(applied) + except ValueError: + # for worker node, the applied might be 'install' + applied_reboot = applied + pass hostname = ihost_obj.hostname if not hostname: @@ -7917,6 +7956,7 @@ class ConductorManager(service.PeriodicService): (hostname, applied)) return False elif target == applied: + self._clear_config_from_reboot_config_list(ihost_obj.uuid) if ihost_obj.personality == constants.CONTROLLER: controller_fs_list = self.dbapi.controller_fs_get_list() @@ -7933,6 +7973,13 @@ class ConductorManager(service.PeriodicService): LOG.info("%s: iconfig up to date: target %s, applied %s " % (hostname, target, applied)) return False + elif target == applied_reboot: + if ihost_obj.uuid in self._host_reboot_config_uuid: + if len(self._host_reboot_config_uuid[ihost_obj.uuid]) == 0: + return False + return True + else: + return False else: LOG.warn("%s: iconfig out of date: target %s, applied %s " % (hostname, target, applied)) @@ -8124,6 +8171,8 @@ class ConductorManager(service.PeriodicService): @cutils.synchronized(lock_name, external=False) def _sync_update_host_config_applied(self, context, ihost_obj, config_uuid): + 
self._remove_config_from_reboot_config_list(ihost_obj.uuid, + config_uuid) if ihost_obj.config_applied != config_uuid: ihost_obj.config_applied = config_uuid ihost_obj.save(context) @@ -8171,6 +8220,12 @@ class ConductorManager(service.PeriodicService): for host in hosts: if host.personality and host.personality in personalities: + if reboot: + if host.uuid in self._host_reboot_config_uuid: + self._host_reboot_config_uuid[host.uuid].append(config_uuid) + else: + self._host_reboot_config_uuid[host.uuid] = [] + self._host_reboot_config_uuid[host.uuid].append(config_uuid) self._update_host_config_target(context, host, config_uuid) LOG.info("_config_update_hosts config_uuid=%s" % config_uuid) @@ -8192,12 +8247,18 @@ class ConductorManager(service.PeriodicService): for host in hosts: if host.personality in personalities: + # Never generate hieradata for uninventoried hosts, as their + # interface config will be incomplete. + if host.inv_state != constants.INV_STATE_INITIAL_INVENTORIED: + LOG.info( + "Cannot generate the configuration for %s, " + "the host is not inventoried yet." % host.hostname) # We will allow controller nodes to re-generate manifests # when in an "provisioning" state. This will allow for # example the ntp configuration to be changed on an CPE # node before the "worker_config_complete" has been # executed. - if (force or + elif (force or host.invprovision == constants.PROVISIONED or (host.invprovision == constants.PROVISIONING and host.personality == constants.CONTROLLER)): @@ -9166,19 +9227,25 @@ class ConductorManager(service.PeriodicService): return - def get_system_health(self, context, force=False, upgrade=False): + def get_system_health(self, context, force=False, upgrade=False, + kube_upgrade=False): """ Performs a system health check. :param context: request context. :param force: set to true to ignore minor and warning alarms :param upgrade: set to true to perform an upgrade health check + :param kube_upgrade: set to true to perform a kubernetes upgrade health + check """ health_util = health.Health(self.dbapi) if upgrade is True: return health_util.get_system_health_upgrade(context=context, force=force) + elif kube_upgrade is True: + return health_util.get_system_health_kube_upgrade(context=context, + force=force) else: return health_util.get_system_health(context=context, force=force) @@ -10450,7 +10517,7 @@ class ConductorManager(service.PeriodicService): with open(f, 'rb') as file: new_hash[f] = hashlib.md5(file.read()).hexdigest() - if cmp(old_hash, new_hash) != 0: + if old_hash != new_hash: LOG.info("There has been an overrides change, setting up " "reapply of %s", app.name) self._app.set_reapply(app.name) @@ -10605,7 +10672,8 @@ class ConductorManager(service.PeriodicService): config_dict = { "personalities": personalities, "host_uuids": [host.uuid], - "classes": ['openstack::keystone::endpoint::runtime'] + "classes": ['openstack::keystone::endpoint::runtime', + 'openstack::barbican::runtime'] } self._config_apply_runtime_manifest( context, config_uuid, config_dict, force=True) @@ -10976,8 +11044,26 @@ class ConductorManager(service.PeriodicService): def kube_upgrade_networking(self, context, kube_version): """Upgrade kubernetes networking for this kubernetes version""" - # TODO: Upgrade kubernetes networking. 
- LOG.info("Upgrade kubernetes networking here") + LOG.info("executing playbook: %s for version %s" % + (constants.ANSIBLE_KUBE_NETWORKING_PLAYBOOK, kube_version)) + + proc = subprocess.Popen( + ['ansible-playbook', '-e', 'kubernetes_version=%s' % kube_version, + constants.ANSIBLE_KUBE_NETWORKING_PLAYBOOK], + stdout=subprocess.PIPE) + out, _ = proc.communicate() + + LOG.info("ansible-playbook: %s." % out) + + if proc.returncode: + LOG.warning("ansible-playbook returned an error: %s" % + proc.returncode) + # Update the upgrade state + kube_upgrade_obj = objects.kube_upgrade.get_one(context) + kube_upgrade_obj.state = \ + kubernetes.KUBE_UPGRADING_NETWORKING_FAILED + kube_upgrade_obj.save() + return # Indicate that networking upgrade is complete kube_upgrade_obj = objects.kube_upgrade.get_one(context) diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/rpcapi.py b/sysinv/sysinv/sysinv/sysinv/conductor/rpcapi.py index 5c4854ca5f..2cb6e3172a 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/rpcapi.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/rpcapi.py @@ -515,7 +515,7 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy): self.make_msg('delete_flag_file', flag_file=flag_file)) - def update_route_config(self, context): + def update_route_config(self, context, host_id): """Synchronously, have a conductor configure static route. Does the following tasks: @@ -524,10 +524,13 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy): - who each apply the route manifest :param context: request context. + :param host_id: the host id """ - LOG.debug("ConductorApi.update_route_config: sending" - " update_route_config to conductor") - return self.call(context, self.make_msg('update_route_config')) + LOG.debug("ConductorApi.update_route_config: sending " + " update_route_config to conductor for " + "host_id(%s)" % host_id) + return self.call(context, self.make_msg('update_route_config', + host_id=host_id)) def update_sriov_config(self, context, host_uuid): """Synchronously, have a conductor configure sriov config. @@ -1285,17 +1288,21 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy): return self.cast(context, self.make_msg('complete_simplex_backup', success=success)) - def get_system_health(self, context, force=False, upgrade=False): + def get_system_health(self, context, force=False, upgrade=False, + kube_upgrade=False): """ Performs a system health check. :param context: request context. :param force: set to true to ignore minor and warning alarms :param upgrade: set to true to perform an upgrade health check + :param kube_upgrade: set to true to perform a kubernetes upgrade health + check """ return self.call(context, self.make_msg('get_system_health', - force=force, upgrade=upgrade)) + force=force, upgrade=upgrade, + kube_upgrade=kube_upgrade)) def reserve_ip_for_first_storage_node(self, context): """ diff --git a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migration.py b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migration.py index 53e4a72585..593b211a9c 100644 --- a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migration.py +++ b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migration.py @@ -16,11 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-import distutils.version as dist_version import os - -import migrate -from migrate.versioning import util as migrate_util import sqlalchemy from oslo_db.sqlalchemy import enginefacade @@ -28,31 +24,6 @@ from sysinv._i18n import _ from sysinv.common import exception from sysinv.db import migration - -@migrate_util.decorator -def patched_with_engine(f, *a, **kw): - url = a[0] - engine = migrate_util.construct_engine(url, **kw) - - try: - kw['engine'] = engine - return f(*a, **kw) - finally: - if isinstance(engine, migrate_util.Engine) and engine is not url: - migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine) - engine.dispose() - - -# TODO(jkoelker) When migrate 0.7.3 is released and nova depends -# on that version or higher, this can be removed -MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3') -if (not hasattr(migrate, '__version__') or - dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION): - - migrate_util.with_engine = patched_with_engine - - -# NOTE(jkoelker) Delay importing migrate until we are patched from migrate import exceptions as versioning_exceptions from migrate.versioning import api as versioning_api from migrate.versioning.repository import Repository diff --git a/sysinv/sysinv/sysinv/sysinv/helm/base.py b/sysinv/sysinv/sysinv/sysinv/helm/base.py index 8a3328f8cb..1805bf0f84 100644 --- a/sysinv/sysinv/sysinv/sysinv/helm/base.py +++ b/sysinv/sysinv/sysinv/sysinv/helm/base.py @@ -208,6 +208,22 @@ class BaseHelm(object): availability=availability, vim_progress_status=vim_progress_status)) + def _num_provisioned_controllers(self): + """ + This is used by platform managed applications. + Takes care of the case where one controller is provisioned + and the other is installed but not provisioned. + When the second controller is provisioned, the unlock + will check if the overrides are different and reapply platform + managed applications appropriately + """ + return self._num_controllers_matching_criteria( + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=[constants.AVAILABILITY_AVAILABLE, + constants.AVAILABILITY_DEGRADED], + vim_progress_status=constants.VIM_SERVICES_ENABLED) + def _get_address_by_name(self, name, networktype): """ Retrieve an address entry by name and scoped by network type diff --git a/sysinv/sysinv/sysinv/sysinv/helm/common.py b/sysinv/sysinv/sysinv/sysinv/helm/common.py index 3d9a213e4c..4fd6d94f92 100644 --- a/sysinv/sysinv/sysinv/sysinv/helm/common.py +++ b/sysinv/sysinv/sysinv/sysinv/helm/common.py @@ -58,6 +58,8 @@ HELM_CHART_KEYSTONE_API_PROXY = 'keystone-api-proxy' HELM_CHART_SWIFT = 'ceph-rgw' HELM_CHART_NGINX_PORTS_CONTROL = "nginx-ports-control" HELM_CHART_DCDBSYNC = 'dcdbsync' +HELM_CHART_DEX = 'dex' +HELM_CHART_OIDC_CLIENT = 'oidc-client' HELM_CHART_ELASTICSEARCH_MASTER = 'elasticsearch-master' HELM_CHART_ELASTICSEARCH_DATA = 'elasticsearch-data' diff --git a/sysinv/sysinv/sysinv/sysinv/helm/dex.py b/sysinv/sysinv/sysinv/sysinv/helm/dex.py new file mode 100644 index 0000000000..bedc1f558a --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/helm/dex.py @@ -0,0 +1,67 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc.
+# +# SPDX-License-Identifier: Apache-2.0 +# + +from sysinv.common import exception + +from sysinv.helm import common +from sysinv.helm.dex_base import DexBaseHelm + + +class Dex(DexBaseHelm): + """Class to encapsulate helm operations for the dex chart""" + + CHART = common.HELM_CHART_DEX + + SERVICE_NAME = 'dex' + + def get_namespaces(self): + return self.SUPPORTED_NAMESPACES + + def _get_static_clients(self): + static_clients = [] + + oidc_client = { + 'id': self._get_client_id(), + 'redirectURIs': ["http://%s:%s/callback" % + (self._format_url_address(self._get_oam_address()), self.OIDC_CLIENT_NODE_PORT)], + 'name': 'STX OIDC Client app', + 'secret': self._get_client_secret() + } + + static_clients.append(oidc_client) + + return static_clients + + def get_overrides(self, namespace=None): + + ports = [] + dex_port = { + 'name': 'http', + 'containerPort': 5556, + 'protocol': 'TCP', + 'nodePort': self.DEX_NODE_PORT, + } + ports.append(dex_port) + + overrides = { + common.HELM_NS_KUBE_SYSTEM: { + 'config': { + 'issuer': "https://%s:%s/dex" % (self._format_url_address(self._get_oam_address()), + self.DEX_NODE_PORT), + 'staticClients': self._get_static_clients(), + }, + 'ports': ports, + 'replicas': self._num_provisioned_controllers(), + } + } + + if namespace in self.SUPPORTED_NAMESPACES: + return overrides[namespace] + elif namespace: + raise exception.InvalidHelmNamespace(chart=self.CHART, + namespace=namespace) + else: + return overrides diff --git a/sysinv/sysinv/sysinv/sysinv/helm/dex_base.py b/sysinv/sysinv/sysinv/sysinv/helm/dex_base.py new file mode 100644 index 0000000000..d60dbd20cc --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/helm/dex_base.py @@ -0,0 +1,40 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +from sysinv.common import constants + +from sysinv.helm import base +from sysinv.helm import common + + +class DexBaseHelm(base.BaseHelm): + """Class to encapsulate helm operations for the dex chart""" + + SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \ + [common.HELM_NS_KUBE_SYSTEM] + SUPPORTED_APP_NAMESPACES = { + constants.HELM_APP_OIDC_AUTH: + base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_KUBE_SYSTEM], + } + + # OIDC client and DEX Node ports + OIDC_CLIENT_NODE_PORT = 30555 + DEX_NODE_PORT = 30556 + + @property + def CHART(self): + # subclasses must define the property: CHART='name of chart' + # if an author of a new chart forgets this, NotImplementedError is raised + raise NotImplementedError + + def get_namespaces(self): + return self.SUPPORTED_NAMESPACES + + def _get_client_id(self): + return 'stx-oidc-client-app' + + def _get_client_secret(self): + return 'St8rlingX' diff --git a/sysinv/sysinv/sysinv/sysinv/helm/elasticsearch_curator.py b/sysinv/sysinv/sysinv/sysinv/helm/elasticsearch_curator.py index 05e3e29ccd..40b4b8573b 100644 --- a/sysinv/sysinv/sysinv/sysinv/helm/elasticsearch_curator.py +++ b/sysinv/sysinv/sysinv/sysinv/helm/elasticsearch_curator.py @@ -62,9 +62,13 @@ class ElasticsearchCuratorHelm(elastic.ElasticBaseHelm): # Give 50% of elasticsearch data volume # to filebeat, 40% to metricbeat and 10% to collectd, all - # modified by a safety margin due to cronjob running every 6 hours. + # modified by a safety margin due to elasticsearch disk allocation + # settings and cronjob running every half hour. 
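For reference, the Dex plugin defined above yields kube-system overrides of roughly this shape; the OAM address 10.10.10.3 and replicas=2 are illustrative assumptions:

example_dex_overrides = {
    'config': {
        'issuer': 'https://10.10.10.3:30556/dex',
        'staticClients': [{
            'id': 'stx-oidc-client-app',
            'redirectURIs': ['http://10.10.10.3:30555/callback'],
            'name': 'STX OIDC Client app',
            'secret': 'St8rlingX',
        }],
    },
    'ports': [{'name': 'http', 'containerPort': 5556,
               'protocol': 'TCP', 'nodePort': 30556}],
    'replicas': 2,
}
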
- volume_size_with_margin_factor = 0.95 * data_volume_size_gb + # Set margin factor to 83%, as elasticsearch currently has + # default cluster.routing.allocation.disk settings set to: + # low: 85%, high 90%, flood_stage: 95%. + volume_size_with_margin_factor = 0.83 * data_volume_size_gb filebeat_limit_int = int(0.5 * volume_size_with_margin_factor) filebeat_limit = str(filebeat_limit_int) @@ -83,8 +87,8 @@ class ElasticsearchCuratorHelm(elastic.ElasticBaseHelm): 'METRICBEAT_INDEX_LIMIT_GB': metricbeat_limit, 'COLLECTD_INDEX_LIMIT_GB': collectd_limit, }, - # Run job every 6 hours. - 'cronjob': {'schedule': "0 */6 * * *"}, + # Run job every half hour. + 'cronjob': {'schedule': "*/30 * * * *"}, } } diff --git a/sysinv/sysinv/sysinv/sysinv/helm/helm.py b/sysinv/sysinv/sysinv/sysinv/helm/helm.py index 5b43668a61..05416d9a68 100644 --- a/sysinv/sysinv/sysinv/sysinv/helm/helm.py +++ b/sysinv/sysinv/sysinv/sysinv/helm/helm.py @@ -32,6 +32,12 @@ LOG = logging.getLogger(__name__) # The convention here is for the helm plugins to be named ###_PLUGINNAME. HELM_PLUGIN_PREFIX_LENGTH = 4 +# Number of optional characters appended to Armada manifest operator name, +# to allow overriding with a newer version of the Armada manifest operator. +# The convention here is for the Armada operator plugins to allow an +# optional suffix, as in PLUGINNAME_###. +ARMADA_PLUGIN_SUFFIX_LENGTH = 4 + def helm_context(func): """Decorate to initialize the local threading context""" @@ -85,8 +91,19 @@ class HelmOperator(object): namespace='systemconfig.armada.manifest_ops', invoke_on_load=True, invoke_args=()) - for op in armada_manifest_operators: - operators_dict[op.name] = op.obj + sorted_armada_manifest_operators = sorted( + armada_manifest_operators.extensions, key=lambda x: x.name) + + for op in sorted_armada_manifest_operators: + if (op.name[-(ARMADA_PLUGIN_SUFFIX_LENGTH - 1):].isdigit() and + op.name[-ARMADA_PLUGIN_SUFFIX_LENGTH:-3] == '_'): + op_name = op.name[0:-ARMADA_PLUGIN_SUFFIX_LENGTH] + LOG.info("_load_armada_manifest_operators op.name=%s " + "adjust to op_name=%s" % (op.name, op_name)) + else: + op_name = op.name + + operators_dict[op_name] = op.obj return operators_dict diff --git a/sysinv/sysinv/sysinv/sysinv/helm/metricbeat.py b/sysinv/sysinv/sysinv/sysinv/helm/metricbeat.py index 9cfeea0f0f..b20175e77e 100644 --- a/sysinv/sysinv/sysinv/sysinv/helm/metricbeat.py +++ b/sysinv/sysinv/sysinv/sysinv/helm/metricbeat.py @@ -202,7 +202,8 @@ class MetricbeatHelm(elastic.ElasticBaseHelm): "state_replicaset", "state_pod", "state_container", - "event" + "event", + "state_statefulset" ], "period": "60s", "host": "${NODE_NAME}", diff --git a/sysinv/sysinv/sysinv/sysinv/helm/oidc_client.py b/sysinv/sysinv/sysinv/sysinv/helm/oidc_client.py new file mode 100644 index 0000000000..ceed2b15d7 --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/helm/oidc_client.py @@ -0,0 +1,48 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +from sysinv.common import exception + +from sysinv.helm import common +from sysinv.helm.dex_base import DexBaseHelm + + +class OidcClientHelm(DexBaseHelm): + """Class to encapsulate helm operations for the OIDC client chart""" + + CHART = common.HELM_CHART_OIDC_CLIENT + + SERVICE_NAME = 'oidc_client' + + def get_namespaces(self): + return self.SUPPORTED_NAMESPACES + + def get_overrides(self, namespace=None): + oam_url = self._format_url_address(self._get_oam_address()) + overrides = { + common.HELM_NS_KUBE_SYSTEM: { + 'config': { + 'client_id': self._get_client_id(), + 'client_secret': self._get_client_secret(), + 'issuer': "https://%s:%s/dex" % (oam_url, self.DEX_NODE_PORT), + 'issuer_root_ca': '/home/dex-ca.pem', + 'listen': 'http://0.0.0.0:5555', + 'redirect_uri': "http://%s:%s/callback" % (oam_url, self.OIDC_CLIENT_NODE_PORT), + }, + 'service': { + 'nodePort': self.OIDC_CLIENT_NODE_PORT + }, + 'replicas': self._num_provisioned_controllers(), + } + } + + if namespace in self.SUPPORTED_NAMESPACES: + return overrides[namespace] + elif namespace: + raise exception.InvalidHelmNamespace(chart=self.CHART, + namespace=namespace) + else: + return overrides diff --git a/sysinv/sysinv/sysinv/sysinv/helm/rbd_provisioner.py b/sysinv/sysinv/sysinv/sysinv/helm/rbd_provisioner.py index a8d1e0b422..c0739e8ac1 100644 --- a/sysinv/sysinv/sysinv/sysinv/helm/rbd_provisioner.py +++ b/sysinv/sysinv/sysinv/sysinv/helm/rbd_provisioner.py @@ -81,12 +81,7 @@ class RbdProvisionerHelm(base.BaseHelm): classes.append(cls) global_settings = { - "replicas": self._num_controllers_matching_criteria( - administrative=constants.ADMIN_UNLOCKED, - operational=constants.OPERATIONAL_ENABLED, - availability=[constants.AVAILABILITY_AVAILABLE, - constants.AVAILABILITY_DEGRADED], - vim_progress_status=constants.VIM_SERVICES_ENABLED), + "replicas": self._num_provisioned_controllers(), "defaultStorageClass": constants.K8S_RBD_PROV_STOR_CLASS_NAME } diff --git a/sysinv/sysinv/sysinv/sysinv/openstack/common/fixture/__init__.py b/sysinv/sysinv/sysinv/sysinv/openstack/common/fixture/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/sysinv/sysinv/sysinv/sysinv/openstack/common/fixture/mockpatch.py b/sysinv/sysinv/sysinv/sysinv/openstack/common/fixture/mockpatch.py deleted file mode 100644 index cd0d6ca6b5..0000000000 --- a/sysinv/sysinv/sysinv/sysinv/openstack/common/fixture/mockpatch.py +++ /dev/null @@ -1,51 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
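A worked example of the Armada plugin suffix convention added to helm.py above; the plugin names here are hypothetical:

ARMADA_PLUGIN_SUFFIX_LENGTH = 4

def armada_base_name(op_name):
    # 'manifest-op_002' loads under 'manifest-op', so a _### revision can
    # shadow an older operator registered with the same base name.
    if (op_name[-(ARMADA_PLUGIN_SUFFIX_LENGTH - 1):].isdigit() and
            op_name[-ARMADA_PLUGIN_SUFFIX_LENGTH:-3] == '_'):
        return op_name[:-ARMADA_PLUGIN_SUFFIX_LENGTH]
    return op_name

assert armada_base_name('manifest-op_002') == 'manifest-op'
assert armada_base_name('manifest-op') == 'manifest-op'
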
- -import fixtures -import mock - - -class PatchObject(fixtures.Fixture): - """Deal with code around mock.""" - - def __init__(self, obj, attr, **kwargs): - self.obj = obj - self.attr = attr - self.kwargs = kwargs - - def setUp(self): - super(PatchObject, self).setUp() - _p = mock.patch.object(self.obj, self.attr, **self.kwargs) - self.mock = _p.start() - self.addCleanup(_p.stop) - - -class Patch(fixtures.Fixture): - - """Deal with code around mock.patch.""" - - def __init__(self, obj, **kwargs): - self.obj = obj - self.kwargs = kwargs - - def setUp(self): - super(Patch, self).setUp() - _p = mock.patch(self.obj, **self.kwargs) - self.mock = _p.start() - self.addCleanup(_p.stop) diff --git a/sysinv/sysinv/sysinv/sysinv/openstack/common/fixture/moxstubout.py b/sysinv/sysinv/sysinv/sysinv/openstack/common/fixture/moxstubout.py deleted file mode 100644 index 30c270791a..0000000000 --- a/sysinv/sysinv/sysinv/sysinv/openstack/common/fixture/moxstubout.py +++ /dev/null @@ -1,37 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures -import stubout -from mox3 import mox - - -class MoxStubout(fixtures.Fixture): - """Deal with code around mox and stubout as a fixture.""" - - def setUp(self): - super(MoxStubout, self).setUp() - # emulate some of the mox stuff, we can't use the metaclass - # because it screws with our generators - self.mox = mox.Mox() - self.stubs = stubout.StubOutForTesting() - self.addCleanup(self.mox.UnsetStubs) - self.addCleanup(self.stubs.UnsetAll) - self.addCleanup(self.stubs.SmartUnsetAll) - self.addCleanup(self.mox.VerifyAll) diff --git a/sysinv/sysinv/sysinv/sysinv/openstack/common/lockutils.py b/sysinv/sysinv/sysinv/sysinv/openstack/common/lockutils.py deleted file mode 100644 index 544b0e9805..0000000000 --- a/sysinv/sysinv/sysinv/sysinv/openstack/common/lockutils.py +++ /dev/null @@ -1,277 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import errno -import functools -import os -import shutil -import tempfile -import time -import weakref - -from eventlet import semaphore - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import fileutils -from sysinv._i18n import _ -from sysinv.openstack.common import local - -LOG = logging.getLogger(__name__) - - -util_opts = [ - cfg.BoolOpt('disable_process_locking', default=False, - help='Whether to disable inter-process locks'), - cfg.StrOpt('lock_path', - help=('Directory to use for lock files. Default to a ' - 'temp directory')) -] - - -CONF = cfg.CONF -CONF.register_opts(util_opts) - - -def set_defaults(lock_path): - cfg.set_defaults(util_opts, lock_path=lock_path) - - -class _InterProcessLock(object): - """Lock implementation which allows multiple locks, working around - issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does - not require any cleanup. Since the lock is always held on a file - descriptor rather than outside of the process, the lock gets dropped - automatically if the process crashes, even if __exit__ is not executed. - - There are no guarantees regarding usage by multiple green threads in a - single process here. This lock works only between processes. Exclusive - access between local threads should be achieved using the semaphores - in the @synchronized decorator. - - Note these locks are released when the descriptor is closed, so it's not - safe to close the file descriptor while another green thread holds the - lock. Just opening and closing the lock file can break synchronisation, - so lock files must be accessed only using this abstraction. - """ - - def __init__(self, name): - self.lockfile = None - self.fname = name - - def __enter__(self): - self.lockfile = open(self.fname, 'w') - - while True: - try: - # Using non-blocking locks since green threads are not - # patched to deal with blocking locking calls. - # Also upon reading the MSDN docs for locking(), it seems - # to have a laughable 10 attempts "blocking" mechanism. - self.trylock() - return self - except IOError as e: - if e.errno in (errno.EACCES, errno.EAGAIN): - # external locks synchronise things like iptables - # updates - give it some time to prevent busy spinning - time.sleep(0.01) - else: - raise - - def __exit__(self, exc_type, exc_val, exc_tb): - try: - self.unlock() - self.lockfile.close() - except IOError: - LOG.exception(_("Could not release the acquired lock `%s`"), - self.fname) - - def trylock(self): - raise NotImplementedError() - - def unlock(self): - raise NotImplementedError() - - -class _WindowsLock(_InterProcessLock): - def trylock(self): - msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1) - - def unlock(self): - msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1) - - -class _PosixLock(_InterProcessLock): - def trylock(self): - fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) - - def unlock(self): - fcntl.lockf(self.lockfile, fcntl.LOCK_UN) - - -if os.name == 'nt': - import msvcrt - InterProcessLock = _WindowsLock -else: - import fcntl - InterProcessLock = _PosixLock - -_semaphores = weakref.WeakValueDictionary() - - -def synchronized(name, lock_file_prefix, external=False, lock_path=None): - """Synchronization decorator. - - Decorating a method like so:: - - @synchronized('mylock') - def foo(self, *args): - ... - - ensures that only one thread will execute the foo method at a time. - - Different methods can share the same lock:: - - @synchronized('mylock') - def foo(self, *args): - ... 
- - @synchronized('mylock') - def bar(self, *args): - ... - - This way only one of either foo or bar can be executing at a time. - - The lock_file_prefix argument is used to provide lock files on disk with a - meaningful prefix. The prefix should end with a hyphen ('-') if specified. - - The external keyword argument denotes whether this lock should work across - multiple processes. This means that if two different workers both run a - a method decorated with @synchronized('mylock', external=True), only one - of them will execute at a time. - - The lock_path keyword argument is used to specify a special location for - external lock files to live. If nothing is set, then CONF.lock_path is - used as a default. - """ - - def wrap(f): - @functools.wraps(f) - def inner(*args, **kwargs): - # NOTE(soren): If we ever go natively threaded, this will be racy. - # See http://stackoverflow.com/questions/5390569/dyn - # amically-allocating-and-destroying-mutexes - sem = _semaphores.get(name, semaphore.Semaphore()) - if name not in _semaphores: - # this check is not racy - we're already holding ref locally - # so GC won't remove the item and there was no IO switch - # (only valid in greenthreads) - _semaphores[name] = sem - - with sem: - LOG.debug(_('Got semaphore "%(lock)s" for method ' - '"%(method)s"...'), {'lock': name, - 'method': f.__name__}) - - # NOTE(mikal): I know this looks odd - if not hasattr(local.strong_store, 'locks_held'): - local.strong_store.locks_held = [] - local.strong_store.locks_held.append(name) - - try: - if external and not CONF.disable_process_locking: - LOG.debug(_('Attempting to grab file lock "%(lock)s" ' - 'for method "%(method)s"...'), - {'lock': name, 'method': f.__name__}) - cleanup_dir = False - - # We need a copy of lock_path because it is non-local - local_lock_path = lock_path - if not local_lock_path: - local_lock_path = CONF.lock_path - - if not local_lock_path: - cleanup_dir = True - local_lock_path = tempfile.mkdtemp() - - if not os.path.exists(local_lock_path): - fileutils.ensure_tree(local_lock_path) - - # NOTE(mikal): the lock name cannot contain directory - # separators - safe_name = name.replace(os.sep, '_') - lock_file_name = '%s%s' % (lock_file_prefix, safe_name) - lock_file_path = os.path.join(local_lock_path, - lock_file_name) - - try: - lock = InterProcessLock(lock_file_path) - with lock: - LOG.debug(_('Got file lock "%(lock)s" at ' - '%(path)s for method ' - '"%(method)s"...'), - {'lock': name, - 'path': lock_file_path, - 'method': f.__name__}) - retval = f(*args, **kwargs) - finally: - LOG.debug(_('Released file lock "%(lock)s" at ' - '%(path)s for method "%(method)s"...'), - {'lock': name, - 'path': lock_file_path, - 'method': f.__name__}) - # NOTE(vish): This removes the tempdir if we needed - # to create one. This is used to - # cleanup the locks left behind by unit - # tests. - if cleanup_dir: - shutil.rmtree(local_lock_path) - else: - retval = f(*args, **kwargs) - - finally: - local.strong_store.locks_held.remove(name) - - return retval - return inner - return wrap - - -def synchronized_with_prefix(lock_file_prefix): - """Partial object generator for the synchronization decorator. - - Redefine @synchronized in each project like so:: - - (in nova/utils.py) - from nova.openstack.common import lockutils - - synchronized = lockutils.synchronized_with_prefix('nova-') - - - (in nova/foo.py) - from nova import utils - - @utils.synchronized('mylock') - def bar(self, *args): - ... 
- - The lock_file_prefix argument is used to provide lock files on disk with a - meaningful prefix. The prefix should end with a hyphen ('-') if specified. - """ - - return functools.partial(synchronized, lock_file_prefix=lock_file_prefix) diff --git a/sysinv/sysinv/sysinv/sysinv/openstack/common/rpc/impl_kombu.py b/sysinv/sysinv/sysinv/sysinv/openstack/common/rpc/impl_kombu.py index 38d614c2cc..fcef36a617 100644 --- a/sysinv/sysinv/sysinv/sysinv/openstack/common/rpc/impl_kombu.py +++ b/sysinv/sysinv/sysinv/sysinv/openstack/common/rpc/impl_kombu.py @@ -519,7 +519,7 @@ class Connection(object): try: self._connect(params) return - except (IOError, self.connection_errors) as e: + except (IOError, self.connection_errors) as e: # noqa: F841 pass except Exception as e: # NOTE(comstud): Unfortunately it's possible for amqplib @@ -532,7 +532,7 @@ class Connection(object): raise log_info = {} - log_info['err_str'] = str(e) + log_info['err_str'] = str(e) # noqa: F821 log_info['max_retries'] = self.max_retries log_info.update(params) diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/barbican.py b/sysinv/sysinv/sysinv/sysinv/puppet/barbican.py index b20f9733af..b77b235974 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/barbican.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/barbican.py @@ -19,6 +19,8 @@ class BarbicanPuppet(openstack.OpenstackBasePuppet): return { 'barbican::db::postgresql::user': dbuser, + 'barbican::keystone::authtoken::region_name': + self._keystone_region_name(), } def get_secure_static_config(self): diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/interface.py b/sysinv/sysinv/sysinv/sysinv/puppet/interface.py index bffbee7fc3..184b728e34 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/interface.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/interface.py @@ -51,9 +51,11 @@ MANUAL_METHOD = 'manual' DHCP_METHOD = 'dhcp' NETWORK_CONFIG_RESOURCE = 'platform::interfaces::network_config' -ROUTE_CONFIG_RESOURCE = 'platform::interfaces::route_config' -SRIOV_CONFIG_RESOURCE = 'platform::interfaces::sriov_config' -ADDRESS_CONFIG_RESOURCE = 'platform::addresses::address_config' +SRIOV_CONFIG_RESOURCE = 'platform::interfaces::sriov::sriov_config' +ADDRESS_CONFIG_RESOURCE = 'platform::network::addresses::address_config' +ROUTE_CONFIG_RESOURCE = 'platform::network::routes::route_config' + +DATA_IFACE_LIST_RESOURCE = 'platform::lmon::params::data_iface_devices' class InterfacePuppet(base.BasePuppet): @@ -86,6 +88,7 @@ class InterfacePuppet(base.BasePuppet): ROUTE_CONFIG_RESOURCE: {}, ADDRESS_CONFIG_RESOURCE: {}, SRIOV_CONFIG_RESOURCE: {}, + DATA_IFACE_LIST_RESOURCE: [], } system = self._get_system() @@ -106,6 +109,9 @@ class InterfacePuppet(base.BasePuppet): # Generate driver specific configuration generate_driver_config(context, config) + # Generate data iface list configuration + generate_data_iface_list_config(context, config) + # Update the global context with generated interface context self.context.update(context) @@ -1356,6 +1362,16 @@ def generate_mlx4_core_options(context, config): config['platform::networking::mlx4_core_options'] = mlx4_core_options +def generate_data_iface_list_config(context, config): + """ + Generate the puppet resource for data-network iface name. + """ + for iface in context['interfaces'].values(): + if is_data_interface(context, iface): + ifname = get_interface_os_ifname(context, iface) + config[DATA_IFACE_LIST_RESOURCE].append(ifname) + + def generate_driver_config(context, config): """ Generate custom configuration for driver specific parameters. 
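The interface.py hunk above introduces generate_data_iface_list_config(), which walks the interface context, keeps only data-class interfaces, and records their OS-level names under the new platform::lmon::params::data_iface_devices resource (presumably consumed by the link-monitor manifest). A minimal standalone sketch of that filtering pattern follows; the two helpers here are simplified stand-ins for sysinv's real is_data_interface() and get_interface_os_ifname(), not the actual implementations:

DATA_IFACE_LIST_RESOURCE = 'platform::lmon::params::data_iface_devices'

def is_data_interface(context, iface):
    # Stand-in: the real helper checks the interface class against 'data'.
    return iface.get('ifclass') == 'data'

def get_interface_os_ifname(context, iface):
    # Stand-in: the real helper also resolves VLAN/bond/VF naming.
    return iface['ifname']

def generate_data_iface_list_config(context, config):
    # Collect the OS names of all data interfaces into the puppet resource.
    for iface in context['interfaces'].values():
        if is_data_interface(context, iface):
            config[DATA_IFACE_LIST_RESOURCE].append(
                get_interface_os_ifname(context, iface))

context = {'interfaces': {
    1: {'ifname': 'enp0s3', 'ifclass': 'data'},
    2: {'ifname': 'enp0s8', 'ifclass': 'platform'},
}}
config = {DATA_IFACE_LIST_RESOURCE: []}
generate_data_iface_list_config(context, config)
assert config[DATA_IFACE_LIST_RESOURCE] == ['enp0s3']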
diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/kubernetes.py b/sysinv/sysinv/sysinv/sysinv/puppet/kubernetes.py index 2890ff3638..4dfe310e17 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/kubernetes.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/kubernetes.py @@ -8,7 +8,6 @@ from __future__ import absolute_import from eventlet.green import subprocess import json import netaddr -import os import re from oslo_log import log as logging @@ -39,16 +38,6 @@ class KubernetesPuppet(base.BasePuppet): config = {} config.update( {'platform::kubernetes::params::enabled': True, - 'platform::kubernetes::params::pod_network_cidr': - self._get_pod_network_cidr(), - 'platform::kubernetes::params::pod_network_ipversion': - self._get_pod_network_ipversion(), - 'platform::kubernetes::params::service_network_cidr': - self._get_cluster_service_subnet(), - 'platform::kubernetes::params::apiserver_advertise_address': - self._get_cluster_host_address(), - 'platform::kubernetes::params::etcd_endpoint': - self._get_etcd_endpoint(), 'platform::kubernetes::params::service_domain': self._get_dns_service_domain(), 'platform::kubernetes::params::dns_service_ip': @@ -59,44 +48,6 @@ class KubernetesPuppet(base.BasePuppet): return config - def get_secure_system_config(self): - config = {} - # This retrieves the certificates that were used during the bootstrap - # ansible playbook. - if os.path.exists(constants.KUBERNETES_PKI_SHARED_DIR): - # Store required certificates in configuration. - with open(os.path.join( - constants.KUBERNETES_PKI_SHARED_DIR, 'ca.crt'), 'r') as f: - ca_crt = f.read() - with open(os.path.join( - constants.KUBERNETES_PKI_SHARED_DIR, 'ca.key'), 'r') as f: - ca_key = f.read() - with open(os.path.join( - constants.KUBERNETES_PKI_SHARED_DIR, 'sa.key'), 'r') as f: - sa_key = f.read() - with open(os.path.join( - constants.KUBERNETES_PKI_SHARED_DIR, 'sa.pub'), 'r') as f: - sa_pub = f.read() - with open(os.path.join( - constants.KUBERNETES_PKI_SHARED_DIR, - 'front-proxy-ca.crt'), 'r') as f: - front_proxy_ca_crt = f.read() - with open(os.path.join( - constants.KUBERNETES_PKI_SHARED_DIR, - 'front-proxy-ca.key'), 'r') as f: - front_proxy_ca_key = f.read() - config.update( - {'platform::kubernetes::params::ca_crt': ca_crt, - 'platform::kubernetes::params::ca_key': ca_key, - 'platform::kubernetes::params::sa_key': sa_key, - 'platform::kubernetes::params::sa_pub': sa_pub, - 'platform::kubernetes::params::front_proxy_ca_crt': - front_proxy_ca_crt, - 'platform::kubernetes::params::front_proxy_ca_key': - front_proxy_ca_key, - }) - return config - def get_host_config(self, host): config = {} @@ -122,21 +73,31 @@ class KubernetesPuppet(base.BasePuppet): def _get_host_join_command(self, host): config = {} - - if host.personality != constants.WORKER: + if not utils.is_initial_config_complete(): return config # The token expires after 24 hours and is needed for a reinstall. # The puppet manifest handles the case where the node already exists. 
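+        # For controller nodes the join command must also carry the control-plane + # certificate key: 'kubeadm init phase upload-certs' re-uploads the + # control-plane certificates and prints a one-time certificate key on the + # last line of its output, which is appended to 'kubeadm join' below as + # '--control-plane --certificate-key'.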
try: + join_cmd_additions = '' + if host.personality == constants.CONTROLLER: + # Upload the certificates used during kubeadm join + # The cert key will be printed in the last line of the output + cmd = ['kubeadm', 'init', 'phase', 'upload-certs', '--upload-certs', '--config', + '/etc/kubernetes/kubeadm.yaml'] + cmd_output = subprocess.check_output(cmd) + cert_key = cmd_output.strip().split('\n')[-1] + join_cmd_additions = " --control-plane --certificate-key %s" % cert_key + cmd = ['kubeadm', 'token', 'create', '--print-join-command', '--description', 'Bootstrap token for %s' % host.hostname] join_cmd = subprocess.check_output(cmd) - config.update( - {'platform::kubernetes::worker::params::join_cmd': join_cmd, }) + join_cmd_additions += " --cri-socket /var/run/containerd/containerd.sock" + join_cmd = join_cmd.strip() + join_cmd_additions except subprocess.CalledProcessError: - raise exception.SysinvException( - 'Failed to generate bootstrap token') + raise exception.SysinvException('Failed to generate bootstrap token') + + config.update({'platform::kubernetes::params::join_cmd': join_cmd}) return config diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/base.py b/sysinv/sysinv/sysinv/sysinv/tests/api/base.py index 204d4faec8..81aaed7107 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/base.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/base.py @@ -55,9 +55,6 @@ class FunctionalTest(base.TestCase): p.start() self.addCleanup(p.stop) - # mock.patch('lockutils.set_defaults', - # side_effect=lambda a: lambda f: lambda *args: f(*args)) - def _make_app(self, enable_acl=False): # Determine where we are so we can set up paths in the config root_dir = self.path_get() @@ -142,7 +139,9 @@ class FunctionalTest(base.TestCase): return response def get_json(self, path, expect_errors=False, headers=None, - extra_environ=None, q=[], path_prefix=PATH_PREFIX, **params): + extra_environ=None, q=None, path_prefix=PATH_PREFIX, **params): + if q is None: + q = [] full_path = path_prefix + path query_params = {'q.field': [], 'q.value': [], diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_acl.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_acl.py index e4a1928260..e01760c48e 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_acl.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_acl.py @@ -36,7 +36,7 @@ class TestACL(base.FunctionalTest): self.dbapi = db_api.get_instance() self.node_path = '/ihosts/%s' % self.fake_node['uuid'] - def get_json(self, path, expect_errors=False, headers=None, q=[], **param): + def get_json(self, path, expect_errors=False, headers=None, q=None, **param): return super(TestACL, self).get_json(path, expect_errors=expect_errors, headers=headers, @@ -52,23 +52,6 @@ class TestACL(base.FunctionalTest): response = self.get_json(self.node_path, expect_errors=True) self.assertEqual(response.status_int, 401) - def test_authenticated(self): - # Test skipped to prevent error message in Jenkins. 
Error thrown is: - # webtest.app.AppError: Bad response: 401 Unauthorized (not 200 OK or - # 3xx redirect for - # http://localhost/v1/ihosts/1be26c0b-03f2-4d2e-ae87-c02d7f33c123) - # 'Authentication required' - self.skipTest("Skipping to prevent failure notification on Jenkins") - self.mox.StubOutWithMock(self.dbapi, 'ihost_get') - self.dbapi.ihost_get(self.fake_node['uuid']).AndReturn( - self.fake_node) - self.mox.ReplayAll() - - response = self.get_json(self.node_path, - headers={'X-Auth-Token': utils.ADMIN_TOKEN}) - - self.assertEqual(response['uuid'], self.fake_node['uuid']) - def test_non_admin(self): # Test skipped to prevent error message in Jenkins. Error thrown is: # raise mismatch_error diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_address.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_address.py new file mode 100644 index 0000000000..0ceedfd612 --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_address.py @@ -0,0 +1,300 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +""" +Tests for the API / address / methods. +""" + +import netaddr +from six.moves import http_client + +from oslo_utils import uuidutils +from sysinv.common import constants + +from sysinv.tests.api import base +from sysinv.tests.db import base as dbbase +from sysinv.tests.db import utils as dbutils + + +class AddressTestCase(base.FunctionalTest, dbbase.BaseHostTestCase): + # can perform API operations on this object at a sublevel of host + HOST_PREFIX = '/ihosts' + + # can perform API operations on this object at a sublevel of interface + IFACE_PREFIX = '/iinterfaces' + + # API_HEADERS are a generic header passed to most API calls + API_HEADERS = {'User-Agent': 'sysinv-test'} + + # API_PREFIX is the prefix for the URL + API_PREFIX = '/addresses' + + # RESULT_KEY is the python table key for the list of results + RESULT_KEY = 'addresses' + + # COMMON_FIELD is a field that is known to exist for inputs and outputs + COMMON_FIELD = 'address' + + # expected_api_fields are attributes that should be populated by + # an API query + expected_api_fields = ['id', + 'uuid', + 'address_pool_id', + 'address', + 'pool_uuid', + ] + + # hidden_api_fields are attributes that should not be populated by + # an API query + hidden_api_fields = ['forihostid'] + + def setUp(self): + super(AddressTestCase, self).setUp() + self.host = self._create_test_host(constants.CONTROLLER) + + def get_single_url(self, uuid): + return '%s/%s' % (self.API_PREFIX, uuid) + + def get_host_scoped_url(self, host_uuid): + return '%s/%s%s' % (self.HOST_PREFIX, host_uuid, self.API_PREFIX) + + def get_iface_scoped_url(self, interface_uuid): + return '%s/%s%s' % (self.IFACE_PREFIX, interface_uuid, self.API_PREFIX) + + def assert_fields(self, api_object): + # check the uuid is a uuid + assert(uuidutils.is_uuid_like(api_object['uuid'])) + + # Verify that expected attributes are returned + for field in self.expected_api_fields: + self.assertIn(field, api_object) + + # Verify that hidden attributes are not returned + for field in self.hidden_api_fields: + self.assertNotIn(field, api_object) + + def get_post_object(self, name='test_address', ip_address='127.0.0.1', + prefix=8, address_pool_id=None, interface_id=None): + addr = netaddr.IPAddress(ip_address) + addr_db = dbutils.get_test_address( + address=str(addr), + prefix=prefix, + name=name, + address_pool_id=address_pool_id, + interface_id=interface_id, + ) + + # pool_uuid in api corresponds to address_pool_id in db + addr_db['pool_uuid'] = 
addr_db.pop('address_pool_id') + addr_db['interface_uuid'] = addr_db.pop('interface_id') + addr_db.pop('family') + + return addr_db + + +class TestPostMixin(AddressTestCase): + + def setUp(self): + super(TestPostMixin, self).setUp() + + def _test_create_address_success(self, name, ip_address, prefix, + address_pool_id, interface_id): + # Test creation of object + + addr_db = self.get_post_object(name=name, ip_address=ip_address, + prefix=prefix, + address_pool_id=address_pool_id, + interface_id=interface_id) + response = self.post_json(self.API_PREFIX, + addr_db, + headers=self.API_HEADERS) + + # Check HTTP response is successful + self.assertEqual('application/json', response.content_type) + self.assertEqual(response.status_code, http_client.OK) + + # Check that an expected field matches. + self.assertEqual(response.json[self.COMMON_FIELD], + addr_db[self.COMMON_FIELD]) + + def _test_create_address_fail(self, name, ip_address, prefix, + address_pool_id, interface_id, + status_code, error_message): + # Test creation of object + + addr_db = self.get_post_object(name=name, ip_address=ip_address, + prefix=prefix, + address_pool_id=address_pool_id, + interface_id=interface_id) + response = self.post_json(self.API_PREFIX, + addr_db, + headers=self.API_HEADERS, + expect_errors=True) + + # Check HTTP response is failed + self.assertEqual('application/json', response.content_type) + self.assertEqual(response.status_code, status_code) + self.assertIn(error_message, response.json['error_message']) + + def test_create_address(self): + self._test_create_address_success( + "fake-address", + str(self.oam_subnet[25]), self.oam_subnet.prefixlen, + address_pool_id=self.address_pools[2].uuid, + interface_id=None, + ) + + def test_create_address_wrong_address_pool(self): + self._test_create_address_fail( + "fake-address", + str(self.oam_subnet[25]), self.oam_subnet.prefixlen, + address_pool_id=self.address_pools[1].uuid, + interface_id=None, + status_code=http_client.CONFLICT, + error_message="does not match pool network", + ) + + def test_create_address_wrong_prefix_len(self): + self._test_create_address_fail( + "fake-address", + str(self.oam_subnet[25]), self.oam_subnet.prefixlen - 1, + address_pool_id=self.address_pools[2].uuid, + interface_id=None, + status_code=http_client.CONFLICT, + error_message="does not match pool network", + ) + + def test_create_address_zero_prefix(self): + error_message = ("Address prefix must be greater than 1 for " + "data network type") + self._test_create_address_fail( + "fake-address", + str(self.oam_subnet[25]), 0, + address_pool_id=self.address_pools[2].uuid, + interface_id=None, + status_code=http_client.INTERNAL_SERVER_ERROR, + error_message=error_message, + ) + + def test_create_address_zero_address(self): + error_message = ("Address must not be null") + if self.oam_subnet.version == 4: + zero_address = "0.0.0.0" + else: + zero_address = "::" + self._test_create_address_fail( + "fake-address", + zero_address, self.oam_subnet.prefixlen, + address_pool_id=self.address_pools[2].uuid, + interface_id=None, + status_code=http_client.INTERNAL_SERVER_ERROR, + error_message=error_message, + ) + + def test_create_address_invalid_name(self): + self._test_create_address_fail( + "fake_address", + str(self.oam_subnet[25]), self.oam_subnet.prefixlen, + address_pool_id=self.address_pools[2].uuid, + interface_id=None, + status_code=http_client.BAD_REQUEST, + error_message="Please configure valid hostname.", + ) + + def test_create_address_multicast(self): + 
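+        # Multicast addresses can never be assigned to a host interface, so the + # API must reject them with "Address must be a unicast address".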
self._test_create_address_fail( + "fake-address", + str(self.multicast_subnet[1]), self.oam_subnet.prefixlen, + address_pool_id=self.address_pools[2].uuid, + interface_id=None, + status_code=http_client.INTERNAL_SERVER_ERROR, + error_message="Address must be a unicast address", + ) + + +class TestDelete(AddressTestCase): + """ Tests deletion. + Typically delete APIs return NO CONTENT. + python2 and python3 libraries may return different + content_type (None, or empty json) when NO_CONTENT is returned. + """ + + def setUp(self): + super(TestDelete, self).setUp() + + def test_delete(self): + # Delete the API object + delete_object = self.mgmt_addresses[0] + uuid = delete_object.uuid + response = self.delete(self.get_single_url(uuid), + headers=self.API_HEADERS) + + # Verify the expected API response for the delete + self.assertEqual(response.status_code, http_client.NO_CONTENT) + + # TODO: Add unit tests to verify deletion is rejected as expected by + # _check_orphaned_routes, _check_host_state, and _check_from_pool. + # + # Currently blocked by bug in dbapi preventing testcase setup: + # https://bugs.launchpad.net/starlingx/+bug/1861131 + + +class TestList(AddressTestCase): + """ Address list operations + """ + + def setUp(self): + super(TestList, self).setUp() + + def test_list_default_addresses_all(self): + response = self.get_json(self.API_PREFIX) + for result in response[self.RESULT_KEY]: + self.assertIn("address", result) + + def test_list_default_addresses_host(self): + response = self.get_json(self.get_host_scoped_url(self.host.uuid)) + self.assertEqual([], response[self.RESULT_KEY]) + + def test_list_default_addresses_interface(self): + ifaces = self._create_test_host_platform_interface(self.host) + interface_id = ifaces[0].uuid + response = self.get_json(self.get_iface_scoped_url(interface_id)) + self.assertEqual([], response[self.RESULT_KEY]) + + +class TestPatch(AddressTestCase): + + def setUp(self): + super(TestPatch, self).setUp() + + def test_patch_not_allowed(self): + # Try to patch an unmodifiable value + + patch_object = self.mgmt_addresses[0] + + response = self.patch_json(self.get_single_url(patch_object.uuid), + [{'path': '/name', + 'value': 'test', + 'op': 'replace'}], + headers=self.API_HEADERS, + expect_errors=True) + + # Verify the expected API response + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, http_client.METHOD_NOT_ALLOWED) + self.assertIn("The method PATCH is not allowed for this resource.", + response.json['error_message']) + + +class IPv4TestPost(TestPostMixin, + AddressTestCase): + pass + + +class IPv6TestPost(TestPostMixin, + dbbase.BaseIPv6Mixin, + AddressTestCase): + pass diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_dns.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_dns.py new file mode 100644 index 0000000000..a4c8e09992 --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_dns.py @@ -0,0 +1,402 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +""" +Tests for the API / dns / methods. 
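+The idns resource cannot be created or deleted through the API: POST and +DELETE are rejected with 'Operation not permitted'; only GET and PATCH are +exercised below.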
+""" + +import mock +import unittest +from six.moves import http_client +from sysinv.tests.api import base +from sysinv.tests.db import base as dbbase +from sysinv.tests.db import utils as dbutils + + +class FakeConductorAPI(object): + + def __init__(self): + self.update_dns_config = mock.MagicMock() + + +class FakeException(Exception): + pass + + +class ApiDNSTestCaseMixin(object): + + # API_HEADERS are a generic header passed to most API calls + API_HEADERS = {'User-Agent': 'sysinv-test'} + + # API_PREFIX is the prefix for the URL + API_PREFIX = '/idns' + + # RESULT_KEY is the python table key for the list of results + RESULT_KEY = 'idnss' + + # COMMON_FIELD is a field that is known to exist for inputs and outputs + COMMON_FIELD = 'nameservers' + + # expected_api_fields are attributes that should be populated by + # an API query + expected_api_fields = ['uuid', + 'nameservers', + 'isystem_uuid'] + + # hidden_api_fields are attributes that should not be populated by + # an API query + hidden_api_fields = ['forisystemid'] + + def setUp(self): + super(ApiDNSTestCaseMixin, self).setUp() + self.fake_conductor_api = FakeConductorAPI() + p = mock.patch('sysinv.conductor.rpcapi.ConductorAPI') + self.mock_conductor_api = p.start() + self.mock_conductor_api.return_value = self.fake_conductor_api + self.addCleanup(p.stop) + # This field decides if the DNS IP address will be IPv4 or IPv6 + self.is_ipv4 = isinstance(self, PlatformIPv4ControllerApiDNSPatchTestCase) + + def get_single_url(self, uuid): + return '%s/%s' % (self.API_PREFIX, uuid) + + def _create_db_object(self, obj_id=None): + if(self.is_ipv4): + return dbutils.create_test_dns(id=obj_id, + forisystemid=self.system.id, + nameservers='8.8.8.8,8.8.4.4') + else: + return dbutils.create_test_dns(id=obj_id, + forisystemid=self.system.id, + nameservers='2001:4860:4860::8888,2001:4860:4860::8844') + + +class ApiDNSPostTestSuiteMixin(ApiDNSTestCaseMixin): + """ DNS post operations + """ + def setUp(self): + super(ApiDNSPostTestSuiteMixin, self).setUp() + + def get_post_object(self): + return dbutils.post_get_test_dns(forisystemid=self.system.id, + nameservers='8.8.8.8,8.8.4.4') + + # Test that a valid POST operation is blocked by the API + # API should return 400 BAD_REQUEST or FORBIDDEN 403 + def test_create_not_allowed(self): + ndict = self.get_post_object() + response = self.post_json(self.API_PREFIX, + ndict, + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.status_code, http_client.FORBIDDEN) + self.assertIn("Operation not permitted", response.json['error_message']) + + +class ApiDNSDeleteTestSuiteMixin(ApiDNSTestCaseMixin): + """ DNS delete operations + """ + def setUp(self): + super(ApiDNSDeleteTestSuiteMixin, self).setUp() + self.delete_object = self._create_db_object() + + # Test that a valid DELETE operation is blocked by the API + def test_delete_not_allowed(self): + # Test that a valid DELETE operation is blocked by the API + uuid = self.delete_object.uuid + response = self.delete(self.get_single_url(uuid), + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.status_code, http_client.FORBIDDEN) + self.assertIn("Operation not permitted", response.json['error_message']) + + +class ApiDNSPatchTestSuiteMixin(ApiDNSTestCaseMixin): + """ DNS patch operations + """ + patch_path_nameserver = '/nameservers' + patch_path_action = '/action' + patch_field = 'nameservers' + + def setUp(self): + super(ApiDNSPatchTestSuiteMixin, self).setUp() + self.patch_object = self._create_db_object() + 
if(self.is_ipv4): + self.patch_value_no_change = '8.8.8.8,8.8.4.4' + self.patch_value_changed = '8.8.8.8' + self.patch_value_more_than_permitted = '8.8.8.8,8.8.4.4,9.9.9.9,9.8.8.9' + self.patch_value_hostname = "dns.google" + else: + self.patch_value_no_change = '2001:4860:4860::8888,2001:4860:4860::8844' + self.patch_value_changed = '2001:4860:4860::8888' + self.patch_value_more_than_permitted = '2001:4860:4860::8888,2001:4860:4860::8844,'\ + '2001:4860:4860::4444,2001:4860:4860::8888' + self.patch_value_hostname = "dns.google" + + def exception_dns(self): + print('Raised a fake exception') + raise FakeException + + def test_patch_invalid_field(self): + # Pass a nonexistent field to be patched by the API + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': '/junk_field', + 'value': self.patch_value_no_change, + 'op': 'replace'}], + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + + def test_patch_no_change(self): + # Ensure no DNS config changes are made when the same value is passed + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': self.patch_path_nameserver, + 'value': self.patch_value_no_change, + 'op': 'replace'}], + headers=self.API_HEADERS) + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, http_client.OK) + + # Verify that the attribute remains unchanged + response = self.get_json(self.get_single_url(self.patch_object.uuid)) + self.assertEqual(response[self.patch_field], self.patch_value_no_change) + + # Verify that the method that updates dns config is not called + self.fake_conductor_api.update_dns_config.assert_not_called() + + def test_patch_exception(self): + # Raise an exception and ensure the DNS configuration is not updated + self.fake_conductor_api.update_dns_config.side_effect = self.exception_dns + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': self.patch_path_nameserver, + 'value': self.patch_value_changed, + 'op': 'replace'}, + {"path": self.patch_path_action, + "value": "apply", + "op": "replace"}], + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Failed to update the DNS configuration", response.json['error_message']) + + # Verify that the attribute was not updated + response = self.get_json(self.get_single_url(self.patch_object.uuid)) + self.assertNotEqual(response[self.patch_field], self.patch_value_changed) + + def test_patch_valid_change(self): + # Update value of patchable field + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': self.patch_path_nameserver, + 'value': self.patch_value_changed, + 'op': 'replace'}, + {"path": self.patch_path_action, + "value": "apply", + "op": "replace"}], + headers=self.API_HEADERS) + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, http_client.OK) + + # Verify that the attribute was updated + response = self.get_json(self.get_single_url(self.patch_object.uuid)) + self.assertEqual(response[self.patch_field], self.patch_value_changed) + + # Verify that the method that updates dns config is called once + self.fake_conductor_api.update_dns_config.assert_called_once() + + def 
test_patch_invalid_value(self): + # Pass a value that fails a semantic check when patched by the API + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': self.patch_path_nameserver, + 'value': 'invalid_list', + 'op': 'replace'}], + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Invalid DNS nameserver target address invalid_list Please configure a valid DNS address.", + response.json['error_message']) + + def test_patch_max_dns_server(self): + # Pass DNS server list which is more than the maximum number supported so that it fails when patched by the API + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': self.patch_path_nameserver, + 'value': self.patch_value_more_than_permitted, + 'op': 'replace'}], + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Please configure a valid list of DNS nameservers.", + response.json['error_message']) + + def test_patch_empty_value(self): + # Pass an empty DNS server list that fails when patched by the API + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': self.patch_path_nameserver, + 'value': '', + 'op': 'replace'}], + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("At least one DNS server must be used when any NTP server address is using FQDN.", + response.json['error_message']) + + def test_patch_hostname(self): + # Pass a hostname that fails when patched by the API + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': self.patch_path_nameserver, + 'value': self.patch_value_hostname, + 'op': 'replace'}], + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Please configure a valid DNS address.", response.json['error_message']) + + +class ApiDNSListTestSuiteMixin(ApiDNSTestCaseMixin): + """ DNS GET operations + """ + def setUp(self): + super(ApiDNSListTestSuiteMixin, self).setUp() + self.dns_uuid = self.dns.uuid + + def test_fetch_dns_object(self): + response = self.get_json(self.API_PREFIX) + self.assertEqual(response[self.RESULT_KEY][0]['uuid'], self.dns_uuid) + + +# ============= IPv4 environment tests ============== +# Tests DNS Api operations for a Controller (defaults to IPv4) +class PlatformIPv4ControllerApiDNSPatchTestCase(ApiDNSPatchTestSuiteMixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv4ControllerApiDNSListTestCase(ApiDNSListTestSuiteMixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv4ControllerApiDNSPostTestCase(ApiDNSPostTestSuiteMixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv4ControllerApiDNSDeleteTestCase(ApiDNSDeleteTestSuiteMixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +# ============= IPv6 environment 
tests ============== +# Tests DNS Api operations for a Controller (defaults to IPv6) +class PlatformIPv6ControllerApiDNSPatchTestCase(ApiDNSPatchTestSuiteMixin, + dbbase.BaseIPv6Mixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv6ControllerApiDNSListTestCase(ApiDNSListTestSuiteMixin, + dbbase.BaseIPv6Mixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv6ControllerApiDNSPostTestCase(ApiDNSPostTestSuiteMixin, + dbbase.BaseIPv6Mixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv6ControllerApiDNSDeleteTestCase(ApiDNSDeleteTestSuiteMixin, + dbbase.BaseIPv6Mixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +# ============= IPv6 DNS in IPv4 environment tests ============== +class PlatformIPv6inIPv4OAMControllerApiDNSPatchTestCase(ApiDNSPatchTestSuiteMixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + def setUp(self): + super(PlatformIPv6inIPv4OAMControllerApiDNSPatchTestCase, self).setUp() + self.is_ipv4 = False + self.patch_object = self._create_db_object() + self.patch_value_no_change = '2001:4860:4860::8888,2001:4860:4860::8844' + self.patch_value_changed = '2001:4860:4860::8888' + self.patch_value_more_than_permitted = '2001:4860:4860::8888,2001:4860:4860::8844,'\ + '2001:4860:4860::4444,2001:4860:4860::8888' + self.patch_value_hostname = "dns.google" + + # See https://bugs.launchpad.net/starlingx/+bug/1860489 + @unittest.expectedFailure + def test_patch_valid_change(self): + # Update value of patchable field + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': self.patch_path_nameserver, + 'value': self.patch_value_changed, + 'op': 'replace'}, + {"path": self.patch_path_action, + "value": "apply", + "op": "replace"}], + headers=self.API_HEADERS) + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + + pass + + +# ============= IPv4 DNS in IPv6 environment tests ============== +class PlatformIPv4inIPv6ControllerApiDNSPatchTestCase(ApiDNSPatchTestSuiteMixin, + dbbase.BaseIPv6Mixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + def setUp(self): + super(PlatformIPv4inIPv6ControllerApiDNSPatchTestCase, self).setUp() + self.is_ipv4 = False + self.patch_object = self._create_db_object() + self.patch_value_no_change = '8.8.8.8,8.8.4.4' + self.patch_value_changed = '8.8.8.8' + self.patch_value_more_than_permitted = '8.8.8.8,8.8.4.4,9.9.9.9,9.8.8.9' + self.patch_value_hostname = "dns.google" + + # See https://bugs.launchpad.net/starlingx/+bug/1860489 + @unittest.expectedFailure + def test_patch_valid_change(self): + # Update value of patchable field + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': self.patch_path_nameserver, + 'value': self.patch_value_changed, + 'op': 'replace'}, + {"path": self.patch_path_action, + "value": "apply", + "op": "replace"}], + headers=self.API_HEADERS) + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + + pass diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_helm_charts.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_helm_charts.py new file mode 100644 index 0000000000..552ab6489b --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_helm_charts.py @@ -0,0 +1,541 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +""" +Tests for the helm chart methods. +""" + +import mock +from six.moves import http_client +from sysinv.tests.api import base +from sysinv.tests.db import base as dbbase +from sysinv.tests.db import utils as dbutils + + +class FakeConductorAPI(object): + + def __init__(self): + self.get_helm_application_namespaces = mock.MagicMock() + self.get_helm_applications = mock.MagicMock() + self.get_helm_chart_overrides = mock.MagicMock() + self.merge_overrides = mock.MagicMock() + + +class FakeException(Exception): + pass + + +class ApiHelmChartTestCaseMixin(base.FunctionalTest, + dbbase.ControllerHostTestCase): + + # API_HEADERS are a generic header passed to most API calls + API_HEADERS = {'User-Agent': 'sysinv-test'} + + # API_PREFIX is the prefix for the URL + API_PREFIX = '/helm_charts' + + # RESULT_KEY is the python table key for the list of results + RESULT_KEY = 'charts' + + # expected_api_fields are attributes that should be populated by + # an API query + expected_api_fields = ['name', + 'namespace', + 'user_overrides', + 'system_overrides', + 'app_id'] + + # hidden_api_fields are attributes that should not be populated by + # an API query + hidden_api_fields = ['app_id'] + + def setUp(self): + super(ApiHelmChartTestCaseMixin, self).setUp() + self.fake_conductor_api = FakeConductorAPI() + p = mock.patch('sysinv.conductor.rpcapi.ConductorAPI') + self.mock_conductor_api = p.start() + self.mock_conductor_api.return_value = self.fake_conductor_api + self.addCleanup(p.stop) + self.helm_app = self._create_db_app() + self.helm_override_obj_one = self._create_db_overrides( + appid=self.helm_app.id, + chart_name='ceph-pools-audit', + chart_namespace='kube-system', + system_override_attr={"enabled": True}, + user_override="global:\n replicas: \"2\"\n") + self.helm_override_obj_two = self._create_db_overrides( + appid=self.helm_app.id, + chart_name='rbd-provisioner', + chart_namespace='kube-system', + system_override_attr={"enabled": False}, + user_override="global:\n replicas: \"3\"\n") + self.fake_helm_apps = self.fake_conductor_api.get_helm_applications + self.fake_ns = self.fake_conductor_api.get_helm_application_namespaces + self.fake_override = self.fake_conductor_api.get_helm_chart_overrides + self.fake_merge_overrides = self.fake_conductor_api.merge_overrides + + def exception_helm_override(self): + print('Raised a fake exception') + raise FakeException + + def get_single_url_helm_override_list(self, app_name): + return '%s/?app_name=%s' % (self.API_PREFIX, app_name) + + def get_single_url_helm_override(self, app_name, chart_name, namespace): + return '%s/%s?name=%s&namespace=%s' % (self.API_PREFIX, app_name, + chart_name, namespace) + + def _create_db_app(self, obj_id=None): + return dbutils.create_test_app(id=obj_id, name='platform-integ-apps', + app_version='1.0-8', + manifest_name='platform-integration-manifest', + manifest_file='manifest.yaml', + status='applied', + active=True) + + def _create_db_overrides(self, appid, chart_name, chart_namespace, + system_override_attr, user_override, obj_id=None): + return dbutils.create_test_helm_overrides(id=obj_id, + app_id=appid, + name=chart_name, + namespace=chart_namespace, + system_overrides=system_override_attr, + user_overrides=user_override) + + +class ApiHelmChartListTestSuiteMixin(ApiHelmChartTestCaseMixin): + """ Helm Override List GET operations + """ + def setUp(self): + super(ApiHelmChartListTestSuiteMixin, self).setUp() + + def test_fetch_success_helm_override_list(self): + # 
Return a namespace dictionary + self.fake_ns.return_value = {'ceph-pools-audit': ['kube-system'], + 'rbd-provisioner': ['kube-system']} + url = self.get_single_url_helm_override_list('platform-integ-apps') + response = self.get_json(url) + + # Verify the values of the response with the object values in database + self.assertEqual(len(response[self.RESULT_KEY]), 2) + + # py36 preserves insertion order, whereas py27 does not + result_one = response[self.RESULT_KEY][0] + result_two = response[self.RESULT_KEY][1] + self.assertTrue(result_one['name'] == self.helm_override_obj_one.name or + result_two['name'] == self.helm_override_obj_one.name) + self.assertTrue(result_one['name'] == self.helm_override_obj_two.name or + result_two['name'] == self.helm_override_obj_two.name) + if(result_one['name'] == self.helm_override_obj_one.name): + self.assertTrue(result_one['enabled'] == [True]) + self.assertTrue(result_two['enabled'] == [False]) + else: + self.assertTrue(result_two['enabled'] == [True]) + self.assertTrue(result_one['enabled'] == [False]) + + def test_fetch_helm_override_list_exception(self): + # Raise an exception while finding helm charts for an application + self.fake_ns.side_effect = self.exception_helm_override + url = self.get_single_url_helm_override_list('platform-integ-apps') + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Unable to get the helm charts for application " + "platform-integ-apps", + response.json['error_message']) + + def test_fetch_helm_override_list_invalid_value(self): + self.fake_ns.return_value = {'ceph-pools-audit': ['kube-system']} + url = self.get_single_url_helm_override_list('invalid_app_name') + # Pass an invalid value for app name + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Application invalid_app_name not found.", + response.json['error_message']) + + +class ApiHelmChartShowTestSuiteMixin(ApiHelmChartTestCaseMixin): + """ Helm Override Show GET operations + """ + def setUp(self): + super(ApiHelmChartShowTestSuiteMixin, self).setUp() + + def test_no_system_override(self): + url = self.get_single_url_helm_override('platform-integ-apps', + 'ceph-pools-audit', 'kube-system') + response = self.get_json(url) + + # Verify the values of the response with the values stored in database + self.assertEqual(response['name'], self.helm_override_obj_one.name) + self.assertIn(self.helm_override_obj_one.namespace, + response['namespace']) + + def test_fetch_helm_override_show_invalid_application(self): + url = self.get_single_url_helm_override('invalid_value', + 'ceph-pools-audit', 'kube-system') + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Application invalid_value not found.", + response.json['error_message']) + + def test_fetch_helm_override_show_invalid_helm_chart(self): + url = self.get_single_url_helm_override('platform-integ-apps', + 'invalid_value', 'kube-system') + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # 
Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Unable to get the helm chart attributes for chart " + "invalid_value under Namespace kube-system", + response.json['error_message']) + + def test_fetch_helm_override_show_invalid_namespace(self): + url = self.get_single_url_helm_override('platform-integ-apps', + 'ceph-pools-audit', + 'invalid_value') + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Unable to get the helm chart attributes for chart " + "ceph-pools-audit under Namespace invalid_value", + response.json['error_message']) + + def test_fetch_helm_override_show_empty_name(self): + url = self.get_single_url_helm_override('platform-integ-apps', + '', + 'kube-system') + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Name must be specified.", + response.json['error_message']) + + def test_fetch_helm_override_show_empty_namespace(self): + url = self.get_single_url_helm_override('platform-integ-apps', + 'ceph-pools-audit', + '') + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Namespace must be specified.", + response.json['error_message']) + + def test_fetch_helm_override_no_system_overrides_fetched(self): + # Return system apps + self.fake_helm_apps.return_value = ['platform-integ-apps'] + + url = self.get_single_url_helm_override('platform-integ-apps', + 'ceph-pools-audit', 'kube-system') + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Unable to get the helm chart overrides for chart " + "ceph-pools-audit under Namespace kube-system", + response.json['error_message']) + + def test_fetch_success_helm_override_show(self): + # Return system apps + self.fake_helm_apps.return_value = ['platform-integ-apps'] + # Return helm chart overrides + self.fake_override.return_value = {"enabled": True} + + url = self.get_single_url_helm_override('platform-integ-apps', + 'ceph-pools-audit', 'kube-system') + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.status_code, http_client.OK) + self.assertEqual(response.content_type, 'application/json') + + # Verify the values of the response with the values in database + self.assertEqual(response.json['name'], + self.helm_override_obj_one.name) + self.assertEqual(response.json['namespace'], + self.helm_override_obj_one.namespace) + self.assertEqual(response.json['attributes'], + "enabled: true\n") + self.assertEqual(response.json['system_overrides'], + "{enabled: true}\n") + self.assertEqual(response.json['user_overrides'], + "global:\n replicas: \"2\"\n") + self.assertEqual(response.json['combined_overrides'], {}) + + +class ApiHelmChartDeleteTestSuiteMixin(ApiHelmChartTestCaseMixin): + """ Helm Override delete operations + """ + def setUp(self): + super(ApiHelmChartDeleteTestSuiteMixin, 
self).setUp() + + # Test that a valid DELETE operation is successful + def test_delete_helm_override_success(self): + + # Verify that the user override exists initially + url = self.get_single_url_helm_override('platform-integ-apps', + 'rbd-provisioner', 'kube-system') + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.json['user_overrides'], + 'global:\n replicas: \"3\"\n') + + # Perform delete operation + response = self.delete(url, expect_errors=True) + + # Verify the expected API response for the delete + self.assertEqual(response.status_code, http_client.NO_CONTENT) + + # Verify that the user override is deleted + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.json['user_overrides'], None) + + def test_delete_helm_override_empty_name(self): + url = self.get_single_url_helm_override('platform-integ-apps', + '', + 'kube-system') + response = self.delete(url, expect_errors=True) + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Name must be specified.", response.json['error_message']) + + def test_delete_helm_override_empty_namespace(self): + url = self.get_single_url_helm_override('platform-integ-apps', + 'ceph-pools-audit', + '') + response = self.delete(url, expect_errors=True) + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Namespace must be specified.", + response.json['error_message']) + + def test_delete_helm_override_invalid_application(self): + url = self.get_single_url_helm_override('invalid_application', + 'ceph-pools-audit', 'kube-system') + response = self.delete(url, expect_errors=True) + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Application invalid_application not found.", + response.json['error_message']) + + def test_delete_helm_override_invalid_helm_override(self): + url = self.get_single_url_helm_override('platform-integ-apps', + 'invalid_name', 'invalid_namespace') + response = self.delete(url, expect_errors=True) + + # Deleting a nonexistent override is a no-op and still returns NO_CONTENT + self.assertEqual(response.status_code, http_client.NO_CONTENT) + + +class ApiHelmChartPatchTestSuiteMixin(ApiHelmChartTestCaseMixin): + """ Helm Override patch operations + """ + + def setUp(self): + super(ApiHelmChartPatchTestSuiteMixin, self).setUp() + + def test_success_helm_override_patch(self): + # Return system apps + self.fake_helm_apps.return_value = ['platform-integ-apps'] + # Return helm chart overrides + self.fake_override.return_value = {"enabled": True} + self.fake_merge_overrides.return_value = "global:\n replicas: \"2\"\n" + + # Patch the chart with a new user override supplied via 'set' values + response = self.patch_json(self.get_single_url_helm_override( + 'platform-integ-apps', + 'rbd-provisioner', 'kube-system'), + {'attributes': {}, + 'flag': 'reuse', + 'values': {'files': [], + 'set': ['global.replicas=2']}}, + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, http_client.OK) + + # Verify that the helm override was updated + url = self.get_single_url_helm_override('platform-integ-apps', + 'rbd-provisioner', 'kube-system') + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.json['user_overrides'], + 'global:\n replicas: \"2\"\n') + + def test_helm_override_patch_attribute(self): + # 
Return system apps + self.fake_helm_apps.return_value = ['platform-integ-apps'] + # Return helm chart overrides + self.fake_override.return_value = {"enabled": False} + self.fake_merge_overrides.return_value = "global:\n replicas: \"2\"\n" + + # Patch the chart attributes to disable the chart + url = self.get_single_url_helm_override('platform-integ-apps', + 'rbd-provisioner', 'kube-system') + response = self.patch_json(url, + {'attributes': {"enabled": "false"}, + 'flag': '', + 'values': {}}, + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, http_client.OK) + + # Verify that the helm chart attribute was updated + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.json['attributes'], 'enabled: false\n') + + def test_patch_invalid_application(self): + url = self.get_single_url_helm_override('invalid_app_name', + 'rbd-provisioner', 'kube-system') + response = self.patch_json(url, + {'attributes': {}, + 'flag': 'reuse', + 'values': {'files': [], + 'set': ['global.replicas=2']}}, + headers=self.API_HEADERS, + expect_errors=True) + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Application invalid_app_name not found.", + response.json['error_message']) + + def test_patch_empty_name(self): + url = self.get_single_url_helm_override('platform-integ-apps', + '', + 'kube-system') + response = self.patch_json(url, + {'attributes': {}, + 'flag': 'reuse', + 'values': {'files': [], + 'set': ['global.replicas=2']}}, + headers=self.API_HEADERS, + expect_errors=True) + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Name must be specified.", response.json['error_message']) + + def test_patch_empty_namespace(self): + url = self.get_single_url_helm_override('platform-integ-apps', + 'rbd-provisioner', + '') + response = self.patch_json(url, + {'attributes': {}, + 'flag': 'reuse', + 'values': {'files': [], + 'set': ['global.replicas=2']}}, + headers=self.API_HEADERS, + expect_errors=True) + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Namespace must be specified.", + response.json['error_message']) + + def test_patch_invalid_attribute(self): + url = self.get_single_url_helm_override('platform-integ-apps', + 'rbd-provisioner', 'kube-system') + response = self.patch_json(url, + {'attributes': {"invalid_attr": "false"}, + 'flag': '', + 'values': {}}, + headers=self.API_HEADERS, + expect_errors=True) + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Invalid chart attribute: invalid_attr must " + "be one of [enabled]", + response.json['error_message']) + + def test_patch_invalid_flag(self): + url = self.get_single_url_helm_override('platform-integ-apps', + 'rbd-provisioner', 'kube-system') + response = self.patch_json(url, + {'attributes': {}, + 'flag': 'invalid_flag', + 'values': {'files': [], + 'set': ['global.replicas=2']}}, + headers=self.API_HEADERS, + expect_errors=True) + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Invalid flag: invalid_flag must be either 'reuse' " + "or 'reset'.", + response.json['error_message']) + + def test_patch_invalid_helm_override(self): + url = 
self.get_single_url_helm_override('platform-integ-apps', + 'invalid_name', 'invalid_namespace') + response = self.patch_json(url, + {'attributes': {}, + 'flag': 'reuse', + 'values': {'files': [], + 'set': ['global.replicas=2']}}, + headers=self.API_HEADERS, + expect_errors=True) + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.status_code, http_client.OK) + # Verify the values of the response with the values in database + self.assertEqual(response.json['name'], 'invalid_name') + self.assertIn('invalid_namespace', response.json['namespace']) + + def test_patch_multiple_values(self): + url = self.get_single_url_helm_override('platform-integ-apps', + 'rbd-provisioner', 'kube-system') + response = self.patch_json(url, + {'attributes': {}, + 'flag': 'reuse', + 'values': {'files': [], + 'set': ['global.replicas=2,' + 'global.defaultStorageClass=generic']}}, + headers=self.API_HEADERS, + expect_errors=True) + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Invalid input: One (or more) set overrides contains " + "multiple values. Consider using --values " + "option instead.", response.json['error_message']) + + def test_success_helm_override_patch_reset_flag(self): + # Return system apps + self.fake_helm_apps.return_value = ['platform-integ-apps'] + # Return helm chart overrides + self.fake_override.return_value = {"enabled": True} + self.fake_merge_overrides.return_value = "global:\n replicas: \"2\"\n" + url = self.get_single_url_helm_override('platform-integ-apps', + 'rbd-provisioner', + 'kube-system') + # Patch with the 'reset' flag to clear any user overrides + response = self.patch_json(url, + {'attributes': {}, + 'flag': 'reset', + 'values': {}}, + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, http_client.OK) + + # Verify that the user override was reset + response = self.get_json(url, expect_errors=True) + self.assertEqual(response.json['user_overrides'], None) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py index 507a2da624..172df2c002 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py @@ -2,7 +2,7 @@ # -*- encoding: utf-8 -*- # # -# Copyright (c) 2013-2019 Wind River Systems, Inc. +# Copyright (c) 2013-2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -542,7 +542,7 @@ class TestPostKubeUpgrades(TestHost): kube_upgrade = dbutils.create_test_kube_upgrade( from_version='v1.42.1', to_version='v1.42.2', - state=kubernetes.KUBE_UPGRADED_FIRST_MASTER, + state=kubernetes.KUBE_UPGRADED_NETWORKING, ) # Mark the first kube host upgrade as OK diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface.py index 3d12e85f10..07bdaf1b58 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface.py @@ -1082,10 +1082,12 @@ class InterfacePTP(InterfaceTestCase): super(InterfacePTP, self).setUp() def test_modify_ptp_interface_valid(self): + self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT, + host=self.worker) port0, if0 = self._create_ethernet('if0', host=self.worker) sriovif = dbutils.create_test_interface(forihostid=self.worker.id, datanetworks='group0-data0') dbutils.create_test_ethernet_port( - id=1, name='if1', host_id=self.worker.id, interface_id=sriovif.id, pciaddr='0000:00:00.11', dev_id=0, + id=2, name='if1', host_id=self.worker.id, interface_id=sriovif.id, pciaddr='0000:00:00.11', dev_id=0, sriov_totalvfs=1, sriov_numvfs=1, driver='i40e', sriov_vf_driver='i40evf' ) if0_uuid = if0['uuid'] @@ -1372,15 +1374,22 @@ class TestPatchMixin(object): self.assertEqual(vf_driver, response.json['sriov_vf_driver']) def test_create_sriov_vf_driver_netdevice_valid(self): + self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT) self._create_sriov_vf_driver_valid( constants.SRIOV_DRIVER_TYPE_NETDEVICE) def test_create_sriov_vf_driver_vfio_valid(self): + self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT) self._create_sriov_vf_driver_valid(constants.SRIOV_DRIVER_TYPE_VFIO) def test_create_sriov_vf_driver_invalid(self): + self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT) self._create_sriov_vf_driver_valid('bad_driver', expect_errors=True) + def test_create_sriov_no_mgmt(self): + self._create_sriov_vf_driver_valid(constants.SRIOV_DRIVER_TYPE_VFIO, + expect_errors=True) + class TestPostMixin(object): def setUp(self): @@ -1873,6 +1882,8 @@ class TestPostMixin(object): # The number of virtual functions _ must be less than or equal to the # available VFs _ available on the underlying interface _ def test_create_invalid_vf_interface_numvfs(self): + self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT, + host=self.worker) port, lower_iface = self._create_sriov( 'sriov', host=self.worker, sriov_numvfs=4) self._create_vf('vf1', lower_iface=lower_iface, @@ -1882,6 +1893,8 @@ class TestPostMixin(object): # The number of virtual functions _ must be less than or equal to the # available VFs _ available on the underlying interface _ def test_create_invalid_vf_interface_numvfs_multiple_children(self): + self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT, + host=self.worker) port, lower_iface = self._create_sriov( 'sriov', host=self.worker, sriov_numvfs=4) self._create_vf('vf1', lower_iface=lower_iface, @@ -1893,6 +1906,8 @@ class TestPostMixin(object): # Interface _ is being used by VF interface _ and therefore the interface # class cannot be changed from 'pci-sriov'. 
def test_modify_sriov_interface_invalid_class_with_upper_vf(self): + self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT, + host=self.worker) port, lower_iface = self._create_sriov( 'sriov', host=self.worker, sriov_numvfs=4) self._create_vf('vf1', lower_iface=lower_iface, @@ -1912,6 +1927,8 @@ class TestPostMixin(object): # The number of virtual functions _ must be greater than the number of # consumed VFs _ used by the upper VF interfaces _ def test_modify_sriov_interface_invalid_numvfs_with_upper_vf(self): + self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT, + host=self.worker) port, lower_iface = self._create_sriov( 'sriov', host=self.worker, sriov_numvfs=4) self._create_vf('vf1', lower_iface=lower_iface, @@ -1928,6 +1945,8 @@ class TestPostMixin(object): response.json['error_message']) def test_interface_vf_usesmodify_success(self): + self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT, + host=self.worker) port, lower_iface = self._create_sriov( 'sriov', host=self.worker, sriov_numvfs=4) vf = self._create_vf('vf1', lower_iface=lower_iface, @@ -1944,6 +1963,8 @@ class TestPostMixin(object): self.assertEqual(http_client.OK, patch_result.status_code) def test_interface_vf_usesmodify_invalid(self): + self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT, + host=self.worker) port, lower_iface = self._create_sriov( 'sriov1', host=self.worker, sriov_numvfs=4) vf = self._create_vf('vf1', lower_iface=lower_iface, @@ -2112,6 +2133,7 @@ class TestAIOPatch(InterfaceTestCase): # Expected error: Value for number of SR-IOV VFs must be > 0. def test_invalid_sriov_numvfs(self): + self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT) port, interface = self._create_ethernet('eth0', constants.NETWORK_TYPE_NONE) response = self.patch_dict_json( @@ -2120,9 +2142,12 @@ class TestAIOPatch(InterfaceTestCase): expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) + self.assertIn('Value for number of SR-IOV VFs must be > 0.', + response.json['error_message']) # Expected error: SR-IOV can't be configured on this interface def test_invalid_sriov_totalvfs_zero(self): + self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT) interface = dbutils.create_test_interface(forihostid='1') dbutils.create_test_ethernet_port( id=1, name='eth1', host_id=1, interface_id=interface.id, @@ -2134,9 +2159,12 @@ class TestAIOPatch(InterfaceTestCase): expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) + self.assertIn('SR-IOV can\'t be configured on this interface', + response.json['error_message']) # Expected error: The interface support a maximum of ___ VFs def test_invalid_sriov_exceeded_totalvfs(self): + self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT) interface = dbutils.create_test_interface(forihostid='1') dbutils.create_test_ethernet_port( id=1, name='eth1', host_id=1, interface_id=interface.id, @@ -2150,9 +2178,12 @@ class TestAIOPatch(InterfaceTestCase): expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) + self.assertIn('The interface support a maximum of', + response.json['error_message']) # Expected error: Corresponding port has invalid driver def test_invalid_driver_for_sriov(self): + self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT) interface = dbutils.create_test_interface(forihostid='1') 
dbutils.create_test_ethernet_port( id=1, name='eth1', host_id=1, interface_id=interface.id, @@ -2166,6 +2197,8 @@ class TestAIOPatch(InterfaceTestCase): expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) + self.assertIn('Corresponding port has invalid driver', + response.json['error_message']) class IPv4TestPost(TestPostMixin, diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_upgrade.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_upgrade.py index f035e7b3f7..aeaddbf1e2 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_upgrade.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_upgrade.py @@ -11,6 +11,7 @@ Tests for the API /kube_upgrade/ methods. import mock from six.moves import http_client +from sysinv.common import constants from sysinv.common import kubernetes from sysinv.tests.api import base @@ -56,16 +57,17 @@ class FakeConductorAPI(object): def __init__(self): self.kube_download_images = mock.MagicMock() self.kube_upgrade_networking = mock.MagicMock() - self.get_system_health_return = (True, "System is super healthy") + self.get_system_health_return = ( + True, "System is super healthy") - def get_system_health(self, context, force=False): + def get_system_health(self, context, force=False, kube_upgrade=False): if force: return True, "System is healthy because I was forced to say that" else: return self.get_system_health_return -class TestKubeUpgrade(base.FunctionalTest, dbbase.BaseSystemTestCase): +class TestKubeUpgrade(base.FunctionalTest, dbbase.BaseHostTestCase): def setUp(self): super(TestKubeUpgrade, self).setUp() @@ -132,6 +134,14 @@ class TestKubeUpgrade(base.FunctionalTest, dbbase.BaseSystemTestCase): self.mocked_kube_get_version_states.start() self.addCleanup(self.mocked_kube_get_version_states.stop) + def _create_controller_0(self, subfunction=None, numa_nodes=1, **kw): + return self._create_test_host( + personality=constants.CONTROLLER, + subfunction=subfunction, + numa_nodes=numa_nodes, + unit=0, + **kw) + class TestListKubeUpgrade(TestKubeUpgrade): @@ -183,6 +193,24 @@ class TestPostKubeUpgrade(TestKubeUpgrade, dbbase.ControllerHostTestCase): self.host.id) self.assertEqual('v1.43.1', kube_host_upgrade.target_version) + def test_create_platform_upgrade_exists(self): + # Test creation of upgrade when platform upgrade in progress + dbutils.create_test_load(software_version=dbutils.SW_VERSION_NEW, + compatible_version=dbutils.SW_VERSION, + state=constants.IMPORTED_LOAD_STATE) + dbutils.create_test_upgrade() + + create_dict = dbutils.post_get_test_kube_upgrade(to_version='v1.43.2') + result = self.post_json('/kube_upgrade', create_dict, + headers={'User-Agent': 'sysinv-test'}, + expect_errors=True) + + # Verify the failure + self.assertEqual(result.content_type, 'application/json') + self.assertEqual(http_client.BAD_REQUEST, result.status_int) + self.assertIn("upgrade cannot be done while a platform upgrade", + result.json['error_message']) + def test_create_upgrade_exists(self): # Test creation of upgrade when upgrade already exists dbutils.create_test_kube_upgrade( @@ -546,6 +574,43 @@ class TestPatch(TestKubeUpgrade): self.assertEqual(result['to_version'], 'v1.43.2') self.assertEqual(result['state'], new_state) + def test_update_state_complete_incomplete_host(self): + # Test updating the state of an upgrade to complete when a host has + # not completed its upgrade + self.kube_get_version_states_result = {'v1.42.1': 'available', + 'v1.42.2': 
'available', + 'v1.43.1': 'available', + 'v1.43.2': 'active', + 'v1.43.3': 'available'} + + # Create host + self._create_controller_0() + + # Create the upgrade + dbutils.create_test_kube_upgrade( + from_version='v1.43.1', + to_version='v1.43.2', + state=kubernetes.KUBE_UPGRADING_KUBELETS) + + # Mark the kube host upgrade as failed + values = {'status': kubernetes.KUBE_HOST_UPGRADING_CONTROL_PLANE_FAILED} + self.dbapi.kube_host_upgrade_update(1, values) + + # Update state + new_state = kubernetes.KUBE_UPGRADE_COMPLETE + result = self.patch_json('/kube_upgrade', + [{'path': '/state', + 'value': new_state, + 'op': 'replace'}], + headers={'User-Agent': 'sysinv-test'}, + expect_errors=True) + + # Verify the failure + self.assertEqual(result.content_type, 'application/json') + self.assertEqual(http_client.BAD_REQUEST, result.status_int) + self.assertIn("At least one host has not completed", + result.json['error_message']) + def test_update_state_no_upgrade(self): # Test updating the state when an upgrade doesn't exist diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_label.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_label.py index c1812ad39c..1e6298f913 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_label.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_label.py @@ -4,50 +4,44 @@ # import mock -import platform - from six.moves import http_client +from six.moves.urllib.parse import urlencode from sysinv.common import constants from sysinv.db import api as dbapi from sysinv.tests.api import base from sysinv.tests.db import utils as dbutils -if platform.python_version().startswith('2.7'): - from urllib import urlencode -else: - from urllib.parse import urlencode HEADER = {'User-Agent': 'sysinv'} +es_labels = {'elastic-data': 'enabled', + 'elastic-controller': 'enabled', + 'elastic-client': 'enabled', + 'elastic-master': 'enabled', } + +es_worker_labels = {'elastic-master': 'enabled'} +es_invalid_worker_labels = {'elastic-master': 'mandalorian'} + + +def mock_helm_override_get(dbapi, app_name, chart_name, namespace): + return True + + +def mock_get_system_enabled_k8s_plugins_return_plugins(): + return {"intel-gpu-plugin": "intelgpu=enabled", + "intel-qat-plugin": "intelqat=enabled"} + + +def mock_get_system_enabled_k8s_plugins_return_none(): + return None class LabelTestCase(base.FunctionalTest): - def setUp(self): super(LabelTestCase, self).setUp() self.dbapi = dbapi.get_instance() self.system = dbutils.create_test_isystem() self.load = dbutils.create_test_load() - self.controller = dbutils.create_test_ihost( - id='1', - uuid=None, - forisystemid=self.system.id, - hostname='controller-0', - personality=constants.CONTROLLER, - subfunctions=constants.CONTROLLER, - invprovision=constants.PROVISIONED, - ) - self.worker = dbutils.create_test_ihost( - id='2', - uuid=None, - forisystemid=self.system.id, - hostname='worker-0', - personality=constants.WORKER, - subfunctions=constants.WORKER, - mgmt_mac='01:02.03.04.05.C0', - mgmt_ip='192.168.24.12', - invprovision=constants.PROVISIONED, - ) def _get_path(self, host=None, params=None): if host: @@ -59,16 +53,6 @@ class LabelTestCase(base.FunctionalTest): path += '?' 
+ urlencode(params) return path - -class LabelAssignTestCase(LabelTestCase): - def setUp(self): - super(LabelAssignTestCase, self).setUp() - - generic_labels = { - 'apps': 'enabled', - 'foo': 'bar' - } - def validate_labels(self, input_data, response_data): self.assertEqual(len(input_data), len(response_data)) for label in response_data: @@ -92,6 +76,36 @@ class LabelAssignTestCase(LabelTestCase): response = self.get_json("/ihosts/%s/labels" % host_uuid) return response['labels'] + +class LabelAssignTestCase(LabelTestCase): + def setUp(self): + super(LabelAssignTestCase, self).setUp() + self.controller = dbutils.create_test_ihost( + id='1', + uuid=None, + forisystemid=self.system.id, + hostname='controller-0', + personality=constants.CONTROLLER, + subfunctions=constants.CONTROLLER, + invprovision=constants.PROVISIONED + ) + self.worker = dbutils.create_test_ihost( + id='2', + uuid=None, + forisystemid=self.system.id, + hostname='worker-1', + personality=constants.WORKER, + subfunctions=constants.WORKER, + mgmt_mac='01:02:03:04:05:C5', + mgmt_ip='192.168.24.14', + invprovision=constants.PROVISIONED, + ) + + generic_labels = { + 'apps': 'enabled', + 'foo': 'bar' + } + def test_create_labels(self): host_uuid = self.worker.uuid input_data = self.generic_labels @@ -159,13 +173,6 @@ class LabelAssignTestCase(LabelTestCase): } self.assign_labels_failure(host_uuid, topology_mgr_label) - def mock_get_system_enabled_k8s_plugins_return_plugins(): - return {"intel-gpu-plugin": "intelgpu=enabled", - "intel-qat-plugin": "intelqat=enabled"} - - def mock_get_system_enabled_k8s_plugins_return_none(): - return None - @mock.patch('sysinv.api.controllers.v1.label._get_system_enabled_k8s_plugins', mock_get_system_enabled_k8s_plugins_return_plugins) def test_create_plugin_labels_on_supported_node(self): @@ -187,9 +194,7 @@ class LabelAssignTestCase(LabelTestCase): host_id=self.worker.id, pclass='VGA compatible controller', driver='',) - test_plugin_label = {'intelgpu': 'enabled', } - self.assign_labels_failure(self.worker.uuid, test_plugin_label) @mock.patch('sysinv.api.controllers.v1.label._get_system_enabled_k8s_plugins', @@ -201,3 +206,135 @@ class LabelAssignTestCase(LabelTestCase): response_data = self.get_host_labels(self.worker.uuid) self.validate_labels(test_plugin_label, response_data) + + +@mock.patch('sysinv.common.utils.is_chart_enabled', mock_helm_override_get) +class StxAppLabelsTest(LabelTestCase): + def setUp(self): + super(StxAppLabelsTest, self).setUp() + dbutils.create_test_app(name="stx-monitor", status="uploaded", progress="completed") + self.controller = dbutils.create_test_ihost( + id='1', + uuid=None, + forisystemid=self.system.id, + hostname='controller-0', + personality=constants.CONTROLLER, + subfunctions=constants.CONTROLLER, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED + ) + self.controller1 = dbutils.create_test_ihost( + id='2', + uuid=None, + forisystemid=self.system.id, + hostname='controller-1', + personality=constants.CONTROLLER, + subfunctions=constants.CONTROLLER, + mgmt_mac='01:02:03:04:05:C1', + mgmt_ip='192.168.24.13', + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED + ) + self.worker = dbutils.create_test_ihost( + id='3', + uuid=None, + forisystemid=self.system.id, + hostname='worker-0', + personality=constants.WORKER, + subfunctions=constants.WORKER, + mgmt_mac='01:02:03:04:05:C5', + mgmt_ip='192.168.24.14', + 
invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED + ) + + def test_apply_stx_app_with_missing_labels(self): + response = self.patch_json('/apps/stx-monitor?directive=apply', + {'values': {}}, + headers={'User-Agent': 'sysinv-test'}, expect_errors=True) + self.assertIn("system host-label-assign controller-0 elastic-data=enabled", + response.json['error_message']) + self.assertIn("system host-label-assign controller-1 elastic-data=enabled", + response.json['error_message']) + self.assertIn("Please use [system host-label-assign elastic-master=enabled]", + response.json['error_message']) + + def test_apply_stx_app_with_missing_worker_labels(self): + self.assign_labels(self.controller.uuid, es_labels) + response_data_controller0 = self.get_host_labels(self.controller.uuid) + self.validate_labels(es_labels, response_data_controller0) + + self.assign_labels(self.controller1.uuid, es_labels) + response_data_controller1 = self.get_host_labels(self.controller1.uuid) + self.validate_labels(es_labels, response_data_controller1) + + response = self.patch_json('/apps/stx-monitor?directive=apply', + {'values': {}}, + headers={'User-Agent': 'sysinv-test'}, expect_errors=True) + + self.assertIn("Please use [system host-label-assign elastic-master=enabled]", + response.json['error_message']) + self.assertNotIn("system host-label-assign controller-0 elastic-data=enabled", response.json['error_message']) + self.assertNotIn("system host-label-assign controller-1 elastic-data=enabled", response.json['error_message']) + + def test_apply_stx_app_with_missing_controller0_labels(self): + self.assign_labels(self.worker.uuid, es_worker_labels) + response_data_worker = self.get_host_labels(self.worker.uuid) + self.validate_labels(es_worker_labels, response_data_worker) + + self.assign_labels(self.controller1.uuid, es_labels) + response_data_controller1 = self.get_host_labels(self.controller1.uuid) + self.validate_labels(es_labels, response_data_controller1) + + response = self.patch_json('/apps/stx-monitor?directive=apply', + {'values': {}}, + headers={'User-Agent': 'sysinv-test'}, expect_errors=True) + + self.assertIn("system host-label-assign controller-0 elastic-data=enabled", response.json['error_message']) + self.assertNotIn("Please use [system host-label-assign elastic-master=enabled]", + response.json['error_message']) + self.assertNotIn("system host-label-assign controller-1 elastic-data=enabled", response.json['error_message']) + + def test_apply_stx_app_with_invalid_worker_labels(self): + self.assign_labels(self.controller.uuid, es_labels) + response_data_controller0 = self.get_host_labels(self.controller.uuid) + self.validate_labels(es_labels, response_data_controller0) + + self.assign_labels(self.controller1.uuid, es_labels) + response_data_controller1 = self.get_host_labels(self.controller1.uuid) + self.validate_labels(es_labels, response_data_controller1) + + self.assign_labels(self.worker.uuid, es_invalid_worker_labels) + response_data_worker = self.get_host_labels(self.worker.uuid) + self.validate_labels(es_invalid_worker_labels, response_data_worker) + + response = self.patch_json('/apps/stx-monitor?directive=apply', + {'values': {}}, + headers={'User-Agent': 'sysinv-test'}, expect_errors=True) + self.assertIn("Please use [system host-label-assign elastic-master=enabled]", + response.json['error_message']) + self.assertIn("Please correct host labels values to be enabled [worker-0 elastic-master=", + 
response.json['error_message']) + self.assertNotIn("system host-label-assign controller-0 elastic-data=enabled", response.json['error_message']) + self.assertNotIn("system host-label-assign controller-1 elastic-data=enabled", response.json['error_message']) + + def test_apply_stx_app_with_required_labels(self): + self.assign_labels(self.worker.uuid, es_labels) + response_data_worker = self.get_host_labels(self.worker.uuid) + self.validate_labels(es_labels, response_data_worker) + + self.assign_labels(self.controller.uuid, es_labels) + response_data_controller0 = self.get_host_labels(self.controller.uuid) + self.validate_labels(es_labels, response_data_controller0) + + self.assign_labels(self.controller1.uuid, es_labels) + response_data_controller1 = self.get_host_labels(self.controller1.uuid) + self.validate_labels(es_labels, response_data_controller1) + + response = self.patch_json('/apps/stx-monitor?directive=apply', + {'values': {}}, + headers={'User-Agent': 'sysinv-test'}, expect_errors=True) + self.assertIn("applying", response.json['status']) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_network.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_network.py new file mode 100644 index 0000000000..a37f15f352 --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_network.py @@ -0,0 +1,460 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +""" +Tests for the API / network / methods. +""" + +import mock +from six.moves import http_client + +from oslo_utils import uuidutils +from sysinv.common import constants + +from sysinv.tests.api import base +from sysinv.tests.db import base as dbbase +from sysinv.tests.db import utils as dbutils + + +class NetworkTestCase(base.FunctionalTest, dbbase.BaseHostTestCase): + + # API_HEADERS are a generic header passed to most API calls + API_HEADERS = {'User-Agent': 'sysinv-test'} + + # API_PREFIX is the prefix for the URL + API_PREFIX = '/networks' + + # RESULT_KEY is the python table key for the list of results + RESULT_KEY = 'networks' + + # COMMON_FIELD is a field that is known to exist for inputs and outputs + COMMON_FIELD = 'type' + + # expected_api_fields are attributes that should be populated by + # an API query + expected_api_fields = ['id', + 'uuid', + 'name', + 'type', + 'dynamic', + 'pool_uuid', + ] + + # hidden_api_fields are attributes that should not be populated by + # an API query + hidden_api_fields = ['forihostid'] + + def setUp(self): + super(NetworkTestCase, self).setUp() + + def get_single_url(self, uuid): + return '%s/%s' % (self.API_PREFIX, uuid) + + def assert_fields(self, api_object): + # check the uuid is a uuid + assert(uuidutils.is_uuid_like(api_object['uuid'])) + + # Verify that expected attributes are returned + for field in self.expected_api_fields: + self.assertIn(field, api_object) + + # Verify that hidden attributes are not returned + for field in self.hidden_api_fields: + self.assertNotIn(field, api_object) + + def get_post_object(self, network_type, address_pool_id): + net_db = dbutils.get_test_network( + type=network_type, + address_pool_id=address_pool_id + ) + + # pool_uuid in api corresponds to address_pool_id in db + net_db['pool_uuid'] = net_db.pop('address_pool_id') + + return net_db + + def _create_db_object(self, network_type=constants.NETWORK_TYPE_MGMT): + return self._create_test_network( + name=network_type, + network_type=network_type, + subnet=self.mgmt_subnet, + ) + + # Don't create default test networks + def _create_test_networks(self): + 
pass + + def _create_test_oam(self): + pass + + # Skip creating static pxeboot ip + def _create_test_static_ips(self): + hostnames = [ + constants.CONTROLLER_GATEWAY, + constants.CONTROLLER_HOSTNAME, + constants.CONTROLLER_0_HOSTNAME, + constants.CONTROLLER_1_HOSTNAME + ] + + platform_hostnames = [ + constants.CONTROLLER_PLATFORM_NFS, + ] + + self._create_test_addresses( + hostnames + platform_hostnames, + self.mgmt_subnet, + constants.NETWORK_TYPE_MGMT) + + self._create_test_addresses( + hostnames, self.oam_subnet, + constants.NETWORK_TYPE_OAM) + + self._create_test_addresses( + hostnames, self.cluster_host_subnet, + constants.NETWORK_TYPE_CLUSTER_HOST) + + +class TestPostMixin(NetworkTestCase): + + def setUp(self): + super(TestPostMixin, self).setUp() + + def _test_create_network_success(self, name, network_type, subnet): + # Test creation of object + + address_pool_id = self._create_test_address_pool(name, subnet)['uuid'] + + ndict = self.get_post_object(network_type, address_pool_id) + response = self.post_json(self.API_PREFIX, + ndict, + headers=self.API_HEADERS) + + # Check HTTP response is successful + self.assertEqual('application/json', response.content_type) + self.assertEqual(response.status_code, http_client.OK) + + # Check that an expected field matches. + self.assertEqual(response.json[self.COMMON_FIELD], + ndict[self.COMMON_FIELD]) + + uuid = response.json['uuid'] + # Verify that the object was created and some basic attribute matches + response = self.get_json(self.get_single_url(uuid)) + self.assertEqual(response[self.COMMON_FIELD], + ndict[self.COMMON_FIELD]) + + def _test_create_network_fail_duplicate(self, name, network_type, subnet): + # Test creation of object + + address_pool_id = self._create_test_address_pool(name, subnet)['uuid'] + + ndict = self.get_post_object(network_type, address_pool_id) + response = self.post_json(self.API_PREFIX, + ndict, + headers=self.API_HEADERS) + + # Check HTTP response is successful + self.assertEqual('application/json', response.content_type) + self.assertEqual(response.status_code, http_client.OK) + + response = self.post_json(self.API_PREFIX, + ndict, + headers=self.API_HEADERS, + expect_errors=True) + + # Check HTTP response is failed + self.assertEqual('application/json', response.content_type) + self.assertEqual(response.status_code, http_client.CONFLICT) + self.assertIn("Network of type %s already exists." % network_type, + response.json['error_message']) + + def test_create_success_pxeboot(self): + self._test_create_network_success( + 'pxeboot', + constants.NETWORK_TYPE_PXEBOOT, + self.pxeboot_subnet) + + def test_create_success_management(self): + self._test_create_network_success( + 'management', + constants.NETWORK_TYPE_MGMT, + self.mgmt_subnet) + + def test_create_success_oam(self): + self._test_create_network_success( + 'oam', + constants.NETWORK_TYPE_OAM, + self.oam_subnet) + + def test_create_oam_calls_reconfigure_service_endpoints(self): + self._create_test_host(constants.CONTROLLER) + m = mock.Mock() + reconfigure_service_endpoints = "sysinv.conductor.rpcapi." \ + "ConductorAPI." 
\ + "reconfigure_service_endpoints" + with mock.patch(reconfigure_service_endpoints, + m.reconfigure_service_endpoints): + self._test_create_network_success( + 'oam', + constants.NETWORK_TYPE_OAM, + self.oam_subnet) + m.reconfigure_service_endpoints.assert_called_once() + + def test_create_success_cluster_host(self): + self._test_create_network_success( + 'cluster-host', + constants.NETWORK_TYPE_CLUSTER_HOST, + self.cluster_host_subnet) + + def test_create_success_cluster_pod(self): + self._test_create_network_success( + 'cluster-pod', + constants.NETWORK_TYPE_CLUSTER_POD, + self.cluster_pod_subnet) + + def test_create_success_cluster_service(self): + self._test_create_network_success( + 'cluster-service', + constants.NETWORK_TYPE_CLUSTER_SERVICE, + self.cluster_service_subnet) + + def test_create_fail_duplicate_pxeboot(self): + self._test_create_network_fail_duplicate( + 'pxeboot', + constants.NETWORK_TYPE_PXEBOOT, + self.pxeboot_subnet) + + def test_create_fail_duplicate_management(self): + self._test_create_network_fail_duplicate( + 'management', + constants.NETWORK_TYPE_MGMT, + self.mgmt_subnet) + + def test_create_fail_duplicate_oam(self): + self._test_create_network_fail_duplicate( + 'oam', + constants.NETWORK_TYPE_OAM, + self.oam_subnet) + + def test_create_fail_duplicate_cluster_host(self): + self._test_create_network_fail_duplicate( + 'cluster-host', + constants.NETWORK_TYPE_CLUSTER_HOST, + self.cluster_host_subnet) + + def test_create_fail_duplicate_cluster_pod(self): + self._test_create_network_fail_duplicate( + 'cluster-pod', + constants.NETWORK_TYPE_CLUSTER_POD, + self.cluster_pod_subnet) + + def test_create_fail_duplicate_cluster_service(self): + self._test_create_network_fail_duplicate( + 'cluster-service', + constants.NETWORK_TYPE_CLUSTER_SERVICE, + self.cluster_service_subnet) + + def test_create_with_invalid_type(self): + # Test creation with an invalid type + address_pool_id = self._create_test_address_pool( + 'management', + self.mgmt_subnet + )['uuid'] + ndict = self.get_post_object(constants.NETWORK_TYPE_DATA, + address_pool_id) + ndict['type'] = constants.NETWORK_TYPE_DATA + response = self.post_json(self.API_PREFIX, + ndict, + headers=self.API_HEADERS, + expect_errors=True) + + # Check HTTP response is failed + self.assertEqual('application/json', response.content_type) + self.assertEqual(response.status_code, + http_client.INTERNAL_SERVER_ERROR) + self.assertIn("Network type data not supported", + response.json['error_message']) + + def test_create_with_invalid_additional_attributes(self): + # Test creation with an invalid attribute called 'foo' + address_pool_id = self._create_test_address_pool( + 'management', + self.mgmt_subnet + )['uuid'] + ndict = self.get_post_object(constants.NETWORK_TYPE_MGMT, + address_pool_id) + ndict['foo'] = 'some value' + response = self.post_json(self.API_PREFIX, + ndict, + headers=self.API_HEADERS, + expect_errors=True) + + # Check HTTP response is failed + self.assertEqual('application/json', response.content_type) + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Unknown attribute for argument network: foo", + response.json['error_message']) + + +class TestDelete(NetworkTestCase): + """ Tests deletion. + Typically delete APIs return NO CONTENT. + python2 and python3 libraries may return different + content_type (None, or empty json) when NO_CONTENT returned. 
+ """ + + def setUp(self): + super(TestDelete, self).setUp() + + def _test_delete_allowed(self, network_type): + # Delete the API object + self.delete_object = self._create_db_object(network_type=network_type) + uuid = self.delete_object.uuid + response = self.delete(self.get_single_url(uuid), + headers=self.API_HEADERS) + + # Verify the expected API response for the delete + self.assertEqual(response.status_code, http_client.NO_CONTENT) + + def _test_delete_after_initial_config_not_allowed(self, network_type): + # Delete the API object + self.delete_object = self._create_db_object(network_type=network_type) + with mock.patch('sysinv.common.utils.is_initial_config_complete', + lambda: True): + uuid = self.delete_object.uuid + response = self.delete(self.get_single_url(uuid), + headers=self.API_HEADERS, + expect_errors=True) + + # Verify the expected API response for the delete + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + expected_error = ("Cannot delete type %s network %s after" + " initial configuration completion" % + (network_type, uuid)) + self.assertIn(expected_error, response.json['error_message']) + + def _test_delete_after_initial_config_allowed(self, network_type): + # Delete the API object + self.delete_object = self._create_db_object(network_type=network_type) + with mock.patch('sysinv.common.utils.is_initial_config_complete', + lambda: True): + uuid = self.delete_object.uuid + response = self.delete(self.get_single_url(uuid), + headers=self.API_HEADERS) + + # Verify the expected API response for the delete + self.assertEqual(response.status_code, http_client.NO_CONTENT) + + def test_delete_pxeboot(self): + self._test_delete_allowed(constants.NETWORK_TYPE_PXEBOOT) + + def test_delete_pxeboot_after_initial_config(self): + self._test_delete_after_initial_config_not_allowed( + constants.NETWORK_TYPE_PXEBOOT + ) + + def test_delete_management(self): + self._test_delete_allowed(constants.NETWORK_TYPE_MGMT) + + def test_delete_management_after_initial_config(self): + self._test_delete_after_initial_config_not_allowed( + constants.NETWORK_TYPE_MGMT + ) + + def test_delete_oam(self): + self._test_delete_allowed(constants.NETWORK_TYPE_OAM) + + def test_delete_oam_after_initial_config(self): + self._test_delete_after_initial_config_not_allowed( + constants.NETWORK_TYPE_OAM + ) + + def test_delete_cluster_host(self): + self._test_delete_allowed(constants.NETWORK_TYPE_CLUSTER_HOST) + + def test_delete_cluster_host_after_initial_config(self): + self._test_delete_after_initial_config_not_allowed( + constants.NETWORK_TYPE_CLUSTER_HOST + ) + + def test_delete_cluster_pod(self): + self._test_delete_allowed(constants.NETWORK_TYPE_CLUSTER_POD) + + def test_delete_cluster_pod_after_initial_config(self): + self._test_delete_after_initial_config_not_allowed( + constants.NETWORK_TYPE_CLUSTER_POD + ) + + def test_delete_cluster_service(self): + self._test_delete_allowed(constants.NETWORK_TYPE_CLUSTER_SERVICE) + + def test_delete_cluster_service_after_initial_config(self): + self._test_delete_after_initial_config_not_allowed( + constants.NETWORK_TYPE_CLUSTER_SERVICE + ) + + def test_delete_data(self): + self._test_delete_allowed(constants.NETWORK_TYPE_DATA) + + def test_delete_data_after_initial_config(self): + self._test_delete_after_initial_config_allowed( + constants.NETWORK_TYPE_DATA + ) + + +class TestList(NetworkTestCase): + """ Network list operations + """ + + def setUp(self): + super(TestList, self).setUp() + + def test_empty_list(self): + response = 
self.get_json(self.API_PREFIX) + self.assertEqual([], response[self.RESULT_KEY]) + + def test_single_entry(self): + # create a single object + self.single_object = self._create_db_object() + response = self.get_json(self.API_PREFIX) + self.assertEqual(1, len(response[self.RESULT_KEY])) + + +class TestPatch(NetworkTestCase): + patch_path = '/dynamic' + patch_field = 'dynamic' + patch_value = False + + def setUp(self): + super(TestPatch, self).setUp() + self.patch_object = self._create_db_object() + + def test_patch_not_allowed(self): + # Try and patch an unmodifiable value + + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': '/junk_field', + 'value': self.patch_value, + 'op': 'replace'}], + headers=self.API_HEADERS, + expect_errors=True) + + # Verify the expected API response + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, http_client.METHOD_NOT_ALLOWED) + self.assertIn("The method PATCH is not allowed for this resource.", + response.json['error_message']) + + +class IPv4TestPost(TestPostMixin, + NetworkTestCase): + pass + + +class IPv6TestPost(TestPostMixin, + dbbase.BaseIPv6Mixin, + NetworkTestCase): + pass diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_ntp.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_ntp.py new file mode 100644 index 0000000000..d56684a7fd --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_ntp.py @@ -0,0 +1,332 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +""" +Tests for the API / ntp / methods. +""" + +import mock +from six.moves import http_client +from sysinv.tests.api import base +from sysinv.tests.db import base as dbbase +from sysinv.tests.db import utils as dbutils + + +class FakeConductorAPI(object): + + def __init__(self): + self.update_ntp_config = mock.MagicMock() + + +class FakeException(Exception): + pass + + +class ApiNTPTestCaseMixin(object): + + # API_HEADERS are a generic header passed to most API calls + API_HEADERS = {'User-Agent': 'sysinv-test'} + + # API_PREFIX is the prefix for the URL + API_PREFIX = '/intp' + + # RESULT_KEY is the python table key for the list of results + RESULT_KEY = 'intps' + + # COMMON_FIELD is a field that is known to exist for inputs and outputs + COMMON_FIELD = 'ntpservers' + + # expected_api_fields are attributes that should be populated by + # an API query + expected_api_fields = ['uuid', + 'ntpservers', + 'isystem_uuid'] + + # hidden_api_fields are attributes that should not be populated by + # an API query + hidden_api_fields = ['forisystemid'] + + def setUp(self): + super(ApiNTPTestCaseMixin, self).setUp() + self.fake_conductor_api = FakeConductorAPI() + p = mock.patch('sysinv.conductor.rpcapi.ConductorAPI') + self.mock_conductor_api = p.start() + self.mock_conductor_api.return_value = self.fake_conductor_api + self.addCleanup(p.stop) + + def get_single_url(self, uuid): + return '%s/%s' % (self.API_PREFIX, uuid) + + def _create_db_object(self, obj_id=None): + return dbutils.create_test_ntp(id=obj_id, + forisystemid=self.system.id, + ntpservers='0.pool.ntp.org,1.pool.ntp.org') + + +class ApiNTPPostTestSuiteMixin(ApiNTPTestCaseMixin): + """ NTP post operations + """ + def setUp(self): + super(ApiNTPPostTestSuiteMixin, self).setUp() + + def get_post_object(self): + return dbutils.post_get_test_ntp(forisystemid=self.system.id, + ntpservers='0.pool.ntp.org,1.pool.ntp.org') + + # Test that a valid POST operation is blocked by the API + # API should return 400 
(BAD_REQUEST) or 403 (FORBIDDEN); 403 is what is asserted below
+    def test_create_not_allowed(self):
+        ndict = self.get_post_object()
+        response = self.post_json(self.API_PREFIX,
+                                  ndict,
+                                  headers=self.API_HEADERS,
+                                  expect_errors=True)
+        self.assertEqual(response.status_code, http_client.FORBIDDEN)
+        self.assertTrue(response.json['error_message'])
+        self.assertIn("Operation not permitted.",
+                      response.json['error_message'])
+
+
+class ApiNTPDeleteTestSuiteMixin(ApiNTPTestCaseMixin):
+    """ NTP delete operations
+    """
+    def setUp(self):
+        super(ApiNTPDeleteTestSuiteMixin, self).setUp()
+        self.delete_object = self._create_db_object()
+
+    # Test that a valid DELETE operation is blocked by the API
+    def test_delete_not_allowed(self):
+        uuid = self.delete_object.uuid
+        response = self.delete(self.get_single_url(uuid),
+                               headers=self.API_HEADERS,
+                               expect_errors=True)
+        self.assertEqual(response.status_code, http_client.FORBIDDEN)
+        self.assertTrue(response.json['error_message'])
+        self.assertIn("Operation not permitted.",
+                      response.json['error_message'])
+
+
+class ApiNTPPatchTestSuiteMixin(ApiNTPTestCaseMixin):
+    """ NTP patch operations
+    """
+    patch_path_ntpserver = '/ntpservers'
+    patch_path_action = '/action'
+    patch_field = 'ntpservers'
+    patch_value = '0.pool.ntp.org'
+    patch_value_no_change = '0.pool.ntp.org,1.pool.ntp.org'
+    patch_value_exceeds_max = '0.pool.ntp.org,1.pool.ntp.org,2.pool.ntp.org,3.pool.ntp.org'
+    patch_value_invalid_hostname = '-invalid.hostname'
+
+    def setUp(self):
+        super(ApiNTPPatchTestSuiteMixin, self).setUp()
+        self.patch_object = self._create_db_object()
+
+    def exception_ntp(self):
+        print('Raised a fake exception')
+        raise FakeException
+
+    def test_patch_invalid_field(self):
+        # Pass a non-existent field to be patched by the API
+        response = self.patch_json(self.get_single_url(self.patch_object.uuid),
+                                   [{'path': '/junk_field',
+                                     'value': self.patch_value,
+                                     'op': 'replace'}],
+                                   headers=self.API_HEADERS,
+                                   expect_errors=True)
+        self.assertEqual(response.content_type, 'application/json')
+        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
+
+    def test_patch_no_change(self):
+        # Ensure no NTP config changes are made when the same value is passed
+        response = self.patch_json(self.get_single_url(self.patch_object.uuid),
+                                   [{'path': self.patch_path_ntpserver,
+                                     'value': self.patch_value_no_change,
+                                     'op': 'replace'}],
+                                   headers=self.API_HEADERS)
+        self.assertEqual(response.content_type, 'application/json')
+        self.assertEqual(response.status_code, http_client.OK)
+
+        # Verify that the attribute remains unchanged
+        response = self.get_json(self.get_single_url(self.patch_object.uuid))
+        self.assertEqual(response[self.patch_field], self.patch_value_no_change)
+
+        # Verify that the method that updates ntp config is not called
+        self.fake_conductor_api.update_ntp_config.assert_not_called()
+
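The semantic rules asserted by the remaining NTP patch tests (at most 3
servers, valid hostnames, non-empty list) can be summarized in one place;
a minimal standalone sketch of those checks (an illustration built from the
asserted messages, not sysinv's implementation):

    import re

    MAX_NTP_SERVERS = 3
    # simple RFC-1123-style hostname check; labels must not begin or end
    # with '-' and may only contain letters, digits and '-'
    HOSTNAME_RE = re.compile(
        r'^(?!-)[A-Za-z0-9-]{1,63}(?<!-)(\.(?!-)[A-Za-z0-9-]{1,63}(?<!-))*$')

    def validate_ntp_servers(csv_value):
        servers = [s for s in csv_value.split(',') if s]
        if not servers:
            raise ValueError("No NTP parameters provided.")
        if len(servers) > MAX_NTP_SERVERS:
            raise ValueError("Maximum NTP servers supported: %d but "
                             "provided: %d." % (MAX_NTP_SERVERS, len(servers)))
        for server in servers:
            if not HOSTNAME_RE.match(server):
                raise ValueError("Please configure valid hostname.")

    validate_ntp_servers('0.pool.ntp.org,1.pool.ntp.org')  # accepted
    for bad in ('',
                '0.pool.ntp.org,1.pool.ntp.org,2.pool.ntp.org,3.pool.ntp.org',
                '-invalid.hostname'):
        try:
            validate_ntp_servers(bad)
        except ValueError as exc:
            print(exc)

+    def test_patch_exception(self):
+        # Raise an exception and ensure the NTP configuration is not updated
+        self.fake_conductor_api.update_ntp_config.side_effect = self.exception_ntp
+        response = self.patch_json(self.get_single_url(self.patch_object.uuid),
+                                   [{'path': self.patch_path_ntpserver,
+                                     'value': self.patch_value,
+                                     'op': 'replace'},
+                                    {"path": self.patch_path_action,
+                                     "value": "apply",
+                                     "op": "replace"}],
+                                   headers=self.API_HEADERS,
+                                   expect_errors=True)
+        self.assertEqual(response.content_type, 'application/json')
+
+        # Verify appropriate exception is raised
+        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
+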
self.assertIn("Failed to update the NTP configuration", response.json['error_message']) + + # Verify that the attribute was not updated + response = self.get_json(self.get_single_url(self.patch_object.uuid)) + self.assertNotEqual(response[self.patch_field], self.patch_value) + + def test_patch_valid_ntpserver(self): + # Update value of patchable field + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': self.patch_path_ntpserver, + 'value': self.patch_value, + 'op': 'replace'}, + {'path': self.patch_path_action, + 'value': 'apply', + 'op': 'replace'}], + headers=self.API_HEADERS) + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, http_client.OK) + + # Verify that the attribute was updated + response = self.get_json(self.get_single_url(self.patch_object.uuid)) + self.assertEqual(response[self.patch_field], self.patch_value) + + # Verify that the method that updates ntp config is called once + self.fake_conductor_api.update_ntp_config.assert_called_once() + + def test_patch_exceeds_max_ntpservers(self): + # Update value of patchable field + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': self.patch_path_ntpserver, + 'value': self.patch_value_exceeds_max, + 'op': 'replace'}, + {'path': self.patch_path_action, + 'value': 'apply', + 'op': 'replace'}], + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Maximum NTP servers supported: 3 but provided: 4. Please configure a valid list of NTP servers.", + response.json["error_message"]) + + def test_patch_invalid_hostname(self): + # Update value of patchable field + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': self.patch_path_ntpserver, + 'value': self.patch_value_invalid_hostname, + 'op': 'replace'}, + {'path': self.patch_path_action, + 'value': 'apply', + 'op': 'replace'}], + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Please configure valid hostname.", response.json["error_message"]) + + def test_patch_invalid_value(self): + # Pass a value that fails a semantic check when patched by the API + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': self.patch_path_ntpserver, + 'value': 'invalid_list', + 'op': 'replace'}], + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("Please configure valid hostname.", response.json["error_message"]) + + def test_patch_empty_list(self): + # Pass a value that fails a semantic check when patched by the API + response = self.patch_json(self.get_single_url(self.patch_object.uuid), + [{'path': self.patch_path_ntpserver, + 'value': '', + 'op': 'replace'}], + headers=self.API_HEADERS, + expect_errors=True) + self.assertEqual(response.content_type, 'application/json') + + # Verify appropriate exception is raised + self.assertEqual(response.status_code, http_client.BAD_REQUEST) + self.assertIn("No NTP parameters provided.", response.json["error_message"]) + + +class 
ApiNTPListTestSuiteMixin(ApiNTPTestCaseMixin): + """ NTP GET operations + """ + + def setUp(self): + super(ApiNTPListTestSuiteMixin, self).setUp() + self.ntp_uuid = self.ntp.uuid + + def test_fetch_ntp_object(self): + response = self.get_json(self.API_PREFIX) + self.assertEqual(response[self.RESULT_KEY][0]['uuid'], self.ntp_uuid) + + +# ============= IPv4 environment tests ============== +# Tests NTP Api operations for a Controller (defaults to IPv4) +class PlatformIPv4ControllerApiNTPPatchTestCase(ApiNTPPatchTestSuiteMixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv4ControllerApiNTPListTestCase(ApiNTPListTestSuiteMixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv4ControllerApiNTPPostTestCase(ApiNTPPostTestSuiteMixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv4ControllerApiNTPDeleteTestCase(ApiNTPDeleteTestSuiteMixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +# ============= IPv6 environment tests ============== +# Tests NTP Api operations for a Controller (defaults to IPv6) +class PlatformIPv6ControllerApiNTPPatchTestCase(ApiNTPPatchTestSuiteMixin, + dbbase.BaseIPv6Mixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv6ControllerApiNTPListTestCase(ApiNTPListTestSuiteMixin, + dbbase.BaseIPv6Mixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv6ControllerApiNTPPostTestCase(ApiNTPPostTestSuiteMixin, + dbbase.BaseIPv6Mixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv6ControllerApiNTPDeleteTestCase(ApiNTPDeleteTestSuiteMixin, + dbbase.BaseIPv6Mixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_oamnetwork.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_oamnetwork.py new file mode 100644 index 0000000000..a1fd2a58cf --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_oamnetwork.py @@ -0,0 +1,362 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +""" +Tests for the API / oamnetwork / methods. 
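+
+The PATCH cases in this module index into the shared subnet fixture:
+self.oam_subnet[2] serves as the floating address, and [3] and [4] as the
+controller-0 and controller-1 unit addresses (see test_patch_same_address
+below), so a typical payload looks like:
+
+    patch_obj = {'oam_floating_ip': str(self.oam_subnet[2]),
+                 'oam_c0_ip': str(self.oam_subnet[3]),
+                 'oam_c1_ip': str(self.oam_subnet[4])}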
+""" + +import mock +from six.moves import http_client + +from oslo_utils import uuidutils + +from sysinv.tests.api import base +from sysinv.tests.db import base as dbbase + + +class OAMNetworkTestCase(base.FunctionalTest, dbbase.BaseHostTestCase): + + # API_HEADERS are a generic header passed to most API calls + API_HEADERS = {'User-Agent': 'sysinv-test'} + + # API_PREFIX is the prefix for the URL + API_PREFIX = '/iextoam' + + # RESULT_KEY is the python table key for the list of results + RESULT_KEY = 'iextoam' + + # expected_api_fields are attributes that should be populated by + # an API query + expected_api_fields = ['uuid', + 'oam_subnet', + 'oam_gateway_ip', + 'oam_floating_ip', + 'oam_c0_ip', + 'oam_c1_ip', + 'region_config', + 'oam_start_ip', + 'oam_end_ip', + 'isystem_uuid', + 'created_at', + 'updated_at'] + + # hidden_api_fields are attributes that should not be populated by + # an API query + hidden_api_fields = [] + + def setUp(self): + super(OAMNetworkTestCase, self).setUp() + + def get_single_url(self, uuid): + return '%s/%s' % (self.API_PREFIX, uuid) + + def assert_fields(self, api_object): + # check the uuid is a uuid + assert(uuidutils.is_uuid_like(api_object['uuid'])) + + # Verify that expected attributes are returned + for field in self.expected_api_fields: + self.assertIn(field, api_object) + + # Verify that hidden attributes are not returned + for field in self.hidden_api_fields: + self.assertNotIn(field, api_object) + + +class TestPost(OAMNetworkTestCase): + + def setUp(self): + super(TestPost, self).setUp() + + def test_post_not_allowed(self): + response = self.post_json(self.API_PREFIX, + {}, + headers=self.API_HEADERS, + expect_errors=True) + + # Verify the expected API response + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, http_client.FORBIDDEN) + self.assertIn("Operation not permitted.", + response.json['error_message']) + + +class TestDeleteMixin(OAMNetworkTestCase): + """ Tests deletion. + Typically delete APIs return NO CONTENT. + python2 and python3 libraries may return different + content_type (None, or empty json) when NO_CONTENT returned. 
+ """ + + def setUp(self): + super(TestDeleteMixin, self).setUp() + + def test_delete_not_allowed(self): + # Delete the API object + response = self.delete(self.get_single_url(self.oam.uuid), + headers=self.API_HEADERS, + expect_errors=True) + + # Verify the expected API response + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, http_client.FORBIDDEN) + self.assertIn("Operation not permitted.", + response.json['error_message']) + + +class TestListMixin(OAMNetworkTestCase): + + def setUp(self): + super(TestListMixin, self).setUp() + + def test_get(self): + response = self.get_json(self.get_single_url(self.oam.uuid), + headers=self.API_HEADERS) + + # Verify the expected API response + self.assertEqual(response['oam_start_ip'], + str(self.oam_subnet[2])) + + def test_list(self): + response = self.get_json(self.get_single_url(""), + headers=self.API_HEADERS) + # Verify the expected API response + self.assertEqual(response['iextoams'][0]['oam_start_ip'], + str(self.oam_subnet[2])) + + +class TestPatchMixin(OAMNetworkTestCase): + + def setUp(self): + super(TestPatchMixin, self).setUp() + + def _test_patch_success(self, patch_obj): + # Patch the API object + m = mock.Mock() + update_oam_config = \ + "sysinv.conductor.rpcapi.ConductorAPI.update_oam_config" + with mock.patch(update_oam_config, m.update_oam_config): + response = self.patch_dict_json(self.get_single_url(self.oam.uuid), + headers=self.API_HEADERS, **patch_obj) + + # Verify the expected API response + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, http_client.OK) + m.update_oam_config.assert_called_once() + + def _test_patch_fail(self, patch_obj, status_code, error_message): + # Patch the API object + m = mock.Mock() + update_oam_config = \ + "sysinv.conductor.rpcapi.ConductorAPI.update_oam_config" + with mock.patch(update_oam_config, m.update_oam_config): + response = self.patch_dict_json(self.get_single_url(self.oam.uuid), + headers=self.API_HEADERS, + expect_errors=True, + **patch_obj) + + # Verify the expected API response + self.assertEqual(response.content_type, 'application/json') + self.assertEqual(response.status_code, status_code) + self.assertIn(error_message, response.json['error_message']) + m.update_oam_config.assert_not_called() + + def test_patch_same_address(self): + oam_floating_ip = self.oam_subnet[2] + oam_c0_ip = self.oam_subnet[3] + oam_c1_ip = self.oam_subnet[4] + patch_obj = { + 'oam_floating_ip': str(oam_floating_ip), + 'oam_c0_ip': str(oam_c0_ip), + 'oam_c1_ip': str(oam_c1_ip), + } + self._test_patch_success(patch_obj) + + def test_patch_new_address(self): + oam_floating_ip = self.oam_subnet[2] + 100 + oam_c0_ip = self.oam_subnet[3] + 100 + oam_c1_ip = self.oam_subnet[4] + 100 + patch_obj = { + 'oam_floating_ip': str(oam_floating_ip), + 'oam_c0_ip': str(oam_c0_ip), + 'oam_c1_ip': str(oam_c1_ip), + } + self._test_patch_success(patch_obj) + + def test_patch_new_address_in_range(self): + oam_start_ip = self.oam_subnet[1] + oam_end_ip = self.oam_subnet[128] + oam_floating_ip = self.oam_subnet[2] + 100 + oam_c0_ip = self.oam_subnet[3] + 100 + oam_c1_ip = self.oam_subnet[4] + 100 + patch_obj = { + 'oam_start_ip': str(oam_start_ip), + 'oam_end_ip': str(oam_end_ip), + 'oam_floating_ip': str(oam_floating_ip), + 'oam_c0_ip': str(oam_c0_ip), + 'oam_c1_ip': str(oam_c1_ip), + } + self._test_patch_success(patch_obj) + + def test_patch_incomplete(self): + oam_floating_ip = self.oam_subnet[2] + 100 + patch_obj = { + 
'oam_floating_ip': str(oam_floating_ip), + } + self._test_patch_fail(patch_obj, http_client.BAD_REQUEST, + "Invalid address None") + + def test_patch_change_family(self): + oam_floating_ip = self.change_family_oam_subnet[2] + oam_c0_ip = self.change_family_oam_subnet[3] + oam_c1_ip = self.change_family_oam_subnet[4] + patch_obj = { + 'oam_floating_ip': str(oam_floating_ip), + 'oam_c0_ip': str(oam_c0_ip), + 'oam_c1_ip': str(oam_c1_ip), + } + self._test_patch_fail(patch_obj, http_client.BAD_REQUEST, + "Invalid IP version") + + def test_patch_duplicate_address(self): + oam_floating_ip = self.oam_subnet[2] + oam_c0_ip = self.oam_subnet[3] + oam_c1_ip = self.oam_subnet[3] + patch_obj = { + 'oam_floating_ip': str(oam_floating_ip), + 'oam_c0_ip': str(oam_c0_ip), + 'oam_c1_ip': str(oam_c1_ip), + } + self._test_patch_fail(patch_obj, http_client.BAD_REQUEST, + "must be unique") + + def test_patch_oam_floating_ip_out_of_subnet(self): + oam_floating_ip = self.oam_subnet[2] - 100 + oam_c0_ip = self.oam_subnet[3] + oam_c1_ip = self.oam_subnet[4] + patch_obj = { + 'oam_floating_ip': str(oam_floating_ip), + 'oam_c0_ip': str(oam_c0_ip), + 'oam_c1_ip': str(oam_c1_ip), + } + error_message = "IP Address %s is not in subnet" % str(oam_floating_ip) + self._test_patch_fail(patch_obj, http_client.BAD_REQUEST, + error_message) + + def test_patch_oam_c0_ip_out_of_subnet(self): + oam_floating_ip = self.oam_subnet[2] + oam_c0_ip = self.oam_subnet[3] - 100 + oam_c1_ip = self.oam_subnet[4] + patch_obj = { + 'oam_floating_ip': str(oam_floating_ip), + 'oam_c0_ip': str(oam_c0_ip), + 'oam_c1_ip': str(oam_c1_ip), + } + error_message = "IP Address %s is not in subnet" % str(oam_c0_ip) + self._test_patch_fail(patch_obj, http_client.BAD_REQUEST, + error_message) + + def test_patch_oam_c1_ip_out_of_subnet(self): + oam_floating_ip = self.oam_subnet[2] + oam_c0_ip = self.oam_subnet[3] + oam_c1_ip = self.oam_subnet[4] - 100 + patch_obj = { + 'oam_floating_ip': str(oam_floating_ip), + 'oam_c0_ip': str(oam_c0_ip), + 'oam_c1_ip': str(oam_c1_ip), + } + error_message = "IP Address %s is not in subnet" % str(oam_c1_ip) + self._test_patch_fail(patch_obj, http_client.BAD_REQUEST, + error_message) + + def test_patch_oam_floating_ip_out_of_range(self): + oam_start_ip = self.oam_subnet[1] + oam_end_ip = self.oam_subnet[32] + oam_floating_ip = self.oam_subnet[2] + 100 + oam_c0_ip = self.oam_subnet[3] + oam_c1_ip = self.oam_subnet[4] + patch_obj = { + 'oam_start_ip': str(oam_start_ip), + 'oam_end_ip': str(oam_end_ip), + 'oam_floating_ip': str(oam_floating_ip), + 'oam_c0_ip': str(oam_c0_ip), + 'oam_c1_ip': str(oam_c1_ip), + } + error_message = ("Invalid oam_floating_ip=%s. Please configure a valid" + " IP address in range") % str(oam_floating_ip) + self._test_patch_fail(patch_obj, http_client.BAD_REQUEST, + error_message) + + def test_patch_oam_c0_ip_out_of_range(self): + oam_start_ip = self.oam_subnet[1] + oam_end_ip = self.oam_subnet[32] + oam_floating_ip = self.oam_subnet[2] + oam_c0_ip = self.oam_subnet[3] + 100 + oam_c1_ip = self.oam_subnet[4] + patch_obj = { + 'oam_start_ip': str(oam_start_ip), + 'oam_end_ip': str(oam_end_ip), + 'oam_floating_ip': str(oam_floating_ip), + 'oam_c0_ip': str(oam_c0_ip), + 'oam_c1_ip': str(oam_c1_ip), + + } + error_message = ("Invalid oam_c0_ip=%s. 
Please configure a valid"
+                         " IP address in range") % str(oam_c0_ip)
+        self._test_patch_fail(patch_obj, http_client.BAD_REQUEST,
+                              error_message)
+
+    def test_patch_oam_c1_ip_out_of_range(self):
+        oam_start_ip = self.oam_subnet[1]
+        oam_end_ip = self.oam_subnet[32]
+        oam_floating_ip = self.oam_subnet[2]
+        oam_c0_ip = self.oam_subnet[3]
+        oam_c1_ip = self.oam_subnet[4] + 100
+        patch_obj = {
+            'oam_start_ip': str(oam_start_ip),
+            'oam_end_ip': str(oam_end_ip),
+            'oam_floating_ip': str(oam_floating_ip),
+            'oam_c0_ip': str(oam_c0_ip),
+            'oam_c1_ip': str(oam_c1_ip),
+        }
+        error_message = ("Invalid oam_c1_ip=%s. Please configure a valid"
+                         " IP address in range") % str(oam_c1_ip)
+        self._test_patch_fail(patch_obj, http_client.BAD_REQUEST,
+                              error_message)
+
+
+class IPv4TestDelete(TestDeleteMixin,
+                     OAMNetworkTestCase):
+    pass
+
+
+class IPv6TestDelete(TestDeleteMixin,
+                     dbbase.BaseIPv6Mixin,
+                     OAMNetworkTestCase):
+    pass
+
+
+class IPv4TestList(TestListMixin,
+                   OAMNetworkTestCase):
+    pass
+
+
+class IPv6TestList(TestListMixin,
+                   dbbase.BaseIPv6Mixin,
+                   OAMNetworkTestCase):
+    pass
+
+
+class IPv4TestPatch(TestPatchMixin,
+                    OAMNetworkTestCase):
+    pass
+
+
+class IPv6TestPatch(TestPatchMixin,
+                    dbbase.BaseIPv6Mixin,
+                    OAMNetworkTestCase):
+    pass
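The in-range and out-of-subnet cases above lean on index arithmetic over
the subnet fixtures; a standalone sketch of that behaviour (assuming the
fixtures are netaddr networks, which the [index] and +/- offset operations
suggest):

    import netaddr

    oam_subnet = netaddr.IPNetwork('10.10.10.0/24')
    floating = oam_subnet[2]                 # 10.10.10.2
    assert floating + 100 in oam_subnet      # 10.10.10.102, still inside
    assert floating - 100 not in oam_subnet  # below the network address

diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_pv.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_pv.py
index 6130ba7838..d397a8a90a 100644
--- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_pv.py
+++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_pv.py
@@ -8,26 +8,18 @@
 Tests for the API / pv / methods.
 """

-import mock
-import webtest.app
 from six.moves import http_client
+import webtest.app

 from oslo_utils import uuidutils
 from sysinv.common import constants

 from sysinv.tests.api import base
+from sysinv.tests.db import base as dbbase
 from sysinv.tests.db import utils as dbutils


-class FakeConductorAPI(object):
-
-    def __init__(self, dbapi):
-        self.dbapi = dbapi
-        self.create_controller_filesystems = mock.MagicMock()
-
-
-class TestPV(base.FunctionalTest):
-
+class ApiPVTestCaseMixin(object):
     # can perform API operations on this object at a sublevel of host
     HOST_PREFIX = '/ihosts'

@@ -73,42 +65,11 @@ class TestPV(base.FunctionalTest):
     hidden_api_fields = ['forihostid']

     def setUp(self):
-        super(TestPV, self).setUp()
+        super(ApiPVTestCaseMixin, self).setUp()

-        # Mock the conductor API
-        self.fake_conductor_api = FakeConductorAPI(self.dbapi)
-        p = mock.patch('sysinv.conductor.rpcapi.ConductorAPI')
-        self.mock_conductor_api = p.start()
-        self.mock_conductor_api.return_value = self.fake_conductor_api
-        self.addCleanup(p.stop)
-
-        # Behave as if the API is running on controller-0
-        p = mock.patch('socket.gethostname')
-        self.mock_socket_gethostname = p.start()
-        self.mock_socket_gethostname.return_value = 'controller-0'
-        self.addCleanup(p.stop)
-
-        # Behave as if running on a virtual system
-        p = mock.patch('sysinv.common.utils.is_virtual')
-        self.mock_utils_is_virtual = p.start()
-        self.mock_utils_is_virtual.return_value = True
-        self.addCleanup(p.stop)
-
-        # Create an isystem and load
-        self.system = dbutils.create_test_isystem(
-            capabilities={"cinder_backend": constants.CINDER_BACKEND_CEPH,
-                          "vswitch_type": constants.VSWITCH_TYPE_NONE,
-                          "region_config": False,
-                          "sdn_enabled": False,
-                          "shared_services": "[]"}
-        )
-        self.load = dbutils.create_test_load()
-        # Create controller-0
-        self.ihost = self._create_controller_0()
-        # Create disk on the controller
-        self.disk = self._create_disk(self.ihost.id)
+        self.disk = self.disks.get(self.host.id)
 #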
Create logical volume group - self.lvg = self._create_lvg(self.ihost.id, + self.lvg = self._create_lvg(self.host.id, self.lvm_vg_name) def get_single_url(self, uuid): @@ -156,17 +117,9 @@ class TestPV(base.FunctionalTest): for field in self.hidden_api_fields: self.assertNotIn(field, api_object) - def get_post_object(self): - return dbutils.post_get_test_pv(forihostid=self.ihost.id, - forilvgid=self.lvg.id, - idisk_id=self.disk.id, - idisk_uuid=self.disk.uuid, - lvm_vg_name=self.lvm_vg_name, - disk_or_part_uuid=self.disk.uuid) - def _create_db_object(self, obj_id=None): return dbutils.create_test_pv(id=obj_id, - forihostid=self.ihost.id, + forihostid=self.host.id, forilvgid=self.lvg.id, idisk_id=self.disk.id, idisk_uuid=self.disk.uuid, @@ -174,10 +127,22 @@ class TestPV(base.FunctionalTest): disk_or_part_uuid=self.disk.uuid) -class TestPostPV(TestPV): +class ApiPVPostTestSuiteMixin(ApiPVTestCaseMixin): def setUp(self): - super(TestPostPV, self).setUp() + super(ApiPVPostTestSuiteMixin, self).setUp() + disk_1_path = '/dev/disk/by-path/pci-0000:00:0d.0-ata-2.0' + self.disk_1 = dbutils.create_test_idisk(device_node='/dev/sdb', + device_path=disk_1_path, + forihostid=self.host.id) + + def get_post_object(self): + return dbutils.post_get_test_pv(forihostid=self.host.id, + forilvgid=self.lvg.id, + idisk_id=self.disk_1.id, + idisk_uuid=self.disk_1.uuid, + lvm_vg_name=self.lvm_vg_name, + disk_or_part_uuid=self.disk_1.uuid) def test_create_success(self): # Test creation of object @@ -231,7 +196,7 @@ class TestPostPV(TestPV): headers=self.API_HEADERS) -class TestDeletePV(TestPV): +class ApiPVDeleteTestSuiteMixin(ApiPVTestCaseMixin): """ Tests deletion. Typically delete APIs return NO CONTENT. python2 and python3 libraries may return different @@ -239,8 +204,7 @@ class TestDeletePV(TestPV): """ def setUp(self): - super(TestDeletePV, self).setUp() - # create a partition + super(ApiPVDeleteTestSuiteMixin, self).setUp() self.delete_object = self._create_db_object() # The PV delete is not a blocking operation. 
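The refactoring in this file replaces the old self-contained TestPV class with ApiPVTestCaseMixin, a suite mixin that carries no environment setup of its own; the concrete PlatformIPv4Controller* classes further down compose each suite mixin with base.FunctionalTest and a dbbase host test case. A minimal sketch of that composition pattern, using plain unittest and hypothetical stand-in names rather than the real sysinv bases:

import unittest


class IPv4EnvMixin(object):
    # Hypothetical stand-in for an environment base such as
    # dbbase.ControllerHostTestCase
    ip_version = 4


class IPv6EnvMixin(object):
    ip_version = 6


class ListTestSuiteMixin(object):
    # The suite is written once and runs against whatever
    # environment mixin it is combined with
    def test_list(self):
        self.assertIn(self.ip_version, (4, 6))


class IPv4ListTestCase(ListTestSuiteMixin, IPv4EnvMixin, unittest.TestCase):
    pass


class IPv6ListTestCase(ListTestSuiteMixin, IPv6EnvMixin, unittest.TestCase):
    pass


if __name__ == '__main__':
    unittest.main()

Keeping the suite mixin a plain object subclass is deliberate: if it inherited from unittest.TestCase itself, the runner would also collect and execute its tests once more with no environment mixed in.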
@@ -256,12 +220,8 @@ class TestDeletePV(TestPV): self.assertEqual(response.status_code, http_client.NO_CONTENT) -class TestListPVs(TestPV): - """ PV list operations - """ - - def setUp(self): - super(TestListPVs, self).setUp() +class ApiPVListTestSuiteMixin(ApiPVTestCaseMixin): + """ list operations """ def test_empty_list(self): response = self.get_json(self.API_PREFIX) @@ -278,7 +238,7 @@ class TestListPVs(TestPV): self.single_object = self._create_db_object() # Querying the URL scoped by host - response = self.get_json(self.get_host_scoped_url(self.ihost.uuid)) + response = self.get_json(self.get_host_scoped_url(self.host.uuid)) self.assertEqual(1, len(response[self.RESULT_KEY])) # Check the single result @@ -292,7 +252,7 @@ class TestListPVs(TestPV): loop_object = self._create_db_object(obj_id=obj_id) result_list.append(loop_object['uuid']) - response = self.get_json(self.get_host_scoped_url(self.ihost.uuid)) + response = self.get_json(self.get_host_scoped_url(self.host.uuid)) self.assertEqual(len(result_list), len(response[self.RESULT_KEY])) # Verify that the sorted list of uuids is the same @@ -300,13 +260,14 @@ class TestListPVs(TestPV): self.assertEqual(result_list.sort(), uuids.sort()) -class TestPatchPV(TestPV): +class ApiPVPatchTestSuiteMixin(ApiPVTestCaseMixin): + """ patch operations """ patch_path = '/lvm_pe_alloced' patch_field = 'lvm_pe_alloced' patch_value = 2 def setUp(self): - super(TestPatchPV, self).setUp() + super(ApiPVPatchTestSuiteMixin, self).setUp() self.patch_object = self._create_db_object() def test_patch_invalid_field(self): @@ -347,3 +308,29 @@ class TestPatchPV(TestPV): expect_errors=True) self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.status_code, http_client.BAD_REQUEST) + + +# ============= IPv4 environment tests ============== +# Tests PV Api operations for a Controller (defaults to IPv4) +class PlatformIPv4ControllerApiPVPatchTestCase(ApiPVPatchTestSuiteMixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv4ControllerApiPVListTestCase(ApiPVListTestSuiteMixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv4ControllerApiPVPostTestCase(ApiPVPostTestSuiteMixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass + + +class PlatformIPv4ControllerApiPVDeleteTestCase(ApiPVDeleteTestSuiteMixin, + base.FunctionalTest, + dbbase.ControllerHostTestCase): + pass diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_storage_tier.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_storage_tier.py index f3057e2b63..442a732dd7 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_storage_tier.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_storage_tier.py @@ -759,12 +759,12 @@ class StorageTierDependentTCs(base.FunctionalTest): mock.patch.object(rpcapi.ConductorAPI, 'configure_osd_istor')) as ( mock_mon_status, mock_backend_configured, mock_osd): - def fake_configure_osd_istor(context, istor_obj): + def fake_configure_osd_istor_1(context, istor_obj): istor_obj['osdid'] = 1 return istor_obj mock_mon_status.return_value = [3, 2, ['controller-0', 'controller-1', 'storage-0']] - mock_osd.side_effect = fake_configure_osd_istor + mock_osd.side_effect = fake_configure_osd_istor_1 response = self.post_json('/istors', values, expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) @@ -783,12 +783,12 @@ class StorageTierDependentTCs(base.FunctionalTest): mock.patch.object(rpcapi.ConductorAPI, 'configure_osd_istor')) as ( 
mock_mon_status, mock_backend_configured, mock_osd): - def fake_configure_osd_istor(context, istor_obj): + def fake_configure_osd_istor_2(context, istor_obj): istor_obj['osdid'] = 1 return istor_obj mock_mon_status.return_value = [3, 2, ['controller-0', 'controller-1', 'storage-0']] - mock_osd.side_effect = fake_configure_osd_istor + mock_osd.side_effect = fake_configure_osd_istor_2 response = self.post_json('/istors', values, expect_errors=True) self.assertEqual(http_client.OK, response.status_int) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_upgrade.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_upgrade.py new file mode 100644 index 0000000000..b4539a7ecb --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_upgrade.py @@ -0,0 +1,93 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +""" +Tests for the API /upgrade/ methods. +""" + +import mock +from six.moves import http_client + +from sysinv.common import constants +from sysinv.common import kubernetes + +from sysinv.tests.api import base +from sysinv.tests.db import base as dbbase +from sysinv.tests.db import utils as dbutils + + +class FakeConductorAPI(object): + + def __init__(self): + self.start_upgrade = mock.MagicMock() + self.get_system_health_return = (True, "System is super healthy") + + def get_system_health(self, context, force=False, upgrade=False): + if force: + return True, "System is healthy because I was forced to say that" + else: + return self.get_system_health_return + + +class TestUpgrade(base.FunctionalTest, dbbase.BaseSystemTestCase): + + def setUp(self): + super(TestUpgrade, self).setUp() + + # Mock the Conductor API + self.fake_conductor_api = FakeConductorAPI() + p = mock.patch('sysinv.conductor.rpcapi.ConductorAPI') + self.mock_conductor_api = p.start() + self.mock_conductor_api.return_value = self.fake_conductor_api + self.addCleanup(p.stop) + + # Behave as if the API is running on controller-0 + p = mock.patch('socket.gethostname') + self.mock_socket_gethostname = p.start() + self.mock_socket_gethostname.return_value = 'controller-0' + self.addCleanup(p.stop) + + +class TestPostUpgrade(TestUpgrade, dbbase.ControllerHostTestCase): + + def test_create(self): + # Create the to load + dbutils.create_test_load(software_version=dbutils.SW_VERSION_NEW, + compatible_version=dbutils.SW_VERSION, + state=constants.IMPORTED_LOAD_STATE) + + # Test creation of upgrade + create_dict = dbutils.get_test_upgrade() + result = self.post_json('/upgrade', create_dict, + headers={'User-Agent': 'sysinv-test'}) + + # Verify that the upgrade was started + self.fake_conductor_api.start_upgrade.assert_called_once() + + # Verify that the upgrade has the expected attributes + self.assertEqual(result.json['from_release'], dbutils.SW_VERSION) + self.assertEqual(result.json['to_release'], dbutils.SW_VERSION_NEW) + self.assertEqual(result.json['state'], constants.UPGRADE_STARTING) + + def test_create_kube_upgrade_exists(self): + # Test creation of upgrade when a kubernetes upgrade exists + dbutils.create_test_kube_upgrade( + from_version='v1.42.1', + to_version='v1.42.2', + state=kubernetes.KUBE_UPGRADING_FIRST_MASTER, + ) + + # Test creation of upgrade + create_dict = dbutils.get_test_upgrade() + result = self.post_json('/upgrade', create_dict, + headers={'User-Agent': 'sysinv-test'}, + expect_errors=True) + + # Verify the failure + self.assertEqual(result.content_type, 'application/json') + self.assertEqual(http_client.BAD_REQUEST, result.status_int) + 
self.assertIn("cannot be done while a kubernetes upgrade", + result.json['error_message']) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/base.py b/sysinv/sysinv/sysinv/sysinv/tests/base.py index f1532c9784..3449666ee2 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/base.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/base.py @@ -43,7 +43,6 @@ from sysinv.db import api as dbapi from sysinv.db import migration import sysinv.helm.utils from sysinv.objects import base as objects_base -from sysinv.openstack.common.fixture import moxstubout from sysinv.tests import conf_fixture from sysinv.tests import policy_fixture @@ -179,9 +178,6 @@ class TestCase(testtools.TestCase): objects_base.SysinvObject._obj_classes) self.addCleanup(self._restore_obj_registry) - mox_fixture = self.useFixture(moxstubout.MoxStubout()) - self.mox = mox_fixture.mox - self.stubs = mox_fixture.stubs self.addCleanup(self._clear_attrs) self.useFixture(fixtures.EnvironmentVariable('http_proxy')) self.policy = self.useFixture(policy_fixture.PolicyFixture()) @@ -229,9 +225,6 @@ class TestCase(testtools.TestCase): Use the monkey patch fixture to replace a function for the duration of a test. Useful when you want to provide fake methods instead of mocks during testing. - - This should be used instead of self.stubs.Set (which is based - on mox) going forward. """ self.useFixture(fixtures.MonkeyPatch(old, new)) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/common/test_health.py b/sysinv/sysinv/sysinv/sysinv/tests/common/test_health.py new file mode 100644 index 0000000000..10f478fada --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/tests/common/test_health.py @@ -0,0 +1,377 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +""" +Tests for the health utilities. 
+""" + +import kubernetes +import mock +import uuid + +from sysinv.common import constants +from sysinv.common import health +from sysinv.openstack.common import context + +from sysinv.tests.db import base as dbbase +from sysinv.tests.db import utils as dbutils + + +class TestHealth(dbbase.BaseHostTestCase): + + def setup_result(self): + + self.patch_current_result = { + 'data': [ + {'hostname': 'controller-0', + 'patch_current': True, + }, + {'hostname': 'controller-1', + 'patch_current': True, + } + ] + } + + self.multi_node_result = [ + kubernetes.client.V1Node( + api_version="v1", + kind="Node", + metadata=kubernetes.client.V1ObjectMeta( + name="controller-0", + namespace="test-namespace-1"), + status=kubernetes.client.V1NodeStatus( + conditions=[ + kubernetes.client.V1NodeCondition( + status="False", + type="NetworkUnavailable"), + kubernetes.client.V1NodeCondition( + status="False", + type="MemoryPressure"), + kubernetes.client.V1NodeCondition( + status="False", + type="DiskPressure"), + kubernetes.client.V1NodeCondition( + status="False", + type="PIDPressure"), + kubernetes.client.V1NodeCondition( + status="True", + type="Ready"), + ], + node_info=kubernetes.client.V1NodeSystemInfo( + architecture="fake-architecture", + boot_id="fake-boot-id", + container_runtime_version="fake-cr-version", + kernel_version="fake-kernel-version", + kube_proxy_version="fake-proxy-version", + kubelet_version="v1.42.4", + machine_id="fake-machine-id", + operating_system="fake-os", + os_image="fake-os-image", + system_uuid="fake-system-uuid")) + ), + kubernetes.client.V1Node( + api_version="v1", + kind="Node", + metadata=kubernetes.client.V1ObjectMeta( + name="controller-1", + namespace="test-namespace-1"), + status=kubernetes.client.V1NodeStatus( + conditions=[ + kubernetes.client.V1NodeCondition( + status="False", + type="NetworkUnavailable"), + kubernetes.client.V1NodeCondition( + status="False", + type="MemoryPressure"), + kubernetes.client.V1NodeCondition( + status="False", + type="DiskPressure"), + kubernetes.client.V1NodeCondition( + status="False", + type="PIDPressure"), + kubernetes.client.V1NodeCondition( + status="True", + type="Ready"), + ], + node_info=kubernetes.client.V1NodeSystemInfo( + architecture="fake-architecture", + boot_id="fake-boot-id", + container_runtime_version="fake-cr-version", + kernel_version="fake-kernel-version", + kube_proxy_version="fake-proxy-version", + kubelet_version="v1.42.3", + machine_id="fake-machine-id", + operating_system="fake-os", + os_image="fake-os-image", + system_uuid="fake-system-uuid")) + ), + ] + + self.cp_pod_ready_status_result = { + 'kube-apiserver-controller-0': 'True', + 'kube-controller-manager-controller-0': 'True', + 'kube-scheduler-controller-0': 'True', + 'kube-apiserver-controller-1': 'True', + 'kube-controller-manager-controller-1': 'True', + 'kube-scheduler-controller-1': 'True', + } + + def setUp(self): + super(TestHealth, self).setUp() + + # Mock the patching API + self.mock_patch_query_hosts_result = None + + def mock_patch_query_hosts(token, timeout, region_name): + return self.mock_patch_query_hosts_result + self.mocked_patch_query_hosts = mock.patch( + 'sysinv.api.controllers.v1.patch_api.patch_query_hosts', + mock_patch_query_hosts) + self.mocked_patch_query_hosts.start() + self.addCleanup(self.mocked_patch_query_hosts.stop) + + # Mock the KubeOperator + self.kube_get_nodes_result = None + + def mock_kube_get_nodes(obj): + return self.kube_get_nodes_result + self.mocked_kube_get_nodes = mock.patch( + 
'sysinv.common.kubernetes.KubeOperator.kube_get_nodes', + mock_kube_get_nodes) + self.mocked_kube_get_nodes.start() + self.addCleanup(self.mocked_kube_get_nodes.stop) + + self.kube_get_control_plane_pod_ready_status_result = None + + def mock_kube_get_control_plane_pod_ready_status(obj): + return self.kube_get_control_plane_pod_ready_status_result + self.mocked_kube_get_control_plane_pod_ready_status = mock.patch( + 'sysinv.common.kubernetes.KubeOperator.' + 'kube_get_control_plane_pod_ready_status', + mock_kube_get_control_plane_pod_ready_status) + self.mocked_kube_get_control_plane_pod_ready_status.start() + self.addCleanup( + self.mocked_kube_get_control_plane_pod_ready_status.stop) + + # Mock the fm API + p = mock.patch('sysinv.common.health.fmclient') + self.mock_fm_client_alarm_list = p.start() + self.addCleanup(p.stop) + + # Set up objects for testing + self.context = context.get_admin_context() + self.health = health.Health(self.dbapi) + + # Set up results + self.setup_result() + + def tearDown(self): + super(TestHealth, self).tearDown() + + pass + + def test_get_system_health(self): + # Create controller-0 + config_uuid = str(uuid.uuid4()) + self._create_test_host(personality=constants.CONTROLLER, + unit=0, + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE) + + # Create controller-1 + self._create_test_host(personality=constants.CONTROLLER, + unit=1, + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE) + + # Set up the mocked results + self.mock_patch_query_hosts_result = self.patch_current_result + self.kube_get_nodes_result = self.multi_node_result + self.kube_get_control_plane_pod_ready_status_result = \ + self.cp_pod_ready_status_result + + # Check system health + health_ok, output = self.health.get_system_health(self.context) + assert health_ok is True, "output: %s" % output + + def test_get_system_health_k8s_node_not_ready(self): + # Create controller-0 + config_uuid = str(uuid.uuid4()) + self._create_test_host(personality=constants.CONTROLLER, + unit=0, + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE) + + # Create controller-1 + self._create_test_host(personality=constants.CONTROLLER, + unit=1, + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE) + + # Set up the mocked results + self.mock_patch_query_hosts_result = self.patch_current_result + self.kube_get_nodes_result = self.multi_node_result + # Mark controller-0 as not ready + self.kube_get_nodes_result[0].status.conditions[4].status = "False" + self.kube_get_control_plane_pod_ready_status_result = \ + self.cp_pod_ready_status_result + + # Check system health + health_ok, output = self.health.get_system_health(self.context) + assert health_ok is False, "output: %s" % output + assert "Kubernetes nodes not ready: controller-0" in 
output, \ + "get_system_health output: %s" % output + + def test_get_system_health_k8s_cp_pod_not_ready(self): + # Create controller-0 + config_uuid = str(uuid.uuid4()) + self._create_test_host(personality=constants.CONTROLLER, + unit=0, + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE) + + # Create controller-1 + self._create_test_host(personality=constants.CONTROLLER, + unit=1, + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE) + + # Set up the mocked results + self.mock_patch_query_hosts_result = self.patch_current_result + self.kube_get_nodes_result = self.multi_node_result + self.kube_get_control_plane_pod_ready_status_result = \ + self.cp_pod_ready_status_result + # Mark a cp pod as not ready + self.kube_get_control_plane_pod_ready_status_result[ + 'kube-controller-manager-controller-1'] = 'False' + + # Check system health + health_ok, output = self.health.get_system_health(self.context) + assert health_ok is False, "get_system_health output: %s" % output + assert "kubernetes control plane pods are ready: [Fail]" in output, \ + "output: %s" % output + assert "not ready: kube-controller-manager-controller-1" in output, \ + "output: %s" % output + + def test_get_system_health_kube_upgrade(self): + # Create controller-0 + config_uuid = str(uuid.uuid4()) + self._create_test_host(personality=constants.CONTROLLER, + unit=0, + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE) + + # Create controller-1 + self._create_test_host(personality=constants.CONTROLLER, + unit=1, + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE) + + # Create kubernetes apps + dbutils.create_test_app(name='test-app-1', + status=constants.APP_APPLY_SUCCESS) + dbutils.create_test_app(name='test-app-2', + status=constants.APP_APPLY_SUCCESS) + dbutils.create_test_app(name='test-app-3', + status=constants.APP_UPLOAD_SUCCESS) + + # Set up the mocked results + self.mock_patch_query_hosts_result = self.patch_current_result + self.kube_get_nodes_result = self.multi_node_result + self.kube_get_control_plane_pod_ready_status_result = \ + self.cp_pod_ready_status_result + + # Check system health + health_ok, output = self.health.get_system_health_kube_upgrade( + self.context) + assert health_ok is True, "output: %s" % output + + def test_get_system_health_kube_upgrade_k8s_app_invalid_state(self): + # Create controller-0 + config_uuid = str(uuid.uuid4()) + self._create_test_host(personality=constants.CONTROLLER, + unit=0, + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE) + + # Create controller-1 + 
self._create_test_host(personality=constants.CONTROLLER, + unit=1, + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE) + + # Create kubernetes apps + dbutils.create_test_app(name='test-app-1', + status=constants.APP_APPLY_SUCCESS) + dbutils.create_test_app(name='test-app-2', + status=constants.APP_APPLY_IN_PROGRESS) + dbutils.create_test_app(name='test-app-3', + status=constants.APP_UPLOAD_SUCCESS) + + # Set up the mocked results + self.mock_patch_query_hosts_result = self.patch_current_result + self.kube_get_nodes_result = self.multi_node_result + self.kube_get_control_plane_pod_ready_status_result = \ + self.cp_pod_ready_status_result + + # Check system health + health_ok, output = self.health.get_system_health_kube_upgrade( + self.context) + assert health_ok is False, "output: %s" % output + assert "applications are in a valid state: [Fail]" in output, \ + "output: %s" % output + assert "applications not in a valid state: test-app-2" in output, \ + "output: %s" % output diff --git a/sysinv/sysinv/sysinv/sysinv/tests/common/test_kubernetes.py b/sysinv/sysinv/sysinv/sysinv/tests/common/test_kubernetes.py index 9d763b22d2..2614295c5c 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/common/test_kubernetes.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/common/test_kubernetes.py @@ -54,6 +54,23 @@ FAKE_KUBE_VERSIONS = [ }, ] +FAKE_POD_STATUS = kubernetes.client.V1PodStatus( + conditions=[ + kubernetes.client.V1PodCondition( + status="True", + type="Initialized"), + kubernetes.client.V1PodCondition( + status="True", + type="Ready"), + kubernetes.client.V1PodCondition( + status="True", + type="ContainersReady"), + kubernetes.client.V1PodCondition( + status="True", + type="PodScheduled"), + ], +) + def mock_get_kube_versions(): return FAKE_KUBE_VERSIONS @@ -193,6 +210,7 @@ class TestKubeOperator(base.TestCase): metadata=kubernetes.client.V1ObjectMeta( name="kube-apiserver-test-node-1", namespace="kube-system"), + status=FAKE_POD_STATUS, spec=kubernetes.client.V1PodSpec( containers=[ kubernetes.client.V1Container( @@ -213,6 +231,7 @@ class TestKubeOperator(base.TestCase): metadata=kubernetes.client.V1ObjectMeta( name="kube-controller-manager-test-node-1", namespace="kube-system"), + status=FAKE_POD_STATUS, spec=kubernetes.client.V1PodSpec( containers=[ kubernetes.client.V1Container( @@ -233,6 +252,7 @@ class TestKubeOperator(base.TestCase): metadata=kubernetes.client.V1ObjectMeta( name="kube-scheduler-test-node-1", namespace="kube-system"), + status=FAKE_POD_STATUS, spec=kubernetes.client.V1PodSpec( containers=[ kubernetes.client.V1Container( @@ -253,6 +273,7 @@ class TestKubeOperator(base.TestCase): metadata=kubernetes.client.V1ObjectMeta( name="kube-apiserver-test-node-2", namespace="kube-system"), + status=FAKE_POD_STATUS, spec=kubernetes.client.V1PodSpec( containers=[ kubernetes.client.V1Container( @@ -273,6 +294,7 @@ class TestKubeOperator(base.TestCase): metadata=kubernetes.client.V1ObjectMeta( name="kube-controller-manager-test-node-2", namespace="kube-system"), + status=FAKE_POD_STATUS, spec=kubernetes.client.V1PodSpec( containers=[ kubernetes.client.V1Container( @@ -293,6 +315,7 @@ class TestKubeOperator(base.TestCase): metadata=kubernetes.client.V1ObjectMeta( name="kube-scheduler-test-node-2", namespace="kube-system"), + status=FAKE_POD_STATUS, spec=kubernetes.client.V1PodSpec( containers=[ 
kubernetes.client.V1Container( @@ -398,10 +421,106 @@ class TestKubeOperator(base.TestCase): ), } - self.single_node_result = kubernetes.client.V1NodeList( + self.cp_pods_list_result = kubernetes.client.V1PodList( api_version="v1", items=[ kubernetes.client.V1Pod( + api_version="v1", + kind="Pod", + metadata=kubernetes.client.V1ObjectMeta( + name="kube-apiserver-test-node-1", + namespace="kube-system"), + status=FAKE_POD_STATUS, + spec=kubernetes.client.V1PodSpec( + containers=[ + kubernetes.client.V1Container( + name="kube-apiserver", + image="test-image-1:v1.42.1"), + ], + ), + ), + kubernetes.client.V1Pod( + api_version="v1", + kind="Pod", + metadata=kubernetes.client.V1ObjectMeta( + name="kube-controller-manager-test-node-1", + namespace="kube-system"), + status=FAKE_POD_STATUS, + spec=kubernetes.client.V1PodSpec( + containers=[ + kubernetes.client.V1Container( + name="kube-controller-manager", + image="test-image-2:v1.42.1"), + ], + ), + ), + kubernetes.client.V1Pod( + api_version="v1", + kind="Pod", + metadata=kubernetes.client.V1ObjectMeta( + name="kube-scheduler-test-node-1", + namespace="kube-system"), + status=FAKE_POD_STATUS, + spec=kubernetes.client.V1PodSpec( + containers=[ + kubernetes.client.V1Container( + name="kube-scheduler", + image="test-image-3:v1.42.1"), + ], + ), + ), + kubernetes.client.V1Pod( + api_version="v1", + kind="Pod", + metadata=kubernetes.client.V1ObjectMeta( + name="kube-apiserver-test-node-2", + namespace="kube-system"), + status=FAKE_POD_STATUS, + spec=kubernetes.client.V1PodSpec( + containers=[ + kubernetes.client.V1Container( + name="kube-apiserver", + image="test-image-1:v1.42.1"), + ], + ), + ), + kubernetes.client.V1Pod( + api_version="v1", + kind="Pod", + metadata=kubernetes.client.V1ObjectMeta( + name="kube-controller-manager-test-node-2", + namespace="kube-system"), + status=FAKE_POD_STATUS, + spec=kubernetes.client.V1PodSpec( + containers=[ + kubernetes.client.V1Container( + name="kube-controller-manager", + image="test-image-2:v1.42.1"), + ], + ), + ), + kubernetes.client.V1Pod( + api_version="v1", + kind="Pod", + metadata=kubernetes.client.V1ObjectMeta( + name="kube-scheduler-test-node-2", + namespace="kube-system"), + status=FAKE_POD_STATUS, + spec=kubernetes.client.V1PodSpec( + containers=[ + kubernetes.client.V1Container( + name="kube-scheduler", + image="test-image-3:v1.42.1"), + ], + ), + ), + ], + ) + + self.single_node_result = kubernetes.client.V1NodeList( + api_version="v1", + items=[ + kubernetes.client.V1Node( api_version="v1", kind="Node", metadata=kubernetes.client.V1ObjectMeta( @@ -426,7 +545,7 @@ class TestKubeOperator(base.TestCase): self.multi_node_result = kubernetes.client.V1NodeList( api_version="v1", items=[ - kubernetes.client.V1Pod( + kubernetes.client.V1Node( api_version="v1", kind="Node", metadata=kubernetes.client.V1ObjectMeta( @@ -445,7 +564,7 @@ class TestKubeOperator(base.TestCase): os_image="fake-os-image", system_uuid="fake-system-uuid")) ), - kubernetes.client.V1Pod( + kubernetes.client.V1Node( api_version="v1", kind="Node", metadata=kubernetes.client.V1ObjectMeta( @@ -470,13 +589,13 @@ class TestKubeOperator(base.TestCase): self.config_map_result = kubernetes.client.V1ConfigMap( api_version="v1", data={"ClusterConfiguration": - "apiServer:\n" - " certSANs:\n" - " - 127.0.0.1\n" - " - 192.168.206.2\n" - "apiVersion: kubeadm.k8s.io/v1beta2\n" - "kubernetesVersion: v1.42.4\n" - "kind: ClusterStatus\n" + "apiServer:\n" + " certSANs:\n" + " - 127.0.0.1\n" + " - 192.168.206.2\n" + "apiVersion: 
kubeadm.k8s.io/v1beta2\n" + "kubernetesVersion: v1.42.4\n" + "kind: ClusterStatus\n" }, metadata=kubernetes.client.V1ObjectMeta( name="kubeadm-config", @@ -486,12 +605,12 @@ class TestKubeOperator(base.TestCase): self.config_map_result_no_version = kubernetes.client.V1ConfigMap( api_version="v1", data={"ClusterConfiguration": - "apiServer:\n" - " certSANs:\n" - " - 127.0.0.1\n" - " - 192.168.206.2\n" - "apiVersion: kubeadm.k8s.io/v1beta2\n" - "kind: ClusterStatus\n" + "apiServer:\n" + " certSANs:\n" + " - 127.0.0.1\n" + " - 192.168.206.2\n" + "apiVersion: kubeadm.k8s.io/v1beta2\n" + "kind: ClusterStatus\n" }, metadata=kubernetes.client.V1ObjectMeta( name="kubeadm-config", @@ -513,6 +632,15 @@ class TestKubeOperator(base.TestCase): mock_list_namespaced_pod) self.mocked_list_namespaced_pod.start() + self.list_pod_for_all_namespaces_result = None + + def mock_list_pod_for_all_namespaces(obj, label_selector=""): + return self.list_pod_for_all_namespaces_result + self.mocked_list_pod_for_all_namespaces = mock.patch( + 'kubernetes.client.CoreV1Api.list_pod_for_all_namespaces', + mock_list_pod_for_all_namespaces) + self.mocked_list_pod_for_all_namespaces.start() + self.list_node_result = None def mock_list_node(obj, label_selector=""): @@ -537,7 +665,9 @@ class TestKubeOperator(base.TestCase): super(TestKubeOperator, self).tearDown() self.mocked_list_namespaced_pod.stop() + self.mocked_list_pod_for_all_namespaces.stop() self.mocked_list_node.stop() + self.mocked_read_namespaced_config_map.stop() def test_kube_get_image_by_pod_name(self): @@ -563,6 +693,47 @@ class TestKubeOperator(base.TestCase): 'test-pod-1', 'test-namespace-1', 'test-container-1') assert result == "test-image-1:imageversion-1" + def test_kube_get_control_plane_pod_ready_status(self): + + self.list_pod_for_all_namespaces_result = self.cp_pods_list_result + self.list_node_result = self.multi_node_result + + result = self.kube_operator.kube_get_control_plane_pod_ready_status() + assert result == {'kube-apiserver-test-node-1': 'True', + 'kube-controller-manager-test-node-1': 'True', + 'kube-scheduler-test-node-1': 'True', + 'kube-apiserver-test-node-2': 'True', + 'kube-controller-manager-test-node-2': 'True', + 'kube-scheduler-test-node-2': 'True'} + + def test_kube_get_control_plane_pod_ready_status_single_node(self): + + self.list_pod_for_all_namespaces_result = self.cp_pods_list_result + del self.cp_pods_list_result.items[5] + del self.cp_pods_list_result.items[4] + del self.cp_pods_list_result.items[3] + self.list_node_result = self.single_node_result + + result = self.kube_operator.kube_get_control_plane_pod_ready_status() + assert result == {'kube-apiserver-test-node-1': 'True', + 'kube-controller-manager-test-node-1': 'True', + 'kube-scheduler-test-node-1': 'True'} + + def test_kube_get_control_plane_pod_ready_status_missing_pods(self): + + self.list_pod_for_all_namespaces_result = self.cp_pods_list_result + del self.cp_pods_list_result.items[5] + del self.cp_pods_list_result.items[1] + self.list_node_result = self.multi_node_result + + result = self.kube_operator.kube_get_control_plane_pod_ready_status() + assert result == {'kube-apiserver-test-node-1': 'True', + 'kube-controller-manager-test-node-1': None, + 'kube-scheduler-test-node-1': 'True', + 'kube-apiserver-test-node-2': 'True', + 'kube-controller-manager-test-node-2': 'True', + 'kube-scheduler-test-node-2': None} + def test_kube_get_control_plane_versions(self): self.list_namespaced_pod_result = self.cp_pods_result diff --git 
a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_kube_app_app_operator.py b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_kube_app_app_operator.py new file mode 100644 index 0000000000..a103ceea4a --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_kube_app_app_operator.py @@ -0,0 +1,111 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2020 Intel Corporation +# + +"""Test class for Sysinv kube_app AppOperator.""" + +import fixtures + +from sysinv.common import constants +from sysinv.conductor import kube_app +from sysinv.db import api as dbapi +from sysinv.openstack.common import context +from sysinv.objects import kube_app as obj_app + +from sysinv.tests.db import base +from sysinv.tests.db import utils as dbutils + + +class AppOperatorTestCase(base.DbTestCase): + + def setUp(self): + super(AppOperatorTestCase, self).setUp() + + # Set up objects for testing + self.app_operator = kube_app.AppOperator(dbapi.get_instance()) + self.context = context.get_admin_context() + self.dbapi = dbapi.get_instance() + self.temp_dir = self.useFixture(fixtures.TempDir()) + + def test_activate(self): + # Create kubernetes apps + dbutils.create_test_app(name='test-app-1', + active=True) + test_app_1 = obj_app.get_by_name(self.context, 'test-app-1') + self.assertEqual(test_app_1.active, True) + res = self.app_operator.activate(test_app_1) + # check was_active + self.assertEqual(res, True) + # check current active + self.assertEqual(test_app_1.active, True) + + dbutils.create_test_app(name='test-app-2', + active=False) + test_app_2 = obj_app.get_by_name(self.context, 'test-app-2') + self.assertEqual(test_app_2.active, False) + res = self.app_operator.activate(test_app_2) + # check was_active + self.assertEqual(res, False) + # check current active + self.assertEqual(test_app_2.active, True) + + def test_deactivate(self): + # Create kubernetes apps + dbutils.create_test_app(name='test-app-1', + active=True) + test_app_1 = obj_app.get_by_name(self.context, 'test-app-1') + self.assertEqual(test_app_1.active, True) + res = self.app_operator.deactivate(test_app_1) + # check was_active + self.assertEqual(res, True) + # check current active + self.assertEqual(test_app_1.active, False) + + dbutils.create_test_app(name='test-app-2', + active=False) + test_app_2 = obj_app.get_by_name(self.context, 'test-app-2') + self.assertEqual(test_app_2.active, False) + res = self.app_operator.deactivate(test_app_2) + # check was_active + self.assertEqual(res, False) + # check current active + self.assertEqual(test_app_2.active, False) + + def test_get_appname(self): + test_app_name = 'test-app-1' + dbutils.create_test_app(name=test_app_name, + status=constants.APP_APPLY_SUCCESS) + test_app_1 = obj_app.get_by_name(self.context, 'test-app-1') + app_name = self.app_operator.get_appname(test_app_1) + self.assertEqual(test_app_name, app_name) + + def test_is_app_active(self): + dbutils.create_test_app(name='test-app-1', + active=True) + test_app_1 = obj_app.get_by_name(self.context, 'test-app-1') + self.app_operator.activate(test_app_1) + is_active = self.app_operator.is_app_active(test_app_1) + self.assertEqual(is_active, True) + self.app_operator.deactivate(test_app_1) + is_active = self.app_operator.is_app_active(test_app_1) + self.assertEqual(is_active, False) + + def test_reapply(self): + dbutils.create_test_app(name='test-app-1', + active=True) + constants.APP_PENDING_REAPPLY_FLAG = self.temp_dir.path + "/.app_reapply" + self.app_operator.set_reapply('test-app-1') + result = 
self.app_operator.needs_reapply('test-app-1') + self.assertEqual(result, True) + self.app_operator.clear_reapply('test-app-1') + result = self.app_operator.needs_reapply('test-app-1') + self.assertEqual(result, False) + + def test_is_app_aborted(self): + self.app_operator.abort_requested["test_app"] = True + res = self.app_operator.is_app_aborted("test_app") + self.assertEqual(res, True) + res = self.app_operator.is_app_aborted("test_app_123") + self.assertEqual(res, False) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_manager.py b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_manager.py index dd4e44e9a4..a086698c9f 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_manager.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_manager.py @@ -23,11 +23,13 @@ """Test class for Sysinv ManagerService.""" import mock +import os.path import uuid from sysinv.common import constants from sysinv.common import exception from sysinv.common import kubernetes +from sysinv.common import utils as cutils from sysinv.conductor import manager from sysinv.db import api as dbapi from sysinv.openstack.common import context @@ -97,16 +99,14 @@ class ManagerTestCase(base.DbTestCase): self.upgrade_downgrade_kube_components_patcher.start() self.addCleanup(self.mock_upgrade_downgrade_kube_components.stop) - self.do_update_alarm_status_patcher = mock.patch.object( - manager.ConductorManager, '_do_update_alarm_status') - self.mock_do_update_alarm_status = \ - self.do_update_alarm_status_patcher.start() - self.addCleanup(self.mock_do_update_alarm_status.stop) + self.service.fm_api = mock.Mock() + self.service.fm_api.set_fault.side_effect = self._raise_alarm + self.service.fm_api.clear_fault.side_effect = self._clear_alarm self.fail_config_apply_runtime_manifest = False def mock_config_apply_runtime_manifest(obj, context, config_uuid, - config_dict): + config_dict, force=False): if not self.fail_config_apply_runtime_manifest: # Pretend the config was applied if 'host_uuids' in config_dict: @@ -184,6 +184,16 @@ class ManagerTestCase(base.DbTestCase): self.mocked_get_kube_versions.start() self.addCleanup(self.mocked_get_kube_versions.stop) + self.service._puppet = mock.Mock() + self.service._allocate_addresses_for_host = mock.Mock() + self.service._update_pxe_config = mock.Mock() + self.service._ceph_mon_create = mock.Mock() + self.alarm_raised = False + + def tearDown(self): + super(ManagerTestCase, self).tearDown() + self.upgrade_downgrade_kube_components_patcher.stop() + def _create_test_ihost(self, **kwargs): # ensure the system ID for proper association kwargs['forisystemid'] = self.system['id'] @@ -413,6 +423,145 @@ class ManagerTestCase(base.DbTestCase): self.context, ihost) + def test_vim_host_add(self): + mock_vim_host_add = mock.MagicMock() + p = mock.patch('sysinv.api.controllers.v1.vim_api.vim_host_add', + mock_vim_host_add) + p.start().return_value = {} + self.addCleanup(p.stop) + + ret = self.service.vim_host_add(self.context, None, str(uuid.uuid4()), + "newhostname", "worker", "locked", "disabled", "offline", + "disabled", "not-installed", 10) + + mock_vim_host_add.assert_called_with(mock.ANY, mock.ANY, + "newhostname", "worker", "locked", "disabled", "offline", + "disabled", "not-installed", 10) + + self.assertEqual(ret, {}) + + def test_mtc_host_add(self): + mock_notify_mtc_and_recv = mock.MagicMock() + p = mock.patch('sysinv.common.utils.notify_mtc_and_recv', + mock_notify_mtc_and_recv) + p.start().return_value = {'status': 'pass'} + self.addCleanup(p.stop) + + ihost = 
{} + ihost['hostname'] = 'newhost' + ihost['personality'] = 'worker' + + self.service.mtc_host_add(self.context, "localhost", 2112, ihost) + mock_notify_mtc_and_recv.assert_called_with("localhost", 2112, ihost) + + def test_ilvg_get_nova_ilvg_by_ihost(self): + ihost = self._create_test_ihost() + lvg_dict = { + 'lvm_vg_name': constants.LVG_NOVA_LOCAL, + } + ilvg = self.dbapi.ilvg_create(ihost['id'], lvg_dict) + ret = self.service.ilvg_get_nova_ilvg_by_ihost(self.context, ihost['uuid']) + self.assertEqual(ret[0]['uuid'], ilvg['uuid']) + + def test_ilvg_get_nova_ilvg_by_ihost_no_nova_ilvg(self): + ihost = self._create_test_ihost() + ret = self.service.ilvg_get_nova_ilvg_by_ihost(self.context, ihost['uuid']) + self.assertEqual(ret, []) + + def test_platform_interfaces(self): + ihost = self._create_test_ihost() + interface = utils.create_test_interface( + ifname='mgmt', + forihostid=ihost['id'], + ihost_uuid=ihost['uuid'], + ifclass=constants.INTERFACE_CLASS_PLATFORM, + iftype=constants.INTERFACE_TYPE_ETHERNET) + port = utils.create_test_ethernet_port( + name='eth0', + host_id=ihost['id'], + interface_id=interface['id'], + pciaddr='0000:00:00.01', + dev_id=0) + + ret = self.service.platform_interfaces(self.context, ihost['id']) + self.assertEqual(ret[0]['name'], port['name']) + + def test_platform_interfaces_multi(self): + ihost = self._create_test_ihost() + interface_mgmt = utils.create_test_interface( + ifname='mgmt', + forihostid=ihost['id'], + ihost_uuid=ihost['uuid'], + ifclass=constants.INTERFACE_CLASS_PLATFORM, + iftype=constants.INTERFACE_TYPE_ETHERNET) + port_mgmt = utils.create_test_ethernet_port( + name='eth0', + host_id=ihost['id'], + interface_id=interface_mgmt['id'], + pciaddr='0000:00:00.01', + dev_id=0) + + interface_oam = utils.create_test_interface( + ifname='oam', + forihostid=ihost['id'], + ihost_uuid=ihost['uuid'], + ifclass=constants.INTERFACE_CLASS_PLATFORM, + iftype=constants.INTERFACE_TYPE_ETHERNET) + port_oam = utils.create_test_ethernet_port( + name='eth1', + host_id=ihost['id'], + interface_id=interface_oam['id'], + pciaddr='0000:00:00.02', + dev_id=1) + + interface_data = utils.create_test_interface( + ifname='data', + forihostid=ihost['id'], + ihost_uuid=ihost['uuid'], + ifclass=constants.INTERFACE_CLASS_DATA, + iftype=constants.INTERFACE_TYPE_VLAN) + utils.create_test_ethernet_port( + name='eth2', + host_id=ihost['id'], + interface_id=interface_data['id'], + pciaddr='0000:00:00.03', + dev_id=2) + + ret = self.service.platform_interfaces(self.context, ihost['id']) + self.assertEqual(len(ret), 2) + self.assertEqual(ret[0]['name'], port_mgmt['name']) + self.assertEqual(ret[1]['name'], port_oam['name']) + + def test_platform_interfaces_no_port(self): + ihost = self._create_test_ihost() + utils.create_test_interface( + ifname='mgmt', + forihostid=ihost['id'], + ihost_uuid=ihost['uuid'], + ifclass=constants.INTERFACE_CLASS_PLATFORM, + iftype=constants.INTERFACE_TYPE_ETHERNET) + + ret = self.service.platform_interfaces(self.context, ihost['id']) + self.assertEqual(ret, []) + + def test_platform_interfaces_invalid_ihost(self): + ihost = self._create_test_ihost() + interface = utils.create_test_interface( + ifname='mgmt', + forihostid=ihost['id'], + ihost_uuid=ihost['uuid'], + ifclass=constants.INTERFACE_CLASS_PLATFORM, + iftype=constants.INTERFACE_TYPE_ETHERNET) + utils.create_test_ethernet_port( + name='eth0', + host_id=ihost['id'], + interface_id=interface['id'], + pciaddr='0000:00:00.01', + dev_id=0) + + ret = self.service.platform_interfaces(self.context, ihost['id'] 
+ 1) + self.assertEqual(ret, []) + def test_kube_download_images(self): # Create an upgrade utils.create_test_kube_upgrade( @@ -1012,3 +1161,268 @@ class ManagerTestCase(base.DbTestCase): # Verify that the host upgrade status was cleared updated_host_upgrade = self.dbapi.kube_host_upgrade_get(1) self.assertIsNotNone(updated_host_upgrade.status) + + def test_kube_upgrade_networking(self): + # Create an upgrade + utils.create_test_kube_upgrade( + from_version='v1.42.1', + to_version='v1.42.2', + state=kubernetes.KUBE_UPGRADING_NETWORKING, + ) + + # Upgrade kubernetes networking + self.service.kube_upgrade_networking(self.context, 'v1.42.2') + + # Verify that the upgrade state was updated + updated_upgrade = self.dbapi.kube_upgrade_get_one() + self.assertEqual(updated_upgrade.state, + kubernetes.KUBE_UPGRADED_NETWORKING) + + def test_kube_upgrade_networking_ansible_fail(self): + # Create an upgrade + utils.create_test_kube_upgrade( + from_version='v1.42.1', + to_version='v1.42.2', + state=kubernetes.KUBE_UPGRADING_NETWORKING, + ) + # Fake an ansible failure + self.fake_subprocess_popen.returncode = 1 + + # Upgrade kubernetes networking + self.service.kube_upgrade_networking(self.context, 'v1.42.2') + + # Verify that the upgrade state was updated + updated_upgrade = self.dbapi.kube_upgrade_get_one() + self.assertEqual(updated_upgrade.state, + kubernetes.KUBE_UPGRADING_NETWORKING_FAILED) + + def test_configure_out_of_date(self): + config_applied = self.service._config_set_reboot_required(uuid.uuid4()) + config_target = self.service._config_set_reboot_required(uuid.uuid4()) + ihost = self._create_test_ihost(config_applied=config_applied, + config_target=config_target) + os.path.isfile = mock.Mock(return_value=True) + cutils.is_aio_system = mock.Mock(return_value=True) + ihost['mgmt_mac'] = '00:11:22:33:44:55' + ihost['mgmt_ip'] = '1.2.3.42' + ihost['hostname'] = 'controller-0' + ihost['invprovision'] = 'provisioned' + ihost['personality'] = 'controller' + ihost['administrative'] = 'unlocked' + ihost['operational'] = 'available' + ihost['availability'] = 'online' + ihost['serialid'] = '1234567890abc' + ihost['boot_device'] = 'sda' + ihost['rootfs_device'] = 'sda' + ihost['install_output'] = 'text' + ihost['console'] = 'ttyS0,115200' + self.service.configure_ihost(self.context, ihost) + res = self.dbapi.ihost_get(ihost['uuid']) + imsg_dict = {'config_applied': res['config_target']} + self.service.iconfig_update_by_ihost(self.context, ihost['uuid'], imsg_dict) + self.assertEqual(self.alarm_raised, False) + + personalities = [constants.CONTROLLER] + self.service._config_update_hosts(self.context, personalities, reboot=True) + res = self.dbapi.ihost_get(ihost['uuid']) + + personalities = [constants.CONTROLLER] + self.service._config_update_hosts(self.context, personalities, reboot=False) + res = self.dbapi.ihost_get(ihost['uuid']) + config_uuid = self.service._config_clear_reboot_required(res['config_target']) + imsg_dict = {'config_applied': config_uuid} + self.service.iconfig_update_by_ihost(self.context, ihost['uuid'], imsg_dict) + self.assertEqual(self.alarm_raised, True) + + def _raise_alarm(self, fault): + self.alarm_raised = True + + def _clear_alarm(self, fm_id, fm_instance): + self.alarm_raised = False + + def _create_test_ihosts(self): + # Create controller-0 + config_uuid = str(uuid.uuid4()) + self._create_test_ihost( + personality=constants.CONTROLLER, + hostname='controller-0', + uuid=str(uuid.uuid4()), + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + 
invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE, + mgmt_mac='00:11:22:33:44:55', + mgmt_ip='1.2.3.4') + # Create controller-1 + config_uuid = str(uuid.uuid4()) + self._create_test_ihost( + personality=constants.CONTROLLER, + hostname='controller-1', + uuid=str(uuid.uuid4()), + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE, + mgmt_mac='22:44:33:55:11:66', + mgmt_ip='1.2.3.5') + # Create compute-0 + config_uuid = str(uuid.uuid4()) + self._create_test_ihost( + personality=constants.WORKER, + hostname='compute-0', + uuid=str(uuid.uuid4()), + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE, + mgmt_mac='22:44:33:55:11:77', + mgmt_ip='1.2.3.6') + + def test_get_ihost_by_macs(self): + self._create_test_ihosts() + ihost_macs = ['22:44:33:55:11:66', '22:44:33:88:11:66'] + ihost = self.service.get_ihost_by_macs(self.context, ihost_macs) + self.assertEqual(ihost.mgmt_mac, '22:44:33:55:11:66') + + def test_get_ihost_by_macs_no_match(self): + self._create_test_ihosts() + ihost = None + ihost_macs = ['22:44:33:99:11:66', '22:44:33:88:11:66'] + ihost = self.service.get_ihost_by_macs(self.context, ihost_macs) + self.assertEqual(ihost, None) + + def test_get_ihost_by_hostname(self): + self._create_test_ihosts() + ihost_hostname = 'controller-1' + ihost = self.service.get_ihost_by_hostname(self.context, ihost_hostname) + self.assertEqual(ihost.mgmt_mac, '22:44:33:55:11:66') + self.assertEqual(ihost.mgmt_ip, '1.2.3.5') + self.assertEqual(ihost.hostname, 'controller-1') + + def test_get_ihost_by_hostname_invalid_name(self): + self._create_test_ihosts() + ihost_hostname = 'compute' + ihost = None + ihost = self.service.get_ihost_by_hostname(self.context, ihost_hostname) + self.assertEqual(ihost, None) + + def test_pci_device_update_by_host(self): + # Create compute-0 node + config_uuid = str(uuid.uuid4()) + ihost = self._create_test_ihost( + personality=constants.WORKER, + hostname='compute-0', + uuid=str(uuid.uuid4()), + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE, + ) + host_uuid = ihost['uuid'] + host_id = ihost['id'] + PCI_DEV_1 = {'uuid': str(uuid.uuid4()), + 'name': 'pci_dev_1', + 'pciaddr': '0000:0b:01.0', + 'pclass_id': '060100', + 'pvendor_id': '8086', + 'pdevice_id': '0443', + 'enabled': True} + PCI_DEV_2 = {'uuid': str(uuid.uuid4()), + 'name': 'pci_dev_2', + 'pciaddr': '0000:0c:01.0', + 'pclass_id': '060200', + 'pvendor_id': '8088', + 'pdevice_id': '0444', + 'enabled': True} + pci_device_dict_array = [PCI_DEV_1, PCI_DEV_2] + + # create new dev + self.service.pci_device_update_by_host(self.context, host_uuid, pci_device_dict_array) + + dev = self.dbapi.pci_device_get(PCI_DEV_1['pciaddr'], host_id) + for key in PCI_DEV_1: + self.assertEqual(dev[key], PCI_DEV_1[key]) + + dev = self.dbapi.pci_device_get(PCI_DEV_2['pciaddr'], host_id) + for key in PCI_DEV_2: + self.assertEqual(dev[key], 
PCI_DEV_2[key]) + + # update existed dev + pci_dev_dict_update1 = [{'pciaddr': PCI_DEV_2['pciaddr'], + 'pclass_id': '060500', + 'pvendor_id': '8086', + 'pdevice_id': '0449', + 'pclass': '0600', + 'pvendor': '', + 'psvendor': '', + 'psdevice': 'qat', + 'sriov_totalvfs': 32, + 'sriov_numvfs': 4, + 'sriov_vfs_pci_address': '', + 'driver': ''}] + self.service.pci_device_update_by_host(self.context, host_uuid, pci_dev_dict_update1) + + dev = self.dbapi.pci_device_get(PCI_DEV_2['pciaddr'], host_id) + + for key in pci_dev_dict_update1[0]: + self.assertEqual(dev[key], pci_dev_dict_update1[0][key]) + + # update existed dev failure case, failed to change uuid. + pci_dev_dict_update2 = [{'pciaddr': PCI_DEV_2['pciaddr'], + 'pclass_id': '060500', + 'pvendor_id': '8086', + 'pdevice_id': '0449', + 'pclass': '0600', + 'pvendor': '', + 'psvendor': '', + 'psdevice': 'qat', + 'sriov_totalvfs': 32, + 'sriov_numvfs': 4, + 'sriov_vfs_pci_address': '', + 'driver': '', + 'uuid': 1122}] + + self.service.pci_device_update_by_host(self.context, host_uuid, pci_dev_dict_update2) + dev = self.dbapi.pci_device_get(PCI_DEV_2['pciaddr'], host_id) + self.assertEqual(dev['uuid'], PCI_DEV_2['uuid']) + + def test_inumas_update_by_ihost(self): + # Create compute-0 node + config_uuid = str(uuid.uuid4()) + ihost = self._create_test_ihost( + personality=constants.WORKER, + hostname='compute-0', + uuid=str(uuid.uuid4()), + config_status=None, + config_applied=config_uuid, + config_target=config_uuid, + invprovision=constants.PROVISIONED, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_ONLINE, + ) + host_uuid = ihost['uuid'] + host_id = ihost['id'] + utils.create_test_node(id=1, numa_node=0, forihostid=host_id) + utils.create_test_node(id=2, numa_node=1, forihostid=host_id) + port1 = utils.create_test_ethernet_port( + id=1, name="port1", host_id=host_id, + interface_id="1122", mac='08:00:27:43:60:11', numa_node=3) + self.assertEqual(port1['node_id'], None) + inuma_dict_array = [{'numa_node': 1}, {'numa_node': 3}] + self.service.inumas_update_by_ihost(self.context, host_uuid, inuma_dict_array) + updated_port = self.dbapi.ethernet_port_get(port1['uuid'], host_id) + + self.assertEqual(updated_port['node_id'], 3) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_rpcapi.py b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_rpcapi.py index 24c9323baa..7983d8bda0 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_rpcapi.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_rpcapi.py @@ -22,6 +22,7 @@ """ Unit Tests for :py:class:`sysinv.conductor.rpcapi.ConductorAPI`. 
""" +import mock from oslo_config import cfg from oslo_serialization import jsonutils as json @@ -69,14 +70,13 @@ class RPCAPITestCase(base.DbTestCase): if expected_retval: return expected_retval - self.stubs.Set(rpc, rpc_method, _fake_rpc_method) - - retval = getattr(rpcapi, method)(ctxt, **kwargs) - - self.assertEqual(retval, expected_retval) - expected_args = [ctxt, expected_topic, expected_msg] - for arg, expected_arg in zip(self.fake_args, expected_args): - self.assertEqual(arg, expected_arg) + with mock.patch.object(rpc, rpc_method) as mock_method: + mock_method.side_effect = _fake_rpc_method + retval = getattr(rpcapi, method)(ctxt, **kwargs) + self.assertEqual(retval, expected_retval) + expected_args = [ctxt, expected_topic, expected_msg] + for arg, expected_arg in zip(self.fake_args, expected_args): + self.assertEqual(arg, expected_arg) def test_create_ihost(self): ihost_dict = {'mgmt_mac': '00:11:22:33:44:55', diff --git a/sysinv/sysinv/sysinv/sysinv/tests/db/__init__.py b/sysinv/sysinv/sysinv/sysinv/tests/db/__init__.py index f894284186..bab71219c0 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/db/__init__.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/db/__init__.py @@ -13,4 +13,3 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -from sysinv.tests.db import * diff --git a/sysinv/sysinv/sysinv/sysinv/tests/db/base.py b/sysinv/sysinv/sysinv/sysinv/tests/db/base.py index 71f62cf298..3e8f4b37c8 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/db/base.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/db/base.py @@ -50,6 +50,9 @@ class BaseIPv4Mixin(object): nameservers = ['8.8.8.8', '8.8.4.4'] + # Used to test changing oam from ipv4 to ipv6 + change_family_oam_subnet = netaddr.IPNetwork('fd00::/64') + class BaseIPv6Mixin(object): @@ -63,6 +66,9 @@ class BaseIPv6Mixin(object): nameservers = ['2001:4860:4860::8888', '2001:4860:4860::8844'] + # Used to test changing oam from ipv6 to ipv4 + change_family_oam_subnet = netaddr.IPNetwork('10.10.10.0/24') + class BaseCephStorageBackendMixin(object): @@ -128,6 +134,7 @@ class BaseSystemTestCase(BaseIPv4Mixin, DbTestCase): self.hosts = [] self.address_pools = [] self.networks = [] + self.oam = None def _create_test_common(self): self._create_test_system() @@ -140,6 +147,7 @@ class BaseSystemTestCase(BaseIPv4Mixin, DbTestCase): self._create_test_ptp() self._create_test_networks() self._create_test_static_ips() + self._create_test_oam() self._create_test_multicast_ips() def _create_test_system(self): @@ -178,10 +186,19 @@ class BaseSystemTestCase(BaseIPv4Mixin, DbTestCase): self.ptp = dbutils.create_test_ptp( system_id=self.system.id) - def _create_test_network(self, name, nettype, subnet, ranges=None): + def _create_test_network(self, name, network_type, subnet, ranges=None): + address_pool_id = self._create_test_address_pool(name, subnet, ranges).id + + network = dbutils.create_test_network( + type=network_type, + address_pool_id=address_pool_id) + + self.networks.append(network) + return network + + def _create_test_address_pool(self, name, subnet, ranges=None): if not ranges: ranges = [(str(subnet[2]), str(subnet[-2]))] - pool = dbutils.create_test_address_pool( name=name, network=str(subnet.network), @@ -189,13 +206,7 @@ class BaseSystemTestCase(BaseIPv4Mixin, DbTestCase): prefix=subnet.prefixlen, ranges=ranges) self.address_pools.append(pool) - - network = dbutils.create_test_network( - type=nettype, - address_pool_id=pool.id) - - 
self.networks.append(network) - return network + return pool def _create_test_networks(self): @@ -226,12 +237,15 @@ class BaseSystemTestCase(BaseIPv4Mixin, DbTestCase): def _create_test_addresses(self, hostnames, subnet, network_type, start=1, stop=None): ips = itertools.islice(subnet, start, stop) + addresses = [] for name in hostnames: - dbutils.create_test_address( - name=utils.format_address_name(name, network_type), - family=subnet.version, - prefix=subnet.prefixlen, - address=str(next(ips))) + address = dbutils.create_test_address( + name=utils.format_address_name(name, network_type), + family=subnet.version, + prefix=subnet.prefixlen, + address=str(next(ips))) + addresses.append(address) + return addresses def _create_test_static_ips(self): hostnames = [ @@ -249,7 +263,7 @@ class BaseSystemTestCase(BaseIPv4Mixin, DbTestCase): hostnames, self.pxeboot_subnet, constants.NETWORK_TYPE_PXEBOOT) - self._create_test_addresses( + self.mgmt_addresses = self._create_test_addresses( hostnames + platform_hostnames, self.mgmt_subnet, constants.NETWORK_TYPE_MGMT) @@ -262,6 +276,9 @@ class BaseSystemTestCase(BaseIPv4Mixin, DbTestCase): hostnames, self.cluster_host_subnet, constants.NETWORK_TYPE_CLUSTER_HOST) + def _create_test_oam(self): + self.oam = dbutils.create_test_oam() + def _create_test_multicast_ips(self): hostnames = [ @@ -284,6 +301,11 @@ class BaseHostTestCase(BaseSystemTestCase): def setUp(self): super(BaseHostTestCase, self).setUp() + self.disks = {} + + def tearDown(self): + super(BaseHostTestCase, self).tearDown() + self.disks = {} def _create_test_host(self, personality, subfunction=None, numa_nodes=1, unit=0, **kw): @@ -325,9 +347,10 @@ class BaseHostTestCase(BaseSystemTestCase): self.dbapi.imemory_create(host.id, dbutils.get_test_imemory(forinodeid=node.id)) - self.dbapi.idisk_create(host.id, + disk = self.dbapi.idisk_create(host.id, dbutils.get_test_idisk(device_node=self.root_disk_device_node, device_type=self.root_disk_device_type)) + self.disks[host.id] = disk self.hosts.append(host) @@ -368,6 +391,7 @@ class BaseHostTestCase(BaseSystemTestCase): constants.NETWORK_TYPE_CLUSTER_HOST] ifnames = ['oam', 'mgmt', 'cluster'] index = 0 + ifaces = [] for nt, name in zip(network_types, ifnames): if (host.personality == constants.WORKER and nt == constants.NETWORK_TYPE_OAM): @@ -384,11 +408,13 @@ class BaseHostTestCase(BaseSystemTestCase): forihostid=host['id'], ihost_uuid=host['uuid']) iface = self.dbapi.iinterface_get(interface['uuid']) + ifaces.append(iface) network = self.dbapi.network_get_by_type(nt) dbutils.create_test_interface_network( interface_id=iface.id, network_id=network.id) index = index + 1 + return ifaces class ControllerHostTestCase(BaseHostTestCase): @@ -399,6 +425,18 @@ class ControllerHostTestCase(BaseHostTestCase): self._create_test_host_cpus(self.host, platform=16) +class ProvisionedControllerHostTestCase(BaseHostTestCase): + + def setUp(self): + super(ProvisionedControllerHostTestCase, self).setUp() + self.host = self._create_test_host(constants.CONTROLLER, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_AVAILABLE, + vim_progress_status=constants.VIM_SERVICES_ENABLED) + self._create_test_host_cpus(self.host, platform=16) + + class WorkerHostTestCase(BaseHostTestCase): def setUp(self): @@ -427,6 +465,20 @@ class AIOHostTestCase(BaseHostTestCase): self._create_test_host_cpus(self.host, platform=2, vswitch=2, application=11) +class ProvisionedAIOHostTestCase(BaseHostTestCase): + + 
system_mode = constants.TIS_AIO_BUILD + + def setUp(self): + super(ProvisionedAIOHostTestCase, self).setUp() + self.host = self._create_test_host(constants.CONTROLLER, constants.WORKER, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_AVAILABLE, + vim_progress_status=constants.VIM_SERVICES_ENABLED) + self._create_test_host_cpus(self.host, platform=2, vswitch=2, application=11) + + class AIOSimplexHostTestCase(AIOHostTestCase): system_mode = constants.SYSTEM_MODE_SIMPLEX @@ -437,3 +489,30 @@ class AIODuplexHostTestCase(AIOHostTestCase): class AIODuplexDirectHostTestCase(AIOHostTestCase): system_mode = constants.SYSTEM_MODE_DUPLEX_DIRECT + + +class AIODuplexSystemTestCase(AIODuplexHostTestCase): + + def setUp(self): + super(AIODuplexSystemTestCase, self).setUp() + self.host2 = self._create_test_host(constants.CONTROLLER, + constants.WORKER, + unit=1) + self._create_test_host_cpus(self.host2, platform=2, vswitch=2, + application=11) + + +class ProvisionedAIODuplexSystemTestCase(ProvisionedAIOHostTestCase): + system_mode = constants.SYSTEM_MODE_DUPLEX + + def setUp(self): + super(ProvisionedAIODuplexSystemTestCase, self).setUp() + self.host2 = self._create_test_host(constants.CONTROLLER, + constants.WORKER, + unit=1, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=constants.AVAILABILITY_AVAILABLE, + vim_progress_status=constants.VIM_SERVICES_ENABLED) + self._create_test_host_cpus(self.host2, platform=2, vswitch=2, + application=11) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/db/sqlalchemy/test_migrations.py b/sysinv/sysinv/sysinv/sysinv/tests/db/sqlalchemy/test_migrations.py index 9c6758a884..4e211d76e8 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/db/sqlalchemy/test_migrations.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/db/sqlalchemy/test_migrations.py @@ -48,14 +48,17 @@ from six.moves.urllib.parse import urlparse import mock import sqlalchemy import sqlalchemy.exc +import subprocess from migrate.versioning import repository from oslo_db.sqlalchemy import utils as db_utils +from oslo_concurrency import lockutils +from oslo_config import cfg from oslo_log import log as logging from sqlalchemy import MetaData, Table -from sysinv.openstack.common import lockutils import sysinv.db.sqlalchemy.migrate_repo +from sysinv.tests import conf_fixture from sysinv.tests import utils as test_utils LOG = logging.getLogger(__name__) @@ -198,13 +201,11 @@ class BaseMigrationTestCase(test_utils.BaseTestCase): super(BaseMigrationTestCase, self).tearDown() def execute_cmd(self, cmd=None): - from future import standard_library - standard_library.install_aliases() - from subprocess import getstatusoutput - status, output = getstatusoutput(cmd) - + process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + output = process.communicate()[0] LOG.debug(output) - self.assertEqual(0, status, + self.assertEqual(0, process.returncode, "Failed to run: %s\n%s" % (cmd, output)) @lockutils.synchronized('pgadmin', 'tests-', external=True) @@ -417,8 +418,8 @@ class TestWalkVersions(test_utils.BaseTestCase, WalkVersionsMixin): self._post_downgrade_043.assert_called_with(self.engine) - @mock.patch.object(WalkVersionsMixin, '_migrate_up') @mock.patch.object(WalkVersionsMixin, '_migrate_down') + @mock.patch.object(WalkVersionsMixin, '_migrate_up') def test_walk_versions_all_default(self, _migrate_up, _migrate_down): self.REPOSITORY.latest = 20 
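
A note on the decorator swaps in TestWalkVersions (here and in the hunks that follow): stacked @mock.patch.object decorators are applied bottom-up, so the decorator closest to the function supplies the first injected argument. Reordering them makes the _migrate_up/_migrate_down parameters line up with the mocks they actually receive, and the assertions can then target those parameters directly. A tiny sketch of the rule (Thing is illustrative only):

    import mock

    class Thing(object):
        def up(self):
            return 'real up'

        def down(self):
            return 'real down'

    @mock.patch.object(Thing, 'down')  # applied second -> second parameter
    @mock.patch.object(Thing, 'up')    # applied first  -> first parameter
    def check(mock_up, mock_down):
        thing = Thing()
        thing.up()
        thing.down()
        mock_up.assert_called_once_with()
        mock_down.assert_called_once_with()

    check()
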
self.migration_api.db_version.return_value = self.INIT_VERSION @@ -432,13 +433,13 @@ class TestWalkVersions(test_utils.BaseTestCase, WalkVersionsMixin): versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1) upgraded = [mock.call(None, v, with_data=True) for v in versions] - self.assertEqual(self._migrate_up.call_args_list, upgraded) + self.assertEqual(_migrate_up.call_args_list, upgraded) downgraded = [mock.call(None, v - 1) for v in reversed(versions)] - self.assertEqual(self._migrate_down.call_args_list, downgraded) + self.assertEqual(_migrate_down.call_args_list, downgraded) - @mock.patch.object(WalkVersionsMixin, '_migrate_up') @mock.patch.object(WalkVersionsMixin, '_migrate_down') + @mock.patch.object(WalkVersionsMixin, '_migrate_up') def test_walk_versions_all_true(self, _migrate_up, _migrate_down): self.REPOSITORY.latest = 20 self.migration_api.db_version.return_value = self.INIT_VERSION @@ -453,7 +454,7 @@ class TestWalkVersions(test_utils.BaseTestCase, WalkVersionsMixin): upgraded.extend( [mock.call(self.engine, v) for v in reversed(versions)] ) - self.assertEqual(upgraded, self._migrate_up.call_args_list) + self.assertEqual(upgraded, _migrate_up.call_args_list) downgraded_1 = [ mock.call(self.engine, v - 1, with_data=True) for v in versions @@ -463,10 +464,10 @@ class TestWalkVersions(test_utils.BaseTestCase, WalkVersionsMixin): downgraded_2.append(mock.call(self.engine, v - 1)) downgraded_2.append(mock.call(self.engine, v - 1)) downgraded = downgraded_1 + downgraded_2 - self.assertEqual(self._migrate_down.call_args_list, downgraded) + self.assertEqual(_migrate_down.call_args_list, downgraded) - @mock.patch.object(WalkVersionsMixin, '_migrate_up') @mock.patch.object(WalkVersionsMixin, '_migrate_down') + @mock.patch.object(WalkVersionsMixin, '_migrate_up') def test_walk_versions_true_false(self, _migrate_up, _migrate_down): self.REPOSITORY.latest = 20 self.migration_api.db_version.return_value = self.INIT_VERSION @@ -479,15 +480,15 @@ class TestWalkVersions(test_utils.BaseTestCase, WalkVersionsMixin): for v in versions: upgraded.append(mock.call(self.engine, v, with_data=True)) upgraded.append(mock.call(self.engine, v)) - self.assertEqual(upgraded, self._migrate_up.call_args_list) + self.assertEqual(upgraded, _migrate_up.call_args_list) downgraded = [ mock.call(self.engine, v - 1, with_data=True) for v in versions ] - self.assertEqual(self._migrate_down.call_args_list, downgraded) + self.assertEqual(_migrate_down.call_args_list, downgraded) - @mock.patch.object(WalkVersionsMixin, '_migrate_up') @mock.patch.object(WalkVersionsMixin, '_migrate_down') + @mock.patch.object(WalkVersionsMixin, '_migrate_up') def test_walk_versions_all_false(self, _migrate_up, _migrate_down): self.REPOSITORY.latest = 20 self.migration_api.db_version.return_value = self.INIT_VERSION @@ -499,7 +500,7 @@ class TestWalkVersions(test_utils.BaseTestCase, WalkVersionsMixin): upgraded = [ mock.call(self.engine, v, with_data=True) for v in versions ] - self.assertEqual(upgraded, self._migrate_up.call_args_list) + self.assertEqual(upgraded, _migrate_up.call_args_list) class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin): @@ -519,6 +520,8 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin): def setUp(self): super(TestMigrations, self).setUp() + self.useFixture(conf_fixture.ConfFixture(cfg.CONF)) + if six.PY2: version = -1 else: @@ -665,13 +668,11 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin): def test_postgresql_opportunistically(self): # Test is skipped 
because postgresql isn't present/configured on target # server and will cause errors. Skipped to prevent Jenkins notification. - self.skipTest("Skipping to prevent postgres from throwing error in Jenkins") self._test_postgresql_opportunistically() def test_postgresql_connect_fail(self): # Test is skipped because postgresql isn't present/configured on target # server and will cause errors. Skipped to prevent Jenkins notification. - self.skipTest("Skipping to prevent postgres from throwing error in Jenkins") """Test that we can trigger a postgres connection failure Test that we can fail gracefully to ensure we don't break people @@ -744,7 +745,7 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin): 'forinodeid': 'Integer', 'core': 'Integer', 'thread': 'Integer', 'cpu_family': 'String', 'cpu_model': 'String', 'allocated_function': 'String', 'capabilities': 'Text', 'forihostid': 'Integer', # 'coProcessors': 'String', - 'forinodeid': 'Integer', 'deleted_at': 'DateTime', + 'deleted_at': 'DateTime', 'created_at': 'DateTime', 'updated_at': 'DateTime' } for col, coltype in cpus_col.items(): diff --git a/sysinv/sysinv/sysinv/sysinv/tests/db/utils.py b/sysinv/sysinv/sysinv/sysinv/tests/db/utils.py index 50ee69d583..6e7f44211f 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/db/utils.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/db/utils.py @@ -77,6 +77,7 @@ properties = { int_uninitialized = 999 SW_VERSION = '0.0' +SW_VERSION_NEW = '1.0' def get_test_node(**kw): @@ -100,7 +101,7 @@ def create_test_node(**kw): if 'id' not in kw: del node['id'] dbapi = db_api.get_instance() - return dbapi.inode_create(node) + return dbapi.inode_create(node['forihostid'], node) def post_get_test_ihost(**kw): @@ -222,9 +223,10 @@ def create_test_isystem(**kw): def get_test_load(**kw): load = { - "software_version": SW_VERSION, - "compatible_version": "N/A", + "software_version": kw.get("software_version", SW_VERSION), + "compatible_version": kw.get("compatible_version", "N/A"), "required_patches": "N/A", + "state": kw.get("state", constants.ACTIVE_LOAD_STATE), } return load @@ -235,6 +237,19 @@ def create_test_load(**kw): return dbapi.load_create(load) +def get_test_upgrade(**kw): + upgrade = {'from_load': kw.get('from_load', 1), + 'to_load': kw.get('to_load', 2), + 'state': kw.get('state', constants.UPGRADE_STARTING)} + return upgrade + + +def create_test_upgrade(**kw): + upgrade = get_test_upgrade(**kw) + dbapi = db_api.get_instance() + return dbapi.software_upgrade_create(upgrade) + + def post_get_test_kube_upgrade(**kw): upgrade = get_test_kube_upgrade(**kw) del upgrade['id'] @@ -329,14 +344,36 @@ def create_test_user(**kw): return dbapi.iuser_create(user) +# Create test helm override object +def get_test_helm_overrides(**kw): + helm_overrides = { + 'id': kw.get('id'), + 'name': kw.get('name'), + 'namespace': kw.get('namespace'), + 'user_overrides': kw.get('user_overrides', None), + 'system_overrides': kw.get('system_overrides', None), + 'app_id': kw.get('app_id', None) + } + return helm_overrides + + +def create_test_helm_overrides(**kw): + helm_overrides = get_test_helm_overrides(**kw) + # Let DB generate ID if it isn't specified explicitly + if 'id' not in kw: + del helm_overrides['id'] + dbapi = db_api.get_instance() + return dbapi.helm_override_create(helm_overrides) + + # Create test ntp object def get_test_ntp(**kw): ntp = { 'id': kw.get('id'), 'uuid': kw.get('uuid'), - 'enabled': kw.get('enabled'), 'ntpservers': kw.get('ntpservers'), - 'forisystemid': kw.get('forisystemid', None) + 'forisystemid': 
kw.get('forisystemid', None), + 'isystem_uuid': kw.get('isystem_uuid', None) } return ntp @@ -350,6 +387,14 @@ def create_test_ntp(**kw): return dbapi.intp_create(ntp) +def post_get_test_ntp(**kw): + ntp = get_test_ntp(**kw) + # When invoking a POST the following fields should not be populated: + del ntp['uuid'] + del ntp['id'] + return ntp + + # Create test ptp object def get_test_ptp(**kw): ptp = { @@ -393,6 +438,16 @@ def create_test_dns(**kw): return dbapi.idns_create(dns) +def post_get_test_dns(**kw): + dns = get_test_dns(**kw) + + # When invoking a POST the following fields should not be populated: + del dns['uuid'] + del dns['id'] + + return dns + + # Create test drbd object def get_test_drbd(**kw): drbd = { @@ -862,7 +917,8 @@ def get_test_ethernet_port(**kw): 'sriov_numvfs': kw.get('sriov_numvfs'), 'sriov_vf_driver': kw.get('sriov_vf_driver'), 'sriov_vf_pdevice_id': kw.get('sriov_vf_pdevice_id'), - 'driver': kw.get('driver') + 'driver': kw.get('driver'), + 'numa_node': kw.get('numa_node', -1) } return ethernet_port @@ -1169,6 +1225,7 @@ def get_test_app(**kw): 'manifest_file': kw.get('manifest_file', constants.APP_TARFILE_NAME_PLACEHOLDER), 'status': kw.get('status', constants.APP_UPLOAD_IN_PROGRESS), + 'active': kw.get('active', False), } return app_data @@ -1218,3 +1275,28 @@ def create_test_pci_devices(**kw): del pci_devices['id'] dbapi = db_api.get_instance() return dbapi.pci_device_create(pci_devices['host_id'], pci_devices) + + +def get_test_label(**kw): + label = { + 'host_id': kw.get('host_id'), + 'label_key': kw.get('label_key'), + 'label_value': kw.get('label_value'), + } + return label + + +def create_test_label(**kw): + """Create test label in DB and return label object. + Function to be used to create test label objects in the database. + :param kw: kwargs with overriding values for labels's attributes. + :returns: Test label DB object. + """ + label = get_test_label(**kw) + dbapi = db_api.get_instance() + return dbapi.label_create(label['host_id'], label) + + +def create_test_oam(**kw): + dbapi = db_api.get_instance() + return dbapi.iextoam_get_one() diff --git a/sysinv/sysinv/sysinv/sysinv/tests/helm/base.py b/sysinv/sysinv/sysinv/sysinv/tests/helm/base.py index 40ad31e4b7..2c114a613e 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/helm/base.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/helm/base.py @@ -16,3 +16,17 @@ class HelmTestCaseMixin(object): self.useFixture(keyring_fixture.KeyringBackend()) mock.patch('sysinv.common.utils.is_virtual', return_value=False).start() + + def assertOverridesParameters(self, overrides, parameters): + """Validate the overrides contains the supplied parameters""" + if not isinstance(overrides, dict) and not isinstance(parameters, dict): + self.assertEqual(overrides, parameters) + else: + for key, value in parameters.items(): + self.assertIn(key, overrides) + if isinstance(value, dict): + for subkey, subvalue in value.items(): + self.assertOverridesParameters(overrides[key][subkey], + subvalue) + else: + self.assertEqual(overrides.get(key), value) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/helm/test_dex.py b/sysinv/sysinv/sysinv/sysinv/tests/helm/test_dex.py new file mode 100644 index 0000000000..fb16778555 --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/tests/helm/test_dex.py @@ -0,0 +1,65 @@ +# Copyright (c) 2020 Wind River Systems, Inc. 
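
The assertOverridesParameters helper added to HelmTestCaseMixin above lets each helm test pin down only the override keys it cares about while ignoring the rest of the generated chart values. A simplified stand-alone variant of the same subset check; it uses plain asserts and a slightly flatter recursion than the helper itself, so treat it as an approximation:

    def assert_contains(overrides, parameters):
        # Recursively assert every expected key/value appears in overrides.
        if not isinstance(parameters, dict):
            assert overrides == parameters
            return
        for key, value in parameters.items():
            assert key in overrides
            assert_contains(overrides[key], value)

    generated = {'config': {'issuer': 'https://oam/dex', 'debug': False},
                 'replicas': 1}
    assert_contains(generated, {'config': {'issuer': 'https://oam/dex'}})
    assert_contains(generated, {'replicas': 1})
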
+# +# SPDX-License-Identifier: Apache-2.0 +# +from sysinv.common import constants +from sysinv.common import utils +from sysinv.db import api as dbapi +from sysinv.helm import common + +from sysinv.tests.db import base as dbbase +from sysinv.tests.db import utils as dbutils +from sysinv.tests.helm import base +from sysinv.tests.helm import test_helm + + +class DexTestCase(test_helm.StxPlatformAppMixin, + base.HelmTestCaseMixin): + + def setUp(self): + super(DexTestCase, self).setUp() + self.app = dbutils.create_test_app(name='oidc-auth-apps') + self.dbapi = dbapi.get_instance() + + def test_issuer(self): + overrides = self.operator.get_helm_chart_overrides( + common.HELM_CHART_DEX, + cnamespace=common.HELM_NS_KUBE_SYSTEM) + + oam_addr_name = utils.format_address_name(constants.CONTROLLER_HOSTNAME, + constants.NETWORK_TYPE_OAM) + oam_address = self.dbapi.address_get_by_name(oam_addr_name) + config_issuer = "https://%s:30556/dex" % (utils.format_url_address(oam_address.address)) + self.assertOverridesParameters(overrides, { + # issuer is set properly + 'config': {'issuer': config_issuer} + }) + + +class DexIPv4ControllerHostTestCase(DexTestCase, + dbbase.ProvisionedControllerHostTestCase): + + def test_replicas(self): + overrides = self.operator.get_helm_chart_overrides( + common.HELM_CHART_DEX, + cnamespace=common.HELM_NS_KUBE_SYSTEM) + + self.assertOverridesParameters(overrides, { + # 1 replica for 1 controller + 'replicas': 1 + }) + + +class DexIPv6AIODuplexSystemTestCase(DexTestCase, + dbbase.BaseIPv6Mixin, + dbbase.ProvisionedAIODuplexSystemTestCase): + + def test_replicas(self): + overrides = self.operator.get_helm_chart_overrides( + common.HELM_CHART_DEX, + cnamespace=common.HELM_NS_KUBE_SYSTEM) + + self.assertOverridesParameters(overrides, { + # 2 replicas for 2 controllers + 'replicas': 2 + }) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/helm/test_helm.py b/sysinv/sysinv/sysinv/sysinv/tests/helm/test_helm.py index c4ab6939b9..bcee0c6c2b 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/helm/test_helm.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/helm/test_helm.py @@ -6,8 +6,11 @@ import keyring import mock +from sysinv.common import constants +from sysinv.helm import common from sysinv.helm.helm import HelmOperator from sysinv.helm.manifest_base import ArmadaManifestOperator + from sysinv.tests.db import base as dbbase from sysinv.tests.db import utils as dbutils from sysinv.tests.helm import base as helm_base @@ -17,15 +20,38 @@ class StxOpenstackAppMixin(object): path_name = 'stx-openstack.tgz' app_name = 'stx-openstack' + def setUp(self): + super(StxOpenstackAppMixin, self).setUp() + # Label hosts with appropriate labels + for host in self.hosts: + if host.personality == constants.CONTROLLER: + dbutils.create_test_label( + host_id=host.id, + label_key=common.LABEL_CONTROLLER, + label_value=common.LABEL_VALUE_ENABLED) + elif host.personality == constants.WORKER: + dbutils.create_test_label( + host_id=host.id, + label_key=common.LABEL_COMPUTE_LABEL, + label_value=common.LABEL_VALUE_ENABLED) -class HelmOperatorTestSuite(helm_base.HelmTestCaseMixin): - """When HelmOperatorTestSuite is added as a Mixin + +class StxPlatformAppMixin(object): + path_name = 'stx-platform.tgz' + app_name = 'oidc-auth-apps' + + def setUp(self): + super(StxPlatformAppMixin, self).setUp() + + +class HelmOperatorTestSuiteMixin(helm_base.HelmTestCaseMixin): + """When HelmOperatorTestSuiteMixin is added as a Mixin alongside a subclass of BaseHostTestCase these testcases are added to it This also requires an AppMixin 
to provide app_name """ def setUp(self): - super(HelmOperatorTestSuite, self).setUp() + super(HelmOperatorTestSuiteMixin, self).setUp() self.app = dbutils.create_test_app(name=self.app_name) # If a ceph keyring entry is missing, a subprocess will be invoked # so a fake keyring password is being supplied here. @@ -54,7 +80,7 @@ class HelmOperatorTestSuite(helm_base.HelmTestCaseMixin): self.addCleanup(write_file.stop) def tearDown(self): - super(HelmOperatorTestSuite, self).tearDown() + super(HelmOperatorTestSuiteMixin, self).tearDown() @mock.patch.object(HelmOperator, '_write_chart_overrides') def test_generate_helm_chart_overrides(self, mock_write_chart): @@ -73,7 +99,7 @@ class HelmOperatorTestSuite(helm_base.HelmTestCaseMixin): class HelmSTXOpenstackControllerTestCase(StxOpenstackAppMixin, dbbase.BaseIPv6Mixin, dbbase.BaseCephStorageBackendMixin, - HelmOperatorTestSuite, + HelmOperatorTestSuiteMixin, dbbase.ControllerHostTestCase): pass @@ -85,6 +111,19 @@ class HelmSTXOpenstackControllerTestCase(StxOpenstackAppMixin, # - stx-openstack app class HelmSTXOpenstackAIOTestCase(StxOpenstackAppMixin, dbbase.BaseCephStorageBackendMixin, - HelmOperatorTestSuite, + HelmOperatorTestSuiteMixin, dbbase.AIOSimplexHostTestCase): pass + + +# Test Configuration: +# - Controller +# - IPv6 +# - Ceph Storage +# - stx-platform app +class HelmSTXPlatformControllerTestCase(StxPlatformAppMixin, + dbbase.BaseIPv6Mixin, + dbbase.BaseCephStorageBackendMixin, + HelmOperatorTestSuiteMixin, + dbbase.ControllerHostTestCase): + pass diff --git a/sysinv/sysinv/sysinv/sysinv/tests/helm/test_nova_api_proxy.py b/sysinv/sysinv/sysinv/sysinv/tests/helm/test_nova_api_proxy.py new file mode 100644 index 0000000000..5a2c2f12b0 --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/tests/helm/test_nova_api_proxy.py @@ -0,0 +1,47 @@ +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +from sysinv.helm import common + +from sysinv.tests.db import base as dbbase +from sysinv.tests.db import utils as dbutils +from sysinv.tests.helm import base +from sysinv.tests.helm import test_helm + + +class NovaApiProxyTestCase(test_helm.StxOpenstackAppMixin, + base.HelmTestCaseMixin): + + def setUp(self): + super(NovaApiProxyTestCase, self).setUp() + self.app = dbutils.create_test_app(name=self.app_name) + + +class NovaApiProxyIPv4ControllerHostTestCase(NovaApiProxyTestCase, + dbbase.ControllerHostTestCase): + + def test_replicas(self): + overrides = self.operator.get_helm_chart_overrides( + common.HELM_CHART_NOVA_API_PROXY, + cnamespace=common.HELM_NS_OPENSTACK) + + self.assertOverridesParameters(overrides, { + # Only one replica for a single controller + 'pod': {'replicas': {'proxy': 1}} + }) + + +class NovaApiProxyIPv4AIODuplexSystemTestCase(NovaApiProxyTestCase, + dbbase.AIODuplexSystemTestCase): + + def test_replicas(self): + overrides = self.operator.get_helm_chart_overrides( + common.HELM_CHART_NOVA_API_PROXY, + cnamespace=common.HELM_NS_OPENSTACK) + + self.assertOverridesParameters(overrides, { + # Expect two replicas because there are two controllers + 'pod': {'replicas': {'proxy': 2}} + }) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/helm/test_oidc_client.py b/sysinv/sysinv/sysinv/sysinv/tests/helm/test_oidc_client.py new file mode 100644 index 0000000000..bd2ff5c2b9 --- /dev/null +++ b/sysinv/sysinv/sysinv/sysinv/tests/helm/test_oidc_client.py @@ -0,0 +1,71 @@ +# Copyright (c) 2020 Wind River Systems, Inc. 
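
The dex test above and the oidc-client test that follows both build their expected issuer/redirect URLs by passing the OAM address through utils.format_url_address() first. The reason is IPv6: an IPv6 literal must be bracketed before a port can be appended to a URL, while IPv4 passes through untouched. A rough reimplementation for illustration only; the real helper lives in sysinv.common.utils and may differ in detail:

    import netaddr

    def format_url_address(address):
        # Bracket IPv6 literals so ':<port>' stays unambiguous.
        if netaddr.IPAddress(address).version == 6:
            return '[%s]' % address
        return address

    url = 'https://%s:30556/dex' % format_url_address('10.10.10.2')
    assert url == 'https://10.10.10.2:30556/dex'

    url = 'https://%s:30556/dex' % format_url_address('fd00::2')
    assert url == 'https://[fd00::2]:30556/dex'
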
+# +# SPDX-License-Identifier: Apache-2.0 +# + +from sysinv.helm import common + +from sysinv.common import constants +from sysinv.common import utils +from sysinv.db import api as dbapi +from sysinv.tests.db import base as dbbase +from sysinv.tests.db import utils as dbutils +from sysinv.tests.helm import base +from sysinv.tests.helm import test_helm + + +class OidcClientTestCase(test_helm.StxPlatformAppMixin, + base.HelmTestCaseMixin): + + def setUp(self): + super(OidcClientTestCase, self).setUp() + self.app = dbutils.create_test_app(name=self.app_name) + self.dbapi = dbapi.get_instance() + + def test_addresses(self): + overrides = self.operator.get_helm_chart_overrides( + common.HELM_CHART_OIDC_CLIENT, + cnamespace=common.HELM_NS_KUBE_SYSTEM) + oam_addr_name = utils.format_address_name(constants.CONTROLLER_HOSTNAME, + constants.NETWORK_TYPE_OAM) + address = self.dbapi.address_get_by_name(oam_addr_name) + oam_url = utils.format_url_address(address.address) + parameters = { + 'config': { + 'issuer': 'https://%s:30556/dex' % oam_url, + 'redirect_uri': "http://%s:30555/callback" % oam_url, + } + } + self.assertOverridesParameters(overrides, parameters) + + +class OidcClientIPv4ControllerHostTestCase(OidcClientTestCase, + dbbase.ProvisionedControllerHostTestCase): + def test_replicas(self): + overrides = self.operator.get_helm_chart_overrides( + common.HELM_CHART_DEX, + cnamespace=common.HELM_NS_KUBE_SYSTEM) + + self.assertOverridesParameters(overrides, { + # Only one replica for a single controller + 'replicas': 1 + }) + + +class OidcClientIPv6ControllerHostTestCase(OidcClientTestCase, + dbbase.BaseIPv6Mixin, + dbbase.ProvisionedControllerHostTestCase): + pass + + +class OidcClientIPv4AIODuplexSystemTestCase(OidcClientTestCase, + dbbase.ProvisionedAIODuplexSystemTestCase): + def test_replicas(self): + overrides = self.operator.get_helm_chart_overrides( + common.HELM_CHART_DEX, + cnamespace=common.HELM_NS_KUBE_SYSTEM) + + self.assertOverridesParameters(overrides, { + # Expect two replicas because there are two controllers + 'replicas': 2 + }) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/objects/test_host.py b/sysinv/sysinv/sysinv/sysinv/tests/objects/test_host.py index 93d2ba2ea8..62ec06e4c0 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/objects/test_host.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/objects/test_host.py @@ -7,6 +7,7 @@ # SPDX-License-Identifier: Apache-2.0 # # +import mock from sysinv.db import api as db_api from sysinv.db.sqlalchemy import models @@ -26,48 +27,37 @@ class TestHostObject(base.DbTestCase): def test_load(self): uuid = self.fake_node['uuid'] - self.mox.StubOutWithMock(self.dbapi, 'ihost_get') - - self.dbapi.ihost_get(uuid).AndReturn(self.obj_node) - self.mox.ReplayAll() - - objects.host.get_by_uuid(self.admin_context, uuid) - self.mox.VerifyAll() - # TODO(deva): add tests for load-on-demand info, eg. 
ports, - # once Port objects are created + with mock.patch.object(self.dbapi, "ihost_get") as host_get_mock: + host_get_mock.return_value = self.obj_node + objects.host.get_by_uuid(self.admin_context, uuid) + host_get_mock.assert_called_once_with(uuid) def test_save(self): uuid = self.fake_node['uuid'] - self.mox.StubOutWithMock(self.dbapi, 'ihost_get') - self.mox.StubOutWithMock(self.dbapi, 'ihost_update') - - self.dbapi.ihost_get(uuid).AndReturn(self.obj_node) - self.dbapi.ihost_update(uuid, {'location': {"City": "property"}}) - self.mox.ReplayAll() - - n = objects.host.get_by_uuid(self.admin_context, uuid) - n.location = {"City": "property"} - n.save() - self.mox.VerifyAll() + with mock.patch.object(self.dbapi, "ihost_get") as host_get_mock: + host_get_mock.return_value = self.obj_node + with mock.patch.object(self.dbapi, "ihost_update") as host_update_mock: + # These next 3 lines are the unit test + n = objects.host.get_by_uuid(self.admin_context, uuid) + n.location = {"City": "property"} + n.save() + # verify the routines were called as expected + host_get_mock.assert_called_once_with(uuid) + host_update_mock.assert_called_once_with(uuid, + {'location': {"City": "property"}}) def test_refresh(self): uuid = self.fake_node['uuid'] - self.mox.StubOutWithMock(self.dbapi, 'ihost_get') - first_obj = objects.host.from_db_object(self._get_db_node( dict(self.fake_node, location={"City": "first"}))) second_obj = objects.host.from_db_object(self._get_db_node( dict(self.fake_node, location={"City": "second"}))) - - self.dbapi.ihost_get(uuid).AndReturn(first_obj) - self.dbapi.ihost_get(uuid).AndReturn(second_obj) - self.mox.ReplayAll() - - n = objects.host.get_by_uuid(self.admin_context, uuid) - self.assertEqual(n.location, {"City": "first"}) - n.refresh() - self.assertEqual(n.location, {"City": "second"}) - self.mox.VerifyAll() + with mock.patch.object(self.dbapi, "ihost_get") as host_get_mock: + host_get_mock.side_effect = iter([first_obj, second_obj]) + n = objects.host.get_by_uuid(self.admin_context, uuid) + self.assertEqual(n.location, {"City": "first"}) + n.refresh() + self.assertEqual(n.location, {"City": "second"}) def test_objectify(self): diff --git a/sysinv/sysinv/sysinv/sysinv/tests/objects/test_kube_host_upgrade.py b/sysinv/sysinv/sysinv/sysinv/tests/objects/test_kube_host_upgrade.py index ef00fe3e6b..627a3fac23 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/objects/test_kube_host_upgrade.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/objects/test_kube_host_upgrade.py @@ -7,6 +7,7 @@ # SPDX-License-Identifier: Apache-2.0 # # +import mock from sysinv.db import api as db_api from sysinv.db.sqlalchemy import models @@ -26,46 +27,40 @@ class TestKubeHostUpgradesObject(base.DbTestCase): def test_load(self): uuid = self.fake_upgrade_data['uuid'] - self.mox.StubOutWithMock(self.dbapi, 'kube_host_upgrade_get') - - self.dbapi.kube_host_upgrade_get(uuid).AndReturn(self.obj_data) - self.mox.ReplayAll() - - objects.kube_host_upgrade.get_by_uuid(self.admin_context, uuid) - self.mox.VerifyAll() + with mock.patch.object(self.dbapi, + "kube_host_upgrade_get") as get_mock: + get_mock.return_value = self.obj_data + objects.kube_host_upgrade.get_by_uuid(self.admin_context, uuid) + get_mock.assert_called_once_with(uuid) def test_save(self): uuid = self.fake_upgrade_data['uuid'] - self.mox.StubOutWithMock(self.dbapi, 'kube_host_upgrade_get') - self.mox.StubOutWithMock(self.dbapi, 'kube_host_upgrade_update') - - self.dbapi.kube_host_upgrade_get(uuid).AndReturn(self.obj_data) - 
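
The test_refresh conversions in these object tests lean on a second mock feature: when side_effect is an iterable, each call to the mock consumes the next element, so one patched dbapi getter can hand back the pre-refresh object first and the post-refresh object second. The behaviour in isolation, with mock.Mock standing in for the patched call:

    import mock

    getter = mock.Mock(side_effect=iter(['first', 'second']))
    assert getter() == 'first'   # initial get_by_uuid()
    assert getter() == 'second'  # refresh()

    # A plain list also works; mock wraps the iterable internally.
    getter = mock.Mock(side_effect=['first', 'second'])
    assert getter() == 'first'
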
self.dbapi.kube_host_upgrade_update(uuid, {'status': "upgrading"}) - self.mox.ReplayAll() - - n = objects.kube_host_upgrade.get_by_uuid(self.admin_context, uuid) - n.status = "upgrading" - n.save() - self.mox.VerifyAll() + with mock.patch.object(self.dbapi, + "kube_host_upgrade_get") as get_mock: + with mock.patch.object(self.dbapi, + "kube_host_upgrade_update") as update_mock: + get_mock.return_value = self.obj_data + n = objects.kube_host_upgrade.get_by_uuid(self.admin_context, + uuid) + n.status = "upgrading" + n.save() + update_mock.assert_called_once_with(uuid, + {'status': "upgrading"}) def test_refresh(self): uuid = self.fake_upgrade_data['uuid'] - self.mox.StubOutWithMock(self.dbapi, 'kube_host_upgrade_get') - first_obj = objects.kube_host_upgrade.from_db_object(self._get_db_data( dict(self.fake_upgrade_data, target_version='v1.42.1'))) second_obj = objects.kube_host_upgrade.from_db_object(self._get_db_data( dict(self.fake_upgrade_data, target_version='v1.42.2'))) - self.dbapi.kube_host_upgrade_get(uuid).AndReturn(first_obj) - self.dbapi.kube_host_upgrade_get(uuid).AndReturn(second_obj) - self.mox.ReplayAll() - - n = objects.kube_host_upgrade.get_by_uuid(self.admin_context, uuid) - self.assertEqual(n.target_version, 'v1.42.1') - n.refresh() - self.assertEqual(n.target_version, 'v1.42.2') - self.mox.VerifyAll() + with mock.patch.object(self.dbapi, + "kube_host_upgrade_get") as get_mock: + get_mock.side_effect = iter([first_obj, second_obj]) + n = objects.kube_host_upgrade.get_by_uuid(self.admin_context, uuid) + self.assertEqual(n.target_version, 'v1.42.1') + n.refresh() + self.assertEqual(n.target_version, 'v1.42.2') def test_objectify(self): diff --git a/sysinv/sysinv/sysinv/sysinv/tests/objects/test_kube_upgrade.py b/sysinv/sysinv/sysinv/sysinv/tests/objects/test_kube_upgrade.py index b58688f017..cfecd2c880 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/objects/test_kube_upgrade.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/objects/test_kube_upgrade.py @@ -7,6 +7,7 @@ # SPDX-License-Identifier: Apache-2.0 # # +import mock from sysinv.db import api as db_api from sysinv.db.sqlalchemy import models @@ -26,46 +27,38 @@ class TestKubeUpgradesObject(base.DbTestCase): def test_load(self): uuid = self.fake_upgrade_data['uuid'] - self.mox.StubOutWithMock(self.dbapi, 'kube_upgrade_get') - - self.dbapi.kube_upgrade_get(uuid).AndReturn(self.obj_data) - self.mox.ReplayAll() - - objects.kube_upgrade.get_by_uuid(self.admin_context, uuid) - self.mox.VerifyAll() + with mock.patch.object(self.dbapi, + "kube_upgrade_get") as get_mock: + get_mock.return_value = self.obj_data + objects.kube_upgrade.get_by_uuid(self.admin_context, uuid) + get_mock.assert_called_once_with(uuid) def test_save(self): uuid = self.fake_upgrade_data['uuid'] - self.mox.StubOutWithMock(self.dbapi, 'kube_upgrade_get') - self.mox.StubOutWithMock(self.dbapi, 'kube_upgrade_update') - - self.dbapi.kube_upgrade_get(uuid).AndReturn(self.obj_data) - self.dbapi.kube_upgrade_update(uuid, {'state': "upgrading"}) - self.mox.ReplayAll() - - n = objects.kube_upgrade.get_by_uuid(self.admin_context, uuid) - n.state = "upgrading" - n.save() - self.mox.VerifyAll() + with mock.patch.object(self.dbapi, + "kube_upgrade_get") as get_mock: + with mock.patch.object(self.dbapi, + "kube_upgrade_update") as update_mock: + get_mock.return_value = self.obj_data + n = objects.kube_upgrade.get_by_uuid(self.admin_context, uuid) + n.state = "upgrading" + n.save() + update_mock.assert_called_once_with(uuid, + {'state': "upgrading"}) def test_refresh(self): uuid 
= self.fake_upgrade_data['uuid'] - self.mox.StubOutWithMock(self.dbapi, 'kube_upgrade_get') - first_obj = objects.kube_upgrade.from_db_object(self._get_db_data( dict(self.fake_upgrade_data, to_version='v1.42.1'))) second_obj = objects.kube_upgrade.from_db_object(self._get_db_data( dict(self.fake_upgrade_data, to_version='v1.42.2'))) - - self.dbapi.kube_upgrade_get(uuid).AndReturn(first_obj) - self.dbapi.kube_upgrade_get(uuid).AndReturn(second_obj) - self.mox.ReplayAll() - - n = objects.kube_upgrade.get_by_uuid(self.admin_context, uuid) - self.assertEqual(n.to_version, 'v1.42.1') - n.refresh() - self.assertEqual(n.to_version, 'v1.42.2') - self.mox.VerifyAll() + with mock.patch.object(self.dbapi, + "kube_upgrade_get") as get_mock: + get_mock.side_effect = iter([first_obj, second_obj]) + n = objects.kube_upgrade.get_by_uuid(self.admin_context, uuid) + self.assertEqual(n.to_version, 'v1.42.1') + n.refresh() + self.assertEqual(n.to_version, 'v1.42.2') def test_objectify(self): diff --git a/sysinv/sysinv/sysinv/sysinv/tests/objects/test_objects.py b/sysinv/sysinv/sysinv/sysinv/tests/objects/test_objects.py index 51b353acd1..51a3b492fd 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/objects/test_objects.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/objects/test_objects.py @@ -230,7 +230,7 @@ def things_temporarily_local(): base.SysinvObject.indirection_api = _api -class _TestObject(object): +class _TestObjectMixin(object): def test_hydration_type_error(self): primitive = {'sysinv_object.name': 'MyObj', 'sysinv_object.namespace': 'sysinv', @@ -298,7 +298,7 @@ class _TestObject(object): raised = False ex_out = "" try: - obj.foobar + obj.foobar # pylint: disable=no-member except NotImplementedError as ex: ex_out = str(ex) raised = True @@ -443,7 +443,7 @@ class _TestObject(object): self.assertFalse('does_not_exist' in obj) -class TestObject(_LocalTest, _TestObject): +class TestObject(_LocalTest, _TestObjectMixin): pass diff --git a/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_interface.py b/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_interface.py index 36f0e5d354..90a264ba64 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_interface.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_interface.py @@ -298,8 +298,9 @@ class InterfaceTestCaseMixin(base.PuppetTestCaseMixin): @puppet.puppet_context def _update_context(self): + # interface is added as an operator by systemconfig.puppet_plugins self.context = \ - self.operator.interface._create_interface_context(self.host) + self.operator.interface._create_interface_context(self.host) # pylint: disable=no-member # Update the puppet context with generated interface context self.operator.context.update(self.context) @@ -354,43 +355,43 @@ class InterfaceTestCase(InterfaceTestCaseMixin, dbbase.BaseHostTestCase): self.assertFalse(result) def test_get_port_interface_id_index(self): - index = self.operator.interface._get_port_interface_id_index(self.host) + index = self.operator.interface._get_port_interface_id_index(self.host) # pylint: disable=no-member for port in self.ports: self.assertTrue(port['interface_id'] in index) self.assertEqual(index[port['interface_id']], port) def test_get_port_pciaddr_index(self): - index = self.operator.interface._get_port_pciaddr_index(self.host) + index = self.operator.interface._get_port_pciaddr_index(self.host) # pylint: disable=no-member for port in self.ports: self.assertTrue(port['pciaddr'] in index) self.assertIn(port, index[port['pciaddr']]) def test_get_interface_name_index(self): - index = 
self.operator.interface._get_interface_name_index(self.host) + index = self.operator.interface._get_interface_name_index(self.host) # pylint: disable=no-member for iface in self.interfaces: self.assertTrue(iface['ifname'] in index) self.assertEqual(index[iface['ifname']], iface) def test_get_network_type_index(self): - index = self.operator.interface._get_network_type_index() + index = self.operator.interface._get_network_type_index() # pylint: disable=no-member for network in self.networks: self.assertTrue(network['type'] in index) self.assertEqual(index[network['type']], network) def test_get_address_interface_name_index(self): - index = self.operator.interface._get_address_interface_name_index(self.host) + index = self.operator.interface._get_address_interface_name_index(self.host) # pylint: disable=no-member for address in self.addresses: self.assertTrue(address['ifname'] in index) self.assertIn(address, index[address['ifname']]) def test_get_routes_interface_name_index(self): - index = self.operator.interface._get_routes_interface_name_index(self.host) + index = self.operator.interface._get_routes_interface_name_index(self.host) # pylint: disable=no-member for route in self.routes: self.assertTrue(route['ifname'] in index) self.assertIn(route, index[route['ifname']]) def test_get_gateway_index(self): - index = self.operator.interface._get_gateway_index() + index = self.operator.interface._get_gateway_index() # pylint: disable=no-member self.assertEqual(len(index), 2) self.assertEqual(index[constants.NETWORK_TYPE_MGMT], str(self.mgmt_gateway_address)) @@ -1072,7 +1073,9 @@ class InterfaceTestCase(InterfaceTestCaseMixin, dbbase.BaseHostTestCase): def _get_sriov_config(self, ifname='default', vf_driver=constants.SRIOV_DRIVER_TYPE_VFIO, - vf_addrs=[""]): + vf_addrs=None): + if vf_addrs is None: + vf_addrs = [""] config = {'ifname': ifname, 'vf_driver': vf_driver, 'vf_addrs': vf_addrs} @@ -1548,12 +1551,12 @@ class InterfaceHostTestCase(InterfaceTestCaseMixin, dbbase.BaseHostTestCase): hieradata_directory = self._create_hieradata_directory() config_filename = self._get_config_filename(hieradata_directory) with open(config_filename, 'w') as config_file: - config = self.operator.interface.get_host_config(self.host) + config = self.operator.interface.get_host_config(self.host) # pylint: disable=no-member self.assertIsNotNone(config) yaml.dump(config, config_file, default_flow_style=False) def test_create_interface_context(self): - context = self.operator.interface._create_interface_context(self.host) + context = self.operator.interface._create_interface_context(self.host) # pylint: disable=no-member self.assertIn('personality', context) self.assertIn('subfunctions', context) self.assertIn('devices', context) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_puppet.py b/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_puppet.py index c5d4c08c89..007cd92d1b 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_puppet.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_puppet.py @@ -7,8 +7,8 @@ from sysinv.tests.db import base as dbbase from sysinv.tests.puppet import base -class PuppetOperatorTestSuite(base.PuppetTestCaseMixin): - """When PuppetOperatorTestSuite is added as a Mixin +class PuppetOperatorTestSuiteMixin(base.PuppetTestCaseMixin): + """When PuppetOperatorTestSuiteMixin is added as a Mixin to a testcase which is a subclass of BaseHostTestCase these testcases are added to it """ @@ -29,60 +29,61 @@ class PuppetOperatorTestSuite(base.PuppetTestCaseMixin): 
self.operator.update_secure_system_config() assert self.mock_write_config.called + # self.host is defined in BaseHostTestCase def test_update_host_config(self): - self.operator.update_host_config(self.host) + self.operator.update_host_config(self.host) # pylint: disable=no-member assert self.mock_write_config.called # ============= IPv4 environment tests ============== # Tests all puppet operations for a Controller (defaults to IPv4) -class PlatformIPv4ControllerHostTestCase(PuppetOperatorTestSuite, +class PlatformIPv4ControllerHostTestCase(PuppetOperatorTestSuiteMixin, dbbase.ControllerHostTestCase): pass # Tests all puppet operations for a Worker (defaults to IPv4) -class PlatformIPv4WorkerHostTestCase(PuppetOperatorTestSuite, +class PlatformIPv4WorkerHostTestCase(PuppetOperatorTestSuiteMixin, dbbase.WorkerHostTestCase): pass # Tests all puppet operations for a Storage Host (defaults to IPv4) -class PlatformIPv4StorageHostTestCase(PuppetOperatorTestSuite, +class PlatformIPv4StorageHostTestCase(PuppetOperatorTestSuiteMixin, dbbase.StorageHostTestCase): pass # Tests all puppet operations for an AIO Host (defaults to IPv4) -class PlatformIPv4AIOHostTestCase(PuppetOperatorTestSuite, +class PlatformIPv4AIOHostTestCase(PuppetOperatorTestSuiteMixin, dbbase.AIOHostTestCase): pass # ============= IPv6 environment tests ============== # Tests all puppet operations for a Controller using IPv6 -class PlatformIPv6ControllerHostTestCase(PuppetOperatorTestSuite, +class PlatformIPv6ControllerHostTestCase(PuppetOperatorTestSuiteMixin, dbbase.BaseIPv6Mixin, dbbase.ControllerHostTestCase): pass # Tests all puppet operations for a Worker using IPv6 -class PlatformIPv6WorkerHostTestCase(PuppetOperatorTestSuite, +class PlatformIPv6WorkerHostTestCase(PuppetOperatorTestSuiteMixin, dbbase.BaseIPv6Mixin, dbbase.WorkerHostTestCase): pass # Tests all puppet operations for a Storage Host using IPv6 -class PlatformIPv6StorageHostTestCase(PuppetOperatorTestSuite, +class PlatformIPv6StorageHostTestCase(PuppetOperatorTestSuiteMixin, dbbase.BaseIPv6Mixin, dbbase.StorageHostTestCase): pass # Tests all puppet operations for an AIO Host using IPv6 -class PlatformIPv6AIOHostTestCase(PuppetOperatorTestSuite, +class PlatformIPv6AIOHostTestCase(PuppetOperatorTestSuiteMixin, dbbase.BaseIPv6Mixin, dbbase.AIOHostTestCase): pass @@ -90,7 +91,7 @@ class PlatformIPv6AIOHostTestCase(PuppetOperatorTestSuite, # ============= Ceph Backend environment tests ============== # Tests all puppet operations for an AIO Host using IPv4 and Ceph Backend -class PlatformCephBackendAIOHostTestCase(PuppetOperatorTestSuite, +class PlatformCephBackendAIOHostTestCase(PuppetOperatorTestSuiteMixin, dbbase.BaseCephStorageBackendMixin, dbbase.AIOHostTestCase): pass diff --git a/sysinv/sysinv/sysinv/sysinv/tests/test_utils.py b/sysinv/sysinv/sysinv/sysinv/tests/test_utils.py index e283848c4a..489a7923e0 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/test_utils.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/test_utils.py @@ -17,15 +17,14 @@ import errno import mock +import netaddr import os import os.path +import six.moves.builtins as __builtin__ import tempfile -import netaddr from oslo_config import cfg -from mox3 import mox -from six.moves import builtins from sysinv.common import exception from sysinv.common import utils from sysinv.tests import base @@ -42,39 +41,28 @@ class BareMetalUtilsTestCase(base.TestCase): self.assertEqual(len(s), 100) def test_unlink(self): - self.mox.StubOutWithMock(os, "unlink") - os.unlink("/fake/path") - - self.mox.ReplayAll() - 
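
These test_utils rewrites follow the same shift from mox's record/replay/verify cycle to patch-then-assert: patch os.unlink, run the code under test, then assert on the recorded call. A self-contained sketch with a stand-in for sysinv.common.utils.unlink_without_raise (the two-argument OSError is deliberate so e.errno is actually populated):

    import errno
    import os

    import mock

    def unlink_without_raise(path):
        # Stand-in for the helper under test: ignore missing files only.
        try:
            os.unlink(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    with mock.patch.object(os, 'unlink') as unlink_mock:
        unlink_mock.side_effect = OSError(errno.ENOENT, 'No such file')
        unlink_without_raise('/fake/path')  # error is swallowed
        unlink_mock.assert_called_once_with('/fake/path')
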
utils.unlink_without_raise("/fake/path") - self.mox.UnsetStubs() - self.mox.VerifyAll() + with mock.patch.object(os, "unlink") as unlink_mock: + unlink_mock.return_value = None + utils.unlink_without_raise("/fake/path") + unlink_mock.assert_called_once_with("/fake/path") def test_unlink_ENOENT(self): - self.mox.StubOutWithMock(os, "unlink") - os.unlink("/fake/path").AndRaise(OSError(errno.ENOENT)) - - self.mox.ReplayAll() - utils.unlink_without_raise("/fake/path") - self.mox.UnsetStubs() - self.mox.VerifyAll() + with mock.patch.object(os, "unlink") as unlink_mock: + unlink_mock.side_effect = OSError(errno.ENOENT) + utils.unlink_without_raise("/fake/path") + unlink_mock.assert_called_once_with("/fake/path") def test_create_link(self): - self.mox.StubOutWithMock(os, "symlink") - os.symlink("/fake/source", "/fake/link") - - self.mox.ReplayAll() - utils.create_link_without_raise("/fake/source", "/fake/link") - self.mox.VerifyAll() + with mock.patch.object(os, "symlink") as symlink_mock: + symlink_mock.return_value = None + utils.create_link_without_raise("/fake/source", "/fake/link") + symlink_mock.assert_called_once_with("/fake/source", "/fake/link") def test_create_link_EEXIST(self): - self.mox.StubOutWithMock(os, "symlink") - os.symlink("/fake/source", "/fake/link").AndRaise( - OSError(errno.EEXIST)) - - self.mox.ReplayAll() - utils.create_link_without_raise("/fake/source", "/fake/link") - self.mox.VerifyAll() + with mock.patch.object(os, "symlink") as symlink_mock: + symlink_mock.side_effect = OSError(errno.EEXIST) + utils.create_link_without_raise("/fake/source", "/fake/link") + symlink_mock.assert_called_once_with("/fake/source", "/fake/link") class ExecuteTestCase(base.TestCase): @@ -186,39 +174,46 @@ class GenericUtilsTestCase(base.TestCase): self.assertEqual("hello", utils.sanitize_hostname(hostname)) def test_read_cached_file(self): - self.mox.StubOutWithMock(os.path, "getmtime") - os.path.getmtime(mox.IgnoreArg()).AndReturn(1) - self.mox.ReplayAll() + with mock.patch.object(os.path, "getmtime") as getmtime_mock: + getmtime_mock.return_value = 1 - cache_data = {"data": 1123, "mtime": 1} - data = utils.read_cached_file("/this/is/a/fake", cache_data) - self.assertEqual(cache_data["data"], data) + cache_data = {"data": 1123, "mtime": 1} + data = utils.read_cached_file("/this/is/a/fake", cache_data) + self.assertEqual(cache_data["data"], data) + getmtime_mock.assert_called_once_with(mock.ANY) def test_read_modified_cached_file(self): - self.mox.StubOutWithMock(os.path, "getmtime") - self.mox.StubOutWithMock(builtins, 'open') - os.path.getmtime(mox.IgnoreArg()).AndReturn(2) + with mock.patch.object(os.path, "getmtime") as getmtime_mock: + with mock.patch.object(__builtin__, 'open') as open_mock: + getmtime_mock.return_value = 2 + fake_contents = "lorem ipsum" + fake_file = mock.Mock() + fake_file.read.return_value = fake_contents + fake_context_manager = mock.MagicMock() + fake_context_manager.__enter__.return_value = fake_file + fake_context_manager.__exit__.return_value = None + open_mock.return_value = fake_context_manager - fake_contents = "lorem ipsum" - fake_file = self.mox.CreateMockAnything() - fake_file.read().AndReturn(fake_contents) - fake_context_manager = mock.Mock() - fake_context_manager.__enter__ = mock.Mock(return_value=fake_file) - fake_context_manager.__exit__ = mock.Mock(return_value=False) - builtins.open(mox.IgnoreArg()).AndReturn(fake_context_manager) + cache_data = {"data": 1123, "mtime": 1} + self.reload_called = False - self.mox.ReplayAll() - cache_data = 
{"data": 1123, "mtime": 1} - self.reload_called = False + def test_reload(reloaded_data): + self.assertEqual(fake_contents, reloaded_data) + self.reload_called = True - def test_reload(reloaded_data): - self.assertEqual(reloaded_data, fake_contents) - self.reload_called = True + data = utils.read_cached_file("/this/is/a/fake", + cache_data, + reload_func=test_reload) - data = utils.read_cached_file("/this/is/a/fake", cache_data, - reload_func=test_reload) - self.assertEqual(data, fake_contents) - self.assertTrue(self.reload_called) + self.assertEqual(fake_contents, data) + self.assertTrue(self.reload_called) + getmtime_mock.assert_called_once_with(mock.ANY) + open_mock.assert_called_once_with(mock.ANY) + fake_file.read.assert_called_once_with() + fake_context_manager.__exit__.assert_called_once_with(mock.ANY, + mock.ANY, + mock.ANY) + fake_context_manager.__enter__.assert_called_once_with() def test_is_valid_boolstr(self): self.assertTrue(utils.is_valid_boolstr('true')) @@ -314,30 +309,31 @@ class GenericUtilsTestCase(base.TestCase): class MkfsTestCase(base.TestCase): - def test_mkfs(self): - self.mox.StubOutWithMock(utils, 'execute') - utils.execute('mkfs', '-t', 'ext4', '-F', '/my/block/dev') - utils.execute('mkfs', '-t', 'msdos', '/my/msdos/block/dev') - utils.execute('mkswap', '/my/swap/block/dev') - self.mox.ReplayAll() - + @mock.patch.object(utils, 'execute') + def test_mkfs(self, execute_mock): utils.mkfs('ext4', '/my/block/dev') utils.mkfs('msdos', '/my/msdos/block/dev') utils.mkfs('swap', '/my/swap/block/dev') - def test_mkfs_with_label(self): - self.mox.StubOutWithMock(utils, 'execute') - utils.execute('mkfs', '-t', 'ext4', '-F', - '-L', 'ext4-vol', '/my/block/dev') - utils.execute('mkfs', '-t', 'msdos', - '-n', 'msdos-vol', '/my/msdos/block/dev') - utils.execute('mkswap', '-L', 'swap-vol', '/my/swap/block/dev') - self.mox.ReplayAll() + expected = [mock.call('mkfs', '-t', 'ext4', '-F', '/my/block/dev'), + mock.call('mkfs', '-t', 'msdos', '/my/msdos/block/dev'), + mock.call('mkswap', '/my/swap/block/dev')] + self.assertEqual(expected, execute_mock.call_args_list) + @mock.patch.object(utils, 'execute') + def test_mkfs_with_label(self, execute_mock): utils.mkfs('ext4', '/my/block/dev', 'ext4-vol') utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol') utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol') + expected = [mock.call('mkfs', '-t', 'ext4', '-F', '-L', 'ext4-vol', + '/my/block/dev'), + mock.call('mkfs', '-t', 'msdos', '-n', 'msdos-vol', + '/my/msdos/block/dev'), + mock.call('mkswap', '-L', 'swap-vol', + '/my/swap/block/dev')] + self.assertEqual(expected, execute_mock.call_args_list) + class IntLikeTestCase(base.TestCase): diff --git a/sysinv/sysinv/sysinv/sysinv/tests/utils.py b/sysinv/sysinv/sysinv/sysinv/tests/utils.py index de4d0700e6..e32abf5c57 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/utils.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/utils.py @@ -17,23 +17,18 @@ """Common utilities used in testing""" +import fixtures import os import tempfile - -import fixtures -from oslo_config import cfg import testtools -from sysinv.openstack.common.fixture import moxstubout +from oslo_config import cfg class BaseTestCase(testtools.TestCase): def setUp(self, conf=cfg.CONF): super(BaseTestCase, self).setUp() - moxfixture = self.useFixture(moxstubout.MoxStubout()) - self.mox = moxfixture.mox - self.stubs = moxfixture.stubs self.conf = conf self.addCleanup(self.conf.reset) self.useFixture(fixtures.FakeLogger('openstack.common')) @@ -45,8 +40,6 @@ class 
BaseTestCase(testtools.TestCase): def tearDown(self): super(BaseTestCase, self).tearDown() self.conf.reset() - self.stubs.UnsetAll() - self.stubs.SmartUnsetAll() def create_tempfiles(self, files, ext='.conf'): tempfiles = [] diff --git a/sysinv/sysinv/sysinv/test-requirements.txt b/sysinv/sysinv/sysinv/test-requirements.txt index e8a051f647..030d094866 100644 --- a/sysinv/sysinv/sysinv/test-requirements.txt +++ b/sysinv/sysinv/sysinv/test-requirements.txt @@ -1,12 +1,12 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. -hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 +hacking>=1.1.0,<=2.0.0 # Apache-2.0 +pycodestyle>=2.0.0 # MIT License coverage>=3.6 discover -fixtures>=0.3.14 -mock<1.1.0,>=1.0 -mox +fixtures>=3.0.0 # Apache-2.0/BSD +mock>=2.0.0 # BSD passlib>=1.7.0 psycopg2-binary python-barbicanclient<3.1.0,>=3.0.1 @@ -14,8 +14,8 @@ python-subunit>=0.0.18 requests-mock>=0.6.0 # Apache-2.0 sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 oslosphinx<2.6.0,>=2.5.0 # Apache-2.0 -oslotest<1.6.0,>=1.5.1 # Apache-2.0 -stestr +oslotest>=3.2.0 # Apache-2.0 +stestr>=1.0.0 # Apache-2.0 testrepository>=0.0.18 testtools!=1.2.0,>=0.9.36 tempest-lib<0.5.0,>=0.4.0 diff --git a/sysinv/sysinv/sysinv/tox.ini b/sysinv/sysinv/sysinv/tox.ini index 65791aaaa1..e4c91e168a 100644 --- a/sysinv/sysinv/sysinv/tox.ini +++ b/sysinv/sysinv/sysinv/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = flake8,py27,py35,pylint +envlist = flake8,py27,py36,pylint minversion = 1.6 # skipsdist = True #,pip-missing-reqs @@ -55,11 +55,6 @@ commands = find . -type f -name "*.pyc" -delete [flake8] -# Note: hacking pulls in flake8 2.5.5 which can not parse an ignore list spanning multiple lines -# E series are pep8 -# E126 continuation line over-indented for hanging indent -# E127 continuation line over-indented for visual indent -# E128 continuation line under-indented for visual indent # H series are hacking # H101 is TODO # H102 is apache license @@ -73,13 +68,40 @@ commands = # H701 Empty localization string # H702 Formatting operation should be outside of localization method call # H703 Multiple positional placeholders -ignore = E126,E127,E128,H101,H102,H104,H105,H306,H401,H403,H404,H405,H701,H702,H703 -exclude = build,dist,tools + +# B series are bugbear +# B006 Do not use mutable data structures for argument defaults. Needs to be FIXED. +# B007 Loop control variable not used within the loop body. +# B009 Do not call getattr with a constant attribute value +# B010 Do not call setattr with a constant attribute value +# B012 return/continue/break inside finally blocks cause exceptions to be silenced +# B014 Redundant exception types +# B301 Python 3 does not include `.iter*` methods on dictionaries. (this should be suppressed on a per line basis) +# B306 `BaseException.message` has been deprecated. 
Needs to be FIXED + +# W series are warnings +# W503 line break before binary operator +# W504 line break after binary operator +# W605 invalid escape sequence + +# E series are pep8 +# E117 over-indented +# E126 continuation line over-indented for hanging indent +# E127 continuation line over-indented for visual indent +# E128 continuation line under-indented for visual indent +# E402 module level import not at top of file + +ignore = H101,H102,H104,H105,H306,H401,H403,H404,H405,H701,H702,H703, + B006,B007,B009,B010,B012,B014,B301,B306, + W503,W504,W605, + E117,E126,E127,E128,E402 +exclude = build,dist,tools,.eggs max-line-length=120 [testenv:flake8] -basepython = python2.7 +basepython = python3 deps = -r{toxinidir}/test-requirements.txt + flake8-bugbear commands = flake8 {posargs} . \ scripts/manage-partitions \ @@ -92,8 +114,8 @@ commands = stestr run {posargs} stestr slowest -[testenv:py35] -basepython = python3.5 +[testenv:py36] +basepython = python3.6 commands = {[testenv]commands} stestr run {posargs} diff --git a/tsconfig/centos/build_srpm.data b/tsconfig/centos/build_srpm.data index e574177685..6d0c787215 100644 --- a/tsconfig/centos/build_srpm.data +++ b/tsconfig/centos/build_srpm.data @@ -1,2 +1,2 @@ SRC_DIR="tsconfig" -TIS_PATCH_VER=10 +TIS_PATCH_VER=11 diff --git a/tsconfig/tsconfig/tsconfig/tests/test_basics.py b/tsconfig/tsconfig/tsconfig/tests/test_basics.py index 3c444d0097..cd40c394c6 100644 --- a/tsconfig/tsconfig/tsconfig/tests/test_basics.py +++ b/tsconfig/tsconfig/tsconfig/tests/test_basics.py @@ -60,7 +60,7 @@ sdn_enabled=no region_config=no system_mode=duplex sw_version=19.09 -security_feature="nopti nospectre_v2" +security_feature="nopti nospectre_v2 nospectre_v1" vswitch_type=ovs-dpdk """ @@ -82,7 +82,7 @@ region_2_name=Region2 distributed_cloud_role=CloudRole system_mode=duplex sw_version=19.09 -security_feature="nopti nospectre_v2" +security_feature="nopti nospectre_v2 nospectre_v1" vswitch_type=ovs-dpdk """
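
Of the bugbear checks added to the ignore list above, B006 is the one this change already begins to fix: _get_sriov_config in test_interface.py drops its vf_addrs=[""] default in favour of a None sentinel. A mutable default is created once at function definition and shared across every call, so mutations leak between callers; building the list inside the body avoids that. A small demonstration of the pattern (get_config is illustrative, not the sysinv helper):

    def get_config(ifname='default', vf_addrs=None):
        # B006 fix: materialize the mutable default inside the call.
        if vf_addrs is None:
            vf_addrs = [""]
        return {'ifname': ifname, 'vf_addrs': vf_addrs}

    first = get_config()
    first['vf_addrs'].append('0000:81:00.0')
    # Each call gets a fresh list; nothing leaked from the previous call.
    assert get_config()['vf_addrs'] == [""]
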