Browse Source

Merge remote-tracking branch 'starlingx/master' into f/centos8

Change-Id: I3d182a19798182a62382921b45f84b75bb70f628
Signed-off-by: Saul Wold <sgw@linux.intel.com>
changes/37/705837/1
Saul Wold 11 months ago
parent
commit
8ac6ec70cb
100 changed files with 190 additions and 16742 deletions
  1. +21
    -20
      .zuul.yaml
  2. +1
    -1
      api-ref/source/conf.py
  3. +7
    -6
      api-ref/source/index.rst
  4. +1
    -1
      controllerconfig/centos/build_srpm.data
  5. +1
    -5
      controllerconfig/centos/controllerconfig.spec
  6. +1
    -25
      controllerconfig/controllerconfig/controllerconfig/__init__.py
  7. +0
    -1690
      controllerconfig/controllerconfig/controllerconfig/backup_restore.py
  8. +0
    -712
      controllerconfig/controllerconfig/controllerconfig/clone.py
  9. +0
    -371
      controllerconfig/controllerconfig/controllerconfig/common/configobjects.py
  10. +1
    -63
      controllerconfig/controllerconfig/controllerconfig/common/constants.py
  11. +0
    -102
      controllerconfig/controllerconfig/controllerconfig/common/crypt.py
  12. +0
    -44
      controllerconfig/controllerconfig/controllerconfig/common/dcmanager.py
  13. +1
    -36
      controllerconfig/controllerconfig/controllerconfig/common/exceptions.py
  14. +2
    -3
      controllerconfig/controllerconfig/controllerconfig/common/keystone.py
  15. +0
    -49
      controllerconfig/controllerconfig/controllerconfig/common/log.py
  16. +3
    -17
      controllerconfig/controllerconfig/controllerconfig/common/rest_api_utils.py
  17. +0
    -1189
      controllerconfig/controllerconfig/controllerconfig/common/validator.py
  18. +0
    -4746
      controllerconfig/controllerconfig/controllerconfig/configassistant.py
  19. +0
    -285
      controllerconfig/controllerconfig/controllerconfig/openstack.py
  20. +0
    -31
      controllerconfig/controllerconfig/controllerconfig/progress.py
  21. +0
    -629
      controllerconfig/controllerconfig/controllerconfig/regionconfig.py
  22. +0
    -579
      controllerconfig/controllerconfig/controllerconfig/sysinv_api.py
  23. +0
    -499
      controllerconfig/controllerconfig/controllerconfig/systemconfig.py
  24. +0
    -0
     
  25. +0
    -78
      controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.share.keystoneonly
  26. +0
    -78
      controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.share.keystoneonly.result
  27. +0
    -77
      controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.shareall
  28. +0
    -81
      controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.shareall.result
  29. +0
    -1
      controllerconfig/controllerconfig/controllerconfig/tests/files/certificate.pem
  30. +0
    -62
      controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.ceph
  31. +0
    -62
      controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.default
  32. +0
    -62
      controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.ipv6
  33. +0
    -76
      controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.kubernetes
  34. +0
    -94
      controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.region
  35. +0
    -94
      controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.region_nuage_vrs
  36. +0
    -0
     
  37. +0
    -72
      controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.lag.vlan
  38. +0
    -82
      controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.lag.vlan.result
  39. +0
    -81
      controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.nuage_vrs
  40. +0
    -73
      controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.nuage_vrs.result
  41. +0
    -77
      controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.security
  42. +0
    -73
      controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.security.result
  43. +0
    -77
      controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple
  44. +0
    -78
      controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple.can_ips
  45. +0
    -73
      controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple.result
  46. +0
    -55
      controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.ceph
  47. +0
    -53
      controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.ipv6
  48. +0
    -70
      controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.kubernetes
  49. +0
    -55
      controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.lag.vlan
  50. +0
    -49
      controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.pxeboot
  51. +0
    -51
      controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.security
  52. +0
    -63
      controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simple
  53. +0
    -46
      controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simplex
  54. +0
    -24
      controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simplex_mgmt
  55. +0
    -52
      controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.static_addr
  56. +0
    -103
      controllerconfig/controllerconfig/controllerconfig/tests/test_answerfile.py
  57. +0
    -759
      controllerconfig/controllerconfig/controllerconfig/tests/test_region_config.py
  58. +0
    -601
      controllerconfig/controllerconfig/controllerconfig/tests/test_system_config.py
  59. +3
    -4
      controllerconfig/controllerconfig/controllerconfig/tidy_storage.py
  60. +20
    -21
      controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py
  61. +7
    -6
      controllerconfig/controllerconfig/controllerconfig/upgrades/management.py
  62. +6
    -4
      controllerconfig/controllerconfig/controllerconfig/upgrades/utils.py
  63. +3
    -808
      controllerconfig/controllerconfig/controllerconfig/utils.py
  64. +1
    -9
      controllerconfig/controllerconfig/pylint.rc
  65. +0
    -42
      controllerconfig/controllerconfig/scripts/finish_install_clone.sh
  66. +0
    -321
      controllerconfig/controllerconfig/scripts/install_clone.py
  67. +0
    -30
      controllerconfig/controllerconfig/scripts/keyringstaging
  68. +1
    -4
      controllerconfig/controllerconfig/setup.py
  69. +2
    -10
      controllerconfig/controllerconfig/tox.ini
  70. +2
    -2
      controllerconfig/controllerconfig/upgrade-scripts/16-neutron-move-bindings-off-controller-1.py
  71. +2
    -2
      controllerconfig/controllerconfig/upgrade-scripts/20-sysinv-retire-ceph-cache-tier-sp.py
  72. +0
    -3
      controllerconfig/opensuse/controllerconfig.spec
  73. +1
    -1
      doc/source/conf.py
  74. +15
    -12
      doc/source/index.rst
  75. +1
    -1
      releasenotes/source/conf.py
  76. +3
    -3
      releasenotes/source/index.rst
  77. +1
    -1
      sysinv/cgts-client/centos/build_srpm.data
  78. +3
    -0
      sysinv/cgts-client/centos/cgts-client.spec
  79. +8
    -0
      sysinv/cgts-client/cgts-client/.coveragerc
  80. +32
    -0
      sysinv/cgts-client/cgts-client/cgtsclient/_i18n.py
  81. +1
    -1
      sysinv/cgts-client/cgts-client/cgtsclient/client.py
  82. +16
    -0
      sysinv/cgts-client/cgts-client/cgtsclient/common/base.py
  83. +1
    -1
      sysinv/cgts-client/cgts-client/cgtsclient/common/http.py
  84. +4
    -7
      sysinv/cgts-client/cgts-client/cgtsclient/common/utils.py
  85. +1
    -0
      sysinv/cgts-client/cgts-client/cgtsclient/common/wrapping_formatters.py
  86. +0
    -14
      sysinv/cgts-client/cgts-client/cgtsclient/openstack/__init__.py
  87. +0
    -14
      sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/__init__.py
  88. +0
    -254
      sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/config/generator.py
  89. +0
    -50
      sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/gettextutils.py
  90. +0
    -66
      sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/importutils.py
  91. +0
    -16
      sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/__init__.py
  92. +0
    -118
      sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/cmd.py
  93. +0
    -228
      sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/filters.py
  94. +0
    -151
      sysinv/cgts-client/cgts-client/cgtsclient/openstack/common/rootwrap/wrapper.py
  95. +1
    -0
      sysinv/cgts-client/cgts-client/cgtsclient/shell.py
  96. +5
    -0
      sysinv/cgts-client/cgts-client/cgtsclient/v1/health.py
  97. +5
    -0
      sysinv/cgts-client/cgts-client/cgtsclient/v1/health_shell.py
  98. +2
    -1
      sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py
  99. +1
    -1
      sysinv/cgts-client/cgts-client/cgtsclient/v1/icpu.py
  100. +3
    -1
      sysinv/cgts-client/cgts-client/cgtsclient/v1/isystem_shell.py

+ 21
- 20
.zuul.yaml View File

@@ -8,28 +8,28 @@
jobs:
- openstack-tox-linters
- sysinv-tox-py27
- sysinv-tox-py35
- sysinv-tox-py36
- sysinv-tox-flake8
- sysinv-tox-pylint
- sysinv-tox-bandit
- controllerconfig-tox-flake8
- controllerconfig-tox-py27
- controllerconfig-tox-pylint
- cgtsclient-tox-py27
- cgtsclient-tox-py36
- cgtsclient-tox-pep8
- cgtsclient-tox-pylint
gate:
jobs:
- openstack-tox-linters
- sysinv-tox-py27
- sysinv-tox-py35
- sysinv-tox-py36
- sysinv-tox-flake8
- sysinv-tox-pylint
- sysinv-tox-bandit
- controllerconfig-tox-flake8
- controllerconfig-tox-py27
- controllerconfig-tox-pylint
- cgtsclient-tox-py27
- cgtsclient-tox-py36
- cgtsclient-tox-pep8
- cgtsclient-tox-pylint

@@ -50,11 +50,11 @@
tox_extra_args: -c sysinv/sysinv/sysinv/tox.ini

- job:
name: sysinv-tox-py35
name: sysinv-tox-py36
parent: tox
description: |
Run py35 test for sysinv
nodeset: ubuntu-xenial
Run py36 test for sysinv
nodeset: ubuntu-bionic
required-projects:
- starlingx/fault
- starlingx/update
@@ -62,7 +62,7 @@
files:
- sysinv/sysinv/*
vars:
tox_envlist: py35
tox_envlist: py36
tox_extra_args: -c sysinv/sysinv/sysinv/tox.ini

- job:
@@ -112,18 +112,6 @@
tox_envlist: flake8
tox_extra_args: -c controllerconfig/controllerconfig/tox.ini

- job:
name: controllerconfig-tox-py27
parent: tox
description: Run py27 tests for controllerconfig
required-projects:
- starlingx/fault
files:
- controllerconfig/*
vars:
tox_envlist: py27
tox_extra_args: -c controllerconfig/controllerconfig/tox.ini

- job:
name: controllerconfig-tox-pylint
parent: tox
@@ -171,6 +159,19 @@
tox_envlist: py27
tox_extra_args: -c sysinv/cgts-client/cgts-client/tox.ini

- job:
name: cgtsclient-tox-py36
parent: tox
description: |
Run py36 test for cgts-client
nodeset: ubuntu-bionic
files:
- sysinv/cgts-client/*
vars:
tox_envlist: py36
tox_extra_args: -c sysinv/cgts-client/cgts-client/tox.ini


- job:
name: cgtsclient-tox-pep8
parent: tox


+ 1
- 1
api-ref/source/conf.py View File

@@ -51,7 +51,7 @@ master_doc = 'index'

# General information about the project.
repository_name = 'openstack/stx-config'
project = u'stx-config'
project = u'StarlingX Configuration'
bug_project = 'starlingx'
bug_tag = 'stx.config'



+ 7
- 6
api-ref/source/index.rst View File

@@ -1,12 +1,13 @@
========================
stx-config API Reference
========================
===========================
Configuration API Reference
===========================

Use the StarlingX stx-config API for system configuration management.
Use the StarlingX Configuration API for system configuration management.

stx-config API content can be searched using the :ref:`search page <search>`.
Search Configuration API content using the :ref:`search page <search>`.

API Reference
-------------
API reference
-------------

.. toctree::


+ 1
- 1
controllerconfig/centos/build_srpm.data View File

@@ -1,2 +1,2 @@
SRC_DIR="controllerconfig"
TIS_PATCH_VER=151
TIS_PATCH_VER=152

+ 1
- 5
controllerconfig/centos/controllerconfig.spec View File

@@ -57,10 +57,7 @@ mkdir -p $RPM_BUILD_ROOT/wheels
install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/

install -d -m 755 %{buildroot}%{local_bindir}
install -p -D -m 700 scripts/keyringstaging %{buildroot}%{local_bindir}/keyringstaging
install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
install -p -D -m 700 scripts/install_clone.py %{buildroot}%{local_bindir}/install_clone
install -p -D -m 700 scripts/finish_install_clone.sh %{buildroot}%{local_bindir}/finish_install_clone.sh

install -d -m 755 %{buildroot}%{local_goenabledd}
install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh
@@ -74,13 +71,12 @@ install -p -D -m 755 upgrade-scripts/* %{buildroot}%{local_etc_upgraded}/

install -d -m 755 %{buildroot}%{local_etc_systemd}
install -p -D -m 664 scripts/controllerconfig.service %{buildroot}%{local_etc_systemd}/controllerconfig.service
#install -p -D -m 664 scripts/config.service %{buildroot}%{local_etc_systemd}/config.service

%post
systemctl enable controllerconfig.service

%clean
rm -rf $RPM_BUILD_ROOT
rm -rf $RPM_BUILD_ROOT

%files
%defattr(-,root,root,-)


+ 1
- 25
controllerconfig/controllerconfig/controllerconfig/__init__.py View File

@@ -1,34 +1,10 @@
#
# Copyright (c) 2015-2019 Wind River Systems, Inc.
# Copyright (c) 2015-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from controllerconfig.common.validator import validate # noqa: F401
from controllerconfig.common.configobjects import Network # noqa: F401
from controllerconfig.common.configobjects import DEFAULT_CONFIG # noqa: F401
from controllerconfig.common.configobjects import REGION_CONFIG # noqa: F401
from controllerconfig.common.configobjects import DEFAULT_NAMES # noqa: F401
from controllerconfig.common.configobjects import HP_NAMES # noqa: F401
from controllerconfig.common.configobjects import SUBCLOUD_CONFIG # noqa: F401
from controllerconfig.common.configobjects import MGMT_TYPE # noqa: F401
from controllerconfig.common.configobjects import INFRA_TYPE # noqa: F401
from controllerconfig.common.configobjects import OAM_TYPE # noqa: F401
from controllerconfig.common.configobjects import NETWORK_PREFIX_NAMES # noqa: F401
from controllerconfig.common.configobjects import HOST_XML_ATTRIBUTES # noqa: F401
from controllerconfig.common.configobjects import DEFAULT_DOMAIN_NAME # noqa: F401
from controllerconfig.common.exceptions import ConfigError # noqa: F401
from controllerconfig.common.exceptions import ConfigFail # noqa: F401
from controllerconfig.common.exceptions import ValidateFail # noqa: F401
from controllerconfig.utils import is_valid_vlan # noqa: F401
from controllerconfig.utils import is_mtu_valid # noqa: F401
from controllerconfig.utils import validate_network_str # noqa: F401
from controllerconfig.utils import validate_address_str # noqa: F401
from controllerconfig.utils import validate_address # noqa: F401
from controllerconfig.utils import is_valid_url # noqa: F401
from controllerconfig.utils import is_valid_domain_or_ip # noqa: F401
from controllerconfig.utils import ip_version_to_string # noqa: F401
from controllerconfig.utils import lag_mode_to_str # noqa: F401
from controllerconfig.utils import validate_openstack_password # noqa: F401
from controllerconfig.utils import validate_nameserver_address_str # noqa: F401
from controllerconfig.utils import extract_openstack_password_rules_from_file # noqa: F401

+ 0
- 1690
controllerconfig/controllerconfig/controllerconfig/backup_restore.py
File diff suppressed because it is too large
View File


+ 0
- 712
controllerconfig/controllerconfig/controllerconfig/clone.py View File

@@ -1,712 +0,0 @@
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
Clone a Configured System and Install the image on another
identical hardware or the same hardware.
"""

from __future__ import print_function
import os
import re
import glob
import time
import shutil
import netaddr
import tempfile
import fileinput
import subprocess

from controllerconfig.common import constants
from sysinv.common import constants as si_const
from controllerconfig import sysinv_api
import tsconfig.tsconfig as tsconfig
from controllerconfig.common import log
from controllerconfig.common.exceptions import CloneFail
from controllerconfig.common.exceptions import BackupFail
from controllerconfig import utils
from controllerconfig import backup_restore

# Module debug flag — presumably toggled by hand during development;
# not read anywhere in the visible code (TODO confirm other users).
DEBUG = False
LOG = log.get_logger(__name__)
# Shared sink for suppressing subprocess stderr (see create_iso finally).
DEVNULL = open(os.devnull, 'w')
# Directory (inside the iso / at the fs root) holding the clone archive.
CLONE_ARCHIVE_DIR = "clone-archive"
# Name of the ini file describing a clone iso (written by create_ini_file,
# read by clone_status).
CLONE_ISO_INI = ".cloneiso.ini"
# Keys used in the clone ini file.
NAME = "name"
INSTALLED = "installed_at"
RESULT = "result"
# Values of the RESULT key.
IN_PROGRESS = "in-progress"
FAIL = "failed"
OK = "ok"


def clone_status():
    """Report the status of the last install-clone on this node.

    Reads the clone ini file from the archive dir (preferred) or the
    platform config dir and prints a human-readable status line.
    """
    candidates = [
        os.path.join("/", CLONE_ARCHIVE_DIR, CLONE_ISO_INI),
        os.path.join(tsconfig.PLATFORM_CONF_PATH, CLONE_ISO_INI),
    ]
    name = "unknown"
    result = "unknown"
    installed_at = "unknown time"
    for ini_file in candidates:
        if not os.path.exists(ini_file):
            continue
        with open(ini_file) as f:
            contents = f.read()
        for line in contents.split("\n"):
            if line.startswith(NAME):
                name = line.split("=")[1].strip()
            elif line.startswith(RESULT):
                result = line.split("=")[1].strip()
            elif line.startswith(INSTALLED):
                installed_at = line.split("=")[1].strip()
        break  # one file was found, skip the other file
    if result == "unknown":
        print("\nCloned image is not installed on this node.\n")
    elif result == OK:
        print("\nInstallation of cloned image [{}] was successful at {}\n"
              .format(name, installed_at))
    elif result == FAIL:
        print("\nInstallation of cloned image [{}] failed at {}\n"
              .format(name, installed_at))
    else:
        print("\ninstall-clone is in progress.\n")


def check_size(archive_dir):
    """Verify there is enough free space in *archive_dir* to create the iso.

    :param archive_dir: directory where the iso will be staged and written
    :raises CloneFail: when the estimated image size exceeds free space
    """
    # Extra GB to accommodate the temporary workspace (updating the
    # system archive etc.) needed while assembling the iso.
    overhead_bytes = 1024 ** 3
    # The cloned iso grows with the installed package repository; patches
    # are already counted in the system archive sizes below.
    feed_dir = os.path.join('/www', 'pages', 'feed',
                            'rel-' + tsconfig.SW_VERSION)
    overhead_bytes += backup_restore.backup_std_dir_size(feed_dir)

    # Sum every component that ends up inside the clone archive.
    components = [
        overhead_bytes,
        backup_restore.backup_etc_size(),
        backup_restore.backup_config_size(tsconfig.CONFIG_PATH),
        backup_restore.backup_puppet_data_size(constants.HIERADATA_PERMDIR),
        backup_restore.backup_keyring_size(backup_restore.keyring_permdir),
        backup_restore.backup_ldap_size(),
        backup_restore.backup_postgres_size(),
        backup_restore.backup_std_dir_size(backup_restore.home_permdir),
        backup_restore.backup_std_dir_size(backup_restore.patching_permdir),
        backup_restore.backup_std_dir_size(
            backup_restore.patching_repo_permdir),
        backup_restore.backup_std_dir_size(backup_restore.extension_permdir),
        backup_restore.backup_std_dir_size(
            backup_restore.patch_vault_permdir),
        backup_restore.backup_armada_manifest_size(constants.ARMADA_PERMDIR),
        backup_restore.backup_std_dir_size(constants.HELM_CHARTS_PERMDIR),
        backup_restore.backup_mariadb_size(),
    ]
    clone_size = sum(components)

    archive_dir_free_space = \
        utils.filesystem_get_free_space(archive_dir)

    if clone_size > archive_dir_free_space:
        print("\nArchive directory (%s) does not have enough free "
              "space (%s), estimated size to create image is %s." %
              (archive_dir,
               utils.print_bytes(archive_dir_free_space),
               utils.print_bytes(clone_size)))
        raise CloneFail("Not enough free space.\n")


def update_bootloader_default(bl_file, host):
    """Update bootloader files for the cloned image.

    Rewrites the default boot label and timeout in *bl_file* (either an
    isolinux.cfg or a grub.cfg, distinguished by filename) to match this
    system's type/security profile, then patches boot_device,
    rootfs_device and console to *host*'s values.

    :param bl_file: path to the bootloader config inside the iso tree
    :param host: controller-0 inventory record (boot_device,
                 rootfs_device, console, tboot attributes are read)
    :raises CloneFail: if the file is missing or the rewrite fails
    """
    if not os.path.exists(bl_file):
        LOG.error("{} does not exist".format(bl_file))
        raise CloneFail("{} does not exist".format(os.path.basename(bl_file)))

    # Tags should be in sync with common-bsp/files/centos.syslinux.cfg
    # and common-bsp/files/grub.cfg
    STANDARD_STANDARD = '0'
    STANDARD_EXTENDED = 'S0'
    AIO_STANDARD = '2'
    AIO_EXTENDED = 'S2'
    AIO_LL_STANDARD = '4'
    AIO_LL_EXTENDED = 'S4'
    if "grub.cfg" in bl_file:
        # grub uses menu-path labels instead of numeric isolinux labels
        STANDARD_STANDARD = 'standard>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_STANDARD
        STANDARD_EXTENDED = 'standard>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
        AIO_STANDARD = 'aio>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_STANDARD
        AIO_EXTENDED = 'aio>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
        AIO_LL_STANDARD = 'aio-lowlat>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_STANDARD
        AIO_LL_EXTENDED = 'aio-lowlat>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
        SUBMENUITEM_TBOOT = 'tboot'
        SUBMENUITEM_SECUREBOOT = 'secureboot'

    timeout_line = None
    default_line = None
    # Pick the label for this system type / security profile.
    default_label_num = STANDARD_STANDARD
    if utils.get_system_type() == si_const.TIS_AIO_BUILD:
        if si_const.LOWLATENCY in tsconfig.subfunctions:
            default_label_num = AIO_LL_STANDARD
        else:
            default_label_num = AIO_STANDARD
    if (tsconfig.security_profile ==
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED):
        default_label_num = STANDARD_EXTENDED
        if utils.get_system_type() == si_const.TIS_AIO_BUILD:
            if si_const.LOWLATENCY in tsconfig.subfunctions:
                default_label_num = AIO_LL_EXTENDED
            else:
                default_label_num = AIO_EXTENDED
        if "grub.cfg" in bl_file:
            # extended-profile grub entries have a tboot/secureboot submenu
            if host.tboot is not None:
                if host.tboot == "true":
                    default_label_num = default_label_num + '>' + \
                        SUBMENUITEM_TBOOT
                else:
                    default_label_num = default_label_num + '>' + \
                        SUBMENUITEM_SECUREBOOT

    try:
        with open(bl_file) as f:
            s = f.read()
            # Find the existing default/timeout lines (if any) so they
            # can be replaced as a pair below.
            for line in s.split("\n"):
                if line.startswith("timeout"):
                    timeout_line = line
                elif line.startswith("default"):
                    default_line = line

            if "grub.cfg" in bl_file:
                replace = "default='{}'\ntimeout=10".format(default_label_num)
            else:  # isolinux format
                replace = "default {}\ntimeout 10".format(default_label_num)

            # Replace whichever of the two lines exist; prepend when
            # neither is present.
            if default_line and timeout_line:
                s = s.replace(default_line, "")
                s = s.replace(timeout_line, replace)
            elif default_line:
                s = s.replace(default_line, replace)
            elif timeout_line:
                s = s.replace(timeout_line, replace)
            else:
                s = replace + s

            # Point the boot parameters at the target host's devices.
            s = re.sub(r'boot_device=[^\s]*',
                       'boot_device=%s' % host.boot_device,
                       s)
            s = re.sub(r'rootfs_device=[^\s]*',
                       'rootfs_device=%s' % host.rootfs_device,
                       s)
            s = re.sub(r'console=[^\s]*',
                       'console=%s' % host.console,
                       s)

        with open(bl_file, "w") as f:
            LOG.info("rewriting {}: label={} find=[{}][{}] replace=[{}]"
                     .format(bl_file, default_label_num, timeout_line,
                             default_line, replace.replace('\n', '<newline>')))
            f.write(s)

    except Exception as e:
        LOG.error("update_bootloader_default failed: {}".format(e))
        raise CloneFail("Failed to update bootloader files")


def get_online_cpus():
    """Return the highest online cpu id as a string.

    Reads the kernel's online-cpu mask (e.g. "0-23") and returns the
    last id in the range.
    """
    with open('/sys/devices/system/cpu/online') as f:
        s = f.read()
    max_cpu_id = s.split('-')[-1].strip()
    LOG.info("Max cpu id:{} [{}]".format(max_cpu_id, s.strip()))
    return max_cpu_id
    # NOTE: the original trailing `return ""` was unreachable — the body
    # above always returns (or propagates the IOError) — so it is removed.


def get_total_mem():
    """Return the total memory size (kB value from /proc/meminfo) as a
    string, or "" if the MemTotal line is missing."""
    with open('/proc/meminfo') as f:
        for line in f:
            if line.startswith("MemTotal:"):
                mem_total = line.split()[1]
                LOG.info("MemTotal:[{}]".format(mem_total))
                return mem_total
    return ""


def get_disk_size(disk):
    """Return the size in bytes of *disk* as reported by lsblk.

    :param disk: block-device path (e.g. /dev/disk/by-path/pci-...)
    :raises CloneFail: if lsblk cannot be run for the device
    """
    try:
        size = subprocess.check_output(
            ['lsblk', '--nodeps', '--output', 'SIZE',
             '--noheadings', '--bytes', disk])
    except Exception as e:
        LOG.exception(e)
        LOG.error("Failed to get disk size [{}]".format(disk))
        raise CloneFail("Failed to get disk size")
    return size.strip()


def create_ini_file(clone_archive_dir, iso_name):
    """Create the clone ini file describing this host's hardware.

    Records hostname, interface names, disk paths/sizes, cpu count and
    total memory so the install side can validate the target hardware.

    :param clone_archive_dir: directory to write the ini file into
    :param iso_name: name recorded in the ini file
    """
    interfaces = ""
    my_hostname = utils.get_controller_hostname()
    macs = sysinv_api.get_mac_addresses(my_hostname)
    for intf in macs.keys():
        interfaces += intf + " "

    disk_paths = ""
    # Fix: initialize `files` so the LOG call below cannot raise
    # NameError when /dev/disk/by-path does not exist (os.walk then
    # yields nothing and the loop variable is never bound).
    files = []
    for _, _, files in os.walk('/dev/disk/by-path'):
        for f in files:
            if f.startswith("pci-") and "part" not in f and "usb" not in f:
                disk_size = get_disk_size('/dev/disk/by-path/' + f)
                disk_paths += f + "#" + disk_size + " "
        break  # no need to go into sub-dirs.

    LOG.info("create ini: {} {}".format(macs, files))
    with open(os.path.join(clone_archive_dir, CLONE_ISO_INI), 'w') as f:
        f.write('[clone_iso]\n')
        f.write('name=' + iso_name + '\n')
        f.write('host=' + my_hostname + '\n')
        f.write('created_at=' + time.strftime("%Y-%m-%d %H:%M:%S %Z")
                + '\n')
        f.write('interfaces=' + interfaces + '\n')
        f.write('disks=' + disk_paths + '\n')
        f.write('cpus=' + get_online_cpus() + '\n')
        f.write('mem=' + get_total_mem() + '\n')
    LOG.info("create ini: ({}) ({})".format(interfaces, disk_paths))


def create_iso(iso_name, archive_dir):
    """ Create iso image. This is modelled after
        the cgcs-root/build-tools/build-iso tool.

    :param iso_name: basename (no extension) of the iso to produce
    :param archive_dir: staging directory; the final <iso_name>.iso is
                        written here
    :raises CloneFail: on any failure while assembling the image
    """
    try:
        controller_0 = sysinv_api.get_host_data('controller-0')
    except Exception as e:
        e_log = "Failed to retrieve controller-0 inventory details."
        LOG.exception(e_log)
        raise CloneFail(e_log)

    iso_dir = os.path.join(archive_dir, 'isolinux')
    clone_archive_dir = os.path.join(iso_dir, CLONE_ARCHIVE_DIR)
    output = None
    tmpdir = None
    total_steps = 6
    step = 1
    print ("\nCreating ISO:")

    # Add the correct kick-start file to the image
    ks_file = "controller_ks.cfg"
    if utils.get_system_type() == si_const.TIS_AIO_BUILD:
        if si_const.LOWLATENCY in tsconfig.subfunctions:
            ks_file = "smallsystem_lowlatency_ks.cfg"
        else:
            ks_file = "smallsystem_ks.cfg"

    try:
        # prepare the iso files: kernel/initrd come from the pxeboot tree
        images_dir = os.path.join(iso_dir, 'images')
        os.mkdir(images_dir, 0o644)
        pxe_dir = os.path.join('/pxeboot',
                               'rel-' + tsconfig.SW_VERSION)
        os.symlink(pxe_dir + '/installer-bzImage',
                   iso_dir + '/vmlinuz')
        os.symlink(pxe_dir + '/installer-initrd',
                   iso_dir + '/initrd.img')
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        # package repo + LiveOS are symlinked from the installed feed;
        # isolinux.cfg is copied and rewritten for this host
        feed_dir = os.path.join('/www', 'pages', 'feed',
                                'rel-' + tsconfig.SW_VERSION)
        os.symlink(feed_dir + '/Packages', iso_dir + '/Packages')
        os.symlink(feed_dir + '/repodata', iso_dir + '/repodata')
        os.symlink(feed_dir + '/LiveOS', iso_dir + '/LiveOS')
        shutil.copy2(feed_dir + '/isolinux.cfg', iso_dir)
        update_bootloader_default(iso_dir + '/isolinux.cfg', controller_0)
        shutil.copyfile('/usr/share/syslinux/isolinux.bin',
                        iso_dir + '/isolinux.bin')
        os.symlink('/usr/share/syslinux/vesamenu.c32',
                   iso_dir + '/vesamenu.c32')
        for filename in glob.glob(os.path.join(feed_dir, '*ks.cfg')):
            shutil.copy(os.path.join(feed_dir, filename), iso_dir)
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        # stage the EFI boot tree and rewrite its grub.cfg
        efiboot_dir = os.path.join(iso_dir, 'EFI', 'BOOT')
        os.makedirs(efiboot_dir, 0o644)
        l_efi_dir = os.path.join('/boot', 'efi', 'EFI')
        shutil.copy2(l_efi_dir + '/BOOT/BOOTX64.EFI', efiboot_dir)
        shutil.copy2(l_efi_dir + '/centos/MokManager.efi', efiboot_dir)
        shutil.copy2(l_efi_dir + '/centos/grubx64.efi', efiboot_dir)
        shutil.copy2('/pxeboot/EFI/grub.cfg', efiboot_dir)
        update_bootloader_default(efiboot_dir + '/grub.cfg', controller_0)
        shutil.copytree(l_efi_dir + '/centos/fonts',
                        efiboot_dir + '/fonts')
        # copy EFI boot image and update the grub.cfg file
        efi_img = images_dir + '/efiboot.img'
        shutil.copy2(pxe_dir + '/efiboot.img', efi_img)
        # loop-mount the image to swap in the rewritten grub.cfg
        tmpdir = tempfile.mkdtemp(dir=archive_dir)
        output = subprocess.check_output(
            ["mount", "-t", "vfat", "-o", "loop",
             efi_img, tmpdir],
            stderr=subprocess.STDOUT)
        # replace the grub.cfg file with the updated file
        efi_grub_f = os.path.join(tmpdir, 'EFI', 'BOOT', 'grub.cfg')
        os.remove(efi_grub_f)
        shutil.copy2(efiboot_dir + '/grub.cfg', efi_grub_f)
        subprocess.call(['umount', tmpdir])
        shutil.rmtree(tmpdir, ignore_errors=True)
        tmpdir = None  # signal the finally-block that unmount is done

        epoch_time = "%.9f" % time.time()
        disc_info = [epoch_time, tsconfig.SW_VERSION, "x86_64"]
        with open(iso_dir + '/.discinfo', 'w') as f:
            f.write('\n'.join(disc_info))

        # copy the latest install_clone executable
        shutil.copy2('/usr/bin/install_clone', iso_dir)
        # append the post-clone kickstart snippet to the chosen ks file
        subprocess.check_output("cat /pxeboot/post_clone_iso_ks.cfg >> " +
                                iso_dir + "/" + ks_file, shell=True)
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        # copy patches
        iso_patches_dir = os.path.join(iso_dir, 'patches')
        iso_patch_repo_dir = os.path.join(iso_patches_dir, 'repodata')
        iso_patch_pkgs_dir = os.path.join(iso_patches_dir, 'Packages')
        iso_patch_metadata_dir = os.path.join(iso_patches_dir, 'metadata')
        iso_patch_applied_dir = os.path.join(iso_patch_metadata_dir, 'applied')
        iso_patch_committed_dir = os.path.join(iso_patch_metadata_dir,
                                               'committed')

        os.mkdir(iso_patches_dir, 0o755)
        os.mkdir(iso_patch_repo_dir, 0o755)
        os.mkdir(iso_patch_pkgs_dir, 0o755)
        os.mkdir(iso_patch_metadata_dir, 0o755)
        os.mkdir(iso_patch_applied_dir, 0o755)
        os.mkdir(iso_patch_committed_dir, 0o755)

        repodata = '/www/pages/updates/rel-%s/repodata/' % tsconfig.SW_VERSION
        pkgsdir = '/www/pages/updates/rel-%s/Packages/' % tsconfig.SW_VERSION
        patch_applied_dir = '/opt/patching/metadata/applied/'
        patch_committed_dir = '/opt/patching/metadata/committed/'
        subprocess.check_call(['rsync', '-a', repodata,
                               '%s/' % iso_patch_repo_dir])
        if os.path.exists(pkgsdir):
            subprocess.check_call(['rsync', '-a', pkgsdir,
                                   '%s/' % iso_patch_pkgs_dir])
        if os.path.exists(patch_applied_dir):
            subprocess.check_call(['rsync', '-a', patch_applied_dir,
                                   '%s/' % iso_patch_applied_dir])
        if os.path.exists(patch_committed_dir):
            subprocess.check_call(['rsync', '-a', patch_committed_dir,
                                   '%s/' % iso_patch_committed_dir])
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        create_ini_file(clone_archive_dir, iso_name)

        os.chmod(iso_dir + '/isolinux.bin', 0o664)
        iso_file = os.path.join(archive_dir, iso_name + ".iso")
        # build a hybrid BIOS/EFI bootable iso
        output = subprocess.check_output(
            ["nice", "mkisofs",
             "-o", iso_file, "-R", "-D",
             "-A", "oe_iso_boot", "-V", "oe_iso_boot",
             "-f", "-quiet",
             "-b", "isolinux.bin", "-c", "boot.cat", "-no-emul-boot",
             "-boot-load-size", "4", "-boot-info-table",
             "-eltorito-alt-boot", "-e", "images/efiboot.img",
             "-no-emul-boot",
             iso_dir],
            stderr=subprocess.STDOUT)
        LOG.info("{} created: [{}]".format(iso_file, output))
        utils.progress(total_steps, step, 'iso created', 'DONE')
        step += 1

        # make the iso bootable from a USB stick as well
        output = subprocess.check_output(
            ["nice", "isohybrid",
             "--uefi",
             iso_file],
            stderr=subprocess.STDOUT)
        LOG.debug("isohybrid: {}".format(output))

        # embed an md5 checksum for media verification at install time
        output = subprocess.check_output(
            ["nice", "implantisomd5",
             iso_file],
            stderr=subprocess.STDOUT)
        LOG.debug("implantisomd5: {}".format(output))
        utils.progress(total_steps, step, 'checksum implanted', 'DONE')
        print("Cloned iso image created: {}".format(iso_file))

    except Exception as e:
        LOG.exception(e)
        e_log = "ISO creation ({}) failed".format(iso_name)
        if output:
            e_log += ' [' + output + ']'
        LOG.error(e_log)
        raise CloneFail("ISO creation failed.")

    finally:
        # tmpdir is non-None only if the efiboot image is still mounted
        if tmpdir:
            subprocess.call(['umount', tmpdir], stderr=DEVNULL)
            shutil.rmtree(tmpdir, ignore_errors=True)


def find_and_replace_in_file(target, find, replace):
    """Replace whole-word occurrences of *find* with *replace* in *target*.

    Rewrites the file in place.  Returns True when at least one line
    matched; returns None when nothing matched or on error.
    """
    hit = None
    try:
        # look for "find" only at word boundaries
        pattern = r'\b' + find + r'\b'
        for line in fileinput.FileInput(target, inplace=1):
            if find in line:
                line = re.sub(pattern, replace, line)
                hit = True
            print(line, end='')
    except Exception as e:
        LOG.error("Failed to replace [{}] with [{}] in [{}]: {}"
                  .format(find, replace, target, str(e)))
        hit = None
    finally:
        fileinput.close()
    return hit


def find_and_replace(target_list, find, replace):
    """Find and replace a string in all files in a directory.

    Each entry of *target_list* may be a file (rewritten directly) or a
    directory (grep locates candidate files first).  Logs an error when
    *find* was not found anywhere.
    """
    found = False
    file_list = []
    for target in target_list:
        if os.path.isfile(target):
            if find_and_replace_in_file(target, find, replace):
                found = True
                file_list.append(target)
        elif os.path.isdir(target):
            try:
                # universal_newlines=True makes check_output return str on
                # Python 3 (the original bytes result would break the
                # split('\n') below); harmless on Python 2.
                output = subprocess.check_output(
                    ['grep', '-rl', find, target],
                    universal_newlines=True)
                if output:
                    for line in output.split('\n'):
                        if line and find_and_replace_in_file(
                                line, find, replace):
                            found = True
                            file_list.append(line)
            except Exception:
                pass  # nothing found in that directory (grep exits non-zero)
    if not found:
        LOG.error("[{}] not found in backup".format(find))
    else:
        LOG.info("Replaced [{}] with [{}] in {}".format(
            find, replace, file_list))


def remove_from_archive(archive, unwanted):
    """ Remove a file from the archive.

    :param archive: path to the (uncompressed) tar file
    :param unwanted: archive member name to delete
    :raises CloneFail: if the tar operation fails
    """
    try:
        subprocess.check_call(["tar", "--delete",
                               "--file=" + archive,
                               unwanted])
    except subprocess.CalledProcessError as e:
        # check_call() does not capture output, so e.output is always
        # None here; log the exception itself instead.
        LOG.error("Delete of {} failed: {}".format(unwanted, str(e)))
        raise CloneFail("Failed to modify backup archive")


def update_oamip_in_archive(tmpdir):
    """ Update OAM IP in system archive file.

    Replaces every configured OAM address found in the extracted backup
    with a placeholder prefix (192.0.x.x for IPv4, 2001:db8:x for IPv6).

    :param tmpdir: root of the extracted backup archive
    :raises CloneFail: if the OAM configuration cannot be read
    """
    oam_list = sysinv_api.get_oam_ip()
    if not oam_list:
        raise CloneFail("Failed to get OAM IP")

    # files/directories in the extracted backup that may embed OAM IPs
    targets = [os.path.join(tmpdir, 'etc/hosts'),
               os.path.join(tmpdir, 'etc/sysconfig/network-scripts'),
               os.path.join(tmpdir, 'etc/nfv/vim/config.ini'),
               os.path.join(tmpdir, 'etc/haproxy/haproxy.cfg'),
               os.path.join(tmpdir, 'etc/heat/heat.conf'),
               os.path.join(tmpdir, 'etc/keepalived/keepalived.conf'),
               os.path.join(tmpdir, 'etc/vswitch/vswitch.ini'),
               os.path.join(tmpdir, 'etc/nova/nova.conf'),
               os.path.join(tmpdir, 'config/hosts'),
               os.path.join(tmpdir, 'hieradata'),
               os.path.join(tmpdir, 'postgres/keystone.sql.data'),
               os.path.join(tmpdir, 'postgres/sysinv.sql.data')]

    for oamfind in (oam_list.oam_start_ip, oam_list.oam_end_ip,
                    oam_list.oam_subnet, oam_list.oam_floating_ip,
                    oam_list.oam_c0_ip, oam_list.oam_c1_ip):
        if not oamfind:
            continue
        ip = netaddr.IPNetwork(oamfind)
        find_str = str(ip.ip)
        if ip.version == 4:
            # if ipv4, use 192.0.x.x as the temporary oam ip
            parts = find_str.split('.')
            parts[0], parts[1] = '192', '0'
            repl_ipstr = '.'.join(parts)
        else:
            # if ipv6, use 2001:db8:x as the temporary oam ip
            parts = find_str.split(':')
            parts[0], parts[1] = '2001', 'db8'
            repl_ipstr = ':'.join(parts)
        if not repl_ipstr:
            # defensive guard: both branches above always assign
            # repl_ipstr, so this should be unreachable
            LOG.error("Failed to modify OAM IP:[{}]"
                      .format(oamfind))
            raise CloneFail("Failed to modify OAM IP")
        find_and_replace(targets, find_str, repl_ipstr)


def update_mac_in_archive(tmpdir):
    """ Update MAC addresses in system archive file.

    Replaces each interface MAC in the sysinv DB dump with a
    CLONEISOMAC_<hostname><interface> placeholder. On duplex systems
    the mate controller's MACs are replaced as well.

    :param tmpdir: root of the extracted backup archive
    """
    sysinv_file = os.path.join(tmpdir, 'postgres/sysinv.sql.data')
    hostnames = [utils.get_controller_hostname()]
    if tsconfig.system_mode in (si_const.SYSTEM_MODE_DUPLEX,
                                si_const.SYSTEM_MODE_DUPLEX_DIRECT):
        hostnames.append(utils.get_mate_controller_hostname())

    for hostname in hostnames:
        for intf, mac in sysinv_api.get_mac_addresses(hostname).items():
            find_and_replace(
                [sysinv_file],
                mac, "CLONEISOMAC_{}{}".format(hostname, intf))


def update_disk_serial_id_in_archive(tmpdir):
    """ Update disk serial id in system archive file.

    Replaces each disk serial id in the sysinv DB dump with a
    CLONEISODISKSID_<hostname><devnode> placeholder. On duplex systems
    the mate controller's disks are handled as well.

    :param tmpdir: root of the extracted backup archive
    """
    sysinv_file = os.path.join(tmpdir, 'postgres/sysinv.sql.data')
    hostnames = [utils.get_controller_hostname()]
    if tsconfig.system_mode in (si_const.SYSTEM_MODE_DUPLEX,
                                si_const.SYSTEM_MODE_DUPLEX_DIRECT):
        hostnames.append(utils.get_mate_controller_hostname())

    for hostname in hostnames:
        serial_ids = sysinv_api.get_disk_serial_ids(hostname)
        for d_dnode, d_sid in serial_ids.items():
            find_and_replace(
                [sysinv_file],
                d_sid, "CLONEISODISKSID_{}{}".format(hostname, d_dnode))


def update_sysuuid_in_archive(tmpdir):
    """ Update system uuid in system archive file.

    Replaces the system uuid in the sysinv DB dump with a fixed
    CLONEISO_SYSTEM_UUID placeholder.

    :param tmpdir: root of the extracted backup archive
    """
    sysinv_file = os.path.join(tmpdir, 'postgres/sysinv.sql.data')
    find_and_replace([sysinv_file],
                     sysinv_api.get_system_uuid(),
                     "CLONEISO_SYSTEM_UUID")


def update_backup_archive(backup_name, archive_dir):
    """ Update backup archive file to be included in clone-iso.

    Rewrites host-specific identifiers (OAM IPs, MAC addresses, disk
    serial ids, system uuid) inside the compressed backup archive so
    the clone can be re-personalized when installed on new hardware.

    :param backup_name: name of the .tgz archive (without extension)
    :param archive_dir: directory containing the archive
    :raises CloneFail: if any step of the rewrite fails
    """
    path_to_archive = os.path.join(archive_dir, backup_name)
    # scratch area created inside archive_dir so the tar --update below
    # operates on the same filesystem
    tmpdir = tempfile.mkdtemp(dir=archive_dir)
    try:
        # work on the plain .tar so members can be deleted/updated
        subprocess.check_call(
            ['gunzip', path_to_archive + '.tgz'],
            stdout=DEVNULL, stderr=DEVNULL)
        # 70-persistent-net.rules with the correct MACs will be
        # generated on the linux boot on the cloned side. Remove
        # the stale file from original side.
        remove_from_archive(path_to_archive + '.tar',
                            'etc/udev/rules.d/70-persistent-net.rules')
        # Extract only a subset of directories which have files to be
        # updated for oam-ip and MAC addresses. After updating the files
        # these directories are added back to the archive.
        subprocess.check_call(
            ['tar', '-x',
             '--directory=' + tmpdir,
             '-f', path_to_archive + '.tar',
             'etc', 'postgres', 'config',
             'hieradata'],
            stdout=DEVNULL, stderr=DEVNULL)
        # rewrite host-specific identifiers in the extracted files
        update_oamip_in_archive(tmpdir)
        update_mac_in_archive(tmpdir)
        update_disk_serial_id_in_archive(tmpdir)
        update_sysuuid_in_archive(tmpdir)
        # fold the edited directories back into the tarball
        subprocess.check_call(
            ['tar', '--update',
             '--directory=' + tmpdir,
             '-f', path_to_archive + '.tar',
             'etc', 'postgres', 'config',
             'hieradata'],
            stdout=DEVNULL, stderr=DEVNULL)
        # re-compress and restore the original .tgz name
        subprocess.check_call(['gzip', path_to_archive + '.tar'])
        shutil.move(path_to_archive + '.tar.gz', path_to_archive + '.tgz')

    except Exception as e:
        LOG.error("Update of backup archive {} failed {}".format(
            path_to_archive, str(e)))
        raise CloneFail("Failed to update backup archive")

    finally:
        # keep the scratch area when DEBUG is set, for post-mortem
        if not DEBUG:
            shutil.rmtree(tmpdir, ignore_errors=True)


def validate_controller_state():
    """ Cloning allowed now?

    :raises CloneFail: if this (or, on duplex, the mate) controller is
        not enabled/provisioned, the system is not All-in-one, or there
        are active alarms.
    """
    # Check each relevant controller with its own failure message.
    checks = [(utils.get_controller_hostname,
               "Controller is not enabled/provisioned")]
    if tsconfig.system_mode in (si_const.SYSTEM_MODE_DUPLEX,
                                si_const.SYSTEM_MODE_DUPLEX_DIRECT):
        checks.append((utils.get_mate_controller_hostname,
                       "Mate controller is not enabled/provisioned"))

    for get_hostname, err_msg in checks:
        try:
            ok = sysinv_api.controller_enabled_provisioned(
                get_hostname())
        except Exception:
            # any query failure is treated as "not provisioned"
            raise CloneFail("Controller is not enabled/provisioned")
        if not ok:
            raise CloneFail(err_msg)

    if utils.get_system_type() != si_const.TIS_AIO_BUILD:
        raise CloneFail("Cloning supported only on All-in-one systems")

    if len(sysinv_api.get_alarms()) > 0:
        raise CloneFail("There are active alarms on this system!")


def clone(backup_name, archive_dir):
    """ Do Cloning.

    Builds a clone ISO of this system: validates state, takes a system
    backup, personalizes the backup archive and generates the ISO.

    :param backup_name: base name for the backup/iso artifacts
    :param archive_dir: directory in which the iso is built
    :raises CloneFail: if validation, backup or iso creation fails
    """
    validate_controller_state()
    LOG.info("Cloning [{}] at [{}]".format(backup_name, archive_dir))
    check_size(archive_dir)

    isolinux_dir = os.path.join(archive_dir, 'isolinux')
    clone_archive_dir = os.path.join(isolinux_dir, CLONE_ARCHIVE_DIR)
    if os.path.exists(isolinux_dir):
        LOG.info("deleting old iso_dir %s" % isolinux_dir)
        shutil.rmtree(isolinux_dir, ignore_errors=True)
    # 0o755, not 0o644: a directory needs the execute bit to be
    # traversable; without it the archive contents would be
    # inaccessible to anything not running as root.
    os.makedirs(clone_archive_dir, 0o755)

    try:
        backup_restore.backup(backup_name, clone_archive_dir, clone=True)
        LOG.info("system backup done")
        update_backup_archive(backup_name + '_system', clone_archive_dir)
        create_iso(backup_name, archive_dir)
    except BackupFail as e:
        # surface backup failures to callers as a clone failure
        raise CloneFail(e.message)
    except CloneFail:
        raise
    finally:
        # the staging directory is only kept for debugging
        if not DEBUG:
            shutil.rmtree(isolinux_dir, ignore_errors=True)

+ 0
- 371
controllerconfig/controllerconfig/controllerconfig/common/configobjects.py View File

@@ -1,371 +0,0 @@
"""
Copyright (c) 2015-2019 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

from netaddr import IPRange
from controllerconfig.common.exceptions import ConfigFail
from controllerconfig.common.exceptions import ValidateFail
from controllerconfig.utils import is_mtu_valid
from controllerconfig.utils import is_valid_vlan
from controllerconfig.utils import validate_network_str
from controllerconfig.utils import validate_address_str

DEFAULT_CONFIG = 0
REGION_CONFIG = 1
SUBCLOUD_CONFIG = 2

MGMT_TYPE = 0
INFRA_TYPE = 1
OAM_TYPE = 2
CLUSTER_TYPE = 3
NETWORK_PREFIX_NAMES = [
('MGMT', 'INFRA', 'OAM', 'CLUSTER'),
('CLM', 'BLS', 'CAN', 'CLUSTER')
]

HOST_XML_ATTRIBUTES = ['hostname', 'personality', 'subfunctions',
'mgmt_mac', 'mgmt_ip',
'bm_ip', 'bm_type', 'bm_username',
'bm_password', 'boot_device', 'rootfs_device',
'install_output', 'console', 'vsc_controllers',
'power_on', 'location']

# Network naming types
DEFAULT_NAMES = 0
HP_NAMES = 1

# well-known default domain name
DEFAULT_DOMAIN_NAME = 'Default'


class LogicalInterface(object):
""" Represents configuration for a logical interface.
"""
def __init__(self):
self.name = None
self.mtu = None
self.lag_interface = False
self.lag_mode = None
self.ports = None

def parse_config(self, system_config, logical_interface):
# Ensure logical interface config is present
if not system_config.has_section(logical_interface):
raise ConfigFail("Missing config for logical interface %s." %
logical_interface)
self.name = logical_interface

# Parse/validate the MTU
self.mtu = system_config.getint(logical_interface, 'INTERFACE_MTU')
if not is_mtu_valid(self.mtu):
raise ConfigFail("Invalid MTU value for %s. "
"Valid values: 576 - 9216" % logical_interface)

# Parse the ports
self.ports = [_f for _f in
[x.strip() for x in
system_config.get(logical_interface,
'INTERFACE_PORTS').split(',')]
if _f]

# Parse/validate the LAG config
lag_interface = system_config.get(logical_interface,
'LAG_INTERFACE')
if lag_interface.lower() == 'y':
self.lag_interface = True
if len(self.ports) != 2:
raise ConfigFail(
"Invalid number of ports (%d) supplied for LAG "
"interface %s" % (len(self.ports), logical_interface))
self.lag_mode = system_config.getint(logical_interface, 'LAG_MODE')
if self.lag_mode < 1 or self.lag_mode > 6:
raise ConfigFail(
"Invalid LAG_MODE value of %d for %s. Valid values: 1-6" %
(self.lag_mode, logical_interface))
elif lag_interface.lower() == 'n':
if len(self.ports) > 1:
raise ConfigFail(
"More than one interface supplied for non-LAG "
"interface %s" % logical_interface)
if len(self.ports) == 0:
raise ConfigFail(
"No interfaces supplied for non-LAG "
"interface %s" % logical_interface)
else:
raise ConfigFail(
"Invalid LAG_INTERFACE value of %s for %s. Valid values: "
"Y or N" % (lag_interface, logical_interface))


class Network(object):
""" Represents configuration for a network.
"""
def __init__(self):
self.vlan = None
self.cidr = None
self.multicast_cidr = None
self.start_address = None
self.end_address = None
self.start_end_in_config = False
self.floating_address = None
self.address_0 = None
self.address_1 = None
self.dynamic_allocation = False
self.gateway_address = None
self.logical_interface = None

def parse_config(self, system_config, config_type, network_type,
min_addresses=0, multicast_addresses=0, optional=False,
naming_type=DEFAULT_NAMES,
logical_interface_required=True):
network_prefix = NETWORK_PREFIX_NAMES[naming_type][network_type]
network_name = network_prefix + '_NETWORK'

if naming_type == HP_NAMES:
attr_prefix = network_prefix + '_'
else:
attr_prefix = ''

# Ensure network config is present
if not system_config.has_section(network_name):
if not optional:
raise ConfigFail("Missing config for network %s." %
network_name)
else:
# Optional interface - just return
return

# Parse/validate the VLAN
if system_config.has_option(network_name, attr_prefix + 'VLAN'):
self.vlan = system_config.getint(network_name,
attr_prefix + 'VLAN')
if self.vlan:
if not is_valid_vlan(self.vlan):
raise ConfigFail(
"Invalid %s value of %d for %s. Valid values: 1-4094" %
(attr_prefix + 'VLAN', self.vlan, network_name))

# Parse/validate the cidr
cidr_str = system_config.get(network_name, attr_prefix + 'CIDR')
try:
self.cidr = validate_network_str(
cidr_str, min_addresses)
except ValidateFail as e:
raise ConfigFail(
"Invalid %s value of %s for %s.\nReason: %s" %
(attr_prefix + 'CIDR', cidr_str, network_name, e))

# Parse/validate the multicast subnet
if 0 < multicast_addresses and \
system_config.has_option(network_name,
attr_prefix + 'MULTICAST_CIDR'):
multicast_cidr_str = system_config.get(network_name, attr_prefix +
'MULTICAST_CIDR')
try:
self.multicast_cidr = validate_network_str(
multicast_cidr_str, multicast_addresses, multicast=True)
except ValidateFail as e:
raise ConfigFail(
"Invalid %s value of %s for %s.\nReason: %s" %
(attr_prefix + 'MULTICAST_CIDR', multicast_cidr_str,
network_name, e))

if self.cidr.version != self.multicast_cidr.version:
raise ConfigFail(
"Invalid %s value of %s for %s. Multicast "
"subnet and network IP families must be the same." %
(attr_prefix + 'MULTICAST_CIDR', multicast_cidr_str,
network_name))

# Parse/validate the hardwired controller addresses
floating_address_str = None
address_0_str = None
address_1_str = None

if min_addresses == 1:
if (system_config.has_option(
network_name, attr_prefix + 'IP_FLOATING_ADDRESS') or
system_config.has_option(
network_name, attr_prefix + 'IP_UNIT_0_ADDRESS') or
system_config.has_option(
network_name, attr_prefix + 'IP_UNIT_1_ADDRESS') or
system_config.has_option(
network_name, attr_prefix + 'IP_START_ADDRESS') or
system_config.has_option(
network_name, attr_prefix + 'IP_END_ADDRESS')):
raise ConfigFail(
"Only one IP address is required for OAM "
"network, use 'IP_ADDRESS' to specify the OAM IP "
"address")
floating_address_str = system_config.get(
network_name, attr_prefix + 'IP_ADDRESS')
try:
self.floating_address = validate_address_str(
floating_address_str, self.cidr)
except ValidateFail as e:
raise ConfigFail(
"Invalid %s value of %s for %s.\nReason: %s" %
(attr_prefix + 'IP_ADDRESS',
floating_address_str, network_name, e))
self.address_0 = self.floating_address
self.address_1 = self.floating_address
else:
if system_config.has_option(
network_name, attr_prefix + 'IP_FLOATING_ADDRESS'):
floating_address_str = system_config.get(
network_name, attr_prefix + 'IP_FLOATING_ADDRESS')
try:
self.floating_address = validate_address_str(
floating_address_str, self.cidr)
except ValidateFail as e:
raise ConfigFail(
"Invalid %s value of %s for %s.\nReason: %s" %
(attr_prefix + 'IP_FLOATING_ADDRESS',
floating_address_str, network_name, e))

if system_config.has_option(
network_name, attr_prefix + 'IP_UNIT_0_ADDRESS'):
address_0_str = system_config.get(
network_name, attr_prefix + 'IP_UNIT_0_ADDRESS')
try:
self.address_0 = validate_address_str(
address_0_str, self.cidr)
except ValidateFail as e:
raise ConfigFail(
"Invalid %s value of %s for %s.\nReason: %s" %
(attr_prefix + 'IP_UNIT_0_ADDRESS',
address_0_str, network_name, e))

if system_config.has_option(
network_name, attr_prefix + 'IP_UNIT_1_ADDRESS'):
address_1_str = system_config.get(
network_name, attr_prefix + 'IP_UNIT_1_ADDRESS')
try:
self.address_1 = validate_address_str(
address_1_str, self.cidr)
except ValidateFail as e:
raise ConfigFail(
"Invalid %s value of %s for %s.\nReason: %s" %
(attr_prefix + 'IP_UNIT_1_ADDRESS',
address_1_str, network_name, e))

# Parse/validate the start/end addresses
start_address_str = None
end_address_str = None
if system_config.has_option(
network_name, attr_prefix + 'IP_START_ADDRESS'):
start_address_str = system_config.get(
network_name, attr_prefix + 'IP_START_ADDRESS')
try:
self.start_address = validate_address_str(
start_address_str, self.cidr)
except ValidateFail as e:
raise ConfigFail(
"Invalid %s value of %s for %s.\nReason: %s" %
(attr_prefix + 'IP_START_ADDRESS',
start_address_str, network_name, e))

if system_config.has_option(
network_name, attr_prefix + 'IP_END_ADDRESS'):
end_address_str = system_config.get(
network_name, attr_prefix + 'IP_END_ADDRESS')
try:
self.end_address = validate_address_str(
end_address_str, self.cidr)
except ValidateFail as e:
raise ConfigFail(
"Invalid %s value of %s for %s.\nReason: %s " %
(attr_prefix + 'IP_END_ADDRESS',
end_address_str, network_name, e))

if start_address_str or end_address_str:
if not end_address_str:
raise ConfigFail("Missing attribute %s for %s_NETWORK" %
(attr_prefix + 'IP_END_ADDRESS',
network_name))
if not start_address_str:
raise ConfigFail("Missing attribute %s for %s_NETWORK" %
(attr_prefix + 'IP_START_ADDRESS',
network_name))
if not self.start_address < self.end_address:
raise ConfigFail(
"Start address %s not less than end address %s for %s."
% (str(self.start_address), str(self.end_address),
network_name))
if not IPRange(start_address_str, end_address_str).size >= \
min_addresses:
raise ConfigFail("Address range for %s must contain at "
"least %d addresses." %
(network_name, min_addresses))
self.start_end_in_config = True

if floating_address_str or address_0_str or address_1_str:
if not floating_address_str:
raise ConfigFail("Missing attribute %s for %s_NETWORK" %
(attr_prefix + 'IP_FLOATING_ADDRESS',
network_name))
if not address_0_str:
raise ConfigFail("Missing attribute %s for %s_NETWORK" %
(attr_prefix + 'IP_UNIT_0_ADDRESS',
network_name))
if not address_1_str:
raise ConfigFail("Missing attribute %s for %s_NETWORK" %
(attr_prefix + 'IP_UNIT_1_ADDRESS',
network_name))

if start_address_str and floating_address_str:
raise ConfigFail("Overspecified network: Can only set %s "
"and %s OR %s, %s, and %s for "
"%s_NETWORK" %
(attr_prefix + 'IP_START_ADDRESS',
attr_prefix + 'IP_END_ADDRESS',
attr_prefix + 'IP_FLOATING_ADDRESS',
attr_prefix + 'IP_UNIT_0_ADDRESS',
attr_prefix + 'IP_UNIT_1_ADDRESS',
network_name))

if config_type == DEFAULT_CONFIG:
if not self.start_address:
self.start_address = self.cidr[2]
if not self.end_address:
self.end_address = self.cidr[-2]

# Parse/validate the dynamic IP address allocation
if system_config.has_option(network_name,
'DYNAMIC_ALLOCATION'):
dynamic_allocation = system_config.get(network_name,
'DYNAMIC_ALLOCATION')
if dynamic_allocation.lower() == 'y':
self.dynamic_allocation = True
elif dynamic_allocation.lower() == 'n':
self.dynamic_allocation = False
else:
raise ConfigFail(
"Invalid DYNAMIC_ALLOCATION value of %s for %s. "
"Valid values: Y or N" %
(dynamic_allocation, network_name))

# Parse/validate the gateway (optional)
if system_config.has_option(network_name, attr_prefix + 'GATEWAY'):
gateway_address_str = system_config.get(
network_name, attr_prefix + 'GATEWAY')
try:
self.gateway_address = validate_address_str(
gateway_address_str, self.cidr)
except ValidateFail as e:
raise ConfigFail(
"Invalid %s value of %s for %s.\nReason: %s" %
(attr_prefix + 'GATEWAY',
gateway_address_str, network_name, e))

# Parse/validate the logical interface
if logical_interface_required or system_config.has_option(
network_name, attr_prefix + 'LOGICAL_INTERFACE'):
logical_interface_name = system_config.get(
network_name, attr_prefix + 'LOGICAL_INTERFACE')
self.logical_interface = LogicalInterface()
self.logical_interface.parse_config(system_config,
logical_interface_name)

+ 1
- 63
controllerconfig/controllerconfig/controllerconfig/common/constants.py View File

@@ -1,10 +1,9 @@
#
# Copyright (c) 2016-2019 Wind River Systems, Inc.
# Copyright (c) 2016-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from sysinv.common import constants as sysinv_constants
from tsconfig import tsconfig


@@ -15,70 +14,9 @@ CONFIG_PERMDIR = tsconfig.CONFIG_PATH
HIERADATA_WORKDIR = '/tmp/hieradata'
HIERADATA_PERMDIR = tsconfig.PUPPET_PATH + 'hieradata'

ARMADA_PERMDIR = tsconfig.ARMADA_PATH
HELM_CHARTS_PERMDIR = tsconfig.PLATFORM_PATH + '/helm_charts'
HELM_OVERRIDES_PERMDIR = tsconfig.HELM_OVERRIDES_PATH

KEYRING_WORKDIR = '/tmp/python_keyring'
KEYRING_PERMDIR = tsconfig.KEYRING_PATH

INITIAL_CONFIG_COMPLETE_FILE = '/etc/platform/.initial_config_complete'
CONFIG_FAIL_FILE = '/var/run/.config_fail'
COMMON_CERT_FILE = "/etc/ssl/private/server-cert.pem"
FIREWALL_RULES_FILE = '/etc/platform/iptables.rules'
OPENSTACK_PASSWORD_RULES_FILE = '/etc/keystone/password-rules.conf'
INSTALLATION_FAILED_FILE = '/etc/platform/installation_failed'

BACKUPS_PATH = '/opt/backups'

INTERFACES_LOG_FILE = "/tmp/configure_interfaces.log"

LINK_MTU_DEFAULT = "1500"

CINDER_LVM_THIN = "thin"
CINDER_LVM_THICK = "thick"

DEFAULT_DATABASE_STOR_SIZE = \
sysinv_constants.DEFAULT_DATABASE_STOR_SIZE
DEFAULT_SMALL_DATABASE_STOR_SIZE = \
sysinv_constants.DEFAULT_SMALL_DATABASE_STOR_SIZE
DEFAULT_SMALL_BACKUP_STOR_SIZE = \
sysinv_constants.DEFAULT_SMALL_BACKUP_STOR_SIZE
DEFAULT_VIRTUAL_DATABASE_STOR_SIZE = \
sysinv_constants.DEFAULT_VIRTUAL_DATABASE_STOR_SIZE
DEFAULT_VIRTUAL_BACKUP_STOR_SIZE = \
sysinv_constants.DEFAULT_VIRTUAL_BACKUP_STOR_SIZE
DEFAULT_EXTENSION_STOR_SIZE = \
sysinv_constants.DEFAULT_EXTENSION_STOR_SIZE
DEFAULT_PLATFORM_STOR_SIZE = \
sysinv_constants.DEFAULT_PLATFORM_STOR_SIZE

SYSTEM_CONFIG_TIMEOUT = 420
SERVICE_ENABLE_TIMEOUT = 180
MINIMUM_ROOT_DISK_SIZE = 500
MAXIMUM_CGCS_LV_SIZE = 500
LDAP_CONTROLLER_CONFIGURE_TIMEOUT = 30
SYSADMIN_MAX_PASSWORD_AGE = 45 # 45 days

LAG_MODE_ACTIVE_BACKUP = "active-backup"
LAG_MODE_BALANCE_XOR = "balance-xor"
LAG_MODE_8023AD = "802.3ad"

LAG_TXHASH_LAYER2 = "layer2"

LAG_MIIMON_FREQUENCY = 100

LOOPBACK_IFNAME = 'lo'

DEFAULT_MULTICAST_SUBNET_IPV4 = '239.1.1.0/28'
DEFAULT_MULTICAST_SUBNET_IPV6 = 'ff08::1:1:0/124'

DEFAULT_MGMT_ON_LOOPBACK_SUBNET_IPV4 = '192.168.204.0/28'

DEFAULT_REGION_NAME = "RegionOne"
DEFAULT_SERVICE_PROJECT_NAME = "services"

SSH_WARNING_MESSAGE = "WARNING: Command should only be run from the " \
"console. Continuing with this terminal may cause " \
"loss of connectivity and configuration failure."
SSH_ERROR_MESSAGE = "ERROR: Command should only be run from the console."

+ 0
- 102
controllerconfig/controllerconfig/controllerconfig/common/crypt.py View File

@@ -1,102 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
Routines for URL-safe encrypting/decrypting

Cloned from git/glance/common
"""

import base64
import os
import random

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import algorithms
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers import modes
from oslo_utils import encodeutils
import six
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range


def urlsafe_encrypt(key, plaintext, blocksize=16):
"""Encrypts plaintext.

Resulting ciphertext will contain URL-safe characters.
If plaintext is Unicode, encode it to UTF-8 before encryption.

:param key: AES secret key
:param plaintext: Input text to be encrypted
:param blocksize: Non-zero integer multiple of AES blocksize in bytes (16)
:returns: Resulting ciphertext
"""

def pad(text):
"""Pads text to be encrypted"""
pad_length = (blocksize - len(text) % blocksize)
# NOTE(rosmaita): I know this looks stupid, but we can't just
# use os.urandom() to get the bytes because we use char(0) as
# a delimiter
pad = b''.join(six.int2byte(random.SystemRandom().randint(1, 0xFF))
for i in range(pad_length - 1))
# We use chr(0) as a delimiter between text and padding
return text + b'\0' + pad

plaintext = encodeutils.to_utf8(plaintext)
key = encodeutils.to_utf8(key)
# random initial 16 bytes for CBC
init_vector = os.urandom(16)
backend = default_backend()
cypher = Cipher(algorithms.AES(key), modes.CBC(init_vector),
backend=backend)
encryptor = cypher.encryptor()
padded = encryptor.update(
pad(six.binary_type(plaintext))) + encryptor.finalize()
encoded = base64.urlsafe_b64encode(init_vector + padded)
if six.PY3:
encoded = encoded.decode('ascii')
return encoded


def urlsafe_decrypt(key, ciphertext):
"""Decrypts URL-safe base64 encoded ciphertext.

On Python 3, the result is decoded from UTF-8.

:param key: AES secret key
:param ciphertext: The encrypted text to decrypt

:returns: Resulting plaintext
"""
# Cast from unicode
ciphertext = encodeutils.to_utf8(ciphertext)
key = encodeutils.to_utf8(key)
ciphertext = base64.urlsafe_b64decode(ciphertext)
backend = default_backend()
cypher = Cipher(algorithms.AES(key), modes.CBC(ciphertext[:16]),
backend=backend)
decryptor = cypher.decryptor()
padded = decryptor.update(ciphertext[16:]) + decryptor.finalize()
text = padded[:padded.rfind(b'\0')]
if six.PY3:
text = text.decode('utf-8')
return text

+ 0
- 44
controllerconfig/controllerconfig/controllerconfig/common/dcmanager.py View File

@@ -1,44 +0,0 @@
#
# Copyright (c) 2017-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
DC Manager Interactions
"""

from controllerconfig.common import log

from Crypto.Hash import MD5
from controllerconfig.common import crypt

import json


LOG = log.get_logger(__name__)


class UserList(object):
"""
User List
"""
def __init__(self, user_data, hash_string):
# Decrypt the data using input hash_string to generate
# the key
h = MD5.new()
h.update(hash_string)
encryption_key = h.hexdigest()
user_data_decrypted = crypt.urlsafe_decrypt(encryption_key,
user_data)

self._data = json.loads(user_data_decrypted)

def get_password(self, name):
"""
Search the users for the password
"""
for user in self._data:
if user['name'] == name:
return user['password']
return None

+ 1
- 36
controllerconfig/controllerconfig/controllerconfig/common/exceptions.py View File

@@ -1,5 +1,5 @@
#
# Copyright (c) 2014-2019 Wind River Systems, Inc.
# Copyright (c) 2014-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -20,56 +20,21 @@ class ConfigError(Exception):
return self.message or ""


class ConfigFail(ConfigError):
"""General configuration error."""
pass


class ValidateFail(ConfigError):
"""Validation of data failed."""
pass


class BackupFail(ConfigError):
"""Backup error."""
pass


class UpgradeFail(ConfigError):
"""Upgrade error."""
pass


class BackupWarn(ConfigError):
"""Backup warning."""
pass


class RestoreFail(ConfigError):
"""Backup error."""
pass


class KeystoneFail(ConfigError):
"""Keystone error."""
pass


class SysInvFail(ConfigError):
"""System Inventory error."""
pass


class UserQuit(ConfigError):
"""User initiated quit operation."""
pass


class CloneFail(ConfigError):
"""Clone error."""
pass


class TidyStorageFail(ConfigError):
"""Tidy storage error."""
pass

+ 2
- 3
controllerconfig/controllerconfig/controllerconfig/common/keystone.py View File

@@ -12,10 +12,9 @@ import datetime
import iso8601

from controllerconfig.common.exceptions import KeystoneFail
from controllerconfig.common import log
from oslo_log import log


LOG = log.get_logger(__name__)
LOG = log.getLogger(__name__)


class Token(object):


+ 0
- 49
controllerconfig/controllerconfig/controllerconfig/common/log.py View File

@@ -1,49 +0,0 @@
#
# Copyright (c) 2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
Logging
"""

import logging
import logging.handlers

_loggers = {}


def get_logger(name):
""" Get a logger or create one """

if name not in _loggers:
_loggers[name] = logging.getLogger(name)

return _loggers[name]


def setup_logger(logger):
""" Setup a logger """

# Send logs to /var/log/platform.log
syslog_facility = logging.handlers.SysLogHandler.LOG_LOCAL1

formatter = logging.Formatter("configassistant[%(process)d] " +
"%(pathname)s:%(lineno)s " +
"%(levelname)8s [%(name)s] %(message)s")

handler = logging.handlers.SysLogHandler(address='/dev/log',
facility=syslog_facility)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)

logger.addHandler(handler)
logger.setLevel(logging.INFO)


def configure():
""" Setup logging """

for logger in _loggers:
setup_logger(_loggers[logger])

+ 3
- 17
controllerconfig/controllerconfig/controllerconfig/common/rest_api_utils.py View File

@@ -1,5 +1,5 @@
"""
Copyright (c) 2015-2017 Wind River Systems, Inc.
Copyright (c) 2015-2020 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

@@ -7,16 +7,15 @@ SPDX-License-Identifier: Apache-2.0
import json

from controllerconfig.common.exceptions import KeystoneFail
from controllerconfig.common import dcmanager
from controllerconfig.common import keystone
from controllerconfig.common import log
from six.moves import http_client as httplib
from six.moves.urllib import request as urlrequest
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from oslo_log import log


LOG = log.get_logger(__name__)
LOG = log.getLogger(__name__)


def rest_api_request(token, method, api_cmd, api_cmd_headers=None,
@@ -324,16 +323,3 @@ def delete_project(token, api_url, id):
api_cmd = api_url + "/projects/" + id
response = rest_api_request(token, "DELETE", api_cmd,)
return keystone.Project(response)


def get_subcloud_config(token, api_url, subcloud_name,
hash_string):
"""
Ask DC Manager for our subcloud configuration
"""
api_cmd = api_url + "/subclouds/" + subcloud_name + "/config"
response = rest_api_request(token, "GET", api_cmd)
config = dict()
config['users'] = dcmanager.UserList(response['users'], hash_string)

return config

+ 0
- 1189
controllerconfig/controllerconfig/controllerconfig/common/validator.py
File diff suppressed because it is too large
View File


+ 0
- 4746
controllerconfig/controllerconfig/controllerconfig/configassistant.py
File diff suppressed because it is too large
View File


+ 0
- 285
controllerconfig/controllerconfig/controllerconfig/openstack.py View File

@@ -1,285 +0,0 @@
#
# Copyright (c) 2014-2015 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
OpenStack
"""

import os
import time
import subprocess

from controllerconfig.common import log
from controllerconfig.common.exceptions import SysInvFail
from controllerconfig.common.rest_api_utils import get_token
from controllerconfig import sysinv_api as sysinv


LOG = log.get_logger(__name__)

KEYSTONE_AUTH_SERVER_RETRY_CNT = 60
KEYSTONE_AUTH_SERVER_WAIT = 1 # 1sec wait per retry


class OpenStack(object):

def __init__(self):
self.admin_token = None
self.conf = {}
self._sysinv = None

source_command = 'source /etc/platform/openrc && env'

with open(os.devnull, "w") as fnull:
proc = subprocess.Popen(
['bash', '-c', source_command],
stdout=subprocess.PIPE, stderr=fnull)

for line in proc.stdout:
key, _, value = line.partition("=")
if key == 'OS_USERNAME':
self.conf['admin_user'] = value.strip()
elif key == 'OS_PASSWORD':
self.conf['admin_pwd'] = value.strip()
elif key == 'OS_PROJECT_NAME':
self.conf['admin_tenant'] = value.strip()
elif key == 'OS_AUTH_URL':
self.conf['auth_url'] = value.strip()
elif key == 'OS_REGION_NAME':
self.conf['region_name'] = value.strip()
elif key == 'OS_USER_DOMAIN_NAME':
self.conf['user_domain'] = value.strip()
elif key == 'OS_PROJECT_DOMAIN_NAME':
self.conf['project_domain'] = value.strip()

proc.communicate()

def __enter__(self):
if not self._connect():
raise Exception('Failed to connect')
return self

def __exit__(self, exc_type, exc_val, exc_tb):
self._disconnect()

def __del__(self):
self._disconnect()

def _connect(self):
""" Connect to an OpenStack instance """

if self.admin_token is not None:
self._disconnect()

# Try to obtain an admin token from keystone
for _ in range(KEYSTONE_AUTH_SERVER_RETRY_CNT):
self.admin_token = get_token(self.conf['auth_url'],
self.conf['admin_tenant'],
self.conf['admin_user'],
self.conf['admin_pwd'],
self.conf['user_domain'],
self.conf['project_domain'])
if self.admin_token:
break
time.sleep(KEYSTONE_AUTH_SERVER_WAIT)

return self.admin_token is not None

def _disconnect(self):
""" Disconnect from an OpenStack instance """
self.admin_token = None

def lock_hosts(self, exempt_hostnames=None, progress_callback=None,
timeout=60):
""" Lock hosts of an OpenStack instance except for host names
in the exempt list
"""
failed_hostnames = []

if exempt_hostnames is None:
exempt_hostnames = []

hosts = sysinv.get_hosts(self.admin_token, self.conf['region_name'])
if not hosts:
if progress_callback is not None:
progress_callback(0, 0, None, None)
return

wait = False
host_i = 0

for host in hosts:
if host.name in exempt_hostnames:
continue

if host.is_unlocked():
if not host.force_lock(self.admin_token,
self.conf['region_name']):
failed_hostnames.append(host.name)
LOG.warning("Could not lock %s" % host.name)
else:
wait = True
else:
host_i += 1
if progress_callback is not None:
progress_callback(len(hosts), host_i,
('locking %s' % host.name),
'DONE')

if wait and timeout > 5:
time.sleep(5)
timeout -= 5

for _ in range(0, timeout):
wait = False

for host in hosts:
if host.name in exempt_hostnames:
continue

if (host.name not in failed_hostnames) and host.is_unlocked():
host.refresh_data(self.admin_token,
self.conf['region_name'])

if host.is_locked():
LOG.info("Locked %s" % host.name)
host_i += 1
if progress_callback is not None:
progress_callback(len(hosts), host_i,
('locking %s' % host.name),
'DONE')
else:
LOG.info("Waiting for lock of %s" % host.name)
wait = True

if not wait:
break

time.sleep(1)
else:
failed_hostnames.append(host.name)
LOG.warning("Wait failed for lock of %s" % host.name)

return failed_hostnames

    def power_off_hosts(self, exempt_hostnames=None, progress_callback=None,
                        timeout=60):
        """ Power-off hosts of an OpenStack instance except for host names
            in the exempt list

        Only hosts reporting support_power_off() are considered.
        :param exempt_hostnames: host names to skip (default: none)
        :param progress_callback: optional callable
            (total, done, activity, state) invoked as hosts power down
        :param timeout: seconds to wait for power-offs to take effect
        :raises SysInvFail: immediately if a power-off request is rejected,
            or at the end if any host is still powered on after timeout
        """

        if exempt_hostnames is None:
            exempt_hostnames = []

        hosts = sysinv.get_hosts(self.admin_token, self.conf['region_name'])

        # Keep only hosts that can actually be powered off (in-place so the
        # list object is reused below).
        hosts[:] = [host for host in hosts if host.support_power_off()]
        if not hosts:
            if progress_callback is not None:
                progress_callback(0, 0, None, None)
            return

        wait = False
        host_i = 0

        # Phase 1: issue a power-off to every powered-on, non-exempt host.
        # Unlike lock_hosts, a rejected request aborts the whole operation.
        for host in hosts:
            if host.name in exempt_hostnames:
                continue

            if host.is_powered_on():
                if not host.power_off(self.admin_token,
                                      self.conf['region_name']):
                    raise SysInvFail("Could not power-off %s" % host.name)
                wait = True
            else:
                # Already off - count it as done immediately.
                host_i += 1
                if progress_callback is not None:
                    progress_callback(len(hosts), host_i,
                                      ('powering off %s' % host.name),
                                      'DONE')

        if wait and timeout > 5:
            # Give the power-off requests a head start before polling.
            time.sleep(5)
            timeout -= 5

        # Phase 2: poll once per second until all hosts report powered-off
        # or the timeout expires (for/else: else runs only on exhaustion).
        for _ in range(0, timeout):
            wait = False

            for host in hosts:
                if host.name in exempt_hostnames:
                    continue

                if host.is_powered_on():
                    host.refresh_data(self.admin_token,
                                      self.conf['region_name'])

                    if host.is_powered_off():
                        LOG.info("Powered-Off %s" % host.name)
                        host_i += 1
                        if progress_callback is not None:
                            progress_callback(len(hosts), host_i,
                                              ('powering off %s' % host.name),
                                              'DONE')
                    else:
                        LOG.info("Waiting for power-off of %s" % host.name)
                        wait = True

            if not wait:
                break

            time.sleep(1)
        else:
            # Timeout: report every host still powered on and fail.
            failed_hosts = [h.name for h in hosts if h.is_powered_on()]
            msg = "Wait timeout for power-off of %s" % failed_hosts
            LOG.info(msg)
            raise SysInvFail(msg)

def wait_for_hosts_disabled(self, exempt_hostnames=None, timeout=300,
interval_step=10):
"""Wait for hosts to be identified as disabled.
Run check every interval_step seconds
"""
</