StarlingX open source release updates
Signed-off-by: Dean Troyer <dtroyer@gmail.com>
controllerconfig/.gitignore (vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
!.distro
.distro/centos7/rpmbuild/RPMS
.distro/centos7/rpmbuild/SRPMS
.distro/centos7/rpmbuild/BUILD
.distro/centos7/rpmbuild/BUILDROOT
.distro/centos7/rpmbuild/SOURCES/controllerconfig*tar.gz
controllerconfig/PKG-INFO (new file, 13 lines)
@@ -0,0 +1,13 @@
Metadata-Version: 1.1
Name: controllerconfig
Version: 1.0
Summary: Controller Node Configuration
Home-page:
Author: Wind River
Author-email: info@windriver.com
License: Apache-2.0

Description: Controller node configuration


Platform: UNKNOWN
controllerconfig/centos/build_srpm.data (new executable file, 2 lines)
@@ -0,0 +1,2 @@
SRC_DIR="controllerconfig"
TIS_PATCH_VER=140
controllerconfig/centos/controllerconfig.spec (new file, 86 lines)
@@ -0,0 +1,86 @@
Summary: Controller node configuration
Name: controllerconfig
Version: 1.0
Release: %{tis_patch_ver}%{?_tis_dist}
License: Apache-2.0
Group: base
Packager: Wind River <info@windriver.com>
URL: unknown
Source0: %{name}-%{version}.tar.gz

BuildRequires: python-setuptools
Requires: systemd
Requires: python-netaddr
Requires: python-keyring
Requires: python-six
Requires: python-iso8601
Requires: psmisc
Requires: lshell
Requires: python-pyudev
Requires: python-netifaces

%description
Controller node configuration

%define local_dir /usr/
%define local_bindir %{local_dir}/bin/
%define local_etc_initd /etc/init.d/
%define local_goenabledd /etc/goenabled.d/
%define local_etc_upgraded /etc/upgrade.d/
%define local_etc_systemd /etc/systemd/system/
%define pythonroot /usr/lib64/python2.7/site-packages
%define debug_package %{nil}

%prep
%setup

%build
%{__python} setup.py build

# TODO: NO_GLOBAL_PY_DELETE (see python-byte-compile.bbclass), put in macro/script
%install
%{__python} setup.py install --root=$RPM_BUILD_ROOT \
                             --install-lib=%{pythonroot} \
                             --prefix=/usr \
                             --install-data=/usr/share \
                             --single-version-externally-managed

install -d -m 755 %{buildroot}%{local_bindir}
install -p -D -m 700 scripts/keyringstaging %{buildroot}%{local_bindir}/keyringstaging
install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
install -p -D -m 700 scripts/install_clone.py %{buildroot}%{local_bindir}/install_clone
install -p -D -m 700 scripts/finish_install_clone.sh %{buildroot}%{local_bindir}/finish_install_clone.sh

install -d -m 755 %{buildroot}%{local_goenabledd}
install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh

install -d -m 755 %{buildroot}%{local_etc_initd}
install -p -D -m 755 scripts/controller_config %{buildroot}%{local_etc_initd}/controller_config

# Install Upgrade scripts
install -d -m 755 %{buildroot}%{local_etc_upgraded}
install -p -D -m 755 upgrade-scripts/* %{buildroot}%{local_etc_upgraded}/

install -d -m 755 %{buildroot}%{local_etc_systemd}
install -p -D -m 664 scripts/controllerconfig.service %{buildroot}%{local_etc_systemd}/controllerconfig.service
#install -p -D -m 664 scripts/config.service %{buildroot}%{local_etc_systemd}/config.service

%post
systemctl enable controllerconfig.service

%clean
rm -rf $RPM_BUILD_ROOT

%files
%defattr(-,root,root,-)
%doc LICENSE
%{local_bindir}/*
%dir %{pythonroot}/%{name}
%{pythonroot}/%{name}/*
%dir %{pythonroot}/%{name}-%{version}.0-py2.7.egg-info
%{pythonroot}/%{name}-%{version}.0-py2.7.egg-info/*
%{local_goenabledd}/*
%{local_etc_initd}/*
%dir %{local_etc_upgraded}
%{local_etc_upgraded}/*
%{local_etc_systemd}/*
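Note that the Release tag above is driven by TIS_PATCH_VER from build_srpm.data, so with TIS_PATCH_VER=140 the package release becomes 140 plus the distro suffix carried in _tis_dist (for example 140.tis, assuming the build environment defines _tis_dist as .tis), yielding an RPM named along the lines of controllerconfig-1.0-140.tis.x86_64.rpm.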
controllerconfig/controllerconfig/.coveragerc (new file, 7 lines)
@@ -0,0 +1,7 @@
[run]
branch = True
source = controllerconfig
omit = controllerconfig/tests/*

[report]
ignore_errors = True
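These settings are picked up automatically when coverage is run from this directory; a minimal sketch of driving it through the coverage API (the import of the package under test is illustrative):

    import coverage

    cov = coverage.Coverage()   # reads .coveragerc: branch=True, omit tests
    cov.start()
    import controllerconfig    # exercise the code under test (illustrative)
    cov.stop()
    cov.report()               # per-file line and branch coverage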
controllerconfig/controllerconfig/.gitignore (vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
*.pyc
.coverage
.testrepository
cover
controllerconfig/controllerconfig/.testr.conf (new file, 8 lines)
@@ -0,0 +1,8 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=1 \
    OS_STDERR_CAPTURE=1 \
    OS_TEST_TIMEOUT=60 \
    ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./controllerconfig/tests} $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
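The test_command above means testr ultimately shells out to subunit's discovery runner; the equivalent direct invocation, as a sketch with the environment defaults filled in:

    import subprocess

    # Same test discovery testr performs, with OS_TEST_PATH at its default.
    subprocess.call(['python', '-m', 'subunit.run', 'discover',
                     '-t', './', './controllerconfig/tests'])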
controllerconfig/controllerconfig/LICENSE (new file, 202 lines)
@@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@@ -0,0 +1,5 @@
#
# Copyright (c) 2015 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
controllerconfig/controllerconfig/controllerconfig/backup_restore.py (new file, 1895 lines)
File diff suppressed because it is too large
controllerconfig/controllerconfig/controllerconfig/clone.py (new file, 717 lines)
@@ -0,0 +1,717 @@
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
Clone a Configured System and Install the image on another
identical hardware or the same hardware.
"""

import os
import re
import glob
import time
import shutil
import netaddr
import tempfile
import fileinput
import subprocess

from common import constants
from sysinv.common import constants as si_const
import sysinv_api
import tsconfig.tsconfig as tsconfig
from common import log
from common.exceptions import CloneFail, BackupFail
import utils
import backup_restore

DEBUG = False
LOG = log.get_logger(__name__)
DEVNULL = open(os.devnull, 'w')
CLONE_ARCHIVE_DIR = "clone-archive"
CLONE_ISO_INI = ".cloneiso.ini"
NAME = "name"
INSTALLED = "installed_at"
RESULT = "result"
IN_PROGRESS = "in-progress"
FAIL = "failed"
OK = "ok"


def clone_status():
    """ Check status of last install-clone. """
    INI_FILE1 = os.path.join("/", CLONE_ARCHIVE_DIR, CLONE_ISO_INI)
    INI_FILE2 = os.path.join(tsconfig.PLATFORM_CONF_PATH, CLONE_ISO_INI)
    name = "unknown"
    result = "unknown"
    installed_at = "unknown time"
    for ini_file in [INI_FILE1, INI_FILE2]:
        if os.path.exists(ini_file):
            with open(ini_file) as f:
                s = f.read()
                for line in s.split("\n"):
                    if line.startswith(NAME):
                        name = line.split("=")[1].strip()
                    elif line.startswith(RESULT):
                        result = line.split("=")[1].strip()
                    elif line.startswith(INSTALLED):
                        installed_at = line.split("=")[1].strip()
            break  # one file was found, skip the other file
    if result != "unknown":
        if result == OK:
            print("\nInstallation of cloned image [{}] was successful at {}\n"
                  .format(name, installed_at))
        elif result == FAIL:
            print("\nInstallation of cloned image [{}] failed at {}\n"
                  .format(name, installed_at))
        else:
            print("\ninstall-clone is in progress.\n")
    else:
        print("\nCloned image is not installed on this node.\n")


def check_size(archive_dir):
    """ Check if there is enough space to create iso. """
    overhead_bytes = 1024 ** 3  # extra GB for staging directory
    # Size of the cloned iso is directly proportional to the
    # installed package repository (note that patches are a part of
    # the system archive size below).
    # 1G overhead size added (above) will accommodate the temporary
    # workspace (updating system archive etc) needed to create the iso.
    feed_dir = os.path.join('/www', 'pages', 'feed',
                            'rel-' + tsconfig.SW_VERSION)
    overhead_bytes += backup_restore.backup_std_dir_size(feed_dir)

    cinder_config = False
    backend_services = sysinv_api.get_storage_backend_services()
    for services in backend_services.values():
        if (services.find(si_const.SB_SVC_CINDER) != -1):
            cinder_config = True
            break

    clone_size = (
        overhead_bytes +
        backup_restore.backup_etc_size() +
        backup_restore.backup_config_size(tsconfig.CONFIG_PATH) +
        backup_restore.backup_puppet_data_size(constants.HIERADATA_PERMDIR) +
        backup_restore.backup_keyring_size(backup_restore.keyring_permdir) +
        backup_restore.backup_ldap_size() +
        backup_restore.backup_postgres_size(cinder_config) +
        backup_restore.backup_ceilometer_size(
            backup_restore.ceilometer_permdir) +
        backup_restore.backup_std_dir_size(backup_restore.glance_permdir) +
        backup_restore.backup_std_dir_size(backup_restore.home_permdir) +
        backup_restore.backup_std_dir_size(backup_restore.patching_permdir) +
        backup_restore.backup_std_dir_size(
            backup_restore.patching_repo_permdir) +
        backup_restore.backup_std_dir_size(backup_restore.extension_permdir) +
        backup_restore.backup_std_dir_size(
            backup_restore.patch_vault_permdir) +
        backup_restore.backup_cinder_size(backup_restore.cinder_permdir))

    archive_dir_free_space = \
        utils.filesystem_get_free_space(archive_dir)

    if clone_size > archive_dir_free_space:
        print ("\nArchive directory (%s) does not have enough free "
               "space (%s), estimated size to create image is %s." %
               (archive_dir,
                utils.print_bytes(archive_dir_free_space),
                utils.print_bytes(clone_size)))
        raise CloneFail("Not enough free space.\n")


def update_bootloader_default(bl_file, host):
    """ Update bootloader files for cloned image """
    if not os.path.exists(bl_file):
        LOG.error("{} does not exist".format(bl_file))
        raise CloneFail("{} does not exist".format(os.path.basename(bl_file)))

    # Tags should be in sync with common-bsp/files/centos.syslinux.cfg
    # and common-bsp/files/grub.cfg
    STANDARD_STANDARD = '0'
    STANDARD_EXTENDED = 'S0'
    AIO_STANDARD = '2'
    AIO_EXTENDED = 'S2'
    AIO_LL_STANDARD = '4'
    AIO_LL_EXTENDED = 'S4'
    if "grub.cfg" in bl_file:
        STANDARD_STANDARD = 'standard>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_STANDARD
        STANDARD_EXTENDED = 'standard>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
        AIO_STANDARD = 'aio>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_STANDARD
        AIO_EXTENDED = 'aio>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
        AIO_LL_STANDARD = 'aio-lowlat>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_STANDARD
        AIO_LL_EXTENDED = 'aio-lowlat>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
        SUBMENUITEM_TBOOT = 'tboot'
        SUBMENUITEM_SECUREBOOT = 'secureboot'

    timeout_line = None
    default_line = None
    default_label_num = STANDARD_STANDARD
    if utils.get_system_type() == si_const.TIS_AIO_BUILD:
        if si_const.LOWLATENCY in tsconfig.subfunctions:
            default_label_num = AIO_LL_STANDARD
        else:
            default_label_num = AIO_STANDARD
    if (tsconfig.security_profile ==
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED):
        default_label_num = STANDARD_EXTENDED
        if utils.get_system_type() == si_const.TIS_AIO_BUILD:
            if si_const.LOWLATENCY in tsconfig.subfunctions:
                default_label_num = AIO_LL_EXTENDED
            else:
                default_label_num = AIO_EXTENDED
        if "grub.cfg" in bl_file:
            if host.tboot is not None:
                if host.tboot == "true":
                    default_label_num = default_label_num + '>' + \
                        SUBMENUITEM_TBOOT
                else:
                    default_label_num = default_label_num + '>' + \
                        SUBMENUITEM_SECUREBOOT

    try:
        with open(bl_file) as f:
            s = f.read()
            for line in s.split("\n"):
                if line.startswith("timeout"):
                    timeout_line = line
                elif line.startswith("default"):
                    default_line = line

            if "grub.cfg" in bl_file:
                replace = "default='{}'\ntimeout=10".format(default_label_num)
            else:  # isolinux format
                replace = "default {}\ntimeout 10".format(default_label_num)

            if default_line and timeout_line:
                s = s.replace(default_line, "")
                s = s.replace(timeout_line, replace)
            elif default_line:
                s = s.replace(default_line, replace)
            elif timeout_line:
                s = s.replace(timeout_line, replace)
            else:
                s = replace + s

            s = re.sub(r'boot_device=[^\s]*',
                       'boot_device=%s' % host.boot_device,
                       s)
            s = re.sub(r'rootfs_device=[^\s]*',
                       'rootfs_device=%s' % host.rootfs_device,
                       s)
            s = re.sub(r'console=[^\s]*',
                       'console=%s' % host.console,
                       s)

        with open(bl_file, "w") as f:
            LOG.info("rewriting {}: label={} find=[{}][{}] replace=[{}]"
                     .format(bl_file, default_label_num, timeout_line,
                             default_line, replace.replace('\n', '<newline>')))
            f.write(s)

    except Exception as e:
        LOG.error("update_bootloader_default failed: {}".format(e))
        raise CloneFail("Failed to update bootloader files")


def get_online_cpus():
    """ Get max cpu id """
    with open('/sys/devices/system/cpu/online') as f:
        s = f.read()
        max_cpu_id = s.split('-')[-1].strip()
        LOG.info("Max cpu id:{} [{}]".format(max_cpu_id, s.strip()))
        return max_cpu_id
    return ""


def get_total_mem():
    """ Get total memory size """
    with open('/proc/meminfo') as f:
        s = f.read()
        for line in s.split("\n"):
            if line.startswith("MemTotal:"):
                mem_total = line.split()[1]
                LOG.info("MemTotal:[{}]".format(mem_total))
                return mem_total
    return ""


def get_disk_size(disk):
    """ Get the disk size """
    disk_size = ""
    try:
        disk_size = subprocess.check_output(
            ['lsblk', '--nodeps', '--output', 'SIZE',
             '--noheadings', '--bytes', disk])
    except Exception as e:
        LOG.exception(e)
        LOG.error("Failed to get disk size [{}]".format(disk))
        raise CloneFail("Failed to get disk size")
    return disk_size.strip()


def create_ini_file(clone_archive_dir, iso_name):
    """Create clone ini file."""
    interfaces = ""
    my_hostname = utils.get_controller_hostname()
    macs = sysinv_api.get_mac_addresses(my_hostname)
    for intf in macs.keys():
        interfaces += intf + " "

    disk_paths = ""
    for _, _, files in os.walk('/dev/disk/by-path'):
        for f in files:
            if f.startswith("pci-") and "part" not in f and "usb" not in f:
                disk_size = get_disk_size('/dev/disk/by-path/' + f)
                disk_paths += f + "#" + disk_size + " "
        break  # no need to go into sub-dirs.

    LOG.info("create ini: {} {}".format(macs, files))
    with open(os.path.join(clone_archive_dir, CLONE_ISO_INI), 'w') as f:
        f.write('[clone_iso]\n')
        f.write('name=' + iso_name + '\n')
        f.write('host=' + my_hostname + '\n')
        f.write('created_at=' + time.strftime("%Y-%m-%d %H:%M:%S %Z")
                + '\n')
        f.write('interfaces=' + interfaces + '\n')
        f.write('disks=' + disk_paths + '\n')
        f.write('cpus=' + get_online_cpus() + '\n')
        f.write('mem=' + get_total_mem() + '\n')
    LOG.info("create ini: ({}) ({})".format(interfaces, disk_paths))


def create_iso(iso_name, archive_dir):
    """ Create iso image. This is modelled after
        the cgcs-root/build-tools/build-iso tool. """
    try:
        controller_0 = sysinv_api.get_host_data('controller-0')
    except Exception as e:
        e_log = "Failed to retrieve controller-0 inventory details."
        LOG.exception(e_log)
        raise CloneFail(e_log)

    iso_dir = os.path.join(archive_dir, 'isolinux')
    clone_archive_dir = os.path.join(iso_dir, CLONE_ARCHIVE_DIR)
    output = None
    tmpdir = None
    total_steps = 6
    step = 1
    print ("\nCreating ISO:")

    # Add the correct kick-start file to the image
    ks_file = "controller_ks.cfg"
    if utils.get_system_type() == si_const.TIS_AIO_BUILD:
        if si_const.LOWLATENCY in tsconfig.subfunctions:
            ks_file = "smallsystem_lowlatency_ks.cfg"
        else:
            ks_file = "smallsystem_ks.cfg"

    try:
        # prepare the iso files
        images_dir = os.path.join(iso_dir, 'images')
        os.mkdir(images_dir, 0644)
        pxe_dir = os.path.join('/pxeboot',
                               'rel-' + tsconfig.SW_VERSION)
        os.symlink(pxe_dir + '/installer-bzImage',
                   iso_dir + '/vmlinuz')
        os.symlink(pxe_dir + '/installer-initrd',
                   iso_dir + '/initrd.img')
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        feed_dir = os.path.join('/www', 'pages', 'feed',
                                'rel-' + tsconfig.SW_VERSION)
        os.symlink(feed_dir + '/Packages', iso_dir + '/Packages')
        os.symlink(feed_dir + '/repodata', iso_dir + '/repodata')
        os.symlink(feed_dir + '/LiveOS', iso_dir + '/LiveOS')
        shutil.copy2(feed_dir + '/isolinux.cfg', iso_dir)
        update_bootloader_default(iso_dir + '/isolinux.cfg', controller_0)
        shutil.copyfile('/usr/share/syslinux/isolinux.bin',
                        iso_dir + '/isolinux.bin')
        os.symlink('/usr/share/syslinux/vesamenu.c32',
                   iso_dir + '/vesamenu.c32')
        for filename in glob.glob(os.path.join(feed_dir, '*ks.cfg')):
            shutil.copy(os.path.join(feed_dir, filename), iso_dir)
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        efiboot_dir = os.path.join(iso_dir, 'EFI', 'BOOT')
        os.makedirs(efiboot_dir, 0644)
        l_efi_dir = os.path.join('/boot', 'efi', 'EFI')
        shutil.copy2(l_efi_dir + '/BOOT/BOOTX64.EFI', efiboot_dir)
        shutil.copy2(l_efi_dir + '/centos/MokManager.efi', efiboot_dir)
        shutil.copy2(l_efi_dir + '/centos/grubx64.efi', efiboot_dir)
        shutil.copy2('/pxeboot/EFI/grub.cfg', efiboot_dir)
        update_bootloader_default(efiboot_dir + '/grub.cfg', controller_0)
        shutil.copytree(l_efi_dir + '/centos/fonts',
                        efiboot_dir + '/fonts')
        # copy EFI boot image and update the grub.cfg file
        efi_img = images_dir + '/efiboot.img'
        shutil.copy2(pxe_dir + '/efiboot.img', efi_img)
        tmpdir = tempfile.mkdtemp(dir=archive_dir)
        output = subprocess.check_output(
            ["mount", "-t", "vfat", "-o", "loop",
             efi_img, tmpdir],
            stderr=subprocess.STDOUT)
        # replace the grub.cfg file with the updated file
        efi_grub_f = os.path.join(tmpdir, 'EFI', 'BOOT', 'grub.cfg')
        os.remove(efi_grub_f)
        shutil.copy2(efiboot_dir + '/grub.cfg', efi_grub_f)
        subprocess.call(['umount', tmpdir])
        shutil.rmtree(tmpdir, ignore_errors=True)
        tmpdir = None

        epoch_time = "%.9f" % time.time()
        disc_info = [epoch_time, tsconfig.SW_VERSION, "x86_64"]
        with open(iso_dir + '/.discinfo', 'w') as f:
            f.write('\n'.join(disc_info))

        # copy the latest install_clone executable
        shutil.copy2('/usr/bin/install_clone', iso_dir)
        subprocess.check_output("cat /pxeboot/post_clone_iso_ks.cfg >> " +
                                iso_dir + "/" + ks_file, shell=True)
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        # copy patches
        iso_patches_dir = os.path.join(iso_dir, 'patches')
        iso_patch_repo_dir = os.path.join(iso_patches_dir, 'repodata')
        iso_patch_pkgs_dir = os.path.join(iso_patches_dir, 'Packages')
        iso_patch_metadata_dir = os.path.join(iso_patches_dir, 'metadata')
        iso_patch_applied_dir = os.path.join(iso_patch_metadata_dir, 'applied')
        iso_patch_committed_dir = os.path.join(iso_patch_metadata_dir,
                                               'committed')

        os.mkdir(iso_patches_dir, 0755)
        os.mkdir(iso_patch_repo_dir, 0755)
        os.mkdir(iso_patch_pkgs_dir, 0755)
        os.mkdir(iso_patch_metadata_dir, 0755)
        os.mkdir(iso_patch_applied_dir, 0755)
        os.mkdir(iso_patch_committed_dir, 0755)

        repodata = '/www/pages/updates/rel-%s/repodata/' % tsconfig.SW_VERSION
        pkgsdir = '/www/pages/updates/rel-%s/Packages/' % tsconfig.SW_VERSION
        patch_applied_dir = '/opt/patching/metadata/applied/'
        patch_committed_dir = '/opt/patching/metadata/committed/'
        subprocess.check_call(['rsync', '-a', repodata,
                               '%s/' % iso_patch_repo_dir])
        if os.path.exists(pkgsdir):
            subprocess.check_call(['rsync', '-a', pkgsdir,
                                   '%s/' % iso_patch_pkgs_dir])
        if os.path.exists(patch_applied_dir):
            subprocess.check_call(['rsync', '-a', patch_applied_dir,
                                   '%s/' % iso_patch_applied_dir])
        if os.path.exists(patch_committed_dir):
            subprocess.check_call(['rsync', '-a', patch_committed_dir,
                                   '%s/' % iso_patch_committed_dir])
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        create_ini_file(clone_archive_dir, iso_name)

        os.chmod(iso_dir + '/isolinux.bin', 0664)
        iso_file = os.path.join(archive_dir, iso_name + ".iso")
        output = subprocess.check_output(
            ["nice", "mkisofs",
             "-o", iso_file, "-R", "-D",
             "-A", "oe_iso_boot", "-V", "oe_iso_boot",
             "-f", "-quiet",
             "-b", "isolinux.bin", "-c", "boot.cat", "-no-emul-boot",
             "-boot-load-size", "4", "-boot-info-table",
             "-eltorito-alt-boot", "-e", "images/efiboot.img",
             "-no-emul-boot",
             iso_dir],
            stderr=subprocess.STDOUT)
        LOG.info("{} created: [{}]".format(iso_file, output))
        utils.progress(total_steps, step, 'iso created', 'DONE')
        step += 1

        output = subprocess.check_output(
            ["nice", "isohybrid",
             "--uefi",
             iso_file],
            stderr=subprocess.STDOUT)
        LOG.debug("isohybrid: {}".format(output))

        output = subprocess.check_output(
            ["nice", "implantisomd5",
             iso_file],
            stderr=subprocess.STDOUT)
        LOG.debug("implantisomd5: {}".format(output))
        utils.progress(total_steps, step, 'checksum implanted', 'DONE')
        print("Cloned iso image created: {}".format(iso_file))

    except Exception as e:
        LOG.exception(e)
        e_log = "ISO creation ({}) failed".format(iso_name)
        if output:
            e_log += ' [' + output + ']'
        LOG.error(e_log)
        raise CloneFail("ISO creation failed.")

    finally:
        if tmpdir:
            subprocess.call(['umount', tmpdir], stderr=DEVNULL)
            shutil.rmtree(tmpdir, ignore_errors=True)


def find_and_replace_in_file(target, find, replace):
    """ Find and replace a string in a file. """
    found = None
    try:
        for line in fileinput.FileInput(target, inplace=1):
            if find in line:
                # look for "find" string within word boundaries
                fpat = r'\b' + find + r'\b'
                line = re.sub(fpat, replace, line)
                found = True
            print line,

    except Exception as e:
        LOG.error("Failed to replace [{}] with [{}] in [{}]: {}"
                  .format(find, replace, target, str(e)))
        found = None
    finally:
        fileinput.close()
    return found


def find_and_replace(target_list, find, replace):
    """ Find and replace a string in all files in a directory. """
    found = False
    file_list = []
    for target in target_list:
        if os.path.isfile(target):
            if find_and_replace_in_file(target, find, replace):
                found = True
                file_list.append(target)
        elif os.path.isdir(target):
            try:
                output = subprocess.check_output(
                    ['grep', '-rl', find, target])
                if output:
                    for line in output.split('\n'):
                        if line and find_and_replace_in_file(
                                line, find, replace):
                            found = True
                            file_list.append(line)
            except Exception:
                pass  # nothing found in that directory
    if not found:
        LOG.error("[{}] not found in backup".format(find))
    else:
        LOG.info("Replaced [{}] with [{}] in {}".format(
            find, replace, file_list))


def remove_from_archive(archive, unwanted):
    """ Remove a file from the archive. """
    try:
        subprocess.check_call(["tar", "--delete",
                               "--file=" + archive,
                               unwanted])
    except subprocess.CalledProcessError as e:
        LOG.error("Delete of {} failed: {}".format(unwanted, e.output))
        raise CloneFail("Failed to modify backup archive")


def update_oamip_in_archive(tmpdir):
    """ Update OAM IP in system archive file. """
    oam_list = sysinv_api.get_oam_ip()
    if not oam_list:
        raise CloneFail("Failed to get OAM IP")
    for oamfind in [oam_list.oam_start_ip, oam_list.oam_end_ip,
                    oam_list.oam_subnet, oam_list.oam_floating_ip,
                    oam_list.oam_c0_ip, oam_list.oam_c1_ip]:
        if not oamfind:
            continue
        ip = netaddr.IPNetwork(oamfind)
        find_str = ""
        if ip.version == 4:
            # if ipv4, use 192.0.x.x as the temporary oam ip
            find_str = str(ip.ip)
            ipstr_list = find_str.split('.')
            ipstr_list[0] = '192'
            ipstr_list[1] = '0'
            repl_ipstr = ".".join(ipstr_list)
        else:
            # if ipv6, use 2001:db8:x as the temporary oam ip
            find_str = str(ip.ip)
            ipstr_list = find_str.split(':')
            ipstr_list[0] = '2001'
            ipstr_list[1] = 'db8'
            repl_ipstr = ":".join(ipstr_list)
        if repl_ipstr:
            find_and_replace(
                [os.path.join(tmpdir, 'etc/hosts'),
                 os.path.join(tmpdir, 'etc/sysconfig/network-scripts'),
                 os.path.join(tmpdir, 'etc/nfv/vim/config.ini'),
                 os.path.join(tmpdir, 'etc/haproxy/haproxy.cfg'),
                 os.path.join(tmpdir, 'etc/heat/heat.conf'),
                 os.path.join(tmpdir, 'etc/keepalived/keepalived.conf'),
                 os.path.join(tmpdir, 'etc/murano/murano.conf'),
                 os.path.join(tmpdir, 'etc/vswitch/vswitch.ini'),
                 os.path.join(tmpdir, 'etc/nova/nova.conf'),
                 os.path.join(tmpdir, 'config/hosts'),
                 os.path.join(tmpdir, 'hieradata'),
                 os.path.join(tmpdir, 'postgres/keystone.sql.data'),
                 os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
                find_str, repl_ipstr)
        else:
            LOG.error("Failed to modify OAM IP:[{}]"
                      .format(oamfind))
            raise CloneFail("Failed to modify OAM IP")


def update_mac_in_archive(tmpdir):
    """ Update MAC addresses in system archive file. """
    hostname = utils.get_controller_hostname()
    macs = sysinv_api.get_mac_addresses(hostname)
    for intf, mac in macs.iteritems():
        find_and_replace(
            [os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
            mac, "CLONEISOMAC_{}{}".format(hostname, intf))

    if (tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX or
            tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX_DIRECT):
        hostname = utils.get_mate_controller_hostname()
        macs = sysinv_api.get_mac_addresses(hostname)
        for intf, mac in macs.iteritems():
            find_and_replace(
                [os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
                mac, "CLONEISOMAC_{}{}".format(hostname, intf))


def update_disk_serial_id_in_archive(tmpdir):
    """ Update disk serial id in system archive file. """
    hostname = utils.get_controller_hostname()
    disk_sids = sysinv_api.get_disk_serial_ids(hostname)
    for d_dnode, d_sid in disk_sids.iteritems():
        find_and_replace(
            [os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
            d_sid, "CLONEISODISKSID_{}{}".format(hostname, d_dnode))

    if (tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX or
            tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX_DIRECT):
        hostname = utils.get_mate_controller_hostname()
        disk_sids = sysinv_api.get_disk_serial_ids(hostname)
        for d_dnode, d_sid in disk_sids.iteritems():
            find_and_replace(
                [os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
                d_sid, "CLONEISODISKSID_{}{}".format(hostname, d_dnode))


def update_sysuuid_in_archive(tmpdir):
    """ Update system uuid in system archive file. """
    sysuuid = sysinv_api.get_system_uuid()
    find_and_replace(
        [os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
        sysuuid, "CLONEISO_SYSTEM_UUID")


def update_backup_archive(backup_name, archive_dir):
    """ Update backup archive file to be included in clone-iso """
    path_to_archive = os.path.join(archive_dir, backup_name)
    tmpdir = tempfile.mkdtemp(dir=archive_dir)
    try:
        subprocess.check_call(
            ['gunzip', path_to_archive + '.tgz'],
            stdout=DEVNULL, stderr=DEVNULL)
        # 70-persistent-net.rules with the correct MACs will be
        # generated on the linux boot on the cloned side. Remove
        # the stale file from original side.
        remove_from_archive(path_to_archive + '.tar',
                            'etc/udev/rules.d/70-persistent-net.rules')
        # Extract only a subset of directories which have files to be
        # updated for oam-ip and MAC addresses. After updating the files
        # these directories are added back to the archive.
        subprocess.check_call(
            ['tar', '-x',
             '--directory=' + tmpdir,
             '-f', path_to_archive + '.tar',
             'etc', 'postgres', 'config',
             'hieradata'],
            stdout=DEVNULL, stderr=DEVNULL)
        update_oamip_in_archive(tmpdir)
        update_mac_in_archive(tmpdir)
        update_disk_serial_id_in_archive(tmpdir)
        update_sysuuid_in_archive(tmpdir)
        subprocess.check_call(
            ['tar', '--update',
             '--directory=' + tmpdir,
             '-f', path_to_archive + '.tar',
             'etc', 'postgres', 'config',
             'hieradata'],
            stdout=DEVNULL, stderr=DEVNULL)
        subprocess.check_call(['gzip', path_to_archive + '.tar'])
        shutil.move(path_to_archive + '.tar.gz', path_to_archive + '.tgz')

    except Exception as e:
        LOG.error("Update of backup archive {} failed {}".format(
            path_to_archive, str(e)))
        raise CloneFail("Failed to update backup archive")

    finally:
        if not DEBUG:
            shutil.rmtree(tmpdir, ignore_errors=True)


def validate_controller_state():
    """ Cloning allowed now? """
    # Check if this Controller is enabled and provisioned
    try:
        if not sysinv_api.controller_enabled_provisioned(
                utils.get_controller_hostname()):
            raise CloneFail("Controller is not enabled/provisioned")
        if (tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX or
                tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX_DIRECT):
            if not sysinv_api.controller_enabled_provisioned(
                    utils.get_mate_controller_hostname()):
                raise CloneFail("Mate controller is not enabled/provisioned")
    except CloneFail:
        raise
    except Exception:
        raise CloneFail("Controller is not enabled/provisioned")

    if utils.get_system_type() != si_const.TIS_AIO_BUILD:
        raise CloneFail("Cloning supported only on All-in-one systems")

    if len(sysinv_api.get_alarms()) > 0:
        raise CloneFail("There are active alarms on this system!")


def clone(backup_name, archive_dir):
    """ Do Cloning """
    validate_controller_state()
    LOG.info("Cloning [{}] at [{}]".format(backup_name, archive_dir))
    check_size(archive_dir)

    isolinux_dir = os.path.join(archive_dir, 'isolinux')
    clone_archive_dir = os.path.join(isolinux_dir, CLONE_ARCHIVE_DIR)
    if os.path.exists(isolinux_dir):
        LOG.info("deleting old iso_dir %s" % isolinux_dir)
        shutil.rmtree(isolinux_dir, ignore_errors=True)
    os.makedirs(clone_archive_dir, 0644)

    try:
        backup_restore.backup(backup_name, clone_archive_dir, clone=True)
        LOG.info("system backup done")
        update_backup_archive(backup_name + '_system', clone_archive_dir)
        create_iso(backup_name, archive_dir)
    except BackupFail as e:
        raise CloneFail(e.message)
    except CloneFail as e:
        raise
    finally:
        if not DEBUG:
            shutil.rmtree(isolinux_dir, ignore_errors=True)
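For reference, create_ini_file() above writes, and clone_status() later parses, a flat key=value file; a sketch of its shape with illustrative values (the installed_at and result keys are presumably appended on the installed side by install-clone):

    # Illustrative .cloneiso.ini contents and the same parse that
    # clone_status() performs.
    sample = """[clone_iso]
    name=backup-controller-0
    host=controller-0
    created_at=2018-05-30 12:00:00 UTC
    installed_at=2018-05-31 09:00:00 UTC
    result=ok
    """
    for line in sample.split("\n"):
        if line.strip().startswith("result"):
            print(line.split("=")[1].strip())   # -> ok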
@@ -0,0 +1,5 @@
#
# Copyright (c) 2015 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -0,0 +1,93 @@
#
# Copyright (c) 2016-2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from sysinv.common import constants as sysinv_constants
from tsconfig import tsconfig


CONFIG_WORKDIR = '/tmp/config'
CGCS_CONFIG_FILE = CONFIG_WORKDIR + '/cgcs_config'
CONFIG_PERMDIR = tsconfig.CONFIG_PATH

HIERADATA_WORKDIR = '/tmp/hieradata'
HIERADATA_PERMDIR = tsconfig.PUPPET_PATH + 'hieradata'

KEYRING_WORKDIR = '/tmp/python_keyring'
KEYRING_PERMDIR = tsconfig.KEYRING_PATH

INITIAL_CONFIG_COMPLETE_FILE = '/etc/platform/.initial_config_complete'
CONFIG_FAIL_FILE = '/var/run/.config_fail'
COMMON_CERT_FILE = "/etc/ssl/private/server-cert.pem"
FIREWALL_RULES_FILE = '/etc/platform/iptables.rules'
OPENSTACK_PASSWORD_RULES_FILE = '/etc/keystone/password-rules.conf'
INSTALLATION_FAILED_FILE = '/etc/platform/installation_failed'

BACKUPS_PATH = '/opt/backups'

INTERFACES_LOG_FILE = "/tmp/configure_interfaces.log"
TC_SETUP_SCRIPT = '/usr/local/bin/cgcs_tc_setup.sh'

LINK_MTU_DEFAULT = "1500"

CINDER_LVM_THIN = "thin"
CINDER_LVM_THICK = "thick"

DEFAULT_IMAGE_STOR_SIZE = \
    sysinv_constants.DEFAULT_IMAGE_STOR_SIZE
DEFAULT_DATABASE_STOR_SIZE = \
    sysinv_constants.DEFAULT_DATABASE_STOR_SIZE
DEFAULT_IMG_CONVERSION_STOR_SIZE = \
    sysinv_constants.DEFAULT_IMG_CONVERSION_STOR_SIZE
DEFAULT_SMALL_IMAGE_STOR_SIZE = \
    sysinv_constants.DEFAULT_SMALL_IMAGE_STOR_SIZE
DEFAULT_SMALL_DATABASE_STOR_SIZE = \
    sysinv_constants.DEFAULT_SMALL_DATABASE_STOR_SIZE
DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE = \
    sysinv_constants.DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE
DEFAULT_SMALL_BACKUP_STOR_SIZE = \
    sysinv_constants.DEFAULT_SMALL_BACKUP_STOR_SIZE
DEFAULT_VIRTUAL_IMAGE_STOR_SIZE = \
    sysinv_constants.DEFAULT_VIRTUAL_IMAGE_STOR_SIZE
DEFAULT_VIRTUAL_DATABASE_STOR_SIZE = \
    sysinv_constants.DEFAULT_VIRTUAL_DATABASE_STOR_SIZE
DEFAULT_VIRTUAL_IMG_CONVERSION_STOR_SIZE = \
    sysinv_constants.DEFAULT_VIRTUAL_IMG_CONVERSION_STOR_SIZE
DEFAULT_VIRTUAL_BACKUP_STOR_SIZE = \
    sysinv_constants.DEFAULT_VIRTUAL_BACKUP_STOR_SIZE
DEFAULT_EXTENSION_STOR_SIZE = \
    sysinv_constants.DEFAULT_EXTENSION_STOR_SIZE

VALID_LINK_SPEED_MGMT = [sysinv_constants.LINK_SPEED_1G,
                         sysinv_constants.LINK_SPEED_10G,
                         sysinv_constants.LINK_SPEED_25G]
VALID_LINK_SPEED_INFRA = [sysinv_constants.LINK_SPEED_1G,
                          sysinv_constants.LINK_SPEED_10G,
                          sysinv_constants.LINK_SPEED_25G]

SYSTEM_CONFIG_TIMEOUT = 300
SERVICE_ENABLE_TIMEOUT = 180
MINIMUM_ROOT_DISK_SIZE = 500
MAXIMUM_CGCS_LV_SIZE = 500
LDAP_CONTROLLER_CONFIGURE_TIMEOUT = 30
WRSROOT_MAX_PASSWORD_AGE = 45  # 45 days

LAG_MODE_ACTIVE_BACKUP = "active-backup"
LAG_MODE_BALANCE_XOR = "balance-xor"
LAG_MODE_8023AD = "802.3ad"

LAG_TXHASH_LAYER2 = "layer2"

LAG_MIIMON_FREQUENCY = 100

LOOPBACK_IFNAME = 'lo'

DEFAULT_MULTICAST_SUBNET_IPV4 = '239.1.1.0/28'
DEFAULT_MULTICAST_SUBNET_IPV6 = 'ff08::1:1:0/124'

DEFAULT_MGMT_ON_LOOPBACK_SUBNET_IPV4 = '127.168.204.0/24'

DEFAULT_REGION_NAME = "RegionOne"
DEFAULT_SERVICE_PROJECT_NAME = "services"
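A small usage sketch, not from this commit, of how the link-speed lists above are meant to be consumed during interface validation (the helper name is hypothetical):

    # Hypothetical validation helper built on the constants above.
    def check_mgmt_link_speed(speed):
        if speed not in VALID_LINK_SPEED_MGMT:
            raise ValueError("unsupported management link speed: %s" % speed)

    check_mgmt_link_speed(sysinv_constants.LINK_SPEED_10G)   # passes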
controllerconfig/controllerconfig/controllerconfig/common/dcmanager.py (new executable file, 44 lines)
@@ -0,0 +1,44 @@
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
DC Manager Interactions
"""

import log

from Crypto.Hash import MD5
from configutilities.common import crypt

import json


LOG = log.get_logger(__name__)


class UserList(object):
    """
    User List
    """
    def __init__(self, user_data, hash_string):
        # Decrypt the data using input hash_string to generate
        # the key
        h = MD5.new()
        h.update(hash_string)
        encryption_key = h.hexdigest()
        user_data_decrypted = crypt.urlsafe_decrypt(encryption_key,
                                                    user_data)

        self._data = json.loads(user_data_decrypted)

    def get_password(self, name):
        """
        Search the users for the password
        """
        for user in self._data:
            if user['name'] == name:
                return user['password']
        return None
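A minimal usage sketch, assuming the caller already holds the encrypted payload and the shared hash string distributed by the DC manager (both variables below are placeholders, not values from this commit):

    # payload and hash_string are placeholders; in practice they come
    # from the distributed cloud bootstrap data.
    users = UserList(payload, hash_string)
    admin_password = users.get_password('admin')
    if admin_password is None:
        raise KeyError("no such user in the decrypted user list")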
@@ -0,0 +1,51 @@
#
# Copyright (c) 2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
Configuration Errors
"""

from configutilities import ConfigError


class BackupFail(ConfigError):
    """Backup error."""
    pass


class UpgradeFail(ConfigError):
    """Upgrade error."""
    pass


class BackupWarn(ConfigError):
    """Backup warning."""
    pass


class RestoreFail(ConfigError):
    """Restore error."""
    pass


class KeystoneFail(ConfigError):
    """Keystone error."""
    pass


class SysInvFail(ConfigError):
    """System Inventory error."""
    pass


class UserQuit(ConfigError):
    """User initiated quit operation."""
    pass


class CloneFail(ConfigError):
    """Clone error."""
    pass
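Since every class above derives from configutilities' ConfigError, callers can trap any of these failures with a single handler; a sketch (do_clone is a stand-in name):

    # do_clone() stands in for any operation raising CloneFail etc.
    try:
        do_clone()
    except ConfigError as e:   # also catches BackupFail, RestoreFail, ...
        print("configuration operation failed: %s" % e)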
controllerconfig/controllerconfig/controllerconfig/common/keystone.py (new executable file, 246 lines)
@@ -0,0 +1,246 @@
|
||||
#
|
||||
# Copyright (c) 2014-2016 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
"""
|
||||
OpenStack Keystone Interactions
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import iso8601
|
||||
|
||||
from exceptions import KeystoneFail
|
||||
import log
|
||||
|
||||
|
||||
LOG = log.get_logger(__name__)
|
||||
|
||||
|
||||
class Token(object):
|
||||
def __init__(self, token_data, token_id):
|
||||
self._expired = False
|
||||
self._data = token_data
|
||||
self._token_id = token_id
|
||||
|
||||
def set_expired(self):
|
||||
""" Indicate token is expired """
|
||||
self._expired = True
|
||||
|
||||
def is_expired(self, within_seconds=300):
|
||||
""" Check if token is expired """
|
||||
if not self._expired:
|
||||
end = iso8601.parse_date(self._data['token']['expires_at'])
|
||||
now = iso8601.parse_date(datetime.datetime.utcnow().isoformat())
|
||||
delta = abs(end - now).seconds
|
||||
return delta <= within_seconds
|
||||
return True
|
||||
|
||||
def get_id(self):
|
||||
""" Get the identifier of the token """
|
||||
return self._token_id
|
||||
|
||||
def get_service_admin_url(self, service_type, service_name, region_name):
|
||||
""" Search the catalog of a service for the administrative url """
|
||||
return self.get_service_url(region_name, service_name,
|
||||
service_type, 'admin')
|
||||
|
||||
    def get_service_url(self, region_name, service_name, service_type,
                        endpoint_type):
        """
        Search the catalog of a service in a region for the url
        """
        for catalog in self._data['token']['catalog']:
            if catalog['type'] == service_type:
                if catalog['name'] == service_name:
                    if len(catalog['endpoints']) != 0:
                        for endpoint in catalog['endpoints']:
                            if (endpoint['region'] == region_name and
                                    endpoint['interface'] == endpoint_type):
                                return endpoint['url']

        raise KeystoneFail((
            "Keystone service type %s, name %s, region %s, endpoint type %s "
            "not available" %
            (service_type, service_name, region_name, endpoint_type)))


class Service(object):
    """
    Keystone Service
    """
    def __init__(self, service_data):
        self._data = service_data

    def get_id(self):
        if 'id' in self._data['service']:
            return self._data['service']['id']
        return None


class ServiceList(object):
    """
    Keystone Service List
    """
    def __init__(self, service_data):
        self._data = service_data

    def get_service_id(self, name, type):
        """
        Search the services for the id
        """
        for service in self._data['services']:
            if service['name'] == name:
                if service['type'] == type:
                    return service['id']

        raise KeystoneFail((
            "Keystone service type %s, name %s not available" %
            (type, name)))


class Project(object):
    """
    Keystone Project
    """
    def __init__(self, project_data):
        self._data = project_data

    def get_id(self):
        if 'id' in self._data['project']:
            return self._data['project']['id']
        return None


class ProjectList(object):
    """
    Keystone Project List
    """
    def __init__(self, project_data):
        self._data = project_data

    def get_project_id(self, name):
        """
        Search the projects for the id
        """
        for project in self._data['projects']:
            if project['name'] == name:
                return project['id']
        return None


class Endpoint(object):
    """
    Keystone Endpoint
    """
    def __init__(self, endpoint_data):
        self._data = endpoint_data

    def get_id(self):
        if 'id' in self._data['endpoint']:
            return self._data['endpoint']['id']
        return None


class EndpointList(object):
    """
    Keystone Endpoint List
    """
    def __init__(self, endpoint_data):
        self._data = endpoint_data

    def get_service_url(self, region_name, service_id, endpoint_type):
        """
        Search the endpoints for the url
        """
        for endpoint in self._data['endpoints']:
            if endpoint['service_id'] == service_id:
                if (endpoint['region'] == region_name and
                        endpoint['interface'] == endpoint_type):
                    return endpoint['url']

        raise KeystoneFail((
            "Keystone service id %s, region %s, endpoint type %s not "
            "available" % (service_id, region_name, endpoint_type)))


class User(object):
    """
    Keystone User
    """
    def __init__(self, user_data):
        self._data = user_data

    def get_user_id(self):
        return self._data['user']['id']


class UserList(object):
    """
    Keystone User List
    """
    def __init__(self, user_data):
        self._data = user_data

    def get_user_id(self, name):
        """
        Search the users for the id
        """
        for user in self._data['users']:
            if user['name'] == name:
                return user['id']
        return None


class Role(object):
    """
    Keystone Role
    """
    def __init__(self, role_data):
        self._data = role_data


class RoleList(object):
    """
    Keystone Role List
    """
    def __init__(self, role_data):
        self._data = role_data

    def get_role_id(self, name):
        """
        Search the roles for the id
        """
        for role in self._data['roles']:
            if role['name'] == name:
                return role['id']
        return None


class Domain(object):
    """
    Keystone Domain
    """
    def __init__(self, domain_data):
        self._data = domain_data

    def get_domain_id(self):
        return self._data['domain']['id']


class DomainList(object):
    """
    Keystone Domain List
    """
    def __init__(self, domain_data):
        self._data = domain_data

    def get_domain_id(self, name):
        """
        Search the domains for the id
        """
        for domain in self._data['domains']:
            if domain['name'] == name:
                return domain['id']
        return None
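
# Illustrative sketch (not part of the commit): resolving a service id and an
# endpoint URL with the wrappers above. The ids, region and URL are invented
# placeholder values.
#
#     services = ServiceList({'services': [
#         {'id': 'abc123', 'name': 'sysinv', 'type': 'platform'}]})
#     endpoints = EndpointList({'endpoints': [
#         {'id': 'def456', 'service_id': 'abc123', 'region': 'RegionOne',
#          'interface': 'admin', 'url': 'http://192.168.204.2:6385/v1'}]})
#     service_id = services.get_service_id('sysinv', 'platform')  # 'abc123'
#     url = endpoints.get_service_url('RegionOne', service_id, 'admin')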
@@ -0,0 +1,49 @@
#
# Copyright (c) 2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
Logging
"""

import logging
import logging.handlers

_loggers = {}


def get_logger(name):
    """ Get a logger or create one """

    if name not in _loggers:
        _loggers[name] = logging.getLogger(name)

    return _loggers[name]


def setup_logger(logger):
    """ Set up a logger """

    # Send logs to /var/log/platform.log
    syslog_facility = logging.handlers.SysLogHandler.LOG_LOCAL1

    formatter = logging.Formatter("configassistant[%(process)d] " +
                                  "%(pathname)s:%(lineno)s " +
                                  "%(levelname)8s [%(name)s] %(message)s")

    handler = logging.handlers.SysLogHandler(address='/dev/log',
                                             facility=syslog_facility)
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)

    logger.addHandler(handler)
    logger.setLevel(logging.INFO)


def configure():
    """ Set up logging """

    for logger in _loggers:
        setup_logger(_loggers[logger])
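
# Minimal usage sketch (assumes a syslog daemon listening on /dev/log and
# routing LOG_LOCAL1 to /var/log/platform.log):
#
#     LOG = get_logger(__name__)
#     configure()  # attaches the syslog handler to all loggers created so far
#     LOG.info("controller configuration starting")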
336
controllerconfig/controllerconfig/controllerconfig/common/rest_api_utils.py
Executable file
@@ -0,0 +1,336 @@
"""
Copyright (c) 2015-2017 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""
import httplib
import json
import urllib2

from exceptions import KeystoneFail
import dcmanager
import keystone
import log

LOG = log.get_logger(__name__)


def rest_api_request(token, method, api_cmd, api_cmd_headers=None,
                     api_cmd_payload=None):
    """
    Make a REST API request
    """
    try:
        request_info = urllib2.Request(api_cmd)
        request_info.get_method = lambda: method
        request_info.add_header("X-Auth-Token", token.get_id())
        request_info.add_header("Accept", "application/json")

        if api_cmd_headers is not None:
            for header_type, header_value in api_cmd_headers.items():
                request_info.add_header(header_type, header_value)

        if api_cmd_payload is not None:
            request_info.add_header("Content-type", "application/json")
            request_info.add_data(api_cmd_payload)

        request = urllib2.urlopen(request_info)
        response = request.read()

        if response == "":
            response = json.loads("{}")
        else:
            response = json.loads(response)
        request.close()

        return response

    except urllib2.HTTPError as e:
        if e.code == httplib.UNAUTHORIZED:
            token.set_expired()
        LOG.exception(e)
        raise KeystoneFail(
            "REST API HTTP Error for url: %s. Error: %s" %
            (api_cmd, e))

    except (urllib2.URLError, httplib.BadStatusLine) as e:
        LOG.exception(e)
        raise KeystoneFail(
            "REST API URL Error for url: %s. Error: %s" %
            (api_cmd, e))

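# Usage sketch for rest_api_request (illustrative only; the URL and payload
# below are invented, not endpoints defined by this module):
#
#     payload = json.dumps({'enabled': True})
#     response = rest_api_request(token, "PATCH",
#                                 "http://192.168.204.2:5000/v3/domains/abc",
#                                 api_cmd_payload=payload)
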
def get_token(auth_url, auth_project, auth_user, auth_password,
              user_domain, project_domain):
    """
    Ask OpenStack Keystone for a token
    """
    try:
        url = auth_url + "/auth/tokens"
        request_info = urllib2.Request(url)
        request_info.add_header("Content-Type", "application/json")
        request_info.add_header("Accept", "application/json")

        payload = json.dumps(
            {"auth": {
                "identity": {
                    "methods": [
                        "password"
                    ],
                    "password": {
                        "user": {
                            "name": auth_user,
                            "password": auth_password,
                            "domain": {"name": user_domain}
                        }
                    }
                },
                "scope": {
                    "project": {
                        "name": auth_project,
                        "domain": {"name": project_domain}
                    }}}})

        request_info.add_data(payload)

        request = urllib2.urlopen(request_info)
        # Identity API v3 returns token id in X-Subject-Token
        # response header.
        token_id = request.info().getheader('X-Subject-Token')
        response = json.loads(request.read())
        request.close()

        return keystone.Token(response, token_id)

    except urllib2.HTTPError as e:
        LOG.error("%s, %s" % (e.code, e.read()))
        return None

    except (urllib2.URLError, httplib.BadStatusLine) as e:
        LOG.error(e)
        return None

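# Sketch of a typical get_token() call (placeholder credentials):
#
#     token = get_token("http://192.168.204.2:5000/v3", "admin", "admin",
#                       "secret", "Default", "Default")
#     if token is None:
#         raise KeystoneFail("authentication failed")
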
def get_services(token, api_url):
    """
    Ask OpenStack Keystone for a list of services
    """
    api_cmd = api_url + "/services"
    response = rest_api_request(token, "GET", api_cmd)
    return keystone.ServiceList(response)


def create_service(token, api_url, name, type, description):
    """
    Ask OpenStack Keystone to create a service
    """
    api_cmd = api_url + "/services"
    req = json.dumps({"service": {
        "name": name,
        "type": type,
        "description": description}})
    response = rest_api_request(token, "POST", api_cmd, api_cmd_payload=req)
    return keystone.Service(response)


def delete_service(token, api_url, id):
    """
    Ask OpenStack Keystone to delete a service
    """
    api_cmd = api_url + "/services/" + id
    response = rest_api_request(token, "DELETE", api_cmd)
    return keystone.Service(response)


def get_endpoints(token, api_url):
    """
    Ask OpenStack Keystone for a list of endpoints
    """
    api_cmd = api_url + "/endpoints"
    response = rest_api_request(token, "GET", api_cmd)
    return keystone.EndpointList(response)


def create_endpoint(token, api_url, service_id, region_name, type, url):
    """
    Ask OpenStack Keystone to create an endpoint
    """
    api_cmd = api_url + "/endpoints"
    req = json.dumps({"endpoint": {
        "region": region_name,
        "service_id": service_id,
        "interface": type,
        "url": url}})
    response = rest_api_request(token, "POST", api_cmd, api_cmd_payload=req)
    return keystone.Endpoint(response)


def delete_endpoint(token, api_url, id):
    """
    Ask OpenStack Keystone to delete an endpoint
    """
    api_cmd = api_url + "/endpoints/" + id
    response = rest_api_request(token, "DELETE", api_cmd)
    return keystone.Endpoint(response)


def get_users(token, api_url):
    """
    Ask OpenStack Keystone for a list of users
    """
    api_cmd = api_url + "/users"
    response = rest_api_request(token, "GET", api_cmd)
    return keystone.UserList(response)


def create_user(token, api_url, name, password, email, project_id, domain_id):
    """
    Ask OpenStack Keystone to create a user
    """
    api_cmd = api_url + "/users"
    req = json.dumps({"user": {
        "password": password,
        "default_project_id": project_id,
        "domain_id": domain_id,
        "name": name,
        "email": email
    }})
    response = rest_api_request(token, "POST", api_cmd, api_cmd_payload=req)
    return keystone.User(response)


def create_domain_user(token, api_url, name, password, email, domain_id):
    """
    Ask OpenStack Keystone to create a domain user
    """
    api_cmd = api_url + "/users"
    req = json.dumps({"user": {
        "password": password,
        "domain_id": domain_id,
        "name": name,
        "email": email
    }})
    response = rest_api_request(token, "POST", api_cmd, api_cmd_payload=req)
    return keystone.User(response)


def delete_user(token, api_url, id):
    """
    Ask OpenStack Keystone to delete a user
    """
    api_cmd = api_url + "/users/" + id
    response = rest_api_request(token, "DELETE", api_cmd)
    return keystone.User(response)


def add_role(token, api_url, project_id, user_id, role_id):
    """
    Ask OpenStack Keystone to assign a role to a user on a project
    """
    api_cmd = "%s/projects/%s/users/%s/roles/%s" % (
        api_url, project_id, user_id, role_id)
    response = rest_api_request(token, "PUT", api_cmd)
    return keystone.Role(response)


def add_role_on_domain(token, api_url, domain_id, user_id, role_id):
    """
    Ask OpenStack Keystone to assign a role to a user on a domain
    """
    api_cmd = "%s/domains/%s/users/%s/roles/%s" % (
        api_url, domain_id, user_id, role_id)
    response = rest_api_request(token, "PUT", api_cmd)
    return keystone.Role(response)


def get_roles(token, api_url):
    """
    Ask OpenStack Keystone for a list of roles
    """
    api_cmd = api_url + "/roles"
    response = rest_api_request(token, "GET", api_cmd)
    return keystone.RoleList(response)


def get_domains(token, api_url):
    """
    Ask OpenStack Keystone for a list of domains
    """
    # Domains are only available from the keystone V3 API
    api_cmd = api_url + "/domains"
    response = rest_api_request(token, "GET", api_cmd)
    return keystone.DomainList(response)


def create_domain(token, api_url, name, description):
    """
    Ask OpenStack Keystone to create a domain
    """
    api_cmd = api_url + "/domains"
    req = json.dumps({"domain": {
        "enabled": True,
        "name": name,
        "description": description}})
    response = rest_api_request(token, "POST", api_cmd, api_cmd_payload=req)
    return keystone.Domain(response)


def disable_domain(token, api_url, id):
    """
    Ask OpenStack Keystone to disable a domain
    """
    api_cmd = api_url + "/domains/" + id
    req = json.dumps({"domain": {
        "enabled": False}})
    response = rest_api_request(token, "PATCH", api_cmd, api_cmd_payload=req)
    return keystone.Domain(response)


def delete_domain(token, api_url, id):
    """
    Ask OpenStack Keystone to delete a domain
    """
    api_cmd = api_url + "/domains/" + id
    response = rest_api_request(token, "DELETE", api_cmd)
    return keystone.Domain(response)


def get_projects(token, api_url):
    """
    Ask OpenStack Keystone for a list of projects
    """
    api_cmd = api_url + "/projects"
    response = rest_api_request(token, "GET", api_cmd)
    return keystone.ProjectList(response)


def create_project(token, api_url, name, description, domain_id):
    """
    Ask OpenStack Keystone to create a project
    """
    api_cmd = api_url + "/projects"
    req = json.dumps({"project": {
        "enabled": True,
        "name": name,
        "domain_id": domain_id,
        "is_domain": False,
        "description": description}})
    response = rest_api_request(token, "POST", api_cmd, api_cmd_payload=req)
    return keystone.Project(response)


def delete_project(token, api_url, id):
    """
    Ask OpenStack Keystone to delete a project
    """
    api_cmd = api_url + "/projects/" + id
    response = rest_api_request(token, "DELETE", api_cmd)
    return keystone.Project(response)


def get_subcloud_config(token, api_url, subcloud_name,
                        hash_string):
    """
    Ask DC Manager for our subcloud configuration
    """
    api_cmd = api_url + "/subclouds/" + subcloud_name + "/config"
    response = rest_api_request(token, "GET", api_cmd)
    config = dict()
    config['users'] = dcmanager.UserList(response['users'], hash_string)

    return config
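
# End-to-end sketch tying the helpers above together (addresses invented):
#
#     api_url = "http://192.168.204.2:5000/v3"
#     token = get_token(api_url, "admin", "admin", "secret",
#                       "Default", "Default")
#     services = get_services(token, api_url)
#     endpoints = get_endpoints(token, api_url)
#     sysinv_id = services.get_service_id('sysinv', 'platform')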
@@ -0,0 +1,159 @@
"""
Copyright (c) 2017 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

import json
import netaddr
import os
import subprocess
import sys
import time

import configutilities.common.exceptions as cexceptions
import configutilities.common.utils as cutils


def is_valid_management_address(ip_address, management_subnet):
    """Determine whether a management address is valid."""
    if ip_address == management_subnet.network:
        print "Cannot use network address"
        return False
    elif ip_address == management_subnet.broadcast:
        print "Cannot use broadcast address"
        return False
    elif ip_address.is_multicast():
        print "Invalid address - multicast address not allowed"
        return False
    elif ip_address.is_loopback():
        print "Invalid address - loopback address not allowed"
        return False
    elif ip_address not in management_subnet:
        print "Address must be in the management subnet"
        return False
    else:
        return True

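# Quick sketch of the check above (placeholder subnet):
#
#     subnet = netaddr.IPNetwork('192.168.204.0/24')
#     is_valid_management_address(netaddr.IPAddress('192.168.204.10'),
#                                 subnet)                  # True
#     is_valid_management_address(subnet.network, subnet)    # False (network)
#     is_valid_management_address(subnet.broadcast, subnet)  # False (broadcast)
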
def configure_management():
    interface_list = list()
    lldp_interface_list = list()

    print "Enabling interfaces... ",
    ip_link_output = subprocess.check_output(['ip', '-o', 'link'])

    for line in ip_link_output.splitlines():
        interface = line.split()[1].rstrip(':')
        if interface != 'lo':
            interface_list.append(interface)
            subprocess.call(['ip', 'link', 'set', interface, 'up'])
    print 'DONE'

    wait_seconds = 120
    delay_seconds = 5
    print "Waiting %d seconds for LLDP neighbor discovery" % wait_seconds,
    while wait_seconds > 0:
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(delay_seconds)
        wait_seconds -= delay_seconds
    print ' DONE'

    print "Retrieving neighbor details... ",
    lldpcli_show_output = subprocess.check_output(
        ['sudo', 'lldpcli', 'show', 'neighbors', 'summary', '-f', 'json'])
    lldp_interfaces = json.loads(lldpcli_show_output)['lldp'][0]['interface']
    print "DONE"

    print "\nAvailable interfaces:"
    print "%-20s %s" % ("local interface", "remote port")
    print "%-20s %s" % ("---------------", "-----------")
    for interface in lldp_interfaces:
        print "%-20s %s" % (interface['name'],
                            interface['port'][0]['id'][0]['value'])
        lldp_interface_list.append(interface['name'])
    for interface in interface_list:
        if interface not in lldp_interface_list:
            print "%-20s %s" % (interface, 'unknown')

    print
    while True:
        user_input = raw_input("Enter management interface name: ")
        if user_input in interface_list:
            management_interface = user_input
            break
        else:
            print "Invalid interface name"
            continue

    while True:
        user_input = raw_input("Enter management address CIDR: ")
        try:
            management_cidr = netaddr.IPNetwork(user_input)
            management_ip = management_cidr.ip
            management_network = netaddr.IPNetwork(
                "%s/%s" % (str(management_cidr.network),
                           str(management_cidr.prefixlen)))
            if not is_valid_management_address(management_ip,
                                               management_network):
                continue
            break
        except (netaddr.AddrFormatError, ValueError):
            print ("Invalid CIDR - "
                   "please enter a valid management address CIDR")

    while True:
        user_input = raw_input("Enter management gateway address [" +
                               str(management_network[1]) + "]: ")
        if user_input == "":
            user_input = management_network[1]

        try:
            ip_input = netaddr.IPAddress(user_input)
            if not is_valid_management_address(ip_input,
                                               management_network):
                continue
            management_gateway_address = ip_input
            break
        except (netaddr.AddrFormatError, ValueError):
            print ("Invalid address - "
                   "please enter a valid management gateway address")

    min_addresses = 8
    while True:
        user_input = raw_input("Enter System Controller subnet: ")
        try:
            system_controller_subnet = cutils.validate_network_str(
                user_input, min_addresses)
            break
        except cexceptions.ValidateFail as e:
            print "{}".format(e)

    print "Disabling non-management interfaces... ",
    for interface in interface_list:
        if interface != management_interface:
            subprocess.call(['ip', 'link', 'set', interface, 'down'])
    print 'DONE'

    print "Configuring management interface... ",
    subprocess.call(['ip', 'addr', 'add', str(management_cidr), 'dev',
                     management_interface])
    print "DONE"

    print "Adding route to System Controller... ",
    subprocess.call(['ip', 'route', 'add', str(system_controller_subnet),
                     'dev', management_interface, 'via',
                     str(management_gateway_address)])
    print "DONE"


def main():
    if os.geteuid() != 0:
        print "%s must be run with root privileges" % sys.argv[0]
        exit(1)
    try:
        configure_management()
    except KeyboardInterrupt:
        print "\nAborted"
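
# Note on the lldpcli parsing in configure_management(): the
# ['lldp'][0]['interface'] indexing assumes `lldpcli show neighbors summary
# -f json` returns a structure shaped roughly like this (values invented):
#
#     {"lldp": [{"interface": [
#         {"name": "enp0s8",
#          "port": [{"id": [{"type": "ifname", "value": "Ethernet1/7"}]}]}]}]}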
File diff suppressed because it is too large
284
controllerconfig/controllerconfig/controllerconfig/openstack.py
Executable file
@@ -0,0 +1,284 @@
#
# Copyright (c) 2014-2015 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
OpenStack
"""

import os
import time
import subprocess

from common import log
from common.exceptions import SysInvFail
from common.rest_api_utils import get_token
import sysinv_api as sysinv


LOG = log.get_logger(__name__)

KEYSTONE_AUTH_SERVER_RETRY_CNT = 60
KEYSTONE_AUTH_SERVER_WAIT = 1  # 1sec wait per retry


class OpenStack(object):

    def __init__(self):
        self.admin_token = None
        self.conf = {}
        self._sysinv = None

        with open(os.devnull, "w") as fnull:
            proc = subprocess.Popen(
                ['bash', '-c',
                 'source /etc/nova/openrc && env'],
                stdout=subprocess.PIPE, stderr=fnull)

            for line in proc.stdout:
                key, _, value = line.partition("=")
                if key == 'OS_USERNAME':
                    self.conf['admin_user'] = value.strip()
                elif key == 'OS_PASSWORD':
                    self.conf['admin_pwd'] = value.strip()
                elif key == 'OS_PROJECT_NAME':
                    self.conf['admin_tenant'] = value.strip()
                elif key == 'OS_AUTH_URL':
                    self.conf['auth_url'] = value.strip()
                elif key == 'OS_REGION_NAME':
                    self.conf['region_name'] = value.strip()
                elif key == 'OS_USER_DOMAIN_NAME':
                    self.conf['user_domain'] = value.strip()
                elif key == 'OS_PROJECT_DOMAIN_NAME':
                    self.conf['project_domain'] = value.strip()

            proc.communicate()

    def __enter__(self):
        if not self._connect():
            raise Exception('Failed to connect')
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._disconnect()

    def __del__(self):
        self._disconnect()

    def _connect(self):
        """ Connect to an OpenStack instance """

        if self.admin_token is not None:
            self._disconnect()

        # Try to obtain an admin token from keystone
        for _ in xrange(KEYSTONE_AUTH_SERVER_RETRY_CNT):
            self.admin_token = get_token(self.conf['auth_url'],
                                         self.conf['admin_tenant'],
                                         self.conf['admin_user'],
                                         self.conf['admin_pwd'],
                                         self.conf['user_domain'],
                                         self.conf['project_domain'])
            if self.admin_token:
                break
            time.sleep(KEYSTONE_AUTH_SERVER_WAIT)

        return self.admin_token is not None

    def _disconnect(self):
        """ Disconnect from an OpenStack instance """
        self.admin_token = None

    def lock_hosts(self, exempt_hostnames=None, progress_callback=None,
                   timeout=60):
        """ Lock hosts of an OpenStack instance except for host names
            in the exempt list
        """
        failed_hostnames = []

        if exempt_hostnames is None:
            exempt_hostnames = []

        hosts = sysinv.get_hosts(self.admin_token, self.conf['region_name'])
        if not hosts:
            if progress_callback is not None:
                progress_callback(0, 0, None, None)
            return

        wait = False
        host_i = 0

        for host in hosts:
            if host.name in exempt_hostnames:
                continue

            if host.is_unlocked():
                if not host.force_lock(self.admin_token,
                                       self.conf['region_name']):
                    failed_hostnames.append(host.name)
                    LOG.warning("Could not lock %s" % host.name)
                else:
                    wait = True
            else:
                host_i += 1
                if progress_callback is not None:
                    progress_callback(len(hosts), host_i,
                                      ('locking %s' % host.name),
                                      'DONE')

        if wait and timeout > 5:
            time.sleep(5)
            timeout -= 5

        for _ in range(0, timeout):
            wait = False

            for host in hosts:
                if host.name in exempt_hostnames:
                    continue

                if (host.name not in failed_hostnames) and host.is_unlocked():
                    host.refresh_data(self.admin_token,
                                      self.conf['region_name'])

                    if host.is_locked():
                        LOG.info("Locked %s" % host.name)
                        host_i += 1
                        if progress_callback is not None:
                            progress_callback(len(hosts), host_i,
                                              ('locking %s' % host.name),
                                              'DONE')
                    else:
                        LOG.info("Waiting for lock of %s" % host.name)
                        wait = True

            if not wait:
                break

            time.sleep(1)
        else:
            failed_hostnames.append(host.name)
            LOG.warning("Wait failed for lock of %s" % host.name)

        return failed_hostnames

    def power_off_hosts(self, exempt_hostnames=None, progress_callback=None,
                        timeout=60):
        """ Power-off hosts of an OpenStack instance except for host names
            in the exempt list
        """

        if exempt_hostnames is None:
            exempt_hostnames = []

        hosts = sysinv.get_hosts(self.admin_token, self.conf['region_name'])

        hosts[:] = [host for host in hosts if host.support_power_off()]
        if not hosts:
            if progress_callback is not None:
                progress_callback(0, 0, None, None)
            return

        wait = False
        host_i = 0

        for host in hosts:
            if host.name in exempt_hostnames:
                continue

            if host.is_powered_on():
                if not host.power_off(self.admin_token,
                                      self.conf['region_name']):
                    raise SysInvFail("Could not power-off %s" % host.name)
                wait = True
            else:
                host_i += 1
                if progress_callback is not None:
                    progress_callback(len(hosts), host_i,
                                      ('powering off %s' % host.name),
                                      'DONE')

        if wait and timeout > 5:
            time.sleep(5)
            timeout -= 5

        for _ in range(0, timeout):
            wait = False

            for host in hosts:
                if host.name in exempt_hostnames:
                    continue

                if host.is_powered_on():
                    host.refresh_data(self.admin_token,
                                      self.conf['region_name'])

                if host.is_powered_off():
                    LOG.info("Powered-Off %s" % host.name)
                    host_i += 1
                    if progress_callback is not None:
                        progress_callback(len(hosts), host_i,
                                          ('powering off %s' % host.name),
                                          'DONE')
                else:
                    LOG.info("Waiting for power-off of %s" % host.name)
                    wait = True

            if not wait:
                break

            time.sleep(1)
        else:
            failed_hosts = [h.name for h in hosts if h.is_powered_on()]
            msg = "Wait timeout for power-off of %s" % failed_hosts
            LOG.info(msg)
            raise SysInvFail(msg)

    def wait_for_hosts_disabled(self, exempt_hostnames=None, timeout=300,
                                interval_step=10):
        """Wait for hosts to be identified as disabled.
        Run check every interval_step seconds
        """
        if exempt_hostnames is None:
            exempt_hostnames = []

        for _ in xrange(timeout / interval_step):
            hosts = sysinv.get_hosts(self.admin_token,
                                     self.conf['region_name'])
            if not hosts:
                time.sleep(interval_step)
                continue

            for host in hosts:
                if host.name in exempt_hostnames:
                    continue

                if host.is_enabled():
                    LOG.info("host %s is still enabled" % host.name)
                    break
            else:
                LOG.info("all hosts disabled.")
                return True

            time.sleep(interval_step)

        return False

    @property
    def sysinv(self):
        if self._sysinv is None:
            # TOX cannot import cgts_client and all its dependencies, so the
            # client is lazy-loaded here; TOX does not actually require the
            # cgtsclient module.
            from cgtsclient import client as cgts_client

            endpoint = self.admin_token.get_service_url(
                self.conf['region_name'], "sysinv", "platform", 'admin')
            self._sysinv = cgts_client.Client(
                sysinv.API_VERSION,
                endpoint=endpoint,
                token=self.admin_token.get_id())

        return self._sysinv
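
# Usage sketch (assumes admin credentials in /etc/nova/openrc and a running
# keystone/sysinv; hostnames are placeholders):
#
#     with OpenStack() as client:
#         failed = client.lock_hosts(exempt_hostnames=['controller-0'])
#         client.wait_for_hosts_disabled(exempt_hostnames=['controller-0'])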
@@ -0,0 +1,31 @@
import sys

from common import log

LOG = log.get_logger(__name__)


class ProgressRunner(object):
    def __init__(self):
        # Keep the step list on the instance (not the class) so separate
        # runners do not share one list.
        self.steps = []

    def add(self, action, message):
        self.steps.append((action, message))

    def run(self):
        total = len(self.steps)
        for i, step in enumerate(self.steps, start=1):
            action, message = step
            LOG.info("Start step: %s" % message)
            sys.stdout.write(
                "\n%.2u/%.2u: %s ... " % (i, total, message))
            sys.stdout.flush()
            try:
                action()
                sys.stdout.write('DONE')
                sys.stdout.flush()
            except Exception:
                sys.stdout.flush()
                raise
            LOG.info("Finish step: %s" % message)
        sys.stdout.write("\n")
        sys.stdout.flush()
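
# Usage sketch:
#
#     runner = ProgressRunner()
#     runner.add(lambda: None, 'initializing database')
#     runner.add(lambda: None, 'applying manifests')
#     runner.run()   # prints "01/02: initializing database ... DONE", etc.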
732
controllerconfig/controllerconfig/controllerconfig/regionconfig.py
Executable file
@@ -0,0 +1,732 @@
"""
Copyright (c) 2015-2017 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

import ConfigParser
import os
import sys
import textwrap
import time
import uuid

from common import constants
from common import log
from common import rest_api_utils as rutils
from common.exceptions import KeystoneFail
from configutilities.common import utils
from configutilities.common.configobjects import REGION_CONFIG, SUBCLOUD_CONFIG
from configutilities import ConfigFail
from configassistant import ConfigAssistant
from netaddr import IPAddress
from systemconfig import parse_system_config, configure_management_interface, \
    create_cgcs_config_file
from configutilities import DEFAULT_DOMAIN_NAME

# Temporary file for building cgcs_config
TEMP_CGCS_CONFIG_FILE = "/tmp/cgcs_config"

# For region mode, this is the list of users that we expect to find configured
# in the region config file as <USER>_USER_KEY and <USER>_PASSWORD.
# For distributed cloud, this is the list of users that we expect to find
# configured in keystone. The password for each user will be retrieved from
# the DC Manager in the system controller and added to the region config file.
# The format is:
# REGION_NAME = key in region config file for this user's region
# USER_KEY = key in region config file for this user's name
# USER_NAME = user name in keystone

REGION_NAME = 0
USER_KEY = 1
USER_NAME = 2

EXPECTED_USERS = [
    ('REGION_2_SERVICES', 'NOVA', 'nova'),
    ('REGION_2_SERVICES', 'PLACEMENT', 'placement'),
    ('REGION_2_SERVICES', 'SYSINV', 'sysinv'),
    ('REGION_2_SERVICES', 'PATCHING', 'patching'),
    ('REGION_2_SERVICES', 'HEAT', 'heat'),
    ('REGION_2_SERVICES', 'CEILOMETER', 'ceilometer'),
    ('REGION_2_SERVICES', 'NFV', 'vim'),
    ('REGION_2_SERVICES', 'AODH', 'aodh'),
    ('REGION_2_SERVICES', 'MTCE', 'mtce'),
    ('REGION_2_SERVICES', 'PANKO', 'panko')]

EXPECTED_SHARED_SERVICES_NEUTRON_USER = ('SHARED_SERVICES', 'NEUTRON',
                                         'neutron')
EXPECTED_REGION_2_NEUTRON_USER = ('REGION_2_SERVICES', 'NEUTRON', 'neutron')
EXPECTED_REGION_2_GLANCE_USER = ('REGION_2_SERVICES', 'GLANCE', 'glance')

# This is a description of the region 2 endpoints that we expect to configure
# or find configured in keystone. The format is as follows:
# SERVICE_NAME = key in region config file for this service's name
# SERVICE_TYPE = key in region config file for this service's type
# PUBLIC_URL = required publicurl - {} is replaced with CAM floating IP
# INTERNAL_URL = required internalurl - {} is replaced with CLM floating IP
# ADMIN_URL = required adminurl - {} is replaced with CLM floating IP
# DESCRIPTION = Description of the service (for automatic configuration)

SERVICE_NAME = 0
SERVICE_TYPE = 1
PUBLIC_URL = 2
INTERNAL_URL = 3
ADMIN_URL = 4
DESCRIPTION = 5

EXPECTED_REGION2_ENDPOINTS = [
    ('NOVA_SERVICE_NAME', 'NOVA_SERVICE_TYPE',
     'http://{}:8774/v2.1/%(tenant_id)s',
     'http://{}:8774/v2.1/%(tenant_id)s',
     'http://{}:8774/v2.1/%(tenant_id)s',
     'Openstack Compute Service'),
    ('PLACEMENT_SERVICE_NAME', 'PLACEMENT_SERVICE_TYPE',
     'http://{}:8778',
     'http://{}:8778',
     'http://{}:8778',
     'Openstack Placement Service'),
    ('SYSINV_SERVICE_NAME', 'SYSINV_SERVICE_TYPE',
     'http://{}:6385/v1',
     'http://{}:6385/v1',
     'http://{}:6385/v1',
     'SysInv Service'),
    ('PATCHING_SERVICE_NAME', 'PATCHING_SERVICE_TYPE',
     'http://{}:15491',
     'http://{}:5491',
     'http://{}:5491',
     'Patching Service'),
    ('HEAT_SERVICE_NAME', 'HEAT_SERVICE_TYPE',
     'http://{}:8004/v1/%(tenant_id)s',
     'http://{}:8004/v1/%(tenant_id)s',
     'http://{}:8004/v1/%(tenant_id)s',
     'Openstack Orchestration Service'),
    ('HEAT_CFN_SERVICE_NAME', 'HEAT_CFN_SERVICE_TYPE',
     'http://{}:8000/v1/',
     'http://{}:8000/v1/',
     'http://{}:8000/v1/',
     'Openstack Cloudformation Service'),
    ('CEILOMETER_SERVICE_NAME', 'CEILOMETER_SERVICE_TYPE',
     'http://{}:8777',
     'http://{}:8777',
     'http://{}:8777',
     'Openstack Metering Service'),
    ('NFV_SERVICE_NAME', 'NFV_SERVICE_TYPE',
     'http://{}:4545',
     'http://{}:4545',
     'http://{}:4545',
     'Virtual Infrastructure Manager'),
    ('AODH_SERVICE_NAME', 'AODH_SERVICE_TYPE',
     'http://{}:8042',
     'http://{}:8042',
     'http://{}:8042',
     'OpenStack Alarming Service'),
    ('PANKO_SERVICE_NAME', 'PANKO_SERVICE_TYPE',
     'http://{}:8977',
     'http://{}:8977',
     'http://{}:8977',
     'OpenStack Event Service'),
]

EXPECTED_NEUTRON_ENDPOINT = (
    'NEUTRON_SERVICE_NAME', 'NEUTRON_SERVICE_TYPE',
    'http://{}:9696',
    'http://{}:9696',
    'http://{}:9696',
    'Neutron Networking Service')

EXPECTED_KEYSTONE_ENDPOINT = (
    'KEYSTONE_SERVICE_NAME', 'KEYSTONE_SERVICE_TYPE',
    'http://{}:8081/keystone/main/v2.0',
    'http://{}:8081/keystone/main/v2.0',
    'http://{}:8081/keystone/admin/v2.0',
    'OpenStack Identity')

EXPECTED_GLANCE_ENDPOINT = (
    'GLANCE_SERVICE_NAME', 'GLANCE_SERVICE_TYPE',
    'http://{}:9292',
    'http://{}:9292',
    'http://{}:9292',
    'OpenStack Image Service')

DEFAULT_HEAT_ADMIN_DOMAIN = 'heat'
DEFAULT_HEAT_ADMIN_USER_NAME = 'heat_admin'

LOG = log.get_logger(__name__)

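# Sketch: how one endpoint description above expands (placeholder addresses):
#
#     entry = EXPECTED_REGION2_ENDPOINTS[2]        # the SYSINV entry
#     entry[PUBLIC_URL].format('10.10.10.2')       # http://10.10.10.2:6385/v1
#     entry[INTERNAL_URL].format('192.168.204.2')  # http://192.168.204.2:6385/v1
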
def validate_region_one_keystone_config(region_config, token, api_url, users,
                                        services, endpoints, create=False,
                                        config_type=REGION_CONFIG,
                                        user_config=None):
    """ Validate that the required region one configuration is in place.
        If create is True, any missing entries will be set up to be added
        to keystone later on by puppet.
    """

    region_1_name = region_config.get('SHARED_SERVICES', 'REGION_NAME')
    region_2_name = region_config.get('REGION_2_SERVICES', 'REGION_NAME')

    # Determine what keystone entries are expected. Copy the module-level
    # lists so repeated calls do not accumulate duplicate entries.
    expected_users = list(EXPECTED_USERS)
    expected_region_2_endpoints = list(EXPECTED_REGION2_ENDPOINTS)
    # Keystone is always in region 1
    expected_region_1_endpoints = [EXPECTED_KEYSTONE_ENDPOINT]

    # Region of neutron user and endpoint depends on vswitch type
    if region_config.has_option('NETWORK', 'VSWITCH_TYPE'):
        if region_config.get('NETWORK', 'VSWITCH_TYPE').upper() == 'NUAGE_VRS':
            expected_users.append(EXPECTED_SHARED_SERVICES_NEUTRON_USER)
    else:
        expected_users.append(EXPECTED_REGION_2_NEUTRON_USER)
        expected_region_2_endpoints.append(EXPECTED_NEUTRON_ENDPOINT)

    # Determine region of glance user and endpoint
    if not region_config.has_option('SHARED_SERVICES',
                                    'GLANCE_SERVICE_NAME'):
        expected_users.append(EXPECTED_REGION_2_GLANCE_USER)
        expected_region_2_endpoints.append(EXPECTED_GLANCE_ENDPOINT)
    elif region_config.has_option(
            'SHARED_SERVICES', 'GLANCE_CACHED'):
        if region_config.get('SHARED_SERVICES',
                             'GLANCE_CACHED').upper() == 'TRUE':
            expected_users.append(EXPECTED_REGION_2_GLANCE_USER)
            expected_region_2_endpoints.append(EXPECTED_GLANCE_ENDPOINT)
    else:
        expected_region_1_endpoints.append(EXPECTED_GLANCE_ENDPOINT)

    domains = rutils.get_domains(token, api_url)
    # Verify service project domain, creating if necessary
    if region_config.has_option('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME'):
        project_domain = region_config.get('REGION_2_SERVICES',
                                           'PROJECT_DOMAIN_NAME')
    else:
        project_domain = DEFAULT_DOMAIN_NAME
    project_domain_id = domains.get_domain_id(project_domain)
    if not project_domain_id:
        if create and config_type == REGION_CONFIG:
            region_config.set('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME',
                              project_domain)
        else:
            raise ConfigFail(
                "Keystone configuration error: service project domain '%s' is "
                "not configured." % project_domain)

    # Verify service project, creating if necessary
    if region_config.has_option('SHARED_SERVICES',
                                'SERVICE_PROJECT_NAME'):
        service_project = region_config.get('SHARED_SERVICES',
                                            'SERVICE_PROJECT_NAME')
    else:
        service_project = region_config.get('SHARED_SERVICES',
                                            'SERVICE_TENANT_NAME')
    projects = rutils.get_projects(token, api_url)
    project_id = projects.get_project_id(service_project)
    if not project_id:
        if create and config_type == REGION_CONFIG:
            region_config.set('SHARED_SERVICES', 'SERVICE_TENANT_NAME',
                              service_project)
        else:
            raise ConfigFail(
                "Keystone configuration error: service project '%s' is not "
                "configured." % service_project)

    # Verify and retrieve the id of the admin role (only needed when creating)
    roles = rutils.get_roles(token, api_url)
    role_id = roles.get_role_id('admin')
    if not role_id and create:
        raise ConfigFail("Keystone configuration error: No admin role present")

    # Verify that the heat admin domain is configured, creating if necessary
    heat_admin_domain = region_config.get('REGION_2_SERVICES',
                                          'HEAT_ADMIN_DOMAIN')
    domains = rutils.get_domains(token, api_url)
    heat_domain_id = domains.get_domain_id(heat_admin_domain)
    if not heat_domain_id:
        if create and config_type == REGION_CONFIG:
            region_config.set('REGION_2_SERVICES', 'HEAT_ADMIN_DOMAIN',
                              heat_admin_domain)
        else:
            raise ConfigFail(
                "Unable to obtain id for %s domain. Please ensure "
                "keystone configuration is correct." % heat_admin_domain)

    # Verify that the heat stack user is configured, creating if necessary
    heat_stack_user = region_config.get('REGION_2_SERVICES',
                                        'HEAT_ADMIN_USER_NAME')
    if not users.get_user_id(heat_stack_user):
        if create and config_type == REGION_CONFIG:
            if not region_config.has_option('REGION_2_SERVICES',
                                            'HEAT_ADMIN_PASSWORD'):
                try:
                    region_config.set('REGION_2_SERVICES',
                                      'HEAT_ADMIN_PASSWORD',
                                      uuid.uuid4().hex[:10] + "TiC2*")
                except Exception as e:
                    raise ConfigFail("Failed to generate random user "
                                     "password: %s" % e)
        else:
            raise ConfigFail(
                "Unable to obtain user (%s) from domain (%s). Please ensure "
                "keystone configuration is correct." % (heat_stack_user,
                                                        heat_admin_domain))
    elif config_type == SUBCLOUD_CONFIG:
        # Add the password to the region config so it will be used when
        # configuring services.
        auth_password = user_config.get_password(heat_stack_user)
        region_config.set('REGION_2_SERVICES', 'HEAT_ADMIN_PASSWORD',
                          auth_password)

    # Verify that the service user domain is configured, creating if necessary
    if region_config.has_option('REGION_2_SERVICES', 'USER_DOMAIN_NAME'):
        user_domain = region_config.get('REGION_2_SERVICES',
                                        'USER_DOMAIN_NAME')
    else:
        user_domain = DEFAULT_DOMAIN_NAME
    domains = rutils.get_domains(token, api_url)
    user_domain_id = domains.get_domain_id(user_domain)
    if not user_domain_id:
        if create and config_type == REGION_CONFIG:
            region_config.set('REGION_2_SERVICES',
                              'USER_DOMAIN_NAME', user_domain)
        else:
            raise ConfigFail(
                "Unable to obtain id for %s domain. Please ensure "
                "keystone configuration is correct." % user_domain)

    auth_url = region_config.get('SHARED_SERVICES', 'KEYSTONE_ADMINURL')
    if config_type == REGION_CONFIG:
        # Verify that all users are configured and can retrieve a token;
        # optionally set up to create missing users + their admin role
        for user in expected_users:
            auth_user = region_config.get(user[REGION_NAME],
                                          user[USER_KEY] + '_USER_NAME')
            user_id = users.get_user_id(auth_user)
            auth_password = None
            if not user_id and create:
                if not region_config.has_option(
                        user[REGION_NAME], user[USER_KEY] + '_PASSWORD'):
                    # Generate random password for new user via
                    # /dev/urandom if necessary
                    try:
                        region_config.set(
                            user[REGION_NAME], user[USER_KEY] + '_PASSWORD',
                            uuid.uuid4().hex[:10] + "TiC2*")
                    except Exception as e:
                        raise ConfigFail("Failed to generate random user "
                                         "password: %s" % e)
            elif (user_id and user_domain_id and
                    project_id and project_domain_id):
                # If there is an existing user_id then we cannot use a
                # randomized password; it was either created by a previous
                # run of regionconfig or was created as part of Titanium
                # Cloud Primary region config
                if not region_config.has_option(
                        user[REGION_NAME], user[USER_KEY] + '_PASSWORD'):
                    raise ConfigFail("Failed to find configured password "
                                     "for pre-defined user %s" % auth_user)
                auth_password = region_config.get(user[REGION_NAME],
                                                  user[USER_KEY] + '_PASSWORD')
                # Verify that the existing user can seek an auth token
                user_token = rutils.get_token(auth_url, service_project,
                                              auth_user,
                                              auth_password, user_domain,
                                              project_domain)
                if not user_token:
                    raise ConfigFail(
                        "Unable to obtain keystone token for %s user. "
                        "Please ensure keystone configuration is correct."
                        % auth_user)
    else:
        # For subcloud configs we re-use the users from the system controller
        # (the primary region).
        for user in expected_users:
            auth_user = user[USER_NAME]
            user_id = users.get_user_id(auth_user)
            auth_password = None

            if user_id:
                # Add the password to the region config so it will be used
                # when configuring services.
                auth_password = user_config.get_password(user[USER_NAME])
                region_config.set(user[REGION_NAME],
                                  user[USER_KEY] + '_PASSWORD',
                                  auth_password)
            else:
                raise ConfigFail(
                    "Unable to obtain user (%s). Please ensure "
                    "keystone configuration is correct." % user[USER_NAME])

            # Verify that the existing user can seek an auth token
            user_token = rutils.get_token(auth_url, service_project, auth_user,
                                          auth_password, user_domain,
                                          project_domain)
            if not user_token:
                raise ConfigFail(
                    "Unable to obtain keystone token for %s user. "
                    "Please ensure keystone configuration is correct." %
                    auth_user)

    # Verify that region one endpoints & services for shared services
    # match our requirements, optionally creating missing entries
    for endpoint in expected_region_1_endpoints:
        service_name = region_config.get('SHARED_SERVICES',
                                         endpoint[SERVICE_NAME])
        service_type = region_config.get('SHARED_SERVICES',
                                         endpoint[SERVICE_TYPE])

        try:
            service_id = services.get_service_id(service_name, service_type)
        except KeystoneFail as ex:
            # No option to create services for region one; if those are not
            # present, something is seriously wrong
            raise ex

        # Extract region one url information from the existing endpoint entry:
        try:
            endpoints.get_service_url(
                region_1_name, service_id, "public")
            endpoints.get_service_url(
                region_1_name, service_id, "internal")
            endpoints.get_service_url(
                region_1_name, service_id, "admin")
        except KeystoneFail:
            # Fail since shared services endpoints are not found
            raise ConfigFail("Endpoint for shared service %s "
                             "is not configured" % service_name)

    # Verify that region two endpoints & services match our requirements,
    # optionally creating missing entries
    public_address = utils.get_optional(region_config, 'CAN_NETWORK',
                                        'CAN_IP_START_ADDRESS')
    if not public_address:
        public_address = utils.get_optional(region_config, 'CAN_NETWORK',
                                            'CAN_IP_FLOATING_ADDRESS')
    if not public_address:
        public_address = utils.get_optional(region_config, 'OAM_NETWORK',
                                            'IP_START_ADDRESS')
    if not public_address:
        # AIO-SX configuration
        public_address = utils.get_optional(region_config, 'OAM_NETWORK',
                                            'IP_ADDRESS')
    if not public_address:
        public_address = region_config.get('OAM_NETWORK',
                                           'IP_FLOATING_ADDRESS')

    if region_config.has_section('CLM_NETWORK'):
        internal_address = region_config.get('CLM_NETWORK',
                                             'CLM_IP_START_ADDRESS')
    else:
        internal_address = region_config.get('MGMT_NETWORK',
                                             'IP_START_ADDRESS')

    internal_infra_address = utils.get_optional(
        region_config, 'BLS_NETWORK', 'BLS_IP_START_ADDRESS')
    if not internal_infra_address:
        internal_infra_address = utils.get_optional(
            region_config, 'INFRA_NETWORK', 'IP_START_ADDRESS')

    for endpoint in expected_region_2_endpoints:
        service_name = utils.get_service(region_config, 'REGION_2_SERVICES',
                                         endpoint[SERVICE_NAME])
        service_type = utils.get_service(region_config, 'REGION_2_SERVICES',
                                         endpoint[SERVICE_TYPE])

        expected_public_url = endpoint[PUBLIC_URL].format(public_address)

        if internal_infra_address and service_type == 'image':
            nfs_address = IPAddress(internal_infra_address) + 3
            expected_internal_url = endpoint[INTERNAL_URL].format(nfs_address)
            expected_admin_url = endpoint[ADMIN_URL].format(nfs_address)
        else:
            expected_internal_url = endpoint[INTERNAL_URL].format(
                internal_address)
            expected_admin_url = endpoint[ADMIN_URL].format(internal_address)

        try:
            # Look up the service inside the try so that a missing service
            # is also handled as "create it later" below
            service_id = services.get_service_id(service_name, service_type)
            public_url = endpoints.get_service_url(region_2_name, service_id,
                                                   "public")
            internal_url = endpoints.get_service_url(region_2_name, service_id,
                                                     "internal")
            admin_url = endpoints.get_service_url(region_2_name, service_id,
                                                  "admin")
        except KeystoneFail:
            # The endpoint will be created optionally
            if not create:
                raise ConfigFail("Keystone configuration error: Unable to "
                                 "find endpoints for service %s"
                                 % service_name)
            continue

        # Validate the existing endpoints
        for endpointtype, found, expected in [
                ('public', public_url, expected_public_url),
                ('internal', internal_url, expected_internal_url),
                ('admin', admin_url, expected_admin_url)]:
            if found != expected:
                raise ConfigFail(
                    "Keystone configuration error for:\nregion ({}), "
                    "service name ({}), service type ({})\n"
                    "expected {}: {}\nconfigured {}: {}".format(
                        region_2_name, service_name, service_type,
                        endpointtype, expected, endpointtype, found))

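# For reference, a minimal region config fragment covering the lookups in
# validate_region_one_keystone_config() (a sketch, not a complete file; the
# values are invented):
#
#     [SHARED_SERVICES]
#     REGION_NAME = RegionOne
#     SERVICE_PROJECT_NAME = services
#     KEYSTONE_ADMINURL = http://192.168.204.2:35357/v2.0
#     KEYSTONE_SERVICE_NAME = keystone
#     KEYSTONE_SERVICE_TYPE = identity
#
#     [REGION_2_SERVICES]
#     REGION_NAME = RegionTwo
#     HEAT_ADMIN_DOMAIN = heat
#     HEAT_ADMIN_USER_NAME = heat_admin
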
def set_subcloud_config_defaults(region_config):
    """Set defaults in region_config for subclouds"""

    # We always create endpoints for subclouds
    region_config.set('REGION_2_SERVICES', 'CREATE', 'Y')

    # We use the default service project
    region_config.set('SHARED_SERVICES', 'SERVICE_PROJECT_NAME',
                      constants.DEFAULT_SERVICE_PROJECT_NAME)

    # We use the default heat admin domain
    region_config.set('REGION_2_SERVICES', 'HEAT_ADMIN_DOMAIN',
                      DEFAULT_HEAT_ADMIN_DOMAIN)

    # We use the heat admin user already created in the system controller
    region_config.set('REGION_2_SERVICES', 'HEAT_ADMIN_USER_NAME',
                      DEFAULT_HEAT_ADMIN_USER_NAME)

    # Add the necessary users to the region config, which will allow the
    # validation code to run and will later result in services being
    # configured to use the users from the system controller. Copy the
    # module-level list so it is not mutated.
    expected_users = list(EXPECTED_USERS)

    expected_users.append(EXPECTED_REGION_2_NEUTRON_USER)

    if not region_config.has_option('SHARED_SERVICES',
                                    'GLANCE_SERVICE_NAME'):
        expected_users.append(EXPECTED_REGION_2_GLANCE_USER)
    elif region_config.has_option(
            'SHARED_SERVICES', 'GLANCE_CACHED'):
        if region_config.get('SHARED_SERVICES',
                             'GLANCE_CACHED').upper() == 'TRUE':
            expected_users.append(EXPECTED_REGION_2_GLANCE_USER)

    for user in expected_users:
        # Add the user to the region config to allow validation.
        region_config.set(user[REGION_NAME], user[USER_KEY] + '_USER_NAME',
                          user[USER_NAME])

def configure_region(config_file, config_type=REGION_CONFIG):
|
||||
"""Configure the region"""
|
||||
|
||||
# Parse the region/subcloud config file
|
||||
print "Parsing configuration file... ",
|
||||
region_config = parse_system_config(config_file)
|
||||
print "DONE"
|
||||
|
||||
if config_type == SUBCLOUD_CONFIG:
|
||||
# Set defaults in region_config for subclouds
|
||||
set_subcloud_config_defaults(region_config)
|
||||
|
||||
# Validate the region/subcloud config file
|
||||
print "Validating configuration file... ",
|
||||
try:
|
||||
create_cgcs_config_file(None, region_config, None, None, None,
|
||||
config_type=config_type,
|
||||
validate_only=True)
|
||||
except ConfigParser.Error as e:
|
||||
raise ConfigFail("Error parsing configuration file %s: %s" %
|
||||
(config_file, e))
|
||||
print "DONE"
|
||||
|
||||
# Bring up management interface to allow us to reach Region 1
|
||||
print "Configuring management interface... ",
|
||||
configure_management_interface(region_config)
|
||||
print "DONE"
|
||||
|
||||
# Get token from keystone
|
||||
print "Retrieving keystone token...",
|
||||
sys.stdout.flush()
|
||||
auth_url = region_config.get('SHARED_SERVICES', 'KEYSTONE_ADMINURL')
|
||||
if region_config.has_option('SHARED_SERVICES', 'ADMIN_TENANT_NAME'):
|
||||
auth_project = region_config.get('SHARED_SERVICES',
|
||||
'ADMIN_TENANT_NAME')
|
||||
else:
|
||||
auth_project = region_config.get('SHARED_SERVICES',
|
||||
'ADMIN_PROJECT_NAME')
|
||||
auth_user = region_config.get('SHARED_SERVICES', 'ADMIN_USER_NAME')
|
||||
auth_password = region_config.get('SHARED_SERVICES', 'ADMIN_PASSWORD')
|
||||
if region_config.has_option('SHARED_SERVICES', 'ADMIN_USER_DOMAIN'):
|
||||
admin_user_domain = region_config.get('SHARED_SERVICES',
|
||||
'ADMIN_USER_DOMAIN')
|
||||
else:
|
||||
admin_user_domain = DEFAULT_DOMAIN_NAME
|
||||
if region_config.has_option('SHARED_SERVICES',
|
||||
'ADMIN_PROJECT_DOMAIN'):
|
||||
admin_project_domain = region_config.get('SHARED_SERVICES',
|
||||
'ADMIN_PROJECT_DOMAIN')
|
||||
else:
|
||||
admin_project_domain = DEFAULT_DOMAIN_NAME
|
||||
|
||||
attempts = 0
|
||||
token = None
|
||||
# Wait for connectivity to region one. It can take some time, especially if
|
||||
# we have LAG on the management network.
|
||||
while not token:
|
||||
token = rutils.get_token(auth_url, auth_project, auth_user,
|
||||
auth_password, admin_user_domain,
|
||||
admin_project_domain)
|
||||
if not token:
|
||||
attempts += 1
|
||||
if attempts < 10:
|
||||
print "\rRetrieving keystone token...{}".format(
|
||||
'.' * attempts),
|
||||
sys.stdout.flush()
|
||||
time.sleep(10)
|
||||
else:
|
||||
raise ConfigFail(
|
||||
"Unable to obtain keystone token. Please ensure "
|
||||
"networking and keystone configuration is correct.")
|
||||
print "DONE"
|
||||
|
||||
# Get services, endpoints, users and domains from keystone
|
||||
print "Retrieving services, endpoints and users from keystone... ",
|
||||
region_name = region_config.get('SHARED_SERVICES', 'REGION_NAME')
|
||||
service_name = region_config.get('SHARED_SERVICES',
|
||||
'KEYSTONE_SERVICE_NAME')
|
||||
service_type = region_config.get('SHARED_SERVICES',
|
||||
'KEYSTONE_SERVICE_TYPE')
|
||||
|
||||
api_url = token.get_service_url(
|
||||
region_name, service_name, service_type, "admin").replace(
|
||||
'v2.0', 'v3')
|
||||
|
||||
services = rutils.get_services(token, api_url)
|
||||
endpoints = rutils.get_endpoints(token, api_url)
|
||||
users = rutils.get_users(token, api_url)
|
||||
domains = rutils.get_domains(token, api_url)
|
||||
if not services or not endpoints or not users:
|
||||
raise ConfigFail(
|
||||
"Unable to retrieve services, endpoints or users from keystone. "
|
||||
"Please ensure networking and keystone configuration is correct.")
|
||||
print "DONE"
|
||||
|
||||
user_config = None
|
||||
if config_type == SUBCLOUD_CONFIG:
|
||||
# Retrieve subcloud configuration from dcmanager
|
||||
print "Retrieving configuration from dcmanager... ",
|
||||
dcmanager_url = token.get_service_url(
|
||||
'SystemController', 'dcmanager', 'dcmanager', "admin")
|
||||
subcloud_name = region_config.get('REGION_2_SERVICES',
|
||||
'REGION_NAME')
|
||||
subcloud_management_subnet = region_config.get('MGMT_NETWORK',
|
||||
'CIDR')
|
||||
hash_string = subcloud_name + subcloud_management_subnet
|
||||
subcloud_config = rutils.get_subcloud_config(token, dcmanager_url,
|
||||
subcloud_name,
|
||||
hash_string)
|
||||
user_config = subcloud_config['users']
|
||||
print "DONE"
|
||||
|
||||
try:
|
||||
# Configure missing region one keystone entries
|
||||
create = True
|
||||
# Prepare region configuration for puppet to create keystone identities
|
||||
if (region_config.has_option('REGION_2_SERVICES', 'CREATE') and
|
||||
region_config.get('REGION_2_SERVICES', 'CREATE') == 'Y'):
|
||||
print "Preparing keystone configuration... ",
|
||||
# If keystone configuration for this region already in place,
|
||||
# validate it only
|
||||
else:
|
||||
# Validate region one keystone config
|
||||
create = False
|
||||
print "Validating keystone configuration... ",
|
||||
|
||||
validate_region_one_keystone_config(region_config, token, api_url,
|
||||
users, services, endpoints, create,
|
||||
config_type=config_type,
|
||||
user_config=user_config)
|
||||
print "DONE"
|
||||
|
||||
# Create cgcs_config file
|
||||
print "Creating config apply file... ",
|
||||
try:
|
||||
create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, region_config,
|
||||
services, endpoints, domains,
|
||||
config_type=config_type)
|
||||
except ConfigParser.Error as e:
|
||||
raise ConfigFail("Error parsing configuration file %s: %s" %
|
||||
(config_file, e))
|
||||
print "DONE"
|
||||
|
||||
# Configure controller
|
||||
assistant = ConfigAssistant()
|
||||
assistant.configure(TEMP_CGCS_CONFIG_FILE, display_config=False)
|
||||
|
||||
except ConfigFail as e:
|
||||
print "A configuration failure has occurred.",
|
||||
raise e
|
||||
|
||||
|
||||
def show_help_region():
|
||||
print ("Usage: %s [OPTIONS] <CONFIG_FILE>" % sys.argv[0])
|
||||
print textwrap.fill(
|
||||
"Perform region configuration using the region "
|
||||
"configuration from CONFIG_FILE.", 80)
|
||||
|
||||
|
||||
def show_help_subcloud():
|
||||
print ("Usage: %s [OPTIONS] <CONFIG_FILE>" % sys.argv[0])
|
||||
print textwrap.fill(
|
||||
"Perform subcloud configuration using the subcloud "
|
||||
"configuration from CONFIG_FILE.", 80)
|
||||
|
||||
|
||||
def config_main(config_type=REGION_CONFIG):
|
||||
if config_type == REGION_CONFIG:
|
||||
config_file = "/home/wrsroot/region_config"
|
||||
elif config_type == SUBCLOUD_CONFIG:
|
||||
config_file = "/home/wrsroot/subcloud_config"
|
||||
else:
|
||||
raise ConfigFail("Invalid config_type: %s" % config_type)
|
||||
|
||||
arg = 1
|
||||
while arg < len(sys.argv):
|
||||
if sys.argv[arg] in ['--help', '-h', '-?']:
|
||||
if config_type == REGION_CONFIG:
|
||||
show_help_region()
|
||||
else:
|
||||
show_help_subcloud()
|
||||
exit(1)
|
||||
elif arg == len(sys.argv) - 1:
|
||||
config_file = sys.argv[arg]
|
||||
else:
|
||||
print "Invalid option. Use --help for more information."
|
||||
exit(1)
|
||||
arg += 1
|
||||
|
||||
log.configure()
|
||||
|
||||
if not os.path.isfile(config_file):
|
||||
print "Config file %s does not exist." % config_file
|
||||
exit(1)
|
||||
|
||||
try:
|
||||
configure_region(config_file, config_type=config_type)
|
||||
except KeyboardInterrupt:
|
||||
print "\nAborting configuration"
|
||||
except ConfigFail as e:
|
||||
LOG.exception(e)
|
||||
print "\nConfiguration failed: {}".format(e)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
print "\nConfiguration failed: {}".format(e)
|
||||
else:
|
||||
print("\nConfiguration finished successfully.")
|
||||
finally:
|
||||
if os.path.isfile(TEMP_CGCS_CONFIG_FILE):
|
||||
os.remove(TEMP_CGCS_CONFIG_FILE)
|
||||
|
||||
|
||||
def region_main():
|
||||
config_main(REGION_CONFIG)
|
||||
|
||||
|
||||
def subcloud_main():
|
||||
config_main(SUBCLOUD_CONFIG)
|
||||
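`region_main` and `subcloud_main` differ only in the config type they hand to `config_main`, which lets both console commands share one argument loop. A minimal sketch of how such wrappers could be exposed as setuptools console scripts; the script and module names below are illustrative assumptions, not taken from this commit:

    # setup.py (sketch; entry-point and module names are hypothetical)
    from setuptools import setup

    setup(
        name='controllerconfig',
        entry_points={
            'console_scripts': [
                'config_region = controllerconfig.regionconfig:region_main',
                'config_subcloud = controllerconfig.regionconfig:subcloud_main',
            ],
        },
    )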
575
controllerconfig/controllerconfig/controllerconfig/sysinv_api.py
Normal file
@@ -0,0 +1,575 @@
#
# Copyright (c) 2014-2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
System Inventory Interactions
"""

import json
import openstack
import urllib2

from common import log
from common.exceptions import KeystoneFail

LOG = log.get_logger(__name__)

API_VERSION = 1

# Host Personality Constants
HOST_PERSONALITY_NOT_SET = ""
HOST_PERSONALITY_UNKNOWN = "unknown"
HOST_PERSONALITY_CONTROLLER = "controller"
HOST_PERSONALITY_COMPUTE = "compute"
HOST_PERSONALITY_STORAGE = "storage"

# Host Administrative State Constants
HOST_ADMIN_STATE_NOT_SET = ""
HOST_ADMIN_STATE_UNKNOWN = "unknown"
HOST_ADMIN_STATE_LOCKED = "locked"
HOST_ADMIN_STATE_UNLOCKED = "unlocked"

# Host Operational State Constants
HOST_OPERATIONAL_STATE_NOT_SET = ""
HOST_OPERATIONAL_STATE_UNKNOWN = "unknown"
HOST_OPERATIONAL_STATE_ENABLED = "enabled"
HOST_OPERATIONAL_STATE_DISABLED = "disabled"

# Host Availability State Constants
HOST_AVAIL_STATE_NOT_SET = ""
HOST_AVAIL_STATE_UNKNOWN = "unknown"
HOST_AVAIL_STATE_AVAILABLE = "available"
HOST_AVAIL_STATE_ONLINE = "online"
HOST_AVAIL_STATE_OFFLINE = "offline"
HOST_AVAIL_STATE_POWERED_OFF = "powered-off"
HOST_AVAIL_STATE_POWERED_ON = "powered-on"

# Host Board Management Constants
HOST_BM_TYPE_NOT_SET = ""
HOST_BM_TYPE_UNKNOWN = "unknown"
HOST_BM_TYPE_ILO3 = 'ilo3'
HOST_BM_TYPE_ILO4 = 'ilo4'

# Host invprovision state
HOST_PROVISIONING = "provisioning"
HOST_PROVISIONED = "provisioned"


class Host(object):
    def __init__(self, hostname, host_data=None):
        self.name = hostname
        self.personality = HOST_PERSONALITY_NOT_SET
        self.admin_state = HOST_ADMIN_STATE_NOT_SET
        self.operational_state = HOST_OPERATIONAL_STATE_NOT_SET
        self.avail_status = []
        self.bm_type = HOST_BM_TYPE_NOT_SET
        self.uuid = None
        self.config_status = None
        self.invprovision = None
        self.boot_device = None
        self.rootfs_device = None
        self.console = None
        self.tboot = None

        if host_data is not None:
            self.__host_set_state__(host_data)

    def __host_set_state__(self, host_data):
        if host_data is None:
            self.admin_state = HOST_ADMIN_STATE_UNKNOWN
            self.operational_state = HOST_OPERATIONAL_STATE_UNKNOWN
            self.avail_status = []
            self.bm_type = HOST_BM_TYPE_NOT_SET

        # Set personality
        if host_data['personality'] == "controller":
            self.personality = HOST_PERSONALITY_CONTROLLER
        elif host_data['personality'] == "compute":
            self.personality = HOST_PERSONALITY_COMPUTE
        elif host_data['personality'] == "storage":
            self.personality = HOST_PERSONALITY_STORAGE
        else:
            self.personality = HOST_PERSONALITY_UNKNOWN

        # Set administrative state
        if host_data['administrative'] == "locked":
            self.admin_state = HOST_ADMIN_STATE_LOCKED
        elif host_data['administrative'] == "unlocked":
            self.admin_state = HOST_ADMIN_STATE_UNLOCKED
        else:
            self.admin_state = HOST_ADMIN_STATE_UNKNOWN

        # Set operational state
        if host_data['operational'] == "enabled":
            self.operational_state = HOST_OPERATIONAL_STATE_ENABLED
        elif host_data['operational'] == "disabled":
            self.operational_state = HOST_OPERATIONAL_STATE_DISABLED
        else:
            self.operational_state = HOST_OPERATIONAL_STATE_UNKNOWN

        # Set availability status
        self.avail_status[:] = []
        if host_data['availability'] == "available":
            self.avail_status.append(HOST_AVAIL_STATE_AVAILABLE)
        elif host_data['availability'] == "online":
            self.avail_status.append(HOST_AVAIL_STATE_ONLINE)
        elif host_data['availability'] == "offline":
            self.avail_status.append(HOST_AVAIL_STATE_OFFLINE)
        elif host_data['availability'] == "power-on":
            self.avail_status.append(HOST_AVAIL_STATE_POWERED_ON)
        elif host_data['availability'] == "power-off":
            self.avail_status.append(HOST_AVAIL_STATE_POWERED_OFF)
        else:
            self.avail_status.append(HOST_AVAIL_STATE_AVAILABLE)

        # Set board management type
        if host_data['bm_type'] is None:
            self.bm_type = HOST_BM_TYPE_NOT_SET
        elif host_data['bm_type'] == 'ilo3':
            self.bm_type = HOST_BM_TYPE_ILO3
        elif host_data['bm_type'] == 'ilo4':
            self.bm_type = HOST_BM_TYPE_ILO4
        else:
            self.bm_type = HOST_BM_TYPE_UNKNOWN

        if host_data['invprovision'] == 'provisioned':
            self.invprovision = HOST_PROVISIONED
        else:
            self.invprovision = HOST_PROVISIONING

        self.uuid = host_data['uuid']
        self.config_status = host_data['config_status']
        self.boot_device = host_data['boot_device']
        self.rootfs_device = host_data['rootfs_device']
        self.console = host_data['console']
        self.tboot = host_data['tboot']

    def __host_update__(self, admin_token, region_name):
        try:
            url = admin_token.get_service_admin_url("platform", "sysinv",
                                                    region_name)
            url += "/ihosts/" + self.name

            request_info = urllib2.Request(url)
            request_info.add_header("X-Auth-Token", admin_token.get_id())
            request_info.add_header("Accept", "application/json")

            request = urllib2.urlopen(request_info)
            response = json.loads(request.read())
            request.close()
            return response

        except KeystoneFail as e:
            LOG.error("Keystone authentication failed:{} ".format(e))
            return None

        except urllib2.HTTPError as e:
            LOG.error("%s, %s" % (e.code, e.read()))
            if e.code == 401:
                admin_token.set_expired()
            return None

        except urllib2.URLError as e:
            LOG.error(e)
            return None

    def __host_action__(self, admin_token, action, region_name):
        try:
            url = admin_token.get_service_admin_url("platform", "sysinv",
                                                    region_name)
            url += "/ihosts/" + self.name

            request_info = urllib2.Request(url)
            request_info.get_method = lambda: 'PATCH'
            request_info.add_header("X-Auth-Token", admin_token.get_id())
            request_info.add_header("Content-type", "application/json")
            request_info.add_header("Accept", "application/json")
            request_info.add_data(action)

            request = urllib2.urlopen(request_info)
            request.close()
            return True

        except KeystoneFail as e:
            LOG.error("Keystone authentication failed:{} ".format(e))
            return False

        except urllib2.HTTPError as e:
            LOG.error("%s, %s" % (e.code, e.read()))
            if e.code == 401:
                admin_token.set_expired()
            return False

        except urllib2.URLError as e:
            LOG.error(e)
            return False

    def is_unlocked(self):
        return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED)

    def is_locked(self):
        return(not self.is_unlocked())

    def is_enabled(self):
        return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED and
               self.operational_state == HOST_OPERATIONAL_STATE_ENABLED)

    def is_controller_enabled_provisioned(self):
        return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED and
               self.operational_state == HOST_OPERATIONAL_STATE_ENABLED and
               self.personality == HOST_PERSONALITY_CONTROLLER and
               self.invprovision == HOST_PROVISIONED)

    def is_disabled(self):
        return(not self.is_enabled())

    def support_power_off(self):
        return(HOST_BM_TYPE_NOT_SET != self.bm_type)

    def is_powered_off(self):
        for status in self.avail_status:
            if status == HOST_AVAIL_STATE_POWERED_OFF:
                return(self.admin_state == HOST_ADMIN_STATE_LOCKED and
                       self.operational_state ==
                       HOST_OPERATIONAL_STATE_DISABLED)
        return False

    def is_powered_on(self):
        return not self.is_powered_off()

    def refresh_data(self, admin_token, region_name):
        """ Ask the System Inventory for an updated view of the host """

        host_data = self.__host_update__(admin_token, region_name)
        self.__host_set_state__(host_data)

    def lock(self, admin_token, region_name):
        """ Asks the Platform to perform a lock against a host """

        if self.is_unlocked():
            action = json.dumps([{"path": "/action",
                                  "value": "lock", "op": "replace"}])

            return self.__host_action__(admin_token, action, region_name)

        return True

    def force_lock(self, admin_token, region_name):
        """ Asks the Platform to perform a force lock against a host """

        if self.is_unlocked():
            action = json.dumps([{"path": "/action",
                                  "value": "force-lock", "op": "replace"}])

            return self.__host_action__(admin_token, action, region_name)

        return True

    def unlock(self, admin_token, region_name):
        """ Asks the Platform to perform an unlock against a host """

        if self.is_locked():
            action = json.dumps([{"path": "/action",
                                  "value": "unlock", "op": "replace"}])

            return self.__host_action__(admin_token, action, region_name)

        return True

    def power_off(self, admin_token, region_name):
        """ Asks the Platform to perform a power-off against a host """

        if self.is_powered_on():
            action = json.dumps([{"path": "/action",
                                  "value": "power-off", "op": "replace"}])

            return self.__host_action__(admin_token, action, region_name)

        return True

    def power_on(self, admin_token, region_name):
        """ Asks the Platform to perform a power-on against a host """

        if self.is_powered_off():
            action = json.dumps([{"path": "/action",
                                  "value": "power-on", "op": "replace"}])

            return self.__host_action__(admin_token, action, region_name)

        return True
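
# Illustrative usage (not part of this module): a Host built from sysinv
# data can be locked and re-read through the same admin token; the token
# and region values below are placeholders.
#
#   host = Host("controller-1", host_data)
#   if host.is_unlocked():
#       host.lock(admin_token, "RegionOne")      # PATCH /ihosts/controller-1
#       host.refresh_data(admin_token, "RegionOne")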

def get_hosts(admin_token, region_name, personality=None,
              exclude_hostnames=None):
    """ Asks System Inventory for a list of hosts """

    if exclude_hostnames is None:
        exclude_hostnames = []

    try:
        url = admin_token.get_service_admin_url("platform", "sysinv",
                                                region_name)
        url += "/ihosts/"

        request_info = urllib2.Request(url)
        request_info.add_header("X-Auth-Token", admin_token.get_id())
        request_info.add_header("Accept", "application/json")

        request = urllib2.urlopen(request_info)
        response = json.loads(request.read())
        request.close()

        host_list = []
        if personality is None:
            for host in response['ihosts']:
                if host['hostname'] not in exclude_hostnames:
                    host_list.append(Host(host['hostname'], host))
        else:
            for host in response['ihosts']:
                if host['hostname'] not in exclude_hostnames:
                    if (host['personality'] == "controller" and
                            personality == HOST_PERSONALITY_CONTROLLER):
                        host_list.append(Host(host['hostname'], host))

                    elif (host['personality'] == "compute" and
                            personality == HOST_PERSONALITY_COMPUTE):
                        host_list.append(Host(host['hostname'], host))

                    elif (host['personality'] == "storage" and
                            personality == HOST_PERSONALITY_STORAGE):
                        host_list.append(Host(host['hostname'], host))

        return host_list

    except KeystoneFail as e:
        LOG.error("Keystone authentication failed:{} ".format(e))
        return []

    except urllib2.HTTPError as e:
        LOG.error("%s, %s" % (e.code, e.read()))
        if e.code == 401:
            admin_token.set_expired()
        return []

    except urllib2.URLError as e:
        LOG.error(e)
        return []
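
# Example (illustrative): fetch only storage hosts, skipping controller-0.
#
#   storage_hosts = get_hosts(admin_token, "RegionOne",
#                             personality=HOST_PERSONALITY_STORAGE,
#                             exclude_hostnames=["controller-0"])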

def dict_to_patch(values, install_action=False):
    # install default action
    if install_action:
        values.update({'action': 'install'})
    patch = []
    for key, value in values.iteritems():
        path = '/' + key
        patch.append({'op': 'replace', 'path': path, 'value': value})
    return patch
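
# Example (illustrative): dict_to_patch builds the JSON-patch list the
# sysinv REST API expects; entry order may vary since values is a dict.
#
#   >>> dict_to_patch({'serialid': ""})
#   [{'op': 'replace', 'path': '/serialid', 'value': ''}]
#
# With install_action=True an {'action': 'install'} replacement is added.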

def get_shared_services():
    try:
        services = ""
        with openstack.OpenStack() as client:
            systems = client.sysinv.isystem.list()
            if systems:
                services = systems[0].capabilities.get("shared_services", "")
    except Exception as e:
        LOG.exception("failed to get shared services")
        raise e

    return services


def get_alarms():
    """ get all alarms """
    alarm_list = []
    try:
        with openstack.OpenStack() as client:
            alarm_list = client.sysinv.ialarm.list()
    except Exception as e:
        LOG.exception("failed to get alarms")
        raise e
    return alarm_list


def controller_enabled_provisioned(hostname):
    """ check if host is enabled """
    try:
        with openstack.OpenStack() as client:
            hosts = get_hosts(client.admin_token,
                              client.conf['region_name'])
            for host in hosts:
                if (hostname == host.name and
                        host.is_controller_enabled_provisioned()):
                    LOG.info("host %s is enabled/provisioned" % host.name)
                    return True
    except Exception as e:
        LOG.exception("failed to check if host is enabled/provisioned")
        raise e
    return False


def get_system_uuid():
    """ get system uuid """
    try:
        sysuuid = ""
        with openstack.OpenStack() as client:
            systems = client.sysinv.isystem.list()
            if systems:
                sysuuid = systems[0].uuid
    except Exception as e:
        LOG.exception("failed to get system uuid")
        raise e
    return sysuuid


def get_oam_ip():
    """ get OAM ip details """
    try:
        with openstack.OpenStack() as client:
            oam_list = client.sysinv.iextoam.list()
            if oam_list:
                return oam_list[0]
    except Exception as e:
        LOG.exception("failed to get OAM IP")
        raise e
    return None


def get_mac_addresses(hostname):
    """ get MAC addresses for the host """
    macs = {}
    try:
        with openstack.OpenStack() as client:
            hosts = get_hosts(client.admin_token,
                              client.conf['region_name'])
            for host in hosts:
                if hostname == host.name:
                    port_list = client.sysinv.ethernet_port.list(host.uuid)
                    macs = {port.name: port.mac for port in port_list}
    except Exception as e:
        LOG.exception("failed to get MAC addresses")
        raise e
    return macs


def get_disk_serial_ids(hostname):
    """ get disk serial ids for the host """
    disk_serial_ids = {}
    try:
        with openstack.OpenStack() as client:
            hosts = get_hosts(client.admin_token,
                              client.conf['region_name'])
            for host in hosts:
                if hostname == host.name:
                    disk_list = client.sysinv.idisk.list(host.uuid)
                    disk_serial_ids = {
                        disk.device_node: disk.serial_id for disk in disk_list}
    except Exception as e:
        LOG.exception("failed to get disks")
        raise e
    return disk_serial_ids


def update_clone_system(descr, hostname):
    """ update system parameters on clone installation """
    try:
        with openstack.OpenStack() as client:
            systems = client.sysinv.isystem.list()
            if not systems:
                return False
            values = {
                'name': "Cloned_system",
                'description': descr
            }
            patch = dict_to_patch(values)
            LOG.info("Updating system: {} [{}]".format(systems[0].name, patch))
            client.sysinv.isystem.update(systems[0].uuid, patch)

            hosts = get_hosts(client.admin_token,
                              client.conf['region_name'])
            for host in hosts:
                if hostname == host.name:
                    values = {
                        'location': {},
                        'serialid': ""
                    }
                    patch = dict_to_patch(values)
                    client.sysinv.ihost.update(host.uuid, patch)
                    LOG.info("Updating host: {} [{}]".format(host, patch))
    except Exception as e:
        LOG.exception("failed to update system parameters")
        raise e
    return True
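
# Illustrative call (from the clone-install flow): rename the cloned system
# and blank out the named host's location/serialid; arguments are examples.
#
#   update_clone_system("Cloned from lab-A", "controller-0")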

def get_config_status(hostname):
    """ get config status of the host """
    try:
        with openstack.OpenStack() as client:
            hosts = get_hosts(client.admin_token,
                              client.conf['region_name'])
            for host in hosts:
                if hostname == host.name:
                    return host.config_status
    except Exception as e:
        LOG.exception("failed to get config status")
        raise e
    return None


def get_host_data(hostname):
    """ get data for the specified host """
    try:
        with openstack.OpenStack() as client:
            hosts = get_hosts(client.admin_token,
                              client.conf['region_name'])
            for host in hosts:
                if hostname == host.name:
                    return host
    except Exception as e:
        LOG.exception("failed to get host data")
        raise e
    return None


def do_compute_config_complete(hostname):
    """ enable compute functionality """
    try:
        with openstack.OpenStack() as client:
            hosts = get_hosts(client.admin_token,
                              client.conf['region_name'])
            for host in hosts:
                if hostname == host.name:
                    # Create/apply compute manifests
                    values = {
                        'action': "subfunction_config"
                    }
                    patch = dict_to_patch(values)
                    LOG.info("Applying compute manifests: {} [{}]"
                             .format(host, patch))
                    client.sysinv.ihost.update(host.uuid, patch)
    except Exception as e:
        LOG.exception("compute_config_complete failed")
        raise e


def get_storage_backend_services():
    """ get all storage backends and their assigned services """
    backend_service_dict = {}
    try:
        with openstack.OpenStack() as client:
            backend_list = client.sysinv.storage_backend.list()
            for backend in backend_list:
                backend_service_dict.update(
                    {backend.backend: backend.services})

    except Exception as e:
        LOG.exception("failed to get storage backend services")
        raise e

    return backend_service_dict
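Every module-level helper above follows the same shape: open an `openstack.OpenStack()` session, query sysinv through it, log and re-raise on failure. A short sketch of that shared pattern, using only names defined in this file:

    with openstack.OpenStack() as client:
        for host in get_hosts(client.admin_token, client.conf['region_name']):
            print host.name, host.config_status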
@@ -0,0 +1,500 @@
"""
Copyright (c) 2015-2017 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

import ConfigParser
import os
import readline
import sys
import textwrap

from common import constants
from common import log
from common.exceptions import (BackupFail, RestoreFail, UserQuit, CloneFail)
from configutilities import lag_mode_to_str, Network, validate
from configutilities import ConfigFail
from configutilities import DEFAULT_CONFIG, REGION_CONFIG, SUBCLOUD_CONFIG
from configutilities import MGMT_TYPE, HP_NAMES, DEFAULT_NAMES
from configassistant import ConfigAssistant, check_for_ssh_parent
import backup_restore
import utils
import clone

# Temporary file for building cgcs_config
TEMP_CGCS_CONFIG_FILE = "/tmp/cgcs_config"

LOG = log.get_logger(__name__)


def parse_system_config(config_file):
    """Parse system config file"""
    system_config = ConfigParser.RawConfigParser()
    try:
        system_config.read(config_file)
    except Exception as e:
        LOG.exception(e)
        raise ConfigFail("Error parsing system config file")

    # Dump configuration for debugging
    # for section in config.sections():
    #     print "Section: %s" % section
    #     for (name, value) in config.items(section):
    #         print "name: %s, value: %s" % (name, value)
    return system_config
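
# Example (illustrative): the returned RawConfigParser is queried with
# (section, option) pairs, matching the INI fixtures later in this commit.
#
#   system_config = parse_system_config("/home/wrsroot/system_config")
#   mgmt_subnet = system_config.get('MGMT_NETWORK', 'CIDR')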

def configure_management_interface(region_config, config_type=REGION_CONFIG):
    """Bring up management interface
    """
    mgmt_network = Network()
    if region_config.has_section('CLM_NETWORK'):
        naming_type = HP_NAMES
    else:
        naming_type = DEFAULT_NAMES
    try:
        mgmt_network.parse_config(region_config, config_type, MGMT_TYPE,
                                  min_addresses=8, naming_type=naming_type)
    except ConfigFail:
        raise
    except Exception as e:
        LOG.exception("Error parsing configuration file")
        raise ConfigFail("Error parsing configuration file: %s" % e)

    try:
        # Remove interface config files currently installed
        utils.remove_interface_config_files()

        # Create the management interface configuration files.
        # Code based on ConfigAssistant._write_interface_config_management
        parameters = utils.get_interface_config_static(
            mgmt_network.start_address,
            mgmt_network.cidr,
            mgmt_network.gateway_address)

        if mgmt_network.logical_interface.lag_interface:
            management_interface = 'bond0'
        else:
            management_interface = mgmt_network.logical_interface.ports[0]

        if mgmt_network.vlan:
            management_interface_name = "%s.%s" % (management_interface,
                                                   mgmt_network.vlan)
            utils.write_interface_config_vlan(
                management_interface_name,
                mgmt_network.logical_interface.mtu,
                parameters)

            # underlying interface has no additional parameters
            parameters = None
        else:
            management_interface_name = management_interface

        if mgmt_network.logical_interface.lag_interface:
            utils.write_interface_config_bond(
                management_interface,
                mgmt_network.logical_interface.mtu,
                lag_mode_to_str(mgmt_network.logical_interface.lag_mode),
                None,
                constants.LAG_MIIMON_FREQUENCY,
                mgmt_network.logical_interface.ports[0],
                mgmt_network.logical_interface.ports[1],
                parameters)
        else:
            utils.write_interface_config_ethernet(
                management_interface,
                mgmt_network.logical_interface.mtu,
                parameters)

        # Restart networking with the new management interface configuration
        utils.restart_networking()

        # Send a GARP for floating address. Doing this to help in
        # cases where we are re-installing in a lab and another node
        # previously held the floating address.
        if mgmt_network.cidr.version == 4:
            utils.send_interface_garp(management_interface_name,
                                      mgmt_network.start_address)
    except Exception:
        LOG.exception("Failed to configure management interface")
        raise ConfigFail("Failed to configure management interface")


def create_cgcs_config_file(output_file, system_config,
                            services, endpoints, domains,
                            config_type=REGION_CONFIG, validate_only=False):
    """
    Create cgcs_config file or just perform validation of the system_config if
    validate_only=True.
    :param output_file: filename of output cgcs_config file
    :param system_config: system configuration
    :param services: keystone services (not used if validate_only)
    :param endpoints: keystone endpoints (not used if validate_only)
    :param domains: keystone domains (not used if validate_only)
    :param config_type: specify region, subcloud or standard config
    :param validate_only: used to validate the input system_config
    :return:
    """
    cgcs_config = None
    if not validate_only:
        cgcs_config = ConfigParser.RawConfigParser()
        cgcs_config.optionxform = str

    # general error checking, if not validate_only cgcs config data is returned
    validate(system_config, config_type, cgcs_config)

    # Region configuration: services, endpoints and domain
    if config_type in [REGION_CONFIG, SUBCLOUD_CONFIG] and not validate_only:
        # The services and endpoints are not available in the validation phase
        region_1_name = system_config.get('SHARED_SERVICES', 'REGION_NAME')
        keystone_service_name = system_config.get('SHARED_SERVICES',
                                                  'KEYSTONE_SERVICE_NAME')
        keystone_service_type = system_config.get('SHARED_SERVICES',
                                                  'KEYSTONE_SERVICE_TYPE')
        keystone_service_id = services.get_service_id(keystone_service_name,
                                                      keystone_service_type)
        keystone_admin_url = endpoints.get_service_url(region_1_name,
                                                       keystone_service_id,
                                                       "admin")
        keystone_internal_url = endpoints.get_service_url(region_1_name,
                                                          keystone_service_id,
                                                          "internal")
        keystone_public_url = endpoints.get_service_url(region_1_name,
                                                        keystone_service_id,
                                                        "public")

        cgcs_config.set('cREGION', 'KEYSTONE_AUTH_URI', keystone_internal_url)
        cgcs_config.set('cREGION', 'KEYSTONE_IDENTITY_URI', keystone_admin_url)
        cgcs_config.set('cREGION', 'KEYSTONE_ADMIN_URI', keystone_admin_url)
        cgcs_config.set('cREGION', 'KEYSTONE_INTERNAL_URI',
                        keystone_internal_url)
        cgcs_config.set('cREGION', 'KEYSTONE_PUBLIC_URI', keystone_public_url)

        is_glance_cached = False
        if system_config.has_option('SHARED_SERVICES', 'GLANCE_CACHED'):
            if (system_config.get('SHARED_SERVICES',
                                  'GLANCE_CACHED').upper() == 'TRUE'):
                is_glance_cached = True
        cgcs_config.set('cREGION', 'GLANCE_CACHED', is_glance_cached)

        if (system_config.has_option('SHARED_SERVICES',
                                     'GLANCE_SERVICE_NAME') and
                not is_glance_cached):
            glance_service_name = system_config.get('SHARED_SERVICES',
                                                    'GLANCE_SERVICE_NAME')
            glance_service_type = system_config.get('SHARED_SERVICES',
                                                    'GLANCE_SERVICE_TYPE')
            glance_region_name = region_1_name
            glance_service_id = services.get_service_id(glance_service_name,
                                                        glance_service_type)
            glance_internal_url = endpoints.get_service_url(glance_region_name,
                                                            glance_service_id,
                                                            "internal")
            glance_public_url = endpoints.get_service_url(glance_region_name,
                                                          glance_service_id,
                                                          "public")

            cgcs_config.set('cREGION', 'GLANCE_ADMIN_URI', glance_internal_url)
            cgcs_config.set('cREGION', 'GLANCE_PUBLIC_URI', glance_public_url)
            cgcs_config.set('cREGION', 'GLANCE_INTERNAL_URI',
                            glance_internal_url)

        # The domains are not available in the validation phase
        heat_admin_domain = system_config.get('REGION_2_SERVICES',
                                              'HEAT_ADMIN_DOMAIN')
        cgcs_config.set('cREGION', 'HEAT_ADMIN_DOMAIN_NAME', heat_admin_domain)

        # If primary region is non-TiC and keystone entries already created,
        # the flag will tell puppet not to create them.
        if (system_config.has_option('REGION_2_SERVICES', 'CREATE') and
                system_config.get('REGION_2_SERVICES', 'CREATE') == 'Y'):
            cgcs_config.set('cREGION', 'REGION_SERVICES_CREATE', 'True')

    # System Timezone configuration
    if system_config.has_option('SYSTEM', 'TIMEZONE'):
        timezone = system_config.get('SYSTEM', 'TIMEZONE')
        if not os.path.isfile("/usr/share/zoneinfo/%s" % timezone):
            raise ConfigFail(
                "Timezone file %s does not exist" % timezone)

    # Dump results for debugging
    # for section in cgcs_config.sections():
    #     print "[%s]" % section
    #     for (name, value) in cgcs_config.items(section):
    #         print "%s=%s" % (name, value)

    if not validate_only:
        # Write config file
        with open(output_file, 'w') as config_file:
            cgcs_config.write(config_file)
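
# Illustrative two-phase use (configure_system below does exactly this):
# first validate with no keystone data, then write the apply file.
#
#   create_cgcs_config_file(None, system_config, None, None, None,
#                           DEFAULT_CONFIG, validate_only=True)
#   create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, system_config,
#                           None, None, None, DEFAULT_CONFIG)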

def configure_system(config_file):
    """Configure the system"""

    # Parse the system config file
    print "Parsing system configuration file... ",
    system_config = parse_system_config(config_file)
    print "DONE"

    # Validate the system config file
    print "Validating system configuration file... ",
    try:
        create_cgcs_config_file(None, system_config, None, None, None,
                                DEFAULT_CONFIG, validate_only=True)
    except ConfigParser.Error as e:
        raise ConfigFail("Error parsing configuration file %s: %s" %
                         (config_file, e))
    print "DONE"

    # Create cgcs_config file
    print "Creating config apply file... ",
    try:
        create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, system_config,
                                None, None, None, DEFAULT_CONFIG)
    except ConfigParser.Error as e:
        raise ConfigFail("Error parsing configuration file %s: %s" %
                         (config_file, e))
    print "DONE"


def show_help():
    print ("Usage: %s\n"
           "Perform system configuration\n"
           "\nThe default action is to perform the initial configuration for "
           "the system. The following options are also available:\n"
           "--config-file <name>     Perform configuration using INI file\n"
           "--backup <name>          Backup configuration using the given "
           "name\n"
           "--clone-iso <name>       Clone and create an image with "
           "the given file name\n"
           "--clone-status           Status of the last installation of "
           "cloned image\n"
           "--restore-system <name>  Restore system configuration from backup "
           "file with\n"
           "                         the given name, full path required\n"
           "--restore-images <name>  Restore images from backup file with the "
           "given name,\n"
           "                         full path required\n"
           "--restore-compute        Restore controller-0 compute function "
           "for All-In-One system,\n"
           "                         controller-0 will reboot\n"
           % sys.argv[0])


def show_help_lab_only():
    print ("Usage: %s\n"
           "Perform initial configuration\n"
           "\nThe following options are for lab use only:\n"
           "--answerfile <file>  Apply the configuration from the specified "
           "file without\n"
           "                     any validation or user interaction\n"
           "--default            Apply default configuration with no NTP or "
           "DNS server\n"
           "                     configuration (suitable for testing in a "
           "virtual\n"
           "                     environment)\n"
           "--archive-dir <dir>  Directory to store the archive in\n"
           "--provision          Provision initial system data only\n"
           % sys.argv[0])


def no_complete(text, state):
    return


def main():
    options = {}
    answerfile = None
    backup_name = None
    archive_dir = constants.BACKUPS_PATH
    do_default_config = False
    do_backup = False
    do_system_restore = False
    do_images_restore = False
    do_compute_restore = False
    do_clone = False
    do_non_interactive = False
    do_provision = False
    system_config_file = "/home/wrsroot/system_config"

    # Disable completion as the default completer shows python commands
    readline.set_completer(no_complete)

    # remove any previous config fail flag file
    if os.path.exists(constants.CONFIG_FAIL_FILE) is True:
        os.remove(constants.CONFIG_FAIL_FILE)

    if os.environ.get('CGCS_LABMODE'):
        options['labmode'] = True

    arg = 1
    while arg < len(sys.argv):
        if sys.argv[arg] == "--answerfile":
            arg += 1
            if arg < len(sys.argv):
                answerfile = sys.argv[arg]
            else:
                print "--answerfile option requires a file to be specified"
                exit(1)
        elif sys.argv[arg] == "--backup":
            arg += 1
            if arg < len(sys.argv):
                backup_name = sys.argv[arg]
            else:
                print "--backup requires the name of the backup"
                exit(1)
            do_backup = True
        elif sys.argv[arg] == "--restore-system":
            arg += 1
            if arg < len(sys.argv):
                backup_name = sys.argv[arg]
            else:
                print "--restore-system requires the filename of the backup"
                exit(1)
            do_system_restore = True
        elif sys.argv[arg] == "--restore-images":
            arg += 1
            if arg < len(sys.argv):
                backup_name = sys.argv[arg]
            else:
                print "--restore-images requires the filename of the backup"
                exit(1)
            do_images_restore = True
        elif sys.argv[arg] == "--restore-compute":
            do_compute_restore = True
        elif sys.argv[arg] == "--archive-dir":
            arg += 1
            if arg < len(sys.argv):
                archive_dir = sys.argv[arg]
            else:
                print "--archive-dir requires a directory"
                exit(1)
        elif sys.argv[arg] == "--clone-iso":
            arg += 1
            if arg < len(sys.argv):
                backup_name = sys.argv[arg]
            else:
                print "--clone-iso requires the name of the image"
                exit(1)
            do_clone = True
        elif sys.argv[arg] == "--clone-status":
            clone.clone_status()
            exit(0)
        elif sys.argv[arg] == "--default":
            do_default_config = True
        elif sys.argv[arg] == "--config-file":
            arg += 1
            if arg < len(sys.argv):
                system_config_file = sys.argv[arg]
            else:
                print "--config-file requires the filename of the config file"
                exit(1)
            do_non_interactive = True
        elif sys.argv[arg] in ["--help", "-h", "-?"]:
            show_help()
            exit(1)
        elif sys.argv[arg] == "--labhelp":
            show_help_lab_only()
            exit(1)
        elif sys.argv[arg] == "--provision":
            do_provision = True
        else:
            print "Invalid option. Use --help for more information."
            exit(1)
        arg += 1

    if [do_backup,
            do_system_restore,
            do_images_restore,
            do_compute_restore,
            do_clone,
            do_default_config,
            do_non_interactive].count(True) > 1:
        print "Invalid combination of options selected"
        exit(1)

    if answerfile and [do_backup,
                       do_system_restore,
                       do_images_restore,
                       do_compute_restore,
                       do_clone,
                       do_default_config,
                       do_non_interactive].count(True) > 0:
        print "The --answerfile option cannot be used with the selected option"
        exit(1)

    log.configure()

    # Reduce the printk console log level to avoid noise during configuration
    printk_levels = ''
    with open('/proc/sys/kernel/printk', 'r') as f:
        printk_levels = f.readline()

    temp_printk_levels = '3' + printk_levels[1:]
    with open('/proc/sys/kernel/printk', 'w') as f:
        f.write(temp_printk_levels)

    if not do_backup and not do_clone:
        check_for_ssh_parent()

    try:
        if do_backup:
            backup_restore.backup(backup_name, archive_dir)
            print "\nBackup complete"
        elif do_system_restore:
            backup_restore.restore_system(backup_name)
            print "\nSystem restore complete"
        elif do_images_restore:
            backup_restore.restore_images(backup_name)
            print "\nImages restore complete"
        elif do_compute_restore:
            backup_restore.restore_compute()
        elif do_clone:
            clone.clone(backup_name, archive_dir)
            print "\nCloning complete"
        elif do_provision:
            assistant = ConfigAssistant(**options)
            assistant.provision(answerfile)
        else:
            if do_non_interactive:
                if not os.path.isfile(system_config_file):
                    raise ConfigFail("Config file %s does not exist." %
                                     system_config_file)
                if (os.path.exists(constants.CGCS_CONFIG_FILE) or
                        os.path.exists(constants.CONFIG_PERMDIR) or
                        os.path.exists(
                            constants.INITIAL_CONFIG_COMPLETE_FILE)):
                    raise ConfigFail("Configuration has already been done "
                                     "and cannot be repeated.")
                configure_system(system_config_file)
                answerfile = TEMP_CGCS_CONFIG_FILE
            assistant = ConfigAssistant(**options)
            assistant.configure(answerfile, do_default_config)
            print "\nConfiguration was applied\n"
            print textwrap.fill(
                "Please complete any out of service commissioning steps "
                "with system commands and unlock controller to proceed.", 80)
            assistant.check_required_interfaces_status()

    except KeyboardInterrupt:
        print "\nAborting configuration"
    except BackupFail as e:
        print "\nBackup failed: {}".format(e)
    except RestoreFail as e:
        print "\nRestore failed: {}".format(e)
    except ConfigFail as e:
        print "\nConfiguration failed: {}".format(e)
    except CloneFail as e:
        print "\nCloning failed: {}".format(e)
    except UserQuit:
        print "\nAborted configuration"
    finally:
        if os.path.isfile(TEMP_CGCS_CONFIG_FILE):
            os.remove(TEMP_CGCS_CONFIG_FILE)

        # Restore the printk console log level
        with open('/proc/sys/kernel/printk', 'w') as f:
            f.write(printk_levels)
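main() saves /proc/sys/kernel/printk, lowers the console level to 3 for the duration of configuration, and restores the saved levels in the finally block. A hedged sketch of the same idea packaged as a reusable context manager (not part of this commit):

    import contextlib

    @contextlib.contextmanager
    def quiet_printk(level='3'):
        # Save current printk levels, lower the console level, restore on exit.
        with open('/proc/sys/kernel/printk', 'r') as f:
            saved = f.readline()
        with open('/proc/sys/kernel/printk', 'w') as f:
            f.write(level + saved[1:])
        try:
            yield
        finally:
            with open('/proc/sys/kernel/printk', 'w') as f:
                f.write(saved)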
@@ -0,0 +1,126 @@
[SYSTEM]
SYSTEM_MODE=duplex

[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0

[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1

[LOGICAL_INTERFACE_3]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth2

[MGMT_NETWORK]
VLAN=121
IP_START_ADDRESS=192.168.204.102
IP_END_ADDRESS=192.168.204.199
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
;GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
DYNAMIC_ALLOCATION=N

[INFRA_NETWORK]
;VLAN=124
IP_START_ADDRESS=192.168.205.102
IP_END_ADDRESS=192.168.205.199
CIDR=192.168.205.0/24
LOGICAL_INTERFACE=LOGICAL_INTERFACE_3

[OAM_NETWORK]
;VLAN=
IP_START_ADDRESS=10.10.10.2
IP_END_ADDRESS=10.10.10.99
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2

[REGION2_PXEBOOT_NETWORK]
PXEBOOT_CIDR=192.168.203.0/24

[SHARED_SERVICES]
REGION_NAME=RegionOne
ADMIN_PROJECT_NAME=admin
ADMIN_USER_NAME=admin
ADMIN_USER_DOMAIN=admin_domain
ADMIN_PROJECT_DOMAIN=admin_domain
ADMIN_PASSWORD=Li69nux*
KEYSTONE_ADMINURL=http://192.168.204.12:35357/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
SERVICE_PROJECT_NAME=FULL_TEST

[REGION_2_SERVICES]
REGION_NAME=RegionTwo
USER_DOMAIN_NAME=service_domain
PROJECT_DOMAIN_NAME=service_domain

CINDER_SERVICE_NAME=cinder
CINDER_SERVICE_TYPE=volume
CINDER_V2_SERVICE_NAME=cinderv2
CINDER_V2_SERVICE_TYPE=volumev2
CINDER_V3_SERVICE_NAME=cinderv3
CINDER_V3_SERVICE_TYPE=volumev3
CINDER_USER_NAME=cinderTWO
CINDER_PASSWORD=password2WO*

GLANCE_SERVICE_NAME=glance
GLANCE_SERVICE_TYPE=image
GLANCE_USER_NAME=glanceTWO
GLANCE_PASSWORD=password2WO*

NOVA_USER_NAME=novaTWO
NOVA_PASSWORD=password2WO*
NOVA_SERVICE_NAME=nova
NOVA_SERVICE_TYPE=compute
PLACEMENT_USER_NAME=placement
PLACEMENT_PASSWORD=password2WO*
PLACEMENT_SERVICE_NAME=placement
PLACEMENT_SERVICE_TYPE=placement
NOVA_V3_SERVICE_NAME=novav3
NOVA_V3_SERVICE_TYPE=computev3
NEUTRON_USER_NAME=neutronTWO
NEUTRON_PASSWORD=password2WO*
NEUTRON_SERVICE_NAME=neutron
NEUTRON_SERVICE_TYPE=network
SYSINV_USER_NAME=sysinvTWO
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
PATCHING_USER_NAME=patchingTWO
PATCHING_PASSWORD=password2WO*
PATCHING_SERVICE_NAME=patching
PATCHING_SERVICE_TYPE=patching
HEAT_USER_NAME=heatTWO
HEAT_PASSWORD=password2WO*
HEAT_ADMIN_DOMAIN=heat
HEAT_ADMIN_USER_NAME=heat_stack_adminTWO
HEAT_ADMIN_PASSWORD=password2WO*
HEAT_SERVICE_NAME=heat
HEAT_SERVICE_TYPE=orchestration
HEAT_CFN_SERVICE_NAME=heat-cfn
HEAT_CFN_SERVICE_TYPE=cloudformation
CEILOMETER_USER_NAME=ceilometerTWO
CEILOMETER_PASSWORD=password2WO*
CEILOMETER_SERVICE_NAME=ceilometer
CEILOMETER_SERVICE_TYPE=metering
NFV_USER_NAME=vimTWO
NFV_PASSWORD=password2WO*
AODH_USER_NAME=aodhTWO
AODH_PASSWORD=password2WO*
MTCE_USER_NAME=mtceTWO
MTCE_PASSWORD=password2WO*
PANKO_USER_NAME=pankoTWO
PANKO_PASSWORD=password2WO*

[VERSION]
RELEASE = 18.03
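The region_config fixture above is exactly the kind of file parse_system_config and Network.parse_config consume. A quick sketch of inspecting it with the stdlib parser; the file path is illustrative:

    import ConfigParser

    region_config = ConfigParser.RawConfigParser()
    region_config.read('region_config')
    print region_config.get('MGMT_NETWORK', 'CIDR')            # 192.168.204.0/24
    print region_config.get('SHARED_SERVICES', 'REGION_NAME')  # RegionOne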
@@ -0,0 +1,122 @@
[cSYSTEM]
TIMEZONE = UTC
SYSTEM_MODE = duplex

[cPXEBOOT]
PXEBOOT_SUBNET = 192.168.203.0/24
CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2
CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3
CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller

[cMGMT]
MANAGEMENT_MTU = 1500
MANAGEMENT_LINK_CAPACITY = None
MANAGEMENT_SUBNET = 192.168.204.0/24
LAG_MANAGEMENT_INTERFACE = no
MANAGEMENT_INTERFACE = eth0
MANAGEMENT_VLAN = 121
MANAGEMENT_INTERFACE_NAME = eth0.121
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
CONTROLLER_0_ADDRESS = 192.168.204.103
CONTROLLER_1_ADDRESS = 192.168.204.104
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
CONTROLLER_FLOATING_HOSTNAME = controller
CONTROLLER_HOSTNAME_PREFIX = controller-
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
DYNAMIC_ADDRESS_ALLOCATION = no
MANAGEMENT_START_ADDRESS = 192.168.204.102
MANAGEMENT_END_ADDRESS = 192.168.204.199
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28

[cINFRA]
INFRASTRUCTURE_MTU = 1500
INFRASTRUCTURE_LINK_CAPACITY = None
INFRASTRUCTURE_SUBNET = 192.168.205.0/24
LAG_INFRASTRUCTURE_INTERFACE = no
INFRASTRUCTURE_INTERFACE = eth2
INFRASTRUCTURE_INTERFACE_NAME = eth2
CONTROLLER_0_INFRASTRUCTURE_ADDRESS = 192.168.205.103
CONTROLLER_1_INFRASTRUCTURE_ADDRESS = 192.168.205.104
NFS_INFRASTRUCTURE_ADDRESS_1 = 192.168.205.105
INFRASTRUCTURE_START_ADDRESS = 192.168.205.102
INFRASTRUCTURE_END_ADDRESS = 192.168.205.199

[cEXT_OAM]
EXTERNAL_OAM_MTU = 1500
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
LAG_EXTERNAL_OAM_INTERFACE = no
EXTERNAL_OAM_INTERFACE = eth1
EXTERNAL_OAM_INTERFACE_NAME = eth1
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4

[cNETWORK]
VSWITCH_TYPE = avs

[cREGION]
REGION_CONFIG = True
REGION_1_NAME = RegionOne
REGION_2_NAME = RegionTwo
ADMIN_USER_NAME = admin
ADMIN_USER_DOMAIN = admin_domain
ADMIN_PROJECT_NAME = admin
ADMIN_PROJECT_DOMAIN = admin_domain
SERVICE_PROJECT_NAME = FULL_TEST
KEYSTONE_SERVICE_NAME = keystone
KEYSTONE_SERVICE_TYPE = identity
GLANCE_USER_NAME = glanceTWO
GLANCE_PASSWORD = password2WO*
GLANCE_SERVICE_NAME = glance
GLANCE_SERVICE_TYPE = image
GLANCE_CACHED = False
GLANCE_REGION = RegionTwo
NOVA_USER_NAME = novaTWO
NOVA_PASSWORD = password2WO*
NOVA_SERVICE_NAME = nova
NOVA_SERVICE_TYPE = compute
PLACEMENT_USER_NAME = placement
PLACEMENT_PASSWORD = password2WO*
PLACEMENT_SERVICE_NAME = placement
PLACEMENT_SERVICE_TYPE = placement
NEUTRON_USER_NAME = neutronTWO
NEUTRON_PASSWORD = password2WO*
NEUTRON_REGION_NAME = RegionTwo
NEUTRON_SERVICE_NAME = neutron
NEUTRON_SERVICE_TYPE = network
CEILOMETER_USER_NAME = ceilometerTWO
CEILOMETER_PASSWORD = password2WO*
CEILOMETER_SERVICE_NAME = ceilometer
CEILOMETER_SERVICE_TYPE = metering
PATCHING_USER_NAME = patchingTWO
PATCHING_PASSWORD = password2WO*
SYSINV_USER_NAME = sysinvTWO
SYSINV_PASSWORD = password2WO*
SYSINV_SERVICE_NAME = sysinv
SYSINV_SERVICE_TYPE = platform
HEAT_USER_NAME = heatTWO
HEAT_PASSWORD = password2WO*
HEAT_ADMIN_USER_NAME = heat_stack_adminTWO
HEAT_ADMIN_PASSWORD = password2WO*
AODH_USER_NAME = aodhTWO
AODH_PASSWORD = password2WO*
NFV_USER_NAME = vimTWO
NFV_PASSWORD = password2WO*
MTCE_USER_NAME = mtceTWO
MTCE_PASSWORD = password2WO*
PANKO_USER_NAME = pankoTWO
PANKO_PASSWORD = password2WO*
USER_DOMAIN_NAME = service_domain
PROJECT_DOMAIN_NAME = service_domain
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
HEAT_ADMIN_DOMAIN_NAME = heat

[cAUTHENTICATION]
ADMIN_PASSWORD = Li69nux*
@@ -0,0 +1,118 @@
[SYSTEM]
SYSTEM_MODE = duplex

[STORAGE]

[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth0

[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth1

[LOGICAL_INTERFACE_3]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_PORTS=eth2

[MGMT_NETWORK]
VLAN=121
IP_START_ADDRESS=192.168.204.102
IP_END_ADDRESS=192.168.204.199
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
;GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
DYNAMIC_ALLOCATION=N

[INFRA_NETWORK]
;VLAN=124
IP_START_ADDRESS=192.168.205.102
IP_END_ADDRESS=192.168.205.199
CIDR=192.168.205.0/24
LOGICAL_INTERFACE=LOGICAL_INTERFACE_3

[OAM_NETWORK]
;VLAN=
IP_START_ADDRESS=10.10.10.2
IP_END_ADDRESS=10.10.10.99
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2

[REGION2_PXEBOOT_NETWORK]
PXEBOOT_CIDR=192.168.203.0/24

[SHARED_SERVICES]
REGION_NAME=RegionOne
ADMIN_PROJECT_NAME=admin
ADMIN_USER_NAME=admin
ADMIN_PASSWORD=Li69nux*
KEYSTONE_ADMINURL=http://192.168.204.12:35357/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
SERVICE_PROJECT_NAME=FULL_TEST

GLANCE_SERVICE_NAME=glance
GLANCE_SERVICE_TYPE=image
CINDER_SERVICE_NAME=cinder
CINDER_SERVICE_TYPE=volume
CINDER_V2_SERVICE_NAME=cinderv2
CINDER_V2_SERVICE_TYPE=volumev2
CINDER_V3_SERVICE_NAME=cinderv3
CINDER_V3_SERVICE_TYPE=volumev3

[REGION_2_SERVICES]
REGION_NAME=RegionTwo
NOVA_USER_NAME=novaTWO
NOVA_PASSWORD=password2WO*
NOVA_SERVICE_NAME=nova
NOVA_SERVICE_TYPE=compute
PLACEMENT_USER_NAME=placement
PLACEMENT_PASSWORD=password2WO*
PLACEMENT_SERVICE_NAME=placement
PLACEMENT_SERVICE_TYPE=placement
NOVA_V3_SERVICE_NAME=novav3
NOVA_V3_SERVICE_TYPE=computev3
NEUTRON_USER_NAME=neutronTWO
NEUTRON_PASSWORD=password2WO*
NEUTRON_SERVICE_NAME=neutron
NEUTRON_SERVICE_TYPE=network
SYSINV_USER_NAME=sysinvTWO
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
PATCHING_USER_NAME=patchingTWO
PATCHING_PASSWORD=password2WO*
PATCHING_SERVICE_NAME=patching
PATCHING_SERVICE_TYPE=patching
HEAT_USER_NAME=heatTWO
HEAT_PASSWORD=password2WO*
HEAT_ADMIN_DOMAIN=heat
HEAT_ADMIN_USER_NAME=heat_stack_adminTWO
HEAT_ADMIN_PASSWORD=password2WO*
HEAT_SERVICE_NAME=heat
HEAT_SERVICE_TYPE=orchestration
HEAT_CFN_SERVICE_NAME=heat-cfn
HEAT_CFN_SERVICE_TYPE=cloudformation
CEILOMETER_USER_NAME=ceilometerTWO
CEILOMETER_PASSWORD=password2WO*
CEILOMETER_SERVICE_NAME=ceilometer
CEILOMETER_SERVICE_TYPE=metering
NFV_USER_NAME=vimTWO
NFV_PASSWORD=password2WO*
AODH_USER_NAME=aodhTWO
AODH_PASSWORD=password2WO*
MTCE_USER_NAME=mtceTWO
MTCE_PASSWORD=password2WO*
PANKO_USER_NAME=pankoTWO
PANKO_PASSWORD=password2WO*

[VERSION]
RELEASE = 18.03
@@ -0,0 +1,123 @@
[cSYSTEM]
TIMEZONE = UTC
SYSTEM_MODE = duplex

[cPXEBOOT]
PXEBOOT_SUBNET = 192.168.203.0/24
CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2
CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3
CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller

[cMGMT]
MANAGEMENT_MTU = 1500
MANAGEMENT_LINK_CAPACITY = None
MANAGEMENT_SUBNET = 192.168.204.0/24
LAG_MANAGEMENT_INTERFACE = no
MANAGEMENT_INTERFACE = eth0
MANAGEMENT_VLAN = 121
MANAGEMENT_INTERFACE_NAME = eth0.121
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
CONTROLLER_0_ADDRESS = 192.168.204.103
CONTROLLER_1_ADDRESS = 192.168.204.104
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
CONTROLLER_FLOATING_HOSTNAME = controller
CONTROLLER_HOSTNAME_PREFIX = controller-
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
DYNAMIC_ADDRESS_ALLOCATION = no
MANAGEMENT_START_ADDRESS = 192.168.204.102
MANAGEMENT_END_ADDRESS = 192.168.204.199
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28

[cINFRA]
INFRASTRUCTURE_MTU = 1500
INFRASTRUCTURE_LINK_CAPACITY = None
INFRASTRUCTURE_SUBNET = 192.168.205.0/24
LAG_INFRASTRUCTURE_INTERFACE = no
INFRASTRUCTURE_INTERFACE = eth2
INFRASTRUCTURE_INTERFACE_NAME = eth2
CONTROLLER_0_INFRASTRUCTURE_ADDRESS = 192.168.205.103
CONTROLLER_1_INFRASTRUCTURE_ADDRESS = 192.168.205.104
NFS_INFRASTRUCTURE_ADDRESS_1 = 192.168.205.105
INFRASTRUCTURE_START_ADDRESS = 192.168.205.102
INFRASTRUCTURE_END_ADDRESS = 192.168.205.199

[cEXT_OAM]
EXTERNAL_OAM_MTU = 1500
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
LAG_EXTERNAL_OAM_INTERFACE = no
EXTERNAL_OAM_INTERFACE = eth1
EXTERNAL_OAM_INTERFACE_NAME = eth1
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4

[cNETWORK]
VSWITCH_TYPE = avs

[cREGION]
REGION_CONFIG = True
REGION_1_NAME = RegionOne
REGION_2_NAME = RegionTwo
ADMIN_USER_NAME = admin
ADMIN_USER_DOMAIN = Default
ADMIN_PROJECT_NAME = admin
ADMIN_PROJECT_DOMAIN = Default
SERVICE_PROJECT_NAME = FULL_TEST
KEYSTONE_SERVICE_NAME = keystone
KEYSTONE_SERVICE_TYPE = identity
GLANCE_SERVICE_NAME = glance
GLANCE_SERVICE_TYPE = image
GLANCE_CACHED = False
GLANCE_REGION = RegionOne
NOVA_USER_NAME = novaTWO
NOVA_PASSWORD = password2WO*
NOVA_SERVICE_NAME = nova
NOVA_SERVICE_TYPE = compute
PLACEMENT_USER_NAME = placement
PLACEMENT_PASSWORD = password2WO*
PLACEMENT_SERVICE_NAME = placement
PLACEMENT_SERVICE_TYPE = placement
NEUTRON_USER_NAME = neutronTWO
NEUTRON_PASSWORD = password2WO*
NEUTRON_REGION_NAME = RegionTwo
NEUTRON_SERVICE_NAME = neutron
NEUTRON_SERVICE_TYPE = network
CEILOMETER_USER_NAME = ceilometerTWO
CEILOMETER_PASSWORD = password2WO*
CEILOMETER_SERVICE_NAME = ceilometer
CEILOMETER_SERVICE_TYPE = metering
PATCHING_USER_NAME = patchingTWO
PATCHING_PASSWORD = password2WO*
SYSINV_USER_NAME = sysinvTWO
SYSINV_PASSWORD = password2WO*
SYSINV_SERVICE_NAME = sysinv
SYSINV_SERVICE_TYPE = platform
HEAT_USER_NAME = heatTWO
HEAT_PASSWORD = password2WO*
HEAT_ADMIN_USER_NAME = heat_stack_adminTWO
HEAT_ADMIN_PASSWORD = password2WO*
AODH_USER_NAME = aodhTWO
AODH_PASSWORD = password2WO*
NFV_USER_NAME = vimTWO
NFV_PASSWORD = password2WO*
MTCE_USER_NAME = mtceTWO
MTCE_PASSWORD = password2WO*
PANKO_USER_NAME = pankoTWO
PANKO_PASSWORD = password2WO*
USER_DOMAIN_NAME = Default
PROJECT_DOMAIN_NAME = Default
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
GLANCE_ADMIN_URI = http://192.168.204.12:9292/v2
GLANCE_PUBLIC_URI = http://10.10.10.2:9292/v2
GLANCE_INTERNAL_URI = http://192.168.204.12:9292/v2
HEAT_ADMIN_DOMAIN_NAME = heat

[cAUTHENTICATION]
ADMIN_PASSWORD = Li69nux*
@@ -0,0 +1 @@
# Dummy certificate file
@@ -0,0 +1,78 @@
[cSYSTEM]
# System Configuration
SYSTEM_MODE=duplex
TIMEZONE=UTC

[cPXEBOOT]
# PXEBoot Network Support Configuration
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller

[cMGMT]
# Management Network Configuration
MANAGEMENT_INTERFACE_NAME=eth1
MANAGEMENT_INTERFACE=eth1
MANAGEMENT_MTU=1500
MANAGEMENT_LINK_CAPACITY=1000
MANAGEMENT_SUBNET=192.168.204.0/24
LAG_MANAGEMENT_INTERFACE=no
CONTROLLER_FLOATING_ADDRESS=192.168.204.2
CONTROLLER_0_ADDRESS=192.168.204.3
CONTROLLER_1_ADDRESS=192.168.204.4
NFS_MANAGEMENT_ADDRESS_1=192.168.204.7
CONTROLLER_FLOATING_HOSTNAME=controller
CONTROLLER_HOSTNAME_PREFIX=controller-
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
DYNAMIC_ADDRESS_ALLOCATION=yes
MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28

[cINFRA]
# Infrastructure Network Configuration
INFRASTRUCTURE_INTERFACE_NAME=eth2
INFRASTRUCTURE_INTERFACE=eth2
INFRASTRUCTURE_VLAN=
INFRASTRUCTURE_MTU=1500
INFRASTRUCTURE_LINK_CAPACITY=1000
INFRASTRUCTURE_SUBNET=192.168.205.0/24
LAG_INFRASTRUCTURE_INTERFACE=no
CONTROLLER_0_INFRASTRUCTURE_ADDRESS=192.168.205.3
CONTROLLER_1_INFRASTRUCTURE_ADDRESS=192.168.205.4
NFS_INFRASTRUCTURE_ADDRESS_1=192.168.205.7
CONTROLLER_INFRASTRUCTURE_HOSTNAME_SUFFIX=-infra
INFRASTRUCTURE_START_ADDRESS=192.168.205.2
INFRASTRUCTURE_END_ADDRESS=192.168.205.254

[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
EXTERNAL_OAM_INTERFACE=eth0
EXTERNAL_OAM_VLAN=NC
EXTERNAL_OAM_MTU=1500
LAG_EXTERNAL_OAM_INTERFACE=no
EXTERNAL_OAM_SUBNET=10.10.10.0/24
EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
EXTERNAL_OAM_1_ADDRESS=10.10.10.4

[cNETWORK]
# Data Network Configuration
VSWITCH_TYPE=avs
NEUTRON_L2_PLUGIN=ml2
NEUTRON_L2_AGENT=vswitch
NEUTRON_L3_EXT_BRIDGE=provider
NEUTRON_ML2_MECHANISM_DRIVERS=vswitch,sriovnicswitch
NEUTRON_ML2_TYPE_DRIVERS=managed_flat,managed_vlan,managed_vxlan
NEUTRON_ML2_TENANT_NETWORK_TYPES=vlan,vxlan
NEUTRON_ML2_SRIOV_AGENT_REQUIRED=False
NEUTRON_HOST_DRIVER=neutron.plugins.wrs.drivers.host.DefaultHostDriver
NEUTRON_FM_DRIVER=neutron.plugins.wrs.drivers.fm.DefaultFmDriver
NEUTRON_NETWORK_SCHEDULER=neutron.scheduler.dhcp_host_agent_scheduler.HostChanceScheduler
NEUTRON_ROUTER_SCHEDULER=neutron.scheduler.l3_host_agent_scheduler.HostChanceScheduler

[cSECURITY]
[cREGION]
# Region Configuration
REGION_CONFIG=False

[cAUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
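A hedged sketch of the sanity check a [cMGMT] block like the one above implies, assuming configparser plus the third-party netaddr library (the file name is a placeholder): every fixed management address should fall inside MANAGEMENT_SUBNET.

import configparser
from netaddr import IPAddress, IPNetwork

cfg = configparser.ConfigParser(interpolation=None)
cfg.optionxform = str
cfg.read('system_config.ini')  # placeholder file name

subnet = IPNetwork(cfg.get('cMGMT', 'MANAGEMENT_SUBNET'))
for key in ('CONTROLLER_FLOATING_ADDRESS', 'CONTROLLER_0_ADDRESS',
            'CONTROLLER_1_ADDRESS', 'NFS_MANAGEMENT_ADDRESS_1'):
    address = IPAddress(cfg.get('cMGMT', key))
    if address not in subnet:
        raise ValueError('%s=%s is outside %s' % (key, address, subnet))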
@@ -0,0 +1,84 @@
[cSYSTEM]
# System Configuration
SYSTEM_MODE=duplex
TIMEZONE=UTC

[cPXEBOOT]
# PXEBoot Network Support Configuration
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller

[cMGMT]
# Management Network Configuration
MANAGEMENT_INTERFACE_NAME=eth1
MANAGEMENT_INTERFACE=eth1
MANAGEMENT_MTU=1500
MANAGEMENT_LINK_CAPACITY=1000
MANAGEMENT_SUBNET=192.168.204.0/24
LAG_MANAGEMENT_INTERFACE=no
CONTROLLER_FLOATING_ADDRESS=192.168.204.2
CONTROLLER_0_ADDRESS=192.168.204.3
CONTROLLER_1_ADDRESS=192.168.204.4
NFS_MANAGEMENT_ADDRESS_1=192.168.204.5
NFS_MANAGEMENT_ADDRESS_2=192.168.204.6
CONTROLLER_FLOATING_HOSTNAME=controller
CONTROLLER_HOSTNAME_PREFIX=controller-
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
DYNAMIC_ADDRESS_ALLOCATION=yes
MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28

[cINFRA]
# Infrastructure Network Configuration
INFRASTRUCTURE_INTERFACE_NAME=NC
INFRASTRUCTURE_INTERFACE=NC
INFRASTRUCTURE_VLAN=NC
INFRASTRUCTURE_MTU=NC
INFRASTRUCTURE_LINK_CAPACITY=NC
INFRASTRUCTURE_SUBNET=NC
LAG_INFRASTRUCTURE_INTERFACE=no
INFRASTRUCTURE_BOND_MEMBER_0=NC
INFRASTRUCTURE_BOND_MEMBER_1=NC
INFRASTRUCTURE_BOND_POLICY=NC
CONTROLLER_0_INFRASTRUCTURE_ADDRESS=NC
CONTROLLER_1_INFRASTRUCTURE_ADDRESS=NC
NFS_INFRASTRUCTURE_ADDRESS_1=NC
STORAGE_0_INFRASTRUCTURE_ADDRESS=NC
STORAGE_1_INFRASTRUCTURE_ADDRESS=NC
CONTROLLER_INFRASTRUCTURE_HOSTNAME_SUFFIX=NC
INFRASTRUCTURE_START_ADDRESS=NC
INFRASTRUCTURE_END_ADDRESS=NC

[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
EXTERNAL_OAM_INTERFACE=eth0
EXTERNAL_OAM_VLAN=NC
EXTERNAL_OAM_MTU=1500
LAG_EXTERNAL_OAM_INTERFACE=no
EXTERNAL_OAM_SUBNET=10.10.10.0/24
EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
EXTERNAL_OAM_1_ADDRESS=10.10.10.4

[cNETWORK]
# Data Network Configuration
VSWITCH_TYPE=avs
NEUTRON_L2_PLUGIN=ml2
NEUTRON_L2_AGENT=vswitch
NEUTRON_L3_EXT_BRIDGE=provider
NEUTRON_ML2_MECHANISM_DRIVERS=vswitch,sriovnicswitch
NEUTRON_ML2_TYPE_DRIVERS=managed_flat,managed_vlan,managed_vxlan
NEUTRON_ML2_TENANT_NETWORK_TYPES=vlan,vxlan
NEUTRON_ML2_SRIOV_AGENT_REQUIRED=False
NEUTRON_HOST_DRIVER=neutron.plugins.wrs.drivers.host.DefaultHostDriver
NEUTRON_FM_DRIVER=neutron.plugins.wrs.drivers.fm.DefaultFmDriver
NEUTRON_NETWORK_SCHEDULER=neutron.scheduler.dhcp_host_agent_scheduler.HostChanceScheduler
NEUTRON_ROUTER_SCHEDULER=neutron.scheduler.l3_host_agent_scheduler.HostChanceScheduler

[cSECURITY]
[cREGION]
# Region Configuration
REGION_CONFIG=False

[cAUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
@@ -0,0 +1,84 @@
[cSYSTEM]
# System Configuration
SYSTEM_MODE=duplex
TIMEZONE=UTC

[cPXEBOOT]
# PXEBoot Network Support Configuration
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller

[cMGMT]
# Management Network Configuration
MANAGEMENT_INTERFACE_NAME=eth1
MANAGEMENT_INTERFACE=eth1
MANAGEMENT_MTU=1500
MANAGEMENT_LINK_CAPACITY=1000
MANAGEMENT_SUBNET=1234::/64
LAG_MANAGEMENT_INTERFACE=no
CONTROLLER_FLOATING_ADDRESS=1234::2
CONTROLLER_0_ADDRESS=1234::3
CONTROLLER_1_ADDRESS=1234::4
NFS_MANAGEMENT_ADDRESS_1=1234::5
NFS_MANAGEMENT_ADDRESS_2=1234::6
CONTROLLER_FLOATING_HOSTNAME=controller
CONTROLLER_HOSTNAME_PREFIX=controller-
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
DYNAMIC_ADDRESS_ALLOCATION=yes
MANAGEMENT_MULTICAST_SUBNET=ff08::1:1:0/124

[cINFRA]
# Infrastructure Network Configuration
INFRASTRUCTURE_INTERFACE_NAME=NC
INFRASTRUCTURE_INTERFACE=NC
INFRASTRUCTURE_VLAN=NC
INFRASTRUCTURE_MTU=NC
INFRASTRUCTURE_LINK_CAPACITY=NC
INFRASTRUCTURE_SUBNET=NC
LAG_INFRASTRUCTURE_INTERFACE=no
INFRASTRUCTURE_BOND_MEMBER_0=NC
INFRASTRUCTURE_BOND_MEMBER_1=NC
INFRASTRUCTURE_BOND_POLICY=NC
CONTROLLER_0_INFRASTRUCTURE_ADDRESS=NC
CONTROLLER_1_INFRASTRUCTURE_ADDRESS=NC
NFS_INFRASTRUCTURE_ADDRESS_1=NC
STORAGE_0_INFRASTRUCTURE_ADDRESS=NC
STORAGE_1_INFRASTRUCTURE_ADDRESS=NC
CONTROLLER_INFRASTRUCTURE_HOSTNAME_SUFFIX=NC
INFRASTRUCTURE_START_ADDRESS=NC
INFRASTRUCTURE_END_ADDRESS=NC

[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
EXTERNAL_OAM_INTERFACE=eth0
EXTERNAL_OAM_VLAN=NC
EXTERNAL_OAM_MTU=1500
LAG_EXTERNAL_OAM_INTERFACE=no
EXTERNAL_OAM_SUBNET=abcd::/64
EXTERNAL_OAM_GATEWAY_ADDRESS=abcd::1
EXTERNAL_OAM_FLOATING_ADDRESS=abcd::2
EXTERNAL_OAM_0_ADDRESS=abcd::3
EXTERNAL_OAM_1_ADDRESS=abcd::4

[cNETWORK]
# Data Network Configuration
VSWITCH_TYPE=avs
NEUTRON_L2_PLUGIN=ml2
NEUTRON_L2_AGENT=vswitch
NEUTRON_L3_EXT_BRIDGE=provider
NEUTRON_ML2_MECHANISM_DRIVERS=vswitch,sriovnicswitch
NEUTRON_ML2_TYPE_DRIVERS=managed_flat,managed_vlan,managed_vxlan
NEUTRON_ML2_TENANT_NETWORK_TYPES=vlan,vxlan
NEUTRON_ML2_SRIOV_AGENT_REQUIRED=False
NEUTRON_HOST_DRIVER=neutron.plugins.wrs.drivers.host.DefaultHostDriver
NEUTRON_FM_DRIVER=neutron.plugins.wrs.drivers.fm.DefaultFmDriver
NEUTRON_NETWORK_SCHEDULER=neutron.scheduler.dhcp_host_agent_scheduler.HostChanceScheduler
NEUTRON_ROUTER_SCHEDULER=neutron.scheduler.l3_host_agent_scheduler.HostChanceScheduler

[cSECURITY]
[cREGION]
# Region Configuration
REGION_CONFIG=False

[cAUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
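The file above repeats the same layout with IPv6 values; address handling via netaddr is family-agnostic, so the earlier checks carry over unchanged. An illustrative sketch using the literals from the file:

from netaddr import IPAddress, IPNetwork

mgmt = IPNetwork('1234::/64')
# Floating, unit and NFS addresses all sit inside the management subnet.
assert all(IPAddress(a) in mgmt
           for a in ('1234::2', '1234::3', '1234::4', '1234::5', '1234::6'))
# ff08:: falls in ff00::/8, i.e. IPv6 multicast, mirroring 239.1.1.0/28 on IPv4.
assert IPNetwork('ff08::1:1:0/124').ip.is_multicast()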
@@ -0,0 +1,145 @@
[cSYSTEM]
# System Configuration
SYSTEM_MODE=duplex
TIMEZONE=UTC

[cPXEBOOT]
# PXEBoot Network Support Configuration
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller

[cMGMT]
# Management Network Configuration
MANAGEMENT_INTERFACE_NAME=eth1
MANAGEMENT_INTERFACE=eth1
MANAGEMENT_MTU=1500
MANAGEMENT_LINK_CAPACITY=1000
MANAGEMENT_SUBNET=192.168.204.0/24
LAG_MANAGEMENT_INTERFACE=no
CONTROLLER_FLOATING_ADDRESS=192.168.204.102
CONTROLLER_0_ADDRESS=192.168.204.103
CONTROLLER_1_ADDRESS=192.168.204.104
NFS_MANAGEMENT_ADDRESS_1=192.168.204.105
NFS_MANAGEMENT_ADDRESS_2=192.168.204.106
CONTROLLER_FLOATING_HOSTNAME=controller
CONTROLLER_HOSTNAME_PREFIX=controller-
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
DYNAMIC_ADDRESS_ALLOCATION=yes
MANAGEMENT_START_ADDRESS=192.168.204.102
MANAGEMENT_END_ADDRESS=192.168.204.199
MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28

[cINFRA]
# Infrastructure Network Configuration
INFRASTRUCTURE_INTERFACE_NAME=NC
INFRASTRUCTURE_INTERFACE=NC
INFRASTRUCTURE_VLAN=NC
INFRASTRUCTURE_MTU=NC
INFRASTRUCTURE_LINK_CAPACITY=NC
INFRASTRUCTURE_SUBNET=NC
LAG_INFRASTRUCTURE_INTERFACE=no
INFRASTRUCTURE_BOND_MEMBER_0=NC
INFRASTRUCTURE_BOND_MEMBER_1=NC
INFRASTRUCTURE_BOND_POLICY=NC
CONTROLLER_0_INFRASTRUCTURE_ADDRESS=NC
CONTROLLER_1_INFRASTRUCTURE_ADDRESS=NC
NFS_INFRASTRUCTURE_ADDRESS_1=NC
STORAGE_0_INFRASTRUCTURE_ADDRESS=NC
STORAGE_1_INFRASTRUCTURE_ADDRESS=NC
CONTROLLER_INFRASTRUCTURE_HOSTNAME_SUFFIX=NC
INFRASTRUCTURE_START_ADDRESS=NC
INFRASTRUCTURE_END_ADDRESS=NC

[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
EXTERNAL_OAM_INTERFACE=eth0
EXTERNAL_OAM_VLAN=NC
EXTERNAL_OAM_MTU=1500
LAG_EXTERNAL_OAM_INTERFACE=no
EXTERNAL_OAM_SUBNET=10.10.10.0/24
EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
EXTERNAL_OAM_1_ADDRESS=10.10.10.4

[cNETWORK]
# Data Network Configuration
VSWITCH_TYPE=avs
NEUTRON_L2_PLUGIN=ml2
NEUTRON_L2_AGENT=vswitch
NEUTRON_L3_EXT_BRIDGE=provider
NEUTRON_ML2_MECHANISM_DRIVERS=vswitch,sriovnicswitch
NEUTRON_ML2_TYPE_DRIVERS=managed_flat,managed_vlan,managed_vxlan
NEUTRON_ML2_TENANT_NETWORK_TYPES=vlan,vxlan
NEUTRON_ML2_SRIOV_AGENT_REQUIRED=False
NEUTRON_HOST_DRIVER=neutron.plugins.wrs.drivers.host.DefaultHostDriver
NEUTRON_FM_DRIVER=neutron.plugins.wrs.drivers.fm.DefaultFmDriver
NEUTRON_NETWORK_SCHEDULER=neutron.scheduler.dhcp_host_agent_scheduler.HostChanceScheduler
NEUTRON_ROUTER_SCHEDULER=neutron.scheduler.l3_host_agent_scheduler.HostChanceScheduler

[cSECURITY]
[cREGION]
# Region Configuration
REGION_CONFIG=True
REGION_1_NAME=RegionOne
REGION_2_NAME=RegionTwo
ADMIN_USER_NAME=admin
ADMIN_USER_DOMAIN=Default
ADMIN_PROJECT_NAME=admin
ADMIN_PROJECT_DOMAIN=Default
SERVICE_PROJECT_NAME=service
SERVICE_USER_DOMAIN=Default
SERVICE_PROJECT_DOMAIN=Default
KEYSTONE_AUTH_URI=http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI=http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI=http://10.10.10.2:8081/keystone/main/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
GLANCE_SERVICE_NAME=glance
GLANCE_SERVICE_TYPE=image
GLANCE_CACHED=False
GLANCE_REGION=RegionOne
GLANCE_ADMIN_URI=http://192.168.204.12:9292/v2
GLANCE_INTERNAL_URI=http://192.168.204.12:9292/v2
GLANCE_PUBLIC_URI=http://10.10.10.2:9292/v2
NOVA_USER_NAME=nova
NOVA_PASSWORD=password2WO*
NOVA_SERVICE_NAME=nova
NOVA_SERVICE_TYPE=compute
PLACEMENT_USER_NAME=placement
PLACEMENT_PASSWORD=password2WO*
PLACEMENT_SERVICE_NAME=placement
PLACEMENT_SERVICE_TYPE=placement
NEUTRON_USER_NAME=neutron
NEUTRON_PASSWORD=password2WO*
NEUTRON_REGION_NAME=RegionTwo
NEUTRON_SERVICE_NAME=neutron
NEUTRON_SERVICE_TYPE=network
CEILOMETER_USER_NAME=ceilometer
CEILOMETER_PASSWORD=password2WO*
CEILOMETER_SERVICE_NAME=ceilometer
CEILOMETER_SERVICE_TYPE=metering
PATCHING_USER_NAME=patching
PATCHING_PASSWORD=password2WO*
SYSINV_USER_NAME=sysinv
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
HEAT_USER_NAME=heat
HEAT_PASSWORD=password2WO*
HEAT_ADMIN_DOMAIN_NAME=heat
HEAT_ADMIN_USER_NAME=heat_stack_admin
HEAT_ADMIN_PASSWORD=password2WO*
NFV_USER_NAME=vim
NFV_PASSWORD=password2WO*
AODH_USER_NAME=aodh
AODH_PASSWORD=password2WO*
PANKO_USER_NAME=panko
PANKO_PASSWORD=password2WO*
MTCE_USER_NAME=mtce
MTCE_PASSWORD=password2WO*

[cAUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
@@ -0,0 +1,146 @@
[cSYSTEM]
# System Configuration
SYSTEM_MODE=duplex
TIMEZONE=UTC

[cPXEBOOT]
# PXEBoot Network Support Configuration
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller

[cMGMT]
# Management Network Configuration
MANAGEMENT_INTERFACE_NAME=eth1
MANAGEMENT_INTERFACE=eth1
MANAGEMENT_MTU=1500
MANAGEMENT_LINK_CAPACITY=1000
MANAGEMENT_SUBNET=192.168.204.0/24
LAG_MANAGEMENT_INTERFACE=no
CONTROLLER_FLOATING_ADDRESS=192.168.204.102
CONTROLLER_0_ADDRESS=192.168.204.103
CONTROLLER_1_ADDRESS=192.168.204.104
NFS_MANAGEMENT_ADDRESS_1=192.168.204.105
NFS_MANAGEMENT_ADDRESS_2=192.168.204.106
CONTROLLER_FLOATING_HOSTNAME=controller
CONTROLLER_HOSTNAME_PREFIX=controller-
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
DYNAMIC_ADDRESS_ALLOCATION=yes
MANAGEMENT_START_ADDRESS=192.168.204.102
MANAGEMENT_END_ADDRESS=192.168.204.199
MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28

[cINFRA]
# Infrastructure Network Configuration
INFRASTRUCTURE_INTERFACE_NAME=NC
INFRASTRUCTURE_INTERFACE=NC
INFRASTRUCTURE_VLAN=NC
INFRASTRUCTURE_MTU=NC
INFRASTRUCTURE_LINK_CAPACITY=NC
INFRASTRUCTURE_SUBNET=NC
LAG_INFRASTRUCTURE_INTERFACE=no
INFRASTRUCTURE_BOND_MEMBER_0=NC
INFRASTRUCTURE_BOND_MEMBER_1=NC
INFRASTRUCTURE_BOND_POLICY=NC
CONTROLLER_0_INFRASTRUCTURE_ADDRESS=NC
CONTROLLER_1_INFRASTRUCTURE_ADDRESS=NC
NFS_INFRASTRUCTURE_ADDRESS_1=NC
STORAGE_0_INFRASTRUCTURE_ADDRESS=NC
STORAGE_1_INFRASTRUCTURE_ADDRESS=NC
CONTROLLER_INFRASTRUCTURE_HOSTNAME_SUFFIX=NC
INFRASTRUCTURE_START_ADDRESS=NC
INFRASTRUCTURE_END_ADDRESS=NC

[cEXT_OAM]
# External OAM Network Configuration
EXTERNAL_OAM_INTERFACE_NAME=eth0
EXTERNAL_OAM_INTERFACE=eth0
EXTERNAL_OAM_VLAN=NC
EXTERNAL_OAM_MTU=1500
LAG_EXTERNAL_OAM_INTERFACE=no
EXTERNAL_OAM_SUBNET=10.10.10.0/24
EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
EXTERNAL_OAM_1_ADDRESS=10.10.10.4

[cNETWORK]
# Data Network Configuration
VSWITCH_TYPE=nuage_vrs
NEUTRON_L2_PLUGIN=NC
NEUTRON_L2_AGENT=nuage_vrs
NEUTRON_L3_EXT_BRIDGE=provider
NEUTRON_ML2_MECHANISM_DRIVERS=NC
NEUTRON_ML2_TYPE_DRIVERS=NC
NEUTRON_ML2_TENANT_NETWORK_TYPES=vlan,vxlan
NEUTRON_ML2_SRIOV_AGENT_REQUIRED=NC
NEUTRON_HOST_DRIVER=NC
NEUTRON_FM_DRIVER=NC
NEUTRON_NETWORK_SCHEDULER=NC
NEUTRON_ROUTER_SCHEDULER=NC
METADATA_PROXY_SHARED_SECRET=NuageNetworksSharedSecret

[cSECURITY]
[cREGION]
# Region Configuration
REGION_CONFIG=True
REGION_1_NAME=RegionOne
REGION_2_NAME=RegionTwo
ADMIN_USER_NAME=admin
ADMIN_USER_DOMAIN=Default
ADMIN_PROJECT_NAME=admin
ADMIN_PROJECT_DOMAIN=Default
SERVICE_PROJECT_NAME=service
SERVICE_USER_DOMAIN=Default
SERVICE_PROJECT_DOMAIN=Default
KEYSTONE_AUTH_URI=http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI=http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI=http://10.10.10.2:8081/keystone/main/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
GLANCE_SERVICE_NAME=glance
GLANCE_SERVICE_TYPE=image
GLANCE_CACHED=False
GLANCE_REGION=RegionOne
GLANCE_ADMIN_URI=http://192.168.204.12:9292/v2
GLANCE_INTERNAL_URI=http://192.168.204.12:9292/v2
GLANCE_PUBLIC_URI=http://10.10.10.2:9292/v2
NOVA_USER_NAME=nova
NOVA_PASSWORD=password2WO*
NOVA_SERVICE_NAME=nova
NOVA_SERVICE_TYPE=compute
PLACEMENT_USER_NAME=placement
PLACEMENT_PASSWORD=password2WO*
PLACEMENT_SERVICE_NAME=placement
PLACEMENT_SERVICE_TYPE=placement
NEUTRON_USER_NAME=neutron
NEUTRON_PASSWORD=password2WO*
NEUTRON_REGION_NAME=RegionOne
NEUTRON_SERVICE_NAME=neutron
NEUTRON_SERVICE_TYPE=network
CEILOMETER_USER_NAME=ceilometer
CEILOMETER_PASSWORD=password2WO*
CEILOMETER_SERVICE_NAME=ceilometer
CEILOMETER_SERVICE_TYPE=metering
PATCHING_USER_NAME=patching
PATCHING_PASSWORD=password2WO*
SYSINV_USER_NAME=sysinv
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
HEAT_USER_NAME=heat
HEAT_PASSWORD=password2WO*
HEAT_ADMIN_DOMAIN_NAME=heat
HEAT_ADMIN_USER_NAME=heat_stack_admin
HEAT_ADMIN_PASSWORD=password2WO*
NFV_USER_NAME=vim
NFV_PASSWORD=password2WO*
AODH_USER_NAME=aodh
AODH_PASSWORD=password2WO*
PANKO_USER_NAME=panko
PANKO_PASSWORD=password2WO*
MTCE_USER_NAME=mtce
MTCE_PASSWORD=password2WO*

[cAUTHENTICATION]
ADMIN_PASSWORD=Li69nux*
@@ -0,0 +1,116 @@
[SYSTEM]
SYSTEM_MODE=duplex
TIMEZONE=UTC

[STORAGE]

;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_LINK_CAPACITY <Mbps>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>

[LOGICAL_INTERFACE_1]
LAG_INTERFACE=Y
LAG_MODE=4
INTERFACE_MTU=1500
INTERFACE_LINK_CAPACITY=1000
INTERFACE_PORTS=eth1,eth2

[CLM_NETWORK]
CLM_VLAN=123
CLM_IP_START_ADDRESS=192.168.204.102
CLM_IP_END_ADDRESS=192.168.204.199
CLM_CIDR=192.168.204.0/24
CLM_MULTICAST_CIDR=239.1.1.0/28
CLM_GATEWAY=192.168.204.12
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[BLS_NETWORK]
BLS_VLAN=124
BLS_IP_START_ADDRESS=192.168.205.102
BLS_IP_END_ADDRESS=192.168.205.199
BLS_CIDR=192.168.205.0/24
BLS_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[CAN_NETWORK]
CAN_VLAN=125
CAN_IP_START_ADDRESS=10.10.10.2
CAN_IP_END_ADDRESS=10.10.10.4
CAN_CIDR=10.10.10.0/24
;CAN_GATEWAY=10.10.10.1
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[REGION2_PXEBOOT_NETWORK]
PXEBOOT_CIDR=192.168.203.0/24

[SHARED_SERVICES]
REGION_NAME=RegionOne
ADMIN_PROJECT_NAME=admin
ADMIN_USER_NAME=admin
ADMIN_PASSWORD=Li69nux*
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
SERVICE_PROJECT_NAME=service
CINDER_SERVICE_NAME=cinder
CINDER_SERVICE_TYPE=volume
CINDER_V2_SERVICE_NAME=cinderv2
CINDER_V2_SERVICE_TYPE=volumev2
CINDER_V3_SERVICE_NAME=cinderv3
CINDER_V3_SERVICE_TYPE=volumev3
GLANCE_SERVICE_NAME=glance
GLANCE_SERVICE_TYPE=image

[REGION_2_SERVICES]
REGION_NAME=RegionTwo
NOVA_USER_NAME=nova
NOVA_PASSWORD=password2WO*
NOVA_SERVICE_NAME=nova
NOVA_SERVICE_TYPE=compute
PLACEMENT_USER_NAME=placement
PLACEMENT_PASSWORD=password2WO*
PLACEMENT_SERVICE_NAME=placement
PLACEMENT_SERVICE_TYPE=placement
NOVA_V3_SERVICE_NAME=novav3
NOVA_V3_SERVICE_TYPE=computev3
NEUTRON_USER_NAME=neutron
NEUTRON_PASSWORD=password2WO*
NEUTRON_SERVICE_NAME=neutron
NEUTRON_SERVICE_TYPE=network
SYSINV_USER_NAME=sysinv
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
PATCHING_USER_NAME=patching
PATCHING_PASSWORD=password2WO*
PATCHING_SERVICE_NAME=patching
PATCHING_SERVICE_TYPE=patching
HEAT_USER_NAME=heat
HEAT_PASSWORD=password2WO*
HEAT_ADMIN_DOMAIN=heat
HEAT_ADMIN_USER_NAME=heat_stack_admin
HEAT_ADMIN_PASSWORD=password2WO*
HEAT_SERVICE_NAME=heat
HEAT_SERVICE_TYPE=orchestration
HEAT_CFN_SERVICE_NAME=heat-cfn
HEAT_CFN_SERVICE_TYPE=cloudformation
CEILOMETER_USER_NAME=ceilometer
CEILOMETER_PASSWORD=password2WO*
CEILOMETER_SERVICE_NAME=ceilometer
CEILOMETER_SERVICE_TYPE=metering
NFV_USER_NAME=vim
NFV_PASSWORD=password2WO*
AODH_USER_NAME=aodh
AODH_PASSWORD=password2WO*
MTCE_USER_NAME=mtce
MTCE_PASSWORD=password2WO*
PANKO_USER_NAME=panko
PANKO_PASSWORD=password2WO*

[VERSION]
RELEASE = 18.03
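The ';' comment block in the file above documents the LOGICAL_INTERFACE_<number> schema. A sketch of walking those sections generically; only the section and option names come from the files shown here, the rest is illustrative:

import configparser

cfg = configparser.ConfigParser(interpolation=None)
cfg.optionxform = str
cfg.read('region_config.ini')  # placeholder file name

for section in cfg.sections():
    if not section.startswith('LOGICAL_INTERFACE_'):
        continue
    lag = cfg.get(section, 'LAG_INTERFACE') == 'Y'
    ports = cfg.get(section, 'INTERFACE_PORTS').split(',')
    # ';LAG_MODE=' is commented out in non-LAG sections, so read it only
    # when link aggregation is enabled.
    mode = cfg.get(section, 'LAG_MODE') if lag else None
    print(section, lag, mode, ports)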
@@ -0,0 +1,128 @@
[cSYSTEM]
TIMEZONE = UTC
SYSTEM_MODE = duplex

[cPXEBOOT]
PXEBOOT_SUBNET = 192.168.203.0/24
CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2
CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3
CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller

[cMGMT]
MANAGEMENT_MTU = 1500
MANAGEMENT_LINK_CAPACITY = 1000
MANAGEMENT_SUBNET = 192.168.204.0/24
LAG_MANAGEMENT_INTERFACE = yes
MANAGEMENT_BOND_MEMBER_0 = eth1
MANAGEMENT_BOND_MEMBER_1 = eth2
MANAGEMENT_BOND_POLICY = 802.3ad
MANAGEMENT_INTERFACE = bond0
MANAGEMENT_VLAN = 123
MANAGEMENT_INTERFACE_NAME = bond0.123
MANAGEMENT_GATEWAY_ADDRESS = 192.168.204.12
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
CONTROLLER_0_ADDRESS = 192.168.204.103
CONTROLLER_1_ADDRESS = 192.168.204.104
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
CONTROLLER_FLOATING_HOSTNAME = controller
CONTROLLER_HOSTNAME_PREFIX = controller-
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
DYNAMIC_ADDRESS_ALLOCATION = no
MANAGEMENT_START_ADDRESS = 192.168.204.102
MANAGEMENT_END_ADDRESS = 192.168.204.199
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28

[cINFRA]
INFRASTRUCTURE_MTU = 1500
INFRASTRUCTURE_LINK_CAPACITY = 1000
INFRASTRUCTURE_SUBNET = 192.168.205.0/24
LAG_INFRASTRUCTURE_INTERFACE = no
INFRASTRUCTURE_INTERFACE = bond0
INFRASTRUCTURE_VLAN = 124
INFRASTRUCTURE_INTERFACE_NAME = bond0.124
CONTROLLER_0_INFRASTRUCTURE_ADDRESS = 192.168.205.103
CONTROLLER_1_INFRASTRUCTURE_ADDRESS = 192.168.205.104
NFS_INFRASTRUCTURE_ADDRESS_1 = 192.168.205.105
INFRASTRUCTURE_START_ADDRESS = 192.168.205.102
INFRASTRUCTURE_END_ADDRESS = 192.168.205.199

[cEXT_OAM]
EXTERNAL_OAM_MTU = 1500
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
LAG_EXTERNAL_OAM_INTERFACE = no
EXTERNAL_OAM_INTERFACE = bond0
EXTERNAL_OAM_VLAN = 125
EXTERNAL_OAM_INTERFACE_NAME = bond0.125
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4

[cNETWORK]
VSWITCH_TYPE = avs

[cREGION]
REGION_CONFIG = True
REGION_1_NAME = RegionOne
REGION_2_NAME = RegionTwo
ADMIN_USER_NAME = admin
ADMIN_USER_DOMAIN = Default
ADMIN_PROJECT_NAME = admin
ADMIN_PROJECT_DOMAIN = Default
SERVICE_PROJECT_NAME = service
KEYSTONE_SERVICE_NAME = keystone
KEYSTONE_SERVICE_TYPE = identity
GLANCE_SERVICE_NAME = glance
GLANCE_SERVICE_TYPE = image
GLANCE_CACHED = False
GLANCE_REGION = RegionOne
NOVA_USER_NAME = nova
NOVA_PASSWORD = password2WO*
NOVA_SERVICE_NAME = nova
NOVA_SERVICE_TYPE = compute
PLACEMENT_USER_NAME = placement
PLACEMENT_PASSWORD = password2WO*
PLACEMENT_SERVICE_NAME = placement
PLACEMENT_SERVICE_TYPE = placement
NEUTRON_USER_NAME = neutron
NEUTRON_PASSWORD = password2WO*
NEUTRON_REGION_NAME = RegionTwo
NEUTRON_SERVICE_NAME = neutron
NEUTRON_SERVICE_TYPE = network
CEILOMETER_USER_NAME = ceilometer
CEILOMETER_PASSWORD = password2WO*
CEILOMETER_SERVICE_NAME = ceilometer
CEILOMETER_SERVICE_TYPE = metering
PATCHING_USER_NAME = patching
PATCHING_PASSWORD = password2WO*
SYSINV_USER_NAME = sysinv
SYSINV_PASSWORD = password2WO*
SYSINV_SERVICE_NAME = sysinv
SYSINV_SERVICE_TYPE = platform
HEAT_USER_NAME = heat
HEAT_PASSWORD = password2WO*
HEAT_ADMIN_USER_NAME = heat_stack_admin
HEAT_ADMIN_PASSWORD = password2WO*
AODH_USER_NAME = aodh
AODH_PASSWORD = password2WO*
NFV_USER_NAME = vim
NFV_PASSWORD = password2WO*
MTCE_USER_NAME = mtce
MTCE_PASSWORD = password2WO*
PANKO_USER_NAME = panko
PANKO_PASSWORD = password2WO*
USER_DOMAIN_NAME = Default
PROJECT_DOMAIN_NAME = Default
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
GLANCE_ADMIN_URI = http://192.168.204.12:9292/v2
GLANCE_PUBLIC_URI = http://10.10.10.2:9292/v2
GLANCE_INTERNAL_URI = http://192.168.204.12:9292/v2
HEAT_ADMIN_DOMAIN_NAME = heat

[cAUTHENTICATION]
ADMIN_PASSWORD = Li69nux*

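The bonded variant above pairs MANAGEMENT_INTERFACE = bond0 with MANAGEMENT_VLAN = 123 and arrives at MANAGEMENT_INTERFACE_NAME = bond0.123, the usual Linux <interface>.<vlan-id> device naming. A one-function sketch of that rule (the helper itself is illustrative, not part of this change):

def vlan_interface_name(interface, vlan):
    """Return the Linux VLAN device name, e.g. ('bond0', '123') -> 'bond0.123'."""
    return '%s.%s' % (interface, vlan) if vlan else interface

assert vlan_interface_name('bond0', '123') == 'bond0.123'   # management
assert vlan_interface_name('bond0', '124') == 'bond0.124'   # infrastructure
assert vlan_interface_name('eth1', '') == 'eth1'            # no VLAN configured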
@@ -0,0 +1,126 @@
[SYSTEM]
SYSTEM_MODE = duplex

[STORAGE]

;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_LINK_CAPACITY <Mbps>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>

[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_LINK_CAPACITY=1000
INTERFACE_PORTS=eth1

[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
;INTERFACE_LINK_CAPACITY=
INTERFACE_PORTS=eth0

[CLM_NETWORK]
;CLM_VLAN=123
CLM_IP_START_ADDRESS=192.168.204.102
CLM_IP_END_ADDRESS=192.168.204.199
CLM_CIDR=192.168.204.0/24
CLM_MULTICAST_CIDR=239.1.1.0/28
;CLM_GATEWAY=192.168.204.12
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

;[BLS_NETWORK]
;BLS_VLAN=124
;BLS_IP_START_ADDRESS=192.168.205.102
;BLS_IP_END_ADDRESS=192.168.205.199
;BLS_CIDR=192.168.205.0/24
;BLS_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[CAN_NETWORK]
;CAN_VLAN=
CAN_IP_START_ADDRESS=10.10.10.2
CAN_IP_END_ADDRESS=10.10.10.4
CAN_CIDR=10.10.10.0/24
CAN_GATEWAY=10.10.10.1
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2

;[REGION2_PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24

[NETWORK]
VSWITCH_TYPE=nuage_vrs
METADATA_PROXY_SHARED_SECRET=NuageNetworksSharedSecret

[SHARED_SERVICES]
REGION_NAME=RegionOne
ADMIN_PROJECT_NAME=admin
ADMIN_USER_NAME=admin
ADMIN_PASSWORD=Li69nux*
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
SERVICE_PROJECT_NAME=service
CINDER_SERVICE_NAME=cinder
CINDER_SERVICE_TYPE=volume
CINDER_V2_SERVICE_NAME=cinderv2
CINDER_V2_SERVICE_TYPE=volumev2
CINDER_V3_SERVICE_NAME=cinderv3
CINDER_V3_SERVICE_TYPE=volumev3
GLANCE_SERVICE_NAME=glance
GLANCE_SERVICE_TYPE=image
NEUTRON_USER_NAME=neutron
NEUTRON_PASSWORD=password2WO*
NEUTRON_SERVICE_NAME=neutron
NEUTRON_SERVICE_TYPE=network

[REGION_2_SERVICES]
REGION_NAME=RegionTwo
NOVA_USER_NAME=nova
NOVA_PASSWORD=password2WO*
NOVA_SERVICE_NAME=nova
NOVA_SERVICE_TYPE=compute
PLACEMENT_USER_NAME=placement
PLACEMENT_PASSWORD=password2WO*
PLACEMENT_SERVICE_NAME=placement
PLACEMENT_SERVICE_TYPE=placement
NOVA_V3_SERVICE_NAME=novav3
NOVA_V3_SERVICE_TYPE=computev3
SYSINV_USER_NAME=sysinv
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
PATCHING_USER_NAME=patching
PATCHING_PASSWORD=password2WO*
PATCHING_SERVICE_NAME=patching
PATCHING_SERVICE_TYPE=patching
HEAT_USER_NAME=heat
HEAT_PASSWORD=password2WO*
HEAT_ADMIN_DOMAIN=heat
HEAT_ADMIN_USER_NAME=heat_stack_admin
HEAT_ADMIN_PASSWORD=password2WO*
HEAT_SERVICE_NAME=heat
HEAT_SERVICE_TYPE=orchestration
HEAT_CFN_SERVICE_NAME=heat-cfn
HEAT_CFN_SERVICE_TYPE=cloudformation
CEILOMETER_USER_NAME=ceilometer
CEILOMETER_PASSWORD=password2WO*
CEILOMETER_SERVICE_NAME=ceilometer
CEILOMETER_SERVICE_TYPE=metering
NFV_USER_NAME=vim
NFV_PASSWORD=password2WO*
AODH_USER_NAME=aodh
AODH_PASSWORD=password2WO*
MTCE_USER_NAME=mtce
MTCE_PASSWORD=password2WO*
PANKO_USER_NAME=panko
PANKO_PASSWORD=password2WO*

[VERSION]
RELEASE = 18.03
@@ -0,0 +1,118 @@
[cSYSTEM]
TIMEZONE = UTC
SYSTEM_MODE = duplex

[cPXEBOOT]
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller

[cMGMT]
MANAGEMENT_MTU = 1500
MANAGEMENT_LINK_CAPACITY = 1000
MANAGEMENT_SUBNET = 192.168.204.0/24
LAG_MANAGEMENT_INTERFACE = no
MANAGEMENT_INTERFACE = eth1
MANAGEMENT_INTERFACE_NAME = eth1
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
CONTROLLER_0_ADDRESS = 192.168.204.103
CONTROLLER_1_ADDRESS = 192.168.204.104
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
CONTROLLER_FLOATING_HOSTNAME = controller
CONTROLLER_HOSTNAME_PREFIX = controller-
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
DYNAMIC_ADDRESS_ALLOCATION = no
MANAGEMENT_START_ADDRESS = 192.168.204.102
MANAGEMENT_END_ADDRESS = 192.168.204.199
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28

[cEXT_OAM]
EXTERNAL_OAM_MTU = 1500
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
LAG_EXTERNAL_OAM_INTERFACE = no
EXTERNAL_OAM_INTERFACE = eth0
EXTERNAL_OAM_INTERFACE_NAME = eth0
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4

[cNETWORK]
VSWITCH_TYPE = nuage_vrs
NEUTRON_L2_AGENT = nuage_vrs
NEUTRON_L3_EXT_BRIDGE = provider
NEUTRON_L2_PLUGIN = NC
NEUTRON_ML2_MECHANISM_DRIVERS = NC
NEUTRON_ML2_SRIOV_AGENT_REQUIRED = NC
NEUTRON_ML2_TYPE_DRIVERS = NC
NEUTRON_ML2_TENANT_NETWORK_TYPES = vlan,vxlan
NEUTRON_HOST_DRIVER = NC
NEUTRON_FM_DRIVER = NC
NEUTRON_NETWORK_SCHEDULER = NC
NEUTRON_ROUTER_SCHEDULER = NC
METADATA_PROXY_SHARED_SECRET = NuageNetworksSharedSecret

[cREGION]
REGION_CONFIG = True
REGION_1_NAME = RegionOne
REGION_2_NAME = RegionTwo
ADMIN_USER_NAME = admin
ADMIN_USER_DOMAIN = Default
ADMIN_PROJECT_NAME = admin
ADMIN_PROJECT_DOMAIN = Default
SERVICE_PROJECT_NAME = service
KEYSTONE_SERVICE_NAME = keystone
KEYSTONE_SERVICE_TYPE = identity
GLANCE_SERVICE_NAME = glance
GLANCE_SERVICE_TYPE = image
GLANCE_CACHED = False
GLANCE_REGION = RegionOne
NOVA_USER_NAME = nova
NOVA_PASSWORD = password2WO*
NOVA_SERVICE_NAME = nova
NOVA_SERVICE_TYPE = compute
PLACEMENT_USER_NAME = placement
PLACEMENT_PASSWORD = password2WO*
PLACEMENT_SERVICE_NAME = placement
PLACEMENT_SERVICE_TYPE = placement
NEUTRON_USER_NAME = neutron
NEUTRON_PASSWORD = password2WO*
NEUTRON_REGION_NAME = RegionOne
NEUTRON_SERVICE_NAME = neutron
NEUTRON_SERVICE_TYPE = network
CEILOMETER_USER_NAME = ceilometer
CEILOMETER_PASSWORD = password2WO*
CEILOMETER_SERVICE_NAME = ceilometer
CEILOMETER_SERVICE_TYPE = metering
PATCHING_USER_NAME = patching
PATCHING_PASSWORD = password2WO*
SYSINV_USER_NAME = sysinv
SYSINV_PASSWORD = password2WO*
SYSINV_SERVICE_NAME = sysinv
SYSINV_SERVICE_TYPE = platform
HEAT_USER_NAME = heat
HEAT_PASSWORD = password2WO*
HEAT_ADMIN_USER_NAME = heat_stack_admin
HEAT_ADMIN_PASSWORD = password2WO*
AODH_USER_NAME = aodh
AODH_PASSWORD = password2WO*
NFV_USER_NAME = vim
NFV_PASSWORD = password2WO*
MTCE_USER_NAME = mtce
MTCE_PASSWORD = password2WO*
PANKO_USER_NAME = panko
PANKO_PASSWORD = password2WO*
USER_DOMAIN_NAME = Default
PROJECT_DOMAIN_NAME = Default
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
GLANCE_ADMIN_URI = http://192.168.204.12:9292/v2
GLANCE_PUBLIC_URI = http://10.10.10.2:9292/v2
GLANCE_INTERNAL_URI = http://192.168.204.12:9292/v2
HEAT_ADMIN_DOMAIN_NAME = heat

[cAUTHENTICATION]
ADMIN_PASSWORD = Li69nux*

@@ -0,0 +1,122 @@
[SYSTEM]
SYSTEM_MODE = duplex

[STORAGE]

;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_LINK_CAPACITY <Mbps>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>

[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_LINK_CAPACITY=1000
INTERFACE_PORTS=eth1

[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
;INTERFACE_LINK_CAPACITY=
INTERFACE_PORTS=eth0

[CLM_NETWORK]
;CLM_VLAN=123
CLM_IP_START_ADDRESS=192.168.204.102
CLM_IP_END_ADDRESS=192.168.204.199
CLM_CIDR=192.168.204.0/24
CLM_MULTICAST_CIDR=239.1.1.0/28
;CLM_GATEWAY=192.168.204.12
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

;[BLS_NETWORK]
;BLS_VLAN=124
;BLS_IP_START_ADDRESS=192.168.205.102
;BLS_IP_END_ADDRESS=192.168.205.199
;BLS_CIDR=192.168.205.0/24
;BLS_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[CAN_NETWORK]
;CAN_VLAN=
CAN_IP_START_ADDRESS=10.10.10.2
CAN_IP_END_ADDRESS=10.10.10.4
CAN_CIDR=10.10.10.0/24
CAN_GATEWAY=10.10.10.1
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2

;[REGION2_PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24

[SHARED_SERVICES]
REGION_NAME=RegionOne
ADMIN_PROJECT_NAME=admin
ADMIN_USER_NAME=admin
ADMIN_PASSWORD=Li69nux*
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
SERVICE_PROJECT_NAME=service
CINDER_SERVICE_NAME=cinder
CINDER_SERVICE_TYPE=volume
CINDER_V2_SERVICE_NAME=cinderv2
CINDER_V2_SERVICE_TYPE=volumev2
CINDER_V3_SERVICE_NAME=cinderv3
CINDER_V3_SERVICE_TYPE=volumev3
GLANCE_SERVICE_NAME=glance
GLANCE_SERVICE_TYPE=image

[REGION_2_SERVICES]
REGION_NAME=RegionTwo
NOVA_USER_NAME=nova
NOVA_PASSWORD=password2WO*
NOVA_SERVICE_NAME=nova
NOVA_SERVICE_TYPE=compute
PLACEMENT_USER_NAME=placement
PLACEMENT_PASSWORD=password2WO*
PLACEMENT_SERVICE_NAME=placement
PLACEMENT_SERVICE_TYPE=placement
NOVA_V3_SERVICE_NAME=novav3
NOVA_V3_SERVICE_TYPE=computev3
NEUTRON_USER_NAME=neutron
NEUTRON_PASSWORD=password2WO*
NEUTRON_SERVICE_NAME=neutron
NEUTRON_SERVICE_TYPE=network
SYSINV_USER_NAME=sysinv
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
PATCHING_USER_NAME=patching
PATCHING_PASSWORD=password2WO*
PATCHING_SERVICE_NAME=patching
PATCHING_SERVICE_TYPE=patching
HEAT_USER_NAME=heat
HEAT_PASSWORD=password2WO*
HEAT_ADMIN_DOMAIN=heat
HEAT_ADMIN_USER_NAME=heat_stack_admin
HEAT_ADMIN_PASSWORD=password2WO*
HEAT_SERVICE_NAME=heat
HEAT_SERVICE_TYPE=orchestration
HEAT_CFN_SERVICE_NAME=heat-cfn
HEAT_CFN_SERVICE_TYPE=cloudformation
CEILOMETER_USER_NAME=ceilometer
CEILOMETER_PASSWORD=password2WO*
CEILOMETER_SERVICE_NAME=ceilometer
CEILOMETER_SERVICE_TYPE=metering
NFV_USER_NAME=vim
NFV_PASSWORD=password2WO*
AODH_USER_NAME=aodh
AODH_PASSWORD=password2WO*
MTCE_USER_NAME=mtce
MTCE_PASSWORD=password2WO*
PANKO_USER_NAME=panko
PANKO_PASSWORD=password2WO*

[VERSION]
RELEASE = 18.03
@@ -0,0 +1,106 @@
[cSYSTEM]
TIMEZONE = UTC
SYSTEM_MODE = duplex

[cPXEBOOT]
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller

[cMGMT]
MANAGEMENT_MTU = 1500
MANAGEMENT_LINK_CAPACITY = 1000
MANAGEMENT_SUBNET = 192.168.204.0/24
LAG_MANAGEMENT_INTERFACE = no
MANAGEMENT_INTERFACE = eth1
MANAGEMENT_INTERFACE_NAME = eth1
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
CONTROLLER_0_ADDRESS = 192.168.204.103
CONTROLLER_1_ADDRESS = 192.168.204.104
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
CONTROLLER_FLOATING_HOSTNAME = controller
CONTROLLER_HOSTNAME_PREFIX = controller-
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
DYNAMIC_ADDRESS_ALLOCATION = no
MANAGEMENT_START_ADDRESS = 192.168.204.102
MANAGEMENT_END_ADDRESS = 192.168.204.199
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28

[cEXT_OAM]
EXTERNAL_OAM_MTU = 1500
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
LAG_EXTERNAL_OAM_INTERFACE = no
EXTERNAL_OAM_INTERFACE = eth0
EXTERNAL_OAM_INTERFACE_NAME = eth0
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4

[cNETWORK]
VSWITCH_TYPE = avs

[cREGION]
REGION_CONFIG = True
REGION_1_NAME = RegionOne
REGION_2_NAME = RegionTwo
ADMIN_USER_NAME = admin
ADMIN_USER_DOMAIN = Default
ADMIN_PROJECT_NAME = admin
ADMIN_PROJECT_DOMAIN = Default
SERVICE_PROJECT_NAME = service
KEYSTONE_SERVICE_NAME = keystone
KEYSTONE_SERVICE_TYPE = identity
GLANCE_SERVICE_NAME = glance
GLANCE_SERVICE_TYPE = image
GLANCE_CACHED = False
GLANCE_REGION = RegionOne
NOVA_USER_NAME = nova
NOVA_PASSWORD = password2WO*
NOVA_SERVICE_NAME = nova
NOVA_SERVICE_TYPE = compute
PLACEMENT_USER_NAME = placement
PLACEMENT_PASSWORD = password2WO*
PLACEMENT_SERVICE_NAME = placement
PLACEMENT_SERVICE_TYPE = placement
NEUTRON_USER_NAME = neutron
NEUTRON_PASSWORD = password2WO*
NEUTRON_REGION_NAME = RegionTwo
NEUTRON_SERVICE_NAME = neutron
NEUTRON_SERVICE_TYPE = network
CEILOMETER_USER_NAME = ceilometer
CEILOMETER_PASSWORD = password2WO*
CEILOMETER_SERVICE_NAME = ceilometer
CEILOMETER_SERVICE_TYPE = metering
PATCHING_USER_NAME = patching
PATCHING_PASSWORD = password2WO*
SYSINV_USER_NAME = sysinv
SYSINV_PASSWORD = password2WO*
SYSINV_SERVICE_NAME = sysinv
SYSINV_SERVICE_TYPE = platform
HEAT_USER_NAME = heat
HEAT_PASSWORD = password2WO*
HEAT_ADMIN_USER_NAME = heat_stack_admin
HEAT_ADMIN_PASSWORD = password2WO*
AODH_USER_NAME = aodh
AODH_PASSWORD = password2WO*
NFV_USER_NAME = vim
NFV_PASSWORD = password2WO*
MTCE_USER_NAME = mtce
MTCE_PASSWORD = password2WO*
PANKO_USER_NAME = panko
PANKO_PASSWORD = password2WO*
USER_DOMAIN_NAME = Default
PROJECT_DOMAIN_NAME = Default
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
GLANCE_ADMIN_URI = http://192.168.204.12:9292/v2
GLANCE_PUBLIC_URI = http://10.10.10.2:9292/v2
GLANCE_INTERNAL_URI = http://192.168.204.12:9292/v2
HEAT_ADMIN_DOMAIN_NAME = heat

[cAUTHENTICATION]
ADMIN_PASSWORD = Li69nux*

@@ -0,0 +1,122 @@
[SYSTEM]
SYSTEM_MODE = duplex

[STORAGE]

;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_LINK_CAPACITY <Mbps>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>

[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_LINK_CAPACITY=1000
INTERFACE_PORTS=eth1

[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
;INTERFACE_LINK_CAPACITY=
INTERFACE_PORTS=eth0

[CLM_NETWORK]
;CLM_VLAN=123
CLM_IP_START_ADDRESS=192.168.204.102
CLM_IP_END_ADDRESS=192.168.204.199
CLM_CIDR=192.168.204.0/24
CLM_MULTICAST_CIDR=239.1.1.0/28
;CLM_GATEWAY=192.168.204.12
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

;[BLS_NETWORK]
;BLS_VLAN=124
;BLS_IP_START_ADDRESS=192.168.205.102
;BLS_IP_END_ADDRESS=192.168.205.199
;BLS_CIDR=192.168.205.0/24
;BLS_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[CAN_NETWORK]
;CAN_VLAN=
CAN_IP_START_ADDRESS=10.10.10.2
CAN_IP_END_ADDRESS=10.10.10.4
CAN_CIDR=10.10.10.0/24
CAN_GATEWAY=10.10.10.1
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2

;[REGION2_PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24

[SHARED_SERVICES]
REGION_NAME=RegionOne
ADMIN_PROJECT_NAME=admin
ADMIN_USER_NAME=admin
ADMIN_PASSWORD=Li69nux*
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
SERVICE_PROJECT_NAME=service
CINDER_SERVICE_NAME=cinder
CINDER_SERVICE_TYPE=volume
CINDER_V2_SERVICE_NAME=cinderv2
CINDER_V2_SERVICE_TYPE=volumev2
CINDER_V3_SERVICE_NAME=cinderv3
CINDER_V3_SERVICE_TYPE=volumev3
GLANCE_SERVICE_NAME=glance
GLANCE_SERVICE_TYPE=image

[REGION_2_SERVICES]
REGION_NAME=RegionTwo
NOVA_USER_NAME=nova
NOVA_PASSWORD=password2WO*
NOVA_SERVICE_NAME=nova
NOVA_SERVICE_TYPE=compute
NOVA_V3_SERVICE_NAME=novav3
NOVA_V3_SERVICE_TYPE=computev3
PLACEMENT_USER_NAME=placement
PLACEMENT_PASSWORD=password2WO*
PLACEMENT_SERVICE_NAME=placement
PLACEMENT_SERVICE_TYPE=placement
NEUTRON_USER_NAME=neutron
NEUTRON_PASSWORD=password2WO*
NEUTRON_SERVICE_NAME=neutron
NEUTRON_SERVICE_TYPE=network
SYSINV_USER_NAME=sysinv
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
PATCHING_USER_NAME=patching
PATCHING_PASSWORD=password2WO*
PATCHING_SERVICE_NAME=patching
PATCHING_SERVICE_TYPE=patching
HEAT_USER_NAME=heat
HEAT_PASSWORD=password2WO*
HEAT_ADMIN_DOMAIN=heat
HEAT_ADMIN_USER_NAME=heat_stack_admin
HEAT_ADMIN_PASSWORD=password2WO*
HEAT_SERVICE_NAME=heat
HEAT_SERVICE_TYPE=orchestration
HEAT_CFN_SERVICE_NAME=heat-cfn
HEAT_CFN_SERVICE_TYPE=cloudformation
CEILOMETER_USER_NAME=ceilometer
CEILOMETER_PASSWORD=password2WO*
CEILOMETER_SERVICE_NAME=ceilometer
CEILOMETER_SERVICE_TYPE=metering
NFV_USER_NAME=vim
NFV_PASSWORD=password2WO*
AODH_USER_NAME=aodh
AODH_PASSWORD=password2WO*
MTCE_USER_NAME=mtce
MTCE_PASSWORD=password2WO*
PANKO_USER_NAME=panko
PANKO_PASSWORD=password2WO*

[VERSION]
RELEASE = 18.03
@@ -0,0 +1,123 @@
[SYSTEM]
SYSTEM_MODE = duplex

[STORAGE]

;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_LINK_CAPACITY <Mbps>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>

[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_LINK_CAPACITY=1000
INTERFACE_PORTS=eth1

[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
;INTERFACE_LINK_CAPACITY=
INTERFACE_PORTS=eth0

[CLM_NETWORK]
;CLM_VLAN=123
CLM_IP_START_ADDRESS=192.168.204.102
CLM_IP_END_ADDRESS=192.168.204.199
CLM_CIDR=192.168.204.0/24
CLM_MULTICAST_CIDR=239.1.1.0/28
;CLM_GATEWAY=192.168.204.12
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

;[BLS_NETWORK]
;BLS_VLAN=124
;BLS_IP_START_ADDRESS=192.168.205.102
;BLS_IP_END_ADDRESS=192.168.205.199
;BLS_CIDR=192.168.205.0/24
;BLS_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[CAN_NETWORK]
;CAN_VLAN=
CAN_IP_FLOATING_ADDRESS=10.10.10.2
CAN_IP_UNIT_0_ADDRESS=10.10.10.3
CAN_IP_UNIT_1_ADDRESS=10.10.10.4
CAN_CIDR=10.10.10.0/24
CAN_GATEWAY=10.10.10.1
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2

;[REGION2_PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24

[SHARED_SERVICES]
REGION_NAME=RegionOne
ADMIN_PROJECT_NAME=admin
ADMIN_USER_NAME=admin
ADMIN_PASSWORD=Li69nux*
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_SERVICE_NAME=keystone
KEYSTONE_SERVICE_TYPE=identity
SERVICE_PROJECT_NAME=service
CINDER_SERVICE_NAME=cinder
CINDER_SERVICE_TYPE=volume
CINDER_V2_SERVICE_NAME=cinderv2
CINDER_V2_SERVICE_TYPE=volumev2
CINDER_V3_SERVICE_NAME=cinderv3
CINDER_V3_SERVICE_TYPE=volumev3
GLANCE_SERVICE_NAME=glance
GLANCE_SERVICE_TYPE=image

[REGION_2_SERVICES]
REGION_NAME=RegionTwo
NOVA_USER_NAME=nova
NOVA_PASSWORD=password2WO*
NOVA_SERVICE_NAME=nova
NOVA_SERVICE_TYPE=compute
PLACEMENT_USER_NAME=placement
PLACEMENT_PASSWORD=password2WO*
PLACEMENT_SERVICE_NAME=placement
PLACEMENT_SERVICE_TYPE=placement
NOVA_V3_SERVICE_NAME=novav3
NOVA_V3_SERVICE_TYPE=computev3
NEUTRON_USER_NAME=neutron
NEUTRON_PASSWORD=password2WO*
NEUTRON_SERVICE_NAME=neutron
NEUTRON_SERVICE_TYPE=network
SYSINV_USER_NAME=sysinv
SYSINV_PASSWORD=password2WO*
SYSINV_SERVICE_NAME=sysinv
SYSINV_SERVICE_TYPE=platform
PATCHING_USER_NAME=patching
PATCHING_PASSWORD=password2WO*
PATCHING_SERVICE_NAME=patching
PATCHING_SERVICE_TYPE=patching
HEAT_USER_NAME=heat
HEAT_PASSWORD=password2WO*
HEAT_ADMIN_DOMAIN=heat
HEAT_ADMIN_USER_NAME=heat_stack_admin
HEAT_ADMIN_PASSWORD=password2WO*
HEAT_SERVICE_NAME=heat
HEAT_SERVICE_TYPE=orchestration
HEAT_CFN_SERVICE_NAME=heat-cfn
HEAT_CFN_SERVICE_TYPE=cloudformation
CEILOMETER_USER_NAME=ceilometer
CEILOMETER_PASSWORD=password2WO*
CEILOMETER_SERVICE_NAME=ceilometer
CEILOMETER_SERVICE_TYPE=metering
NFV_USER_NAME=vim
NFV_PASSWORD=password2WO*
AODH_USER_NAME=aodh
AODH_PASSWORD=password2WO*
MTCE_USER_NAME=mtce
MTCE_PASSWORD=password2WO*
PANKO_USER_NAME=panko
PANKO_PASSWORD=password2WO*

[VERSION]
RELEASE = 18.03
@@ -0,0 +1,106 @@
[cSYSTEM]
TIMEZONE = UTC
SYSTEM_MODE = duplex

[cPXEBOOT]
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller

[cMGMT]
MANAGEMENT_MTU = 1500
MANAGEMENT_LINK_CAPACITY = 1000
MANAGEMENT_SUBNET = 192.168.204.0/24
LAG_MANAGEMENT_INTERFACE = no
MANAGEMENT_INTERFACE = eth1
MANAGEMENT_INTERFACE_NAME = eth1
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
CONTROLLER_0_ADDRESS = 192.168.204.103
CONTROLLER_1_ADDRESS = 192.168.204.104
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
CONTROLLER_FLOATING_HOSTNAME = controller
CONTROLLER_HOSTNAME_PREFIX = controller-
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
DYNAMIC_ADDRESS_ALLOCATION = no
MANAGEMENT_START_ADDRESS = 192.168.204.102
MANAGEMENT_END_ADDRESS = 192.168.204.199
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28

[cEXT_OAM]
EXTERNAL_OAM_MTU = 1500
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
LAG_EXTERNAL_OAM_INTERFACE = no
EXTERNAL_OAM_INTERFACE = eth0
EXTERNAL_OAM_INTERFACE_NAME = eth0
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4

[cNETWORK]
VSWITCH_TYPE = avs

[cREGION]
REGION_CONFIG = True
REGION_1_NAME = RegionOne
REGION_2_NAME = RegionTwo
ADMIN_USER_NAME = admin
ADMIN_USER_DOMAIN = Default
ADMIN_PROJECT_NAME = admin
ADMIN_PROJECT_DOMAIN = Default
SERVICE_PROJECT_NAME = service
KEYSTONE_SERVICE_NAME = keystone
KEYSTONE_SERVICE_TYPE = identity
GLANCE_SERVICE_NAME = glance
GLANCE_SERVICE_TYPE = image
GLANCE_CACHED = False
GLANCE_REGION = RegionOne
NOVA_USER_NAME = nova
NOVA_PASSWORD = password2WO*
NOVA_SERVICE_NAME = nova
NOVA_SERVICE_TYPE = compute
PLACEMENT_USER_NAME = placement
PLACEMENT_PASSWORD = password2WO*
PLACEMENT_SERVICE_NAME = placement
PLACEMENT_SERVICE_TYPE = placement
NEUTRON_USER_NAME = neutron
NEUTRON_PASSWORD = password2WO*
NEUTRON_REGION_NAME = RegionTwo
NEUTRON_SERVICE_NAME = neutron
NEUTRON_SERVICE_TYPE = network
CEILOMETER_USER_NAME = ceilometer
CEILOMETER_PASSWORD = password2WO*
CEILOMETER_SERVICE_NAME = ceilometer
CEILOMETER_SERVICE_TYPE = metering
PATCHING_USER_NAME = patching
PATCHING_PASSWORD = password2WO*
SYSINV_USER_NAME = sysinv
SYSINV_PASSWORD = password2WO*
SYSINV_SERVICE_NAME = sysinv
SYSINV_SERVICE_TYPE = platform
HEAT_USER_NAME = heat
HEAT_PASSWORD = password2WO*
HEAT_ADMIN_USER_NAME = heat_stack_admin
HEAT_ADMIN_PASSWORD = password2WO*
AODH_USER_NAME = aodh
AODH_PASSWORD = password2WO*
NFV_USER_NAME = vim
NFV_PASSWORD = password2WO*
MTCE_USER_NAME = mtce
MTCE_PASSWORD = password2WO*
PANKO_USER_NAME = panko
PANKO_PASSWORD = password2WO*
USER_DOMAIN_NAME = Default
PROJECT_DOMAIN_NAME = Default
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
GLANCE_ADMIN_URI = http://192.168.204.12:9292/v2
GLANCE_PUBLIC_URI = http://10.10.10.2:9292/v2
GLANCE_INTERNAL_URI = http://192.168.204.12:9292/v2
HEAT_ADMIN_DOMAIN_NAME = heat

[cAUTHENTICATION]
ADMIN_PASSWORD = Li69nux*

@@ -0,0 +1,67 @@
[SYSTEM]
SYSTEM_MODE = duplex

;LOGICAL_INTERFACE_<number>
; LAG_INTERFACE <Y/N>
; LAG_MODE One of 1) Active-backup policy
; 2) Balanced XOR policy
; 4) 802.3ad (LACP) policy
; Interface for pxebooting can only be LACP
; INTERFACE_MTU <mtu size>
; INTERFACE_LINK_CAPACITY <Mbps>
; INTERFACE_PORTS <comma separated list of ethernet interfaces>

[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_LINK_CAPACITY=1000
INTERFACE_PORTS=eth1

[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
;INTERFACE_LINK_CAPACITY=
INTERFACE_PORTS=eth0

[MGMT_NETWORK]
;VLAN=123
IP_START_ADDRESS=192.168.204.2
IP_END_ADDRESS=192.168.204.99
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
DYNAMIC_ALLOCATION=Y
;GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[INFRA_NETWORK]
VLAN=124
IP_START_ADDRESS=192.168.205.102
IP_END_ADDRESS=192.168.205.199
DYNAMIC_ALLOCATION=Y
CIDR=192.168.205.0/24
MULTICAST_CIDR=239.1.1.0/28
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[OAM_NETWORK]
;VLAN=
IP_START_ADDRESS=10.10.10.2
IP_END_ADDRESS=10.10.10.4
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2

;[PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24

;[BOARD_MANAGEMENT_NETWORK]
;VLAN=1
;MTU=1496
;SUBNET=192.168.203.0/24

[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*

[VERSION]
RELEASE = 18.03
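In the file above, [MGMT_NETWORK] hands out 192.168.204.2 through 192.168.204.99 dynamically (DYNAMIC_ALLOCATION=Y). A short sketch, again assuming netaddr, of checking that the pool stays inside the CIDR and of computing its size:

from netaddr import IPAddress, IPNetwork

start = IPAddress('192.168.204.2')
end = IPAddress('192.168.204.99')
cidr = IPNetwork('192.168.204.0/24')
assert start in cidr and end in cidr   # the pool must stay inside the CIDR
print(int(end) - int(start) + 1)       # 98 addresses in the dynamic pool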
@@ -0,0 +1,64 @@
;LOGICAL_INTERFACE_<number>
;  LAG_INTERFACE <Y/N>
;  LAG_MODE One of 1) Active-backup policy
;                  2) Balanced XOR policy
;                  4) 802.3ad (LACP) policy
;    Interface for pxebooting can only be LACP
;  INTERFACE_MTU <mtu size>
;  INTERFACE_LINK_CAPACITY <Mbps>
;  INTERFACE_PORTS <comma separated list of ethernet interfaces>

[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_LINK_CAPACITY=1000
INTERFACE_PORTS=eth1

[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
;INTERFACE_LINK_CAPACITY=
INTERFACE_PORTS=eth0

[MGMT_NETWORK]
;VLAN=123
CIDR=1234::/64
MULTICAST_CIDR=ff08::1:1:0/124
DYNAMIC_ALLOCATION=Y
;GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

;[INFRA_NETWORK]
;VLAN=124
;IP_START_ADDRESS=192.168.205.102
;IP_END_ADDRESS=192.168.205.199
;DYNAMIC_ALLOCATION=Y
;CIDR=192.168.205.0/24
;LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[OAM_NETWORK]
;VLAN=
;IP_START_ADDRESS=abcd::2
;IP_END_ADDRESS=abcd::4
IP_FLOATING_ADDRESS=abcd::2
IP_UNIT_0_ADDRESS=abcd::3
IP_UNIT_1_ADDRESS=abcd::4
CIDR=abcd::/64
GATEWAY=abcd::1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2

;[PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24

;[BOARD_MANAGEMENT_NETWORK]
;VLAN=1
;MTU=1496
;SUBNET=192.168.203.0/24

[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*

[VERSION]
RELEASE = 18.03
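The IPv6 variant above feeds CIDRs such as 1234::/64 and abcd::/64 through the same validation paths. A hedged sketch (assuming python-netaddr, which controllerconfig already requires; cidr_is_usable is an illustrative name, not a function from this commit) of the host-bits check that later test cases expect to reject values like 192.168.1.4/24:

from netaddr import AddrFormatError, IPNetwork

def cidr_is_usable(value):
    # Reject malformed CIDRs and CIDRs with host bits set.
    try:
        net = IPNetwork(value)
    except AddrFormatError:
        return False
    return net.ip == net.network

assert cidr_is_usable('abcd::/64')
assert not cidr_is_usable('192.168.1.4/24')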
@@ -0,0 +1,57 @@
[SYSTEM]
SYSTEM_MODE=duplex

;LOGICAL_INTERFACE_<number>
;  LAG_INTERFACE <Y/N>
;  LAG_MODE One of 1) Active-backup policy
;                  2) Balanced XOR policy
;                  4) 802.3ad (LACP) policy
;    Interface for pxebooting can only be LACP
;  INTERFACE_MTU <mtu size>
;  INTERFACE_LINK_CAPACITY <Mbps>
;  INTERFACE_PORTS <comma separated list of ethernet interfaces>

[LOGICAL_INTERFACE_1]
LAG_INTERFACE=Y
LAG_MODE=4
INTERFACE_MTU=1500
INTERFACE_LINK_CAPACITY=1000
INTERFACE_PORTS=eth1,eth2

[MGMT_NETWORK]
VLAN=123
IP_START_ADDRESS=192.168.204.102
IP_END_ADDRESS=192.168.204.199
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[INFRA_NETWORK]
VLAN=124
IP_START_ADDRESS=192.168.205.102
IP_END_ADDRESS=192.168.205.199
CIDR=192.168.205.0/24
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[OAM_NETWORK]
VLAN=125
IP_START_ADDRESS=10.10.10.2
IP_END_ADDRESS=10.10.10.4
CIDR=10.10.10.0/24
;GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[PXEBOOT_NETWORK]
PXEBOOT_CIDR=192.168.203.0/24

;[BOARD_MANAGEMENT_NETWORK]
;VLAN=1
;MTU=1496
;SUBNET=192.168.203.0/24

[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*

[VERSION]
RELEASE = 18.03
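The LAG_MODE numbers in this file follow the comment block above, which mirrors the standard Linux bonding mode numbering; mode 4 (802.3ad/LACP), used here, is the only mode permitted on a pxeboot interface. For illustration only, a hypothetical lookup table:

# Hypothetical helper; the numbering comes from the LAG_MODE comments above.
BOND_MODE_NAMES = {1: 'active-backup', 2: 'balance-xor', 4: '802.3ad (LACP)'}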
@@ -0,0 +1,61 @@
;LOGICAL_INTERFACE_<number>
;  LAG_INTERFACE <Y/N>
;  LAG_MODE One of 1) Active-backup policy
;                  2) Balanced XOR policy
;                  4) 802.3ad (LACP) policy
;    Interface for pxebooting can only be LACP
;  INTERFACE_MTU <mtu size>
;  INTERFACE_LINK_CAPACITY <Mbps>
;  INTERFACE_PORTS <comma separated list of ethernet interfaces>

[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_LINK_CAPACITY=1000
INTERFACE_PORTS=eth1

[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
;INTERFACE_LINK_CAPACITY=
INTERFACE_PORTS=eth0

[MGMT_NETWORK]
;VLAN=123
IP_START_ADDRESS=192.168.204.102
IP_END_ADDRESS=192.168.204.199
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
;GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

;[INFRA_NETWORK]
;VLAN=124
;IP_START_ADDRESS=192.168.205.102
;IP_END_ADDRESS=192.168.205.199
;CIDR=192.168.205.0/24
;LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[OAM_NETWORK]
;VLAN=
IP_START_ADDRESS=10.10.10.2
IP_END_ADDRESS=10.10.10.4
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2

;[PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24

[BOARD_MANAGEMENT_NETWORK]
VLAN=1
MTU=1496
SUBNET=192.168.203.0/24

[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*

[VERSION]
RELEASE = 18.03
@@ -0,0 +1,74 @@
;[DNS]
;NAMESERVER_1=8.8.8.8
;NAMESERVER_2=8.8.4.4
;NAMESERVER_3=

;[NTP]
;NTP_SERVER_1=0.pool.ntp.org
;NTP_SERVER_2=1.pool.ntp.org
;NTP_SERVER_3=2.pool.ntp.org

;LOGICAL_INTERFACE_<number>
;  LAG_INTERFACE <Y/N>
;  LAG_MODE One of 1) Active-backup policy
;                  2) Balanced XOR policy
;                  4) 802.3ad (LACP) policy
;    Interface for pxebooting can only be LACP
;  INTERFACE_MTU <mtu size>
;  INTERFACE_LINK_CAPACITY <Mbps>
;  INTERFACE_PORTS <comma separated list of ethernet interfaces>

[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_LINK_CAPACITY=1000
INTERFACE_PORTS=eth1

[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
;INTERFACE_LINK_CAPACITY=
INTERFACE_PORTS=eth0

[MGMT_NETWORK]
;VLAN=123
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
DYNAMIC_ALLOCATION=Y
;GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

;[INFRA_NETWORK]
;VLAN=124
;IP_START_ADDRESS=192.168.205.102
;IP_END_ADDRESS=192.168.205.199
;DYNAMIC_ALLOCATION=Y
;CIDR=192.168.205.0/24
;LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[OAM_NETWORK]
;VLAN=
;IP_START_ADDRESS=10.10.10.2
;IP_END_ADDRESS=10.10.10.4
IP_FLOATING_ADDRESS=10.10.10.20
IP_UNIT_0_ADDRESS=10.10.10.30
IP_UNIT_1_ADDRESS=10.10.10.40
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2

;[PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24

;[BOARD_MANAGEMENT_NETWORK]
;VLAN=1
;MTU=1496
;SUBNET=192.168.203.0/24

[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*

[VERSION]
RELEASE = 18.03
@@ -0,0 +1,49 @@
;[DNS]
;NAMESERVER_1=8.8.8.8
;NAMESERVER_2=8.8.4.4
;NAMESERVER_3=

;[NTP]
;NTP_SERVER_1=0.pool.ntp.org
;NTP_SERVER_2=1.pool.ntp.org
;NTP_SERVER_3=2.pool.ntp.org

;LOGICAL_INTERFACE_<number>
;  LAG_INTERFACE <Y/N>
;  LAG_MODE One of 1) Active-backup policy
;                  2) Balanced XOR policy
;                  4) 802.3ad (LACP) policy
;    Interface for pxebooting can only be LACP
;  INTERFACE_MTU <mtu size>
;  INTERFACE_LINK_CAPACITY <Mbps>
;  INTERFACE_PORTS <comma separated list of ethernet interfaces>

[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_LINK_CAPACITY=1000
INTERFACE_PORTS=eth1

[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
;INTERFACE_LINK_CAPACITY=
INTERFACE_PORTS=eth0

[OAM_NETWORK]
IP_ADDRESS=10.10.10.20
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2

[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*

[VERSION]
RELEASE = 18.03

[SYSTEM]
SYSTEM_TYPE=All-in-one
SYSTEM_MODE=simplex
@@ -0,0 +1,63 @@
;LOGICAL_INTERFACE_<number>
;  LAG_INTERFACE <Y/N>
;  LAG_MODE One of 1) Active-backup policy
;                  2) Balanced XOR policy
;                  4) 802.3ad (LACP) policy
;    Interface for pxebooting can only be LACP
;  INTERFACE_MTU <mtu size>
;  INTERFACE_LINK_CAPACITY <Mbps>
;  INTERFACE_PORTS <comma separated list of ethernet interfaces>

[LOGICAL_INTERFACE_1]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
INTERFACE_LINK_CAPACITY=1000
INTERFACE_PORTS=eth1

[LOGICAL_INTERFACE_2]
LAG_INTERFACE=N
;LAG_MODE=
INTERFACE_MTU=1500
;INTERFACE_LINK_CAPACITY=
INTERFACE_PORTS=eth0

[MGMT_NETWORK]
;VLAN=123
IP_START_ADDRESS=192.168.204.20
IP_END_ADDRESS=192.168.204.99
CIDR=192.168.204.0/24
MULTICAST_CIDR=239.1.1.0/28
DYNAMIC_ALLOCATION=N
;GATEWAY=192.168.204.12
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

;[INFRA_NETWORK]
;VLAN=124
;IP_START_ADDRESS=192.168.205.102
;IP_END_ADDRESS=192.168.205.199
;DYNAMIC_ALLOCATION=N
;CIDR=192.168.205.0/24
;LOGICAL_INTERFACE=LOGICAL_INTERFACE_1

[OAM_NETWORK]
;VLAN=
IP_START_ADDRESS=10.10.10.2
IP_END_ADDRESS=10.10.10.4
CIDR=10.10.10.0/24
GATEWAY=10.10.10.1
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2

;[PXEBOOT_NETWORK]
;PXEBOOT_CIDR=192.168.203.0/24

;[BOARD_MANAGEMENT_NETWORK]
;VLAN=1
;MTU=1496
;SUBNET=192.168.203.0/24

[AUTHENTICATION]
ADMIN_PASSWORD=Li69nux*

[VERSION]
RELEASE = 18.03
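Several of the files above pair IP_START_ADDRESS/IP_END_ADDRESS with a CIDR. A hedged sketch (assuming python-netaddr; range_in_subnet is an illustrative name, not code from this commit) of the containment check such a pair implies:

from netaddr import IPAddress, IPNetwork

def range_in_subnet(start, end, cidr):
    # True when start..end is an ordered range inside cidr.
    net = IPNetwork(cidr)
    start, end = IPAddress(start), IPAddress(end)
    return start in net and end in net and start <= end

assert range_in_subnet('192.168.204.20', '192.168.204.99', '192.168.204.0/24')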
@@ -0,0 +1,95 @@
"""
Copyright (c) 2014 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

import difflib
import filecmp
import os
from mock import patch

import controllerconfig.configassistant as ca
import controllerconfig.common.constants as constants


@patch('controllerconfig.configassistant.get_rootfs_node')
@patch('controllerconfig.configassistant.get_net_device_list')
def _test_answerfile(tmpdir, filename,
                     mock_get_net_device_list,
                     mock_get_rootfs_node,
                     compare_results=True):
    """ Test import and generation of answerfile """
    mock_get_net_device_list.return_value = \
        ['eth0', 'eth1', 'eth2']
    mock_get_rootfs_node.return_value = '/dev/sda'

    assistant = ca.ConfigAssistant()

    # Create the path to the answerfile
    answerfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", filename)

    # Input the config from the answerfile
    assistant.input_config_from_file(answerfile)

    # Test the display method
    print "Output from display_config:"
    assistant.display_config()

    # Ensure we can write the configuration
    constants.CONFIG_WORKDIR = os.path.join(str(tmpdir), 'config_workdir')
    constants.CGCS_CONFIG_FILE = os.path.join(constants.CONFIG_WORKDIR,
                                              'cgcs_config')
    assistant.write_config_file()

    # Add the password to the generated file so it can be compared with the
    # answerfile
    with open(constants.CGCS_CONFIG_FILE, 'a') as f:
        f.write("\n[cAUTHENTICATION]\nADMIN_PASSWORD=Li69nux*\n")

    # Do a diff between the answerfile and the generated config file
    print "\n\nDiff of answerfile vs. generated config file:\n"
    with open(answerfile) as a, open(constants.CGCS_CONFIG_FILE) as b:
        a_lines = a.readlines()
        b_lines = b.readlines()

        differ = difflib.Differ()
        diff = differ.compare(a_lines, b_lines)
        print(''.join(diff))

    if compare_results:
        # Fail the testcase if the answerfile and generated config file don't
        # match.
        assert filecmp.cmp(answerfile, constants.CGCS_CONFIG_FILE)


def test_answerfile_default(tmpdir):
    """ Test import of answerfile with default values """

    _test_answerfile(tmpdir, "cgcs_config.default")


def test_answerfile_ipv6(tmpdir):
    """ Test import of answerfile with ipv6 oam values """

    _test_answerfile(tmpdir, "cgcs_config.ipv6")


def test_answerfile_ceph(tmpdir):
    """ Test import of answerfile with ceph backend values """

    _test_answerfile(tmpdir, "cgcs_config.ceph")


def test_answerfile_region(tmpdir):
    """ Test import of answerfile with region values """

    _test_answerfile(tmpdir, "cgcs_config.region")


def test_answerfile_region_nuage_vrs(tmpdir):
    """ Test import of answerfile with region values for nuage_vrs"""

    _test_answerfile(tmpdir, "cgcs_config.region_nuage_vrs")
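A note on the decorator stacking in _test_answerfile above: with mock, the decorator closest to the function supplies the first injected argument, which is why mock_get_net_device_list precedes mock_get_rootfs_node in the signature. A self-contained illustration of that general mock behaviour (not code from this commit):

import os
from mock import patch

@patch('os.path.isdir')       # outermost decorator -> last mock argument
@patch('os.path.isfile')      # innermost decorator -> first mock argument
def _check(mock_isfile, mock_isdir):
    mock_isfile.return_value = True
    mock_isdir.return_value = False
    assert os.path.isfile('x') and not os.path.isdir('y')

_check()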
857
controllerconfig/controllerconfig/controllerconfig/tests/test_region_config.py
Executable file
@@ -0,0 +1,857 @@
"""
Copyright (c) 2014-2017 Wind River Systems, Inc.

SPDX-License-Identifier: Apache-2.0

"""

import ConfigParser
import difflib
import filecmp
import fileinput
from mock import patch
import os
import pytest
import shutil

import controllerconfig.systemconfig as cr
import configutilities.common.exceptions as exceptions
from configutilities import validate, REGION_CONFIG
import controllerconfig.common.keystone as keystone
import test_answerfile


FAKE_SERVICE_DATA = {u'services': [
    {u'type': u'keystore', u'description': u'Barbican Key Management Service',
     u'enabled': True, u'id': u'9029af23540f4eecb0b7f70ac5e00152',
     u'name': u'barbican'},
    {u'type': u'network', u'description': u'OpenStack Networking service',
     u'enabled': True, u'id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'name': u'neutron'}, {u'type': u'cloudformation',
                            u'description':
                                u'OpenStack Cloudformation Service',
                            u'enabled': True,
                            u'id': u'abbf431acb6d45919cfbefe55a0f27fa',
                            u'name': u'heat-cfn'},
    {u'type': u'object-store', u'description': u'OpenStack object-store',
     u'enabled': True, u'id': u'd588956f759f4bbda9e65a1019902b9c',
     u'name': u'swift'},
    {u'type': u'metering', u'description': u'OpenStack Metering Service',
     u'enabled': True, u'id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
     u'name': u'ceilometer'},
    {u'type': u'volumev2',
     u'description': u'OpenStack Volume Service v2.0 API',
     u'enabled': True, u'id': u'e6e356112daa4af588d9b9dadcf98bc4',
     u'name': u'cinderv2'},
    {u'type': u'volume', u'description': u'OpenStack Volume Service',
     u'enabled': True, u'id': u'505aa37457774e55b545654aa8630822',
     u'name': u'cinder'}, {u'type': u'orchestration',
                           u'description': u'OpenStack Orchestration Service',
                           u'enabled': True,
                           u'id': u'5765bee52eec43bb8e0632ecb225d0e3',
                           u'name': u'heat'},
    {u'type': u'compute', u'description': u'OpenStack Compute Service',
     u'enabled': True, u'id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'name': u'nova'},
    {u'type': u'identity', u'description': u'OpenStack Identity',
     u'enabled': True, u'id': u'1fe7b1de187b47228fe853fbbd149664',
     u'name': u'keystone'},
    {u'type': u'image', u'description': u'OpenStack Image Service',
     u'enabled': True, u'id': u'd41750c98a864fdfb25c751b4ad84996',
     u'name': u'glance'},
    {u'type': u'database', u'description': u'Trove Database As A Service',
     u'enabled': True, u'id': u'82265e39a77b4097bd8aee4f78e13867',
     u'name': u'trove'},
    {u'type': u'patching', u'description': u'Patching Service',
     u'enabled': True, u'id': u'8515c4f28f9346199eb8704bca4f5db4',
     u'name': u'patching'},
    {u'type': u'platform', u'description': u'SysInv Service', u'enabled': True,
     u'id': u'08758bed8d894ddaae744a97db1080b3', u'name': u'sysinv'},
    {u'type': u'computev3', u'description': u'Openstack Compute Service v3',
     u'enabled': True, u'id': u'959f2214543a47549ffd8c66f98d27d4',
     u'name': u'novav3'}]}

FAKE_ENDPOINT_DATA = {u'endpoints': [
    {u'url': u'http://192.168.204.12:8776/v1/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'505aa37457774e55b545654aa8630822',
     u'id': u'de19beb4a4924aa1ba25af3ee64e80a0',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8776/v1/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'505aa37457774e55b545654aa8630822',
     u'id': u'de19beb4a4924aa1ba25af3ee64e80a1',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8776/v1/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'505aa37457774e55b545654aa8630822',
     u'id': u'de19beb4a4924aa1ba25af3ee64e80a2',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:8774/v2/%(tenant_id)s',
     u'region': u'RegionTwo', u'enabled': True,
     u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'id': u'373259a6bbcf493b86c9f9530e86d323',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:8774/v2/%(tenant_id)s',
     u'region': u'RegionTwo', u'enabled': True,
     u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'id': u'373259a6bbcf493b86c9f9530e86d324',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8774/v2/%(tenant_id)s',
     u'region': u'RegionTwo', u'enabled': True,
     u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'id': u'373259a6bbcf493b86c9f9530e86d324',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:8004/v1/%(tenant_id)s',
     u'region': u'RegionTwo', u'enabled': True,
     u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
     u'id': u'c51dc9354b5a41c9883ec3871b9fd271',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:8004/v1/%(tenant_id)s',
     u'region': u'RegionTwo', u'enabled': True,
     u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
     u'id': u'c51dc9354b5a41c9883ec3871b9fd272',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8004/v1/%(tenant_id)s',
     u'region': u'RegionTwo', u'enabled': True,
     u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
     u'id': u'c51dc9354b5a41c9883ec3871b9fd273',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8000/v1', u'region': u'RegionOne',
     u'enabled': True, u'interface': u'admin',
     u'id': u'e132bb9dd0fe459687c3b04074bcb1ac',
     u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'},
    {u'url': u'http://192.168.204.12:8000/v1', u'region': u'RegionOne',
     u'enabled': True, u'interface': u'internal',
     u'id': u'e132bb9dd0fe459687c3b04074bcb1ad',
     u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'},
    {u'url': u'http://10.10.10.2:8000/v1', u'region': u'RegionOne',
     u'enabled': True, u'interface': u'public',
     u'id': u'e132bb9dd0fe459687c3b04074bcb1ae',
     u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'},

    {u'url': u'http://192.168.204.102:8774/v3', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'959f2214543a47549ffd8c66f98d27d4',
     u'id': u'031bfbfd581f4a42b361f93fdc4fe266',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:8774/v3', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'959f2214543a47549ffd8c66f98d27d4',
     u'id': u'031bfbfd581f4a42b361f93fdc4fe267',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8774/v3', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'959f2214543a47549ffd8c66f98d27d4',
     u'id': u'031bfbfd581f4a42b361f93fdc4fe268',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8081/keystone/admin/v2.0',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'1fe7b1de187b47228fe853fbbd149664',
     u'id': u'6fa36df1cc4f4e97a1c12767c8a1159f',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8081/keystone/main/v2.0',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'1fe7b1de187b47228fe853fbbd149664',
     u'id': u'6fa36df1cc4f4e97a1c12767c8a11510',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8081/keystone/main/v2.0',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'1fe7b1de187b47228fe853fbbd149664',
     u'id': u'6fa36df1cc4f4e97a1c12767c8a11512',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:9696/', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'id': u'74a7a918dd854b66bb33f1e4e0e768bc',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:9696/', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'id': u'74a7a918dd854b66bb33f1e4e0e768bd',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:9696/', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'id': u'74a7a918dd854b66bb33f1e4e0e768be',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:6385/v1', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'08758bed8d894ddaae744a97db1080b3',
     u'id': u'd8ae3a69f08046d1a8f031bbd65381a3',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:6385/v1', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'08758bed8d894ddaae744a97db1080b3',
     u'id': u'd8ae3a69f08046d1a8f031bbd65381a4',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:6385/v1', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'08758bed8d894ddaae744a97db1080b5',
     u'id': u'd8ae3a69f08046d1a8f031bbd65381a3',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8004/v1/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
     u'id': u'61ad227efa3b4cdd867618041a7064dc',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8004/v1/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
     u'id': u'61ad227efa3b4cdd867618041a7064dd',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8004/v1/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
     u'id': u'61ad227efa3b4cdd867618041a7064de',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8888/v1', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'd588956f759f4bbda9e65a1019902b9c',
     u'id': u'be557ddb742e46328159749a21e6e286',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8888/v1/AUTH_$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'd588956f759f4bbda9e65a1019902b9c',
     u'id': u'be557ddb742e46328159749a21e6e287',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.12:8888/v1/AUTH_$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'd588956f759f4bbda9e65a1019902b9c',
     u'id': u'be557ddb742e46328159749a21e6e288',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:8777', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
     u'id': u'050d07db8c5041288f29020079177f0b',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:8777', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
     u'id': u'050d07db8c5041288f29020079177f0c',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8777', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
     u'id': u'050d07db8c5041288f29020079177f0d',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:5491', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'8515c4f28f9346199eb8704bca4f5db4',
     u'id': u'53af565e4d7245929df7af2ba0ff46db',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:5491', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'8515c4f28f9346199eb8704bca4f5db4',
     u'id': u'53af565e4d7245929df7af2ba0ff46dc',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:5491', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'8515c4f28f9346199eb8704bca4f5db4',
     u'id': u'53af565e4d7245929df7af2ba0ff46dd',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8779/v1.0/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'82265e39a77b4097bd8aee4f78e13867',
     u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8779/v1.0/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'82265e39a77b4097bd8aee4f78e13867',
     u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8779/v1.0/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'82265e39a77b4097bd8aee4f78e13867',
     u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:9292/v2', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
     u'id': u'06fdb367cb63414987ee1653a016d10a',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:9292/v2', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
     u'id': u'06fdb367cb63414987ee1653a016d10b',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:9292/v2', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
     u'id': u'06fdb367cb63414987ee1653a016d10c',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:9292/v2', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
     u'id': u'06fdb367cb63414987ee1653a016d10a',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:9292/v2', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
     u'id': u'06fdb367cb63414987ee1653a016d10b',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.12:9292/v2', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
     u'id': u'06fdb367cb63414987ee1653a016d10c',
     u'interface': u'public'},


    {u'url': u'http://192.168.204.12:8777/', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
     u'id': u'f15d22a9526648ff8833460e2dce1431',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8777/', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
     u'id': u'f15d22a9526648ff8833460e2dce1432',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.12:8777/', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
     u'id': u'f15d22a9526648ff8833460e2dce1433',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:8000/v1/', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa',
     u'id': u'5e6c6ffdbcd544f8838430937a0d81a7',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:8000/v1/', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa',
     u'id': u'5e6c6ffdbcd544f8838430937a0d81a8',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8000/v1/', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa',
     u'id': u'5e6c6ffdbcd544f8838430937a0d81a9',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8774/v2/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'id': u'87dc648502ee49fb86a4ca87d8d6028d',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8774/v2/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'id': u'87dc648502ee49fb86a4ca87d8d6028e',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8774/v2/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'id': u'87dc648502ee49fb86a4ca87d8d6028f',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:9696/', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'id': u'd326bf63f6f94b12924b03ff42ba63bd',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:9696/', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'id': u'd326bf63f6f94b12924b03ff42ba63be',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.12:9696/', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'id': u'd326bf63f6f94b12924b03ff42ba63bf',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8776/v2/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4',
     u'id': u'61b8bb77edf644f1ad4edf9b953d44c7',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8776/v2/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4',
     u'id': u'61b8bb77edf644f1ad4edf9b953d44c8',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.12:8776/v2/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4',
     u'id': u'61b8bb77edf644f1ad4edf9b953d44c9',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:9312/v1', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'9029af23540f4eecb0b7f70ac5e00152',
     u'id': u'a1aa2af22caf460eb421d75ab1ce6125',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:9312/v1', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'9029af23540f4eecb0b7f70ac5e00152',
     u'id': u'a1aa2af22caf460eb421d75ab1ce6126',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.12:9312/v1', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'9029af23540f4eecb0b7f70ac5e00152',
     u'id': u'a1aa2af22caf460eb421d75ab1ce6127',
     u'interface': u'public'}]}

FAKE_DOMAIN_DATA = {u'domains': [
    {u'id': u'default', u'enabled': True,
     u'description':
         u'Owns users and tenants (i.e. projects) available on Identity API '
         u'v2.',
     u'links': {
         u'self':
             u'http://192.168.204.12:8081/keystone/main/v3/domains/default'},
     u'name': u'Default'},
    {u'id': u'05d847889e9a4cb9aa94f541eb6b9e2e',
     u'enabled': True,
     u'description': u'Contains users and projects created by heat',
     u'links': {
         u'self':
             u'http://192.168.204.12:8081/keystone/main/v3/domains/'
             u'05d847889e9a4cb9aa94f541eb6b9e2e'},
     u'name': u'heat'}],
    u'links': {
        u'self': u'http://192.168.204.12:8081/keystone/main/v3/domains',
        u'next': None,
        u'previous': None}}


def _dump_config(config):
    """ Prints contents of config object """
    for section in config.sections():
        print "[%s]" % section
        for (name, value) in config.items(section):
            print "%s=%s" % (name, value)


def _replace_in_file(filename, old, new):
    """ Replaces old with new in file filename. """
    for line in fileinput.FileInput(filename, inplace=1):
        line = line.replace(old, new)
        print line,
    fileinput.close()


@patch('controllerconfig.configassistant.ConfigAssistant.get_wrsroot_sig')
def _test_region_config(tmpdir, inputfile, resultfile,
                        mock_get_wrsroot_sig):
    """ Test import and generation of answerfile """

    mock_get_wrsroot_sig.return_value = None

    # Create the path to the output file
    outputfile = os.path.join(str(tmpdir), 'output')

    # Parse the region_config file
    region_config = cr.parse_system_config(inputfile)

    # Dump results for debugging
    print "Parsed region_config:\n"
    _dump_config(region_config)

    # Validate the region config file
    cr.create_cgcs_config_file(outputfile, region_config,
                               keystone.ServiceList(FAKE_SERVICE_DATA),
                               keystone.EndpointList(FAKE_ENDPOINT_DATA),
                               keystone.DomainList(FAKE_DOMAIN_DATA))

    # Make a local copy of the results file
    local_resultfile = os.path.join(str(tmpdir), 'result')
    shutil.copyfile(resultfile, local_resultfile)

    # Do a diff between the output and the expected results
    print "\n\nDiff of output file vs. expected results file:\n"
    with open(outputfile) as a, open(local_resultfile) as b:
        a_lines = a.readlines()
        b_lines = b.readlines()

        differ = difflib.Differ()
        diff = differ.compare(a_lines, b_lines)
        print(''.join(diff))
    # Fail the testcase if the output doesn't match the expected results
    assert filecmp.cmp(outputfile, local_resultfile)

    # Now test that configassistant can parse this answerfile. We can't
    # compare the resulting cgcs_config file because the ordering, spacing
    # and comments are different between the answerfile generated by
    # systemconfig and ConfigAssistant.
    test_answerfile._test_answerfile(tmpdir, outputfile, compare_results=False)

    # Validate the region config file.
    # Using onboard validation since the validator's reference version number
    # is only set at build-time when validating offboard
    validate(region_config, REGION_CONFIG, None, False)

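One standard-library detail behind the assertion in _test_region_config above: filecmp.cmp defaults to a shallow comparison that short-circuits to equal only when the os.stat() signatures match, and otherwise falls back to comparing contents, so freshly written files are effectively compared byte for byte. An explicit deep comparison, as a sketch with hypothetical paths:

import filecmp

def files_match(path_a, path_b):
    # shallow=False always compares contents, never just stat() signatures.
    return filecmp.cmp(path_a, path_b, shallow=False)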
def test_region_config_simple(tmpdir):
    """ Test import of simple region_config file """

    regionfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "region_config.simple")
    resultfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "region_config.simple.result")

    _test_region_config(tmpdir, regionfile, resultfile)


def test_region_config_simple_can_ips(tmpdir):
    """ Test import of simple region_config file with unit ips for CAN """
print "IN TEST ################################################"
|
||||
    regionfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "region_config.simple.can_ips")
    resultfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "region_config.simple.result")

    _test_region_config(tmpdir, regionfile, resultfile)


def test_region_config_lag_vlan(tmpdir):
    """ Test import of region_config file with lag and vlan """

    regionfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "region_config.lag.vlan")
    resultfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "region_config.lag.vlan.result")

    _test_region_config(tmpdir, regionfile, resultfile)


def test_region_config_security(tmpdir):
    """ Test import of region_config file with security config """

    regionfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "region_config.security")
    resultfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "region_config.security.result")
    _test_region_config(tmpdir, regionfile, resultfile)


def test_region_config_nuage_vrs(tmpdir):
    """ Test import of region_config file with nuage vrs config """

    regionfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "region_config.nuage_vrs")
    resultfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "region_config.nuage_vrs.result")
    _test_region_config(tmpdir, regionfile, resultfile)


def test_region_config_share_keystone_only(tmpdir):
    """ Test import of Titanium Cloud region_config file with
        shared keystone """

    regionfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "TiS_region_config.share.keystoneonly")
    resultfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "TiS_region_config.share.keystoneonly.result")
    _test_region_config(tmpdir, regionfile, resultfile)


def test_region_config_share_keystone_glance_cinder(tmpdir):
    """ Test import of Titanium Cloud region_config file with shared keystone,
        glance and cinder """

    regionfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "TiS_region_config.shareall")
    resultfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "TiS_region_config.shareall.result")
    _test_region_config(tmpdir, regionfile, resultfile)


def test_region_config_validation():
    """ Test detection of various errors in region_config file """

    # Create the path to the region_config files
    simple_regionfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "region_config.simple")
    lag_vlan_regionfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "region_config.lag.vlan")
    nuage_vrs_regionfile = os.path.join(os.getcwd(),
                                        "controllerconfig/tests/files/",
                                        "region_config.nuage_vrs")

    # Test detection of non-required CINDER_* parameters
    region_config = cr.parse_system_config(simple_regionfile)
    region_config.set('STORAGE', 'CINDER_BACKEND', 'lvm')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, True)

    region_config = cr.parse_system_config(simple_regionfile)
    region_config.set('STORAGE', 'CINDER_DEVICE',
                      '/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    region_config = cr.parse_system_config(simple_regionfile)
    region_config.set('STORAGE', 'CINDER_STORAGE', '10')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test detection of an invalid PXEBOOT_CIDR
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.set('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR',
                      '192.168.1.4/24')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    region_config.set('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR',
                      'FD00::0000/64')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    region_config.set('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR',
                      '192.168.1.0/29')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    region_config.remove_option('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR')
    with pytest.raises(ConfigParser.NoOptionError):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(ConfigParser.NoOptionError):
        validate(region_config, REGION_CONFIG, None, False)

    # Test overlap of CLM_CIDR
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.set('CLM_NETWORK', 'CLM_CIDR', '192.168.203.0/26')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test invalid CLM LAG_MODE
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.set('LOGICAL_INTERFACE_1', 'LAG_MODE', '2')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test CLM_VLAN not allowed
    region_config = cr.parse_system_config(simple_regionfile)
    region_config.set('CLM_NETWORK', 'CLM_VLAN', '123')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test CLM_VLAN missing
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.remove_option('CLM_NETWORK', 'CLM_VLAN')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test overlap of BLS_CIDR
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.set('BLS_NETWORK', 'BLS_CIDR', '192.168.203.0/26')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    region_config.set('BLS_NETWORK', 'BLS_CIDR', '192.168.204.0/26')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test invalid BLS LAG_MODE
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.add_section('LOGICAL_INTERFACE_2')
    region_config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y')
    region_config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3')
    region_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500')
    region_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4')
    region_config.set('BLS_NETWORK', 'BLS_LOGICAL_INTERFACE',
                      'LOGICAL_INTERFACE_2')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test BLS_VLAN overlap
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.set('BLS_NETWORK', 'BLS_VLAN', '123')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test BLS_VLAN missing
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.remove_option('BLS_NETWORK', 'BLS_VLAN')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test overlap of CAN_CIDR
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.set('CAN_NETWORK', 'CAN_CIDR', '192.168.203.0/26')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    region_config.set('CAN_NETWORK', 'CAN_CIDR', '192.168.204.0/26')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    region_config.set('CAN_NETWORK', 'CAN_CIDR', '192.168.205.0/26')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test invalid CAN LAG_MODE
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.add_section('LOGICAL_INTERFACE_2')
    region_config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y')
    region_config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3')
    region_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500')
    region_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4')
    region_config.set('CAN_NETWORK', 'CAN_LOGICAL_INTERFACE',
                      'LOGICAL_INTERFACE_2')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test CAN_VLAN overlap
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.set('CAN_NETWORK', 'CAN_VLAN', '123')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    region_config.set('CAN_NETWORK', 'CAN_VLAN', '124')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test CAN_VLAN missing
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.remove_option('CAN_NETWORK', 'CAN_VLAN')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test missing gateway
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.remove_option('CLM_NETWORK', 'CLM_GATEWAY')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test two gateways
    region_config = cr.parse_system_config(lag_vlan_regionfile)
    region_config.set('CAN_NETWORK', 'CAN_GATEWAY', '10.10.10.1')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test detection of invalid VSWITCH_TYPE
    region_config = cr.parse_system_config(nuage_vrs_regionfile)
    region_config.set('NETWORK', 'VSWITCH_TYPE', 'invalid')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test detection of neutron in wrong region for AVS VSWITCH_TYPE
    region_config = cr.parse_system_config(nuage_vrs_regionfile)
    region_config.set('NETWORK', 'VSWITCH_TYPE', 'AVS')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)

    # Test detection of neutron in wrong region for NUAGE_VRS VSWITCH_TYPE
    region_config = cr.parse_system_config(nuage_vrs_regionfile)
    region_config.remove_option('SHARED_SERVICES', 'NEUTRON_USER_NAME')
    region_config.remove_option('SHARED_SERVICES', 'NEUTRON_PASSWORD')
    region_config.remove_option('SHARED_SERVICES', 'NEUTRON_SERVICE_NAME')
    region_config.remove_option('SHARED_SERVICES', 'NEUTRON_SERVICE_TYPE')
    region_config.set('REGION_2_SERVICES', 'NEUTRON_USER_NAME', 'neutron')
    region_config.set('REGION_2_SERVICES', 'NEUTRON_PASSWORD', 'password2WO*')
    region_config.set('REGION_2_SERVICES', 'NEUTRON_SERVICE_NAME', 'neutron')
    region_config.set('REGION_2_SERVICES', 'NEUTRON_SERVICE_TYPE', 'network')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, region_config, None, None, None,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(region_config, REGION_CONFIG, None, False)
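The CIDR overlap cases above (for example CLM_CIDR or CAN_CIDR set to 192.168.203.0/26 against the 192.168.203.0/24 pxeboot subnet) reduce to an interval test. A hedged sketch assuming python-netaddr, with networks_overlap a hypothetical name rather than a function from this commit:

from netaddr import IPNetwork

def networks_overlap(cidr_a, cidr_b):
    # True when the two CIDRs share at least one address.
    a, b = IPNetwork(cidr_a), IPNetwork(cidr_b)
    return a.first <= b.last and b.first <= a.last

assert networks_overlap('192.168.203.0/26', '192.168.203.0/24')
assert not networks_overlap('192.168.204.0/24', '192.168.205.0/24')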
@@ -0,0 +1,457 @@
|
||||
"""
|
||||
Copyright (c) 2014, 2017 Wind River Systems, Inc.
|
||||
|
||||
SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
"""
|
||||
|
||||
import ConfigParser
|
||||
import os
|
||||
import pytest
|
||||
|
||||
import controllerconfig.systemconfig as cr
|
||||
import configutilities.common.exceptions as exceptions
|
||||
from configutilities import validate, DEFAULT_CONFIG
|
||||
|
||||
|
||||
def _dump_config(config):
|
||||
""" Prints contents of config object """
|
||||
for section in config.sections():
|
||||
print "[%s]" % section
|
||||
for (name, value) in config.items(section):
|
||||
print "%s=%s" % (name, value)
|
||||
|
||||
|
||||
def _test_system_config(filename):
|
||||
""" Test import and generation of answerfile """
|
||||
|
||||
# Parse the system_config file
|
||||
system_config = cr.parse_system_config(filename)
|
||||
|
||||
# Dump results for debugging
|
||||
print "Parsed system_config:\n"
|
||||
_dump_config(system_config)
|
||||
|
||||
# Validate the system config file
|
||||
cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
|
||||
validate_only=True)
|
||||
|
||||
# Validate the region config file.
|
||||
# Using onboard validation since the validator's reference version number
|
||||
# is only set at build-time when validating offboard
|
||||
validate(system_config, DEFAULT_CONFIG, None, False)
|
||||
|
||||
|
||||
def test_system_config_simple():
|
||||
""" Test import of simple system_config file """
|
||||
|
||||
# Create the path to the system_config file
|
||||
systemfile = os.path.join(
|
||||
os.getcwd(), "controllerconfig/tests/files/", "system_config.simple")
|
||||
|
||||
_test_system_config(systemfile)
|
||||
|
||||
|
||||
def test_system_config_ipv6():
|
||||
""" Test import of system_config file with ipv6 oam """
|
||||
|
||||
# Create the path to the system_config file
|
||||
systemfile = os.path.join(
|
||||
os.getcwd(), "controllerconfig/tests/files/", "system_config.ipv6")
|
||||
|
||||
_test_system_config(systemfile)
|
||||
|
||||
|
||||
def test_system_config_lag_vlan():
|
||||
""" Test import of system_config file with lag and vlan """
|
||||
|
||||
# Create the path to the system_config file
|
||||
systemfile = os.path.join(
|
||||
os.getcwd(), "controllerconfig/tests/files/", "system_config.lag.vlan")
|
||||
|
||||
_test_system_config(systemfile)
|
||||
|
||||
|
||||
def test_system_config_security():
|
||||
""" Test import of system_config file with security config """
|
||||
|
||||
# Create the path to the system_config file
|
||||
systemfile = os.path.join(
|
||||
os.getcwd(), "controllerconfig/tests/files/", "system_config.security")
|
||||
|
||||
_test_system_config(systemfile)
|
||||
|
||||
|
||||
def test_system_config_ceph():
|
||||
""" Test import of system_config file with ceph config """
|
||||
|
||||
# Create the path to the system_config file
|
||||
systemfile = os.path.join(
|
||||
os.getcwd(), "controllerconfig/tests/files/", "system_config.ceph")
|
||||
|
||||
_test_system_config(systemfile)
|
||||
|
||||
|
||||
def test_system_config_simplex():
|
||||
""" Test import of system_config file for AIO-simplex """
|
||||
|
||||
# Create the path to the system_config file
|
||||
systemfile = os.path.join(
|
||||
os.getcwd(), "controllerconfig/tests/files/", "system_config.simplex")
|
||||
|
||||
_test_system_config(systemfile)
|
||||
|
||||
|
||||
def test_system_config_validation():
|
||||
""" Test detection of various errors in system_config file """
|
||||
|
||||
# Create the path to the system_config files
|
||||
simple_systemfile = os.path.join(
|
||||
os.getcwd(), "controllerconfig/tests/files/", "system_config.simple")
|
||||
ipv6_systemfile = os.path.join(
|
||||
os.getcwd(), "controllerconfig/tests/files/", "system_config.ipv6")
|
||||
lag_vlan_systemfile = os.path.join(
|
||||
os.getcwd(), "controllerconfig/tests/files/", "system_config.lag.vlan")
|
||||
ceph_systemfile = os.path.join(
|
||||
os.getcwd(), "controllerconfig/tests/files/", "system_config.ceph")
|
||||
static_addr_systemfile = os.path.join(
|
||||
os.getcwd(), "controllerconfig/tests/files/",
|
||||
"system_config.static_addr")
|
||||
|
||||
    # Test floating outside of OAM_NETWORK CIDR
    system_config = cr.parse_system_config(ipv6_systemfile)
    system_config.set('OAM_NETWORK', 'IP_FLOATING_ADDRESS', '5555::5')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test non-ipv6 unit address
    system_config = cr.parse_system_config(ipv6_systemfile)
    system_config.set('OAM_NETWORK', 'IP_UNIT_0_ADDRESS', '10.10.10.3')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test using start/end addresses
    system_config = cr.parse_system_config(ipv6_systemfile)
    system_config.set('OAM_NETWORK', 'IP_START_ADDRESS', 'abcd::2')
    system_config.set('OAM_NETWORK', 'IP_END_ADDRESS', 'abcd::4')
    system_config.remove_option('OAM_NETWORK', 'IP_FLOATING_ADDRESS')
    system_config.remove_option('OAM_NETWORK', 'IP_UNIT_0_ADDRESS')
    system_config.remove_option('OAM_NETWORK', 'IP_UNIT_1_ADDRESS')
    cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                               validate_only=True)
    validate(system_config, DEFAULT_CONFIG, None, False)

    # Test detection of an invalid PXEBOOT_CIDR
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('PXEBOOT_NETWORK', 'PXEBOOT_CIDR',
                      '192.168.1.4/24')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    system_config.set('PXEBOOT_NETWORK', 'PXEBOOT_CIDR',
                      'FD00::0000/64')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    system_config.set('PXEBOOT_NETWORK', 'PXEBOOT_CIDR',
                      '192.168.1.0/29')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    system_config.remove_option('PXEBOOT_NETWORK', 'PXEBOOT_CIDR')
    with pytest.raises(ConfigParser.NoOptionError):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(ConfigParser.NoOptionError):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test overlap of MGMT_NETWORK CIDR
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('MGMT_NETWORK', 'CIDR', '192.168.203.0/26')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test invalid MGMT_NETWORK LAG_MODE
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('LOGICAL_INTERFACE_1', 'LAG_MODE', '2')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test MGMT_NETWORK VLAN not allowed
    system_config = cr.parse_system_config(simple_systemfile)
    system_config.set('MGMT_NETWORK', 'VLAN', '123')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test MGMT_NETWORK VLAN missing
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.remove_option('MGMT_NETWORK', 'VLAN')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test MGMT_NETWORK start address specified without end address
    system_config = cr.parse_system_config(simple_systemfile)
    system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test MGMT_NETWORK end address specified without start address
    system_config = cr.parse_system_config(simple_systemfile)
    system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.200')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test MGMT_NETWORK start and end range does not have enough addresses
    system_config = cr.parse_system_config(static_addr_systemfile)
    system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2')
    system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.8')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test MGMT_NETWORK start address not in subnet
    system_config = cr.parse_system_config(simple_systemfile)
    system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.200.2')
    system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.254')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test MGMT_NETWORK end address not in subnet
    system_config = cr.parse_system_config(simple_systemfile)
    system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2')
    system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.214.254')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test overlap of INFRA_NETWORK CIDR
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('INFRA_NETWORK', 'CIDR', '192.168.203.0/26')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    system_config.set('INFRA_NETWORK', 'CIDR', '192.168.204.0/26')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test invalid INFRA_NETWORK LAG_MODE
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.add_section('LOGICAL_INTERFACE_2')
    system_config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y')
    system_config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3')
    system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500')
    system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4')
    system_config.set('INFRA_NETWORK', 'LOGICAL_INTERFACE',
                      'LOGICAL_INTERFACE_2')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test INFRA_NETWORK VLAN overlap
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('INFRA_NETWORK', 'VLAN', '123')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test INFRA_NETWORK VLAN missing
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.remove_option('INFRA_NETWORK', 'VLAN')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test overlap of OAM_NETWORK CIDR
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('OAM_NETWORK', 'CIDR', '192.168.203.0/26')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    system_config.set('OAM_NETWORK', 'CIDR', '192.168.204.0/26')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    system_config.set('OAM_NETWORK', 'CIDR', '192.168.205.0/26')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test invalid OAM_NETWORK LAG_MODE
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.add_section('LOGICAL_INTERFACE_2')
    system_config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y')
    system_config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3')
    system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500')
    system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4')
    system_config.set('OAM_NETWORK', 'LOGICAL_INTERFACE',
                      'LOGICAL_INTERFACE_2')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test OAM_NETWORK VLAN overlap
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('OAM_NETWORK', 'VLAN', '123')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    system_config.set('OAM_NETWORK', 'VLAN', '124')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test OAM_NETWORK VLAN missing
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.remove_option('OAM_NETWORK', 'VLAN')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test missing gateway
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.remove_option('MGMT_NETWORK', 'GATEWAY')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test two gateways
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('OAM_NETWORK', 'GATEWAY', '10.10.10.1')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test detection of unsupported DNS NAMESERVER
    system_config = cr.parse_system_config(simple_systemfile)
    system_config.add_section('DNS')
    system_config.set('DNS', 'NAMESERVER_1', '8.8.8.8')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)

    # Test detection of unsupported NTP NTP_SERVER
    system_config = cr.parse_system_config(simple_systemfile)
    system_config.add_section('NTP')
    system_config.set('NTP', 'NTP_SERVER_1', '0.pool.ntp.org')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)

    # Test detection of overspecification of MGMT network addresses
    system_config = cr.parse_system_config(ceph_systemfile)
    system_config.set('MGMT_NETWORK', 'IP_FLOATING_ADDRESS', '192.168.204.3')
    system_config.set('MGMT_NETWORK', 'IP_UNIT_0_ADDRESS', '192.168.204.6')
    system_config.set('MGMT_NETWORK', 'IP_UNIT_1_ADDRESS', '192.168.204.9')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)

    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test detection of overspecification of INFRA network addresses
    system_config = cr.parse_system_config(ceph_systemfile)
    system_config.set('INFRA_NETWORK', 'IP_FLOATING_ADDRESS',
                      '192.168.205.103')
    system_config.set('INFRA_NETWORK', 'IP_UNIT_0_ADDRESS',
                      '192.168.205.106')
    system_config.set('INFRA_NETWORK', 'IP_UNIT_1_ADDRESS',
                      '192.168.205.109')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test detection of overspecification of OAM network addresses
    system_config = cr.parse_system_config(ceph_systemfile)
    system_config.set('OAM_NETWORK', 'IP_FLOATING_ADDRESS', '10.10.10.2')
    system_config.set('OAM_NETWORK', 'IP_UNIT_0_ADDRESS', '10.10.10.3')
    system_config.set('OAM_NETWORK', 'IP_UNIT_1_ADDRESS', '10.10.10.4')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test detection of an invalid release version
    system_config = cr.parse_system_config(ceph_systemfile)
    system_config.set('VERSION', 'RELEASE', '15.12')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)
@@ -0,0 +1,5 @@
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
File diff suppressed because it is too large
@@ -0,0 +1,372 @@
#
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

#
# This file contains functions used by sysinv to manage upgrades.
#

import json
import glob
import os
import shutil
import subprocess

import tsconfig.tsconfig as tsc

from controllerconfig import backup_restore
from controllerconfig.common import log
from controllerconfig.common import constants
from sysinv.common import constants as sysinv_constants
import utils

LOG = log.get_logger(__name__)


def get_upgrade_databases(shared_services):

    UPGRADE_DATABASES = ('postgres', 'template1', 'nova', 'sysinv', 'murano',
                         'ceilometer', 'neutron', 'heat', 'nova_api', 'aodh',
                         'magnum', 'panko', 'ironic')

    UPGRADE_DATABASE_SKIP_TABLES = {'postgres': (), 'template1': (),
                                    'heat': (), 'nova': (), 'nova_api': (),
                                    'sysinv': ('i_alarm',),
                                    'neutron': (),
                                    'aodh': (),
                                    'murano': (),
                                    'magnum': (),
                                    'panko': (),
                                    'ironic': (),
                                    'ceilometer': ('metadata_bool',
                                                   'metadata_float',
                                                   'metadata_int',
                                                   'metadata_text',
                                                   'meter', 'sample', 'fault',
                                                   'resource')}

    if sysinv_constants.SERVICE_TYPE_VOLUME not in shared_services:
        UPGRADE_DATABASES += ('cinder',)
        UPGRADE_DATABASE_SKIP_TABLES.update({'cinder': ()})

    if sysinv_constants.SERVICE_TYPE_IMAGE not in shared_services:
        UPGRADE_DATABASES += ('glance',)
        UPGRADE_DATABASE_SKIP_TABLES.update({'glance': ()})

    if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services:
        UPGRADE_DATABASES += ('keystone',)
        UPGRADE_DATABASE_SKIP_TABLES.update({'keystone': ('token',)})

    return UPGRADE_DATABASES, UPGRADE_DATABASE_SKIP_TABLES


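# Illustrative only (not part of the original module): how export_postgres()
# below consumes the tuple returned by get_upgrade_databases(). With no
# shared services, the cinder/glance/keystone databases are included:
#
#   dbs, skip_tables = get_upgrade_databases([])
#   # 'keystone' in dbs        -> True
#   # skip_tables['keystone']  -> ('token',)
#   # skip_tables['sysinv']    -> ('i_alarm',)

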
def export_postgres(dest_dir, shared_services):
    """ Export postgres databases """
    devnull = open(os.devnull, 'w')
    try:
        upgrade_databases, upgrade_database_skip_tables = \
            get_upgrade_databases(shared_services)
        # Dump roles, table spaces and schemas for databases.
        subprocess.check_call([('sudo -u postgres pg_dumpall --clean ' +
                                '--schema-only > %s/%s' %
                                (dest_dir, 'postgres.sql.config'))],
                              shell=True, stderr=devnull)

        # Dump data for databases.
        for db_elem in upgrade_databases:

            db_cmd = 'sudo -u postgres pg_dump --format=plain --inserts '
            db_cmd += '--disable-triggers --data-only %s ' % db_elem

            for table_elem in upgrade_database_skip_tables[db_elem]:
                db_cmd += '--exclude-table=%s ' % table_elem

            db_cmd += '> %s/%s.sql.data' % (dest_dir, db_elem)

            subprocess.check_call([db_cmd], shell=True, stderr=devnull)

    except subprocess.CalledProcessError:
        LOG.exception("Failed to export postgres databases for upgrade.")
        raise


def export_vim(dest_dir):
    """ Export VIM database """
    devnull = open(os.devnull, 'w')
    try:
        vim_cmd = ("nfv-vim-manage db-dump-data -d %s -f %s" %
                   (os.path.join(tsc.PLATFORM_PATH, 'nfv/vim', tsc.SW_VERSION),
                    os.path.join(dest_dir, 'vim.data')))
        subprocess.check_call([vim_cmd], shell=True, stderr=devnull)

    except subprocess.CalledProcessError:
        LOG.exception("Failed to export VIM databases for upgrade.")
        raise


def prepare_upgrade(from_load, to_load, i_system):
    """ Executed on the release N side to prepare for an upgrade. """
    devnull = open(os.devnull, 'w')

    LOG.info("Starting upgrade preparations - from: %s, to: %s" %
             (from_load, to_load))
    dest_dir = os.path.join(utils.POSTGRES_PATH, "upgrade")
    try:
        os.mkdir(dest_dir, 0755)
    except OSError:
        LOG.exception("Failed to create upgrade export directory %s." %
                      dest_dir)
        raise

    # Export databases
    shared_services = i_system.capabilities.get("shared_services", "")
    export_postgres(dest_dir, shared_services)
    export_vim(dest_dir)

    # Export filesystems so controller-1 can access them
    try:
        subprocess.check_call(
            ["exportfs",
             "%s:%s" % (utils.CONTROLLER_1_HOSTNAME, utils.POSTGRES_PATH),
             "-o",
             "rw,no_root_squash"],
            stdout=devnull)
    except subprocess.CalledProcessError:
        LOG.exception("Failed to export %s" % utils.POSTGRES_PATH)
        raise
    try:
        subprocess.check_call(
            ["exportfs",
             "%s:%s" % (utils.CONTROLLER_1_HOSTNAME, utils.RABBIT_PATH),
             "-o",
             "rw,no_root_squash"],
            stdout=devnull)
    except subprocess.CalledProcessError:
        LOG.exception("Failed to export %s" % utils.RABBIT_PATH)
        raise

    if tsc.infrastructure_interface:
        # The mate controller needs access to the /opt/cgcs directory during
        # the upgrade. If an infrastructure interface exists, then /opt/cgcs
        # is exported over the infrastructure network, which the mate does
        # not have access to during the upgrade. So... export it over the
        # management network here as well.
        try:
            subprocess.check_call(
                ["exportfs",
                 "%s:%s" % (utils.CONTROLLER_1_HOSTNAME, tsc.CGCS_PATH),
                 "-o",
                 "rw,no_root_squash"],
                stdout=devnull)
        except subprocess.CalledProcessError:
            LOG.exception("Failed to export %s" % tsc.CGCS_PATH)
            raise

    # Migrate /opt/platform/config so controller-1 can access it when it
    # runs controller_config
    try:
        subprocess.check_call(
            ["cp",
             "-a",
             os.path.join(tsc.PLATFORM_PATH, "config", from_load),
             os.path.join(tsc.PLATFORM_PATH, "config", to_load)],
            stdout=devnull)
    except subprocess.CalledProcessError:
        LOG.exception("Failed to migrate %s" % os.path.join(tsc.PLATFORM_PATH,
                                                            "config"))
        raise

    # Remove branding tar files from the release N+1 directory as branding
    # files are not compatible between releases.
    branding_files = os.path.join(
        tsc.PLATFORM_PATH, "config", to_load, "branding", "*.tgz")
    try:
        subprocess.check_call(["rm -f %s" % branding_files], shell=True,
                              stdout=devnull)
    except subprocess.CalledProcessError:
        LOG.exception("Failed to remove branding files %s" % branding_files)

    # Execute migration scripts
    utils.execute_migration_scripts(
        from_load, to_load, utils.ACTION_START)

    LOG.info("Finished upgrade preparations")


def create_simplex_backup(controller_fs, software_upgrade):
    """Create the upgrade metadata and take the system backup"""
    backup_data = {}
    fs_data = {}
    fs_data['database_gib'] = controller_fs.database_gib * 2
    backup_data['filesystem'] = fs_data
    upgrade_data = software_upgrade.as_dict()
    if upgrade_data['created_at']:
        upgrade_data['created_at'] = \
            upgrade_data['created_at'].replace(
                microsecond=0).replace(tzinfo=None).isoformat()
    if upgrade_data['updated_at']:
        upgrade_data['updated_at'] = \
            upgrade_data['updated_at'].replace(
                microsecond=0).replace(tzinfo=None).isoformat()
    backup_data['upgrade'] = upgrade_data
    json_data = json.dumps(backup_data)
    metadata_path = os.path.join(tsc.CONFIG_PATH, 'upgrades')
    os.mkdir(metadata_path)
    metadata_filename = os.path.join(metadata_path, 'metadata')
    with open(metadata_filename, 'w') as metadata_file:
        metadata_file.write(json_data)

    backup_filename = get_upgrade_backup_filename(software_upgrade)
    backup_restore.backup(backup_filename, constants.BACKUPS_PATH)
    LOG.info("Create simplex backup complete")


def get_upgrade_backup_filename(software_upgrade):
    """Generate the simplex upgrade backup filename"""
    created_at_date = software_upgrade.created_at.replace(
        microsecond=0).replace(tzinfo=None)
    date_time = created_at_date.isoformat().replace(':', '')
    filename = 'upgrade_data_' + date_time + '_' + software_upgrade.uuid
    return filename


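# Example of the resulting name (illustrative values only): an upgrade
# created at 2018-01-23T03:04:05 with uuid "0b914405-..." yields
#
#   upgrade_data_2018-01-23T030405_0b914405-...
#
# The ':' characters are stripped so the name is filesystem-safe.

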
def abort_upgrade(from_load, to_load, upgrade):
    """ Executed on the release N side, cleans up data created for upgrade. """
    devnull = open(os.devnull, 'w')
    LOG.info("Starting upgrade abort - from: %s, to: %s" %
             (from_load, to_load))

    # remove upgrade flags
    try:
        os.remove(tsc.CONTROLLER_UPGRADE_FLAG)
    except OSError:
        LOG.exception("Failed to remove upgrade flag")
    try:
        os.remove(tsc.CONTROLLER_UPGRADE_COMPLETE_FLAG)
    except OSError:
        LOG.exception("Failed to remove upgrade complete flag")
    try:
        os.remove(tsc.CONTROLLER_UPGRADE_FAIL_FLAG)
    except OSError:
        LOG.exception("Failed to remove upgrade fail flag")
    try:
        os.remove(tsc.CONTROLLER_UPGRADE_STARTED_FLAG)
    except OSError:
        LOG.exception("Failed to remove the upgrade started flag")

    # unexport filesystems
    export_list = [utils.POSTGRES_PATH, utils.RABBIT_PATH]
    if tsc.infrastructure_interface:
        export_list.append(tsc.CGCS_PATH)
    export_path = None
    try:
        for export_path in export_list:
            subprocess.check_call(
                ["exportfs",
                 "-u",
                 "%s:%s" % (utils.CONTROLLER_1_HOSTNAME, export_path)],
                stdout=devnull)
    except subprocess.CalledProcessError:
        LOG.exception("Failed to unexport %s" % export_path)
    except Exception:
        LOG.exception("Failed to unexport filesystems")

    # Remove upgrade directories
    upgrade_dirs = [
        os.path.join(tsc.PLATFORM_PATH, "config", to_load),
        os.path.join(utils.POSTGRES_PATH, "upgrade"),
        os.path.join(utils.POSTGRES_PATH, to_load),
        os.path.join(utils.RABBIT_PATH, to_load),
        os.path.join(utils.MURANO_RABBIT_PATH, to_load),
        os.path.join(tsc.CGCS_PATH, "ironic", to_load),
        os.path.join(tsc.PLATFORM_PATH, "nfv/vim", to_load),
        os.path.join(tsc.PLATFORM_PATH, ".keyring", to_load),
        os.path.join(tsc.PLATFORM_PATH, "packstack", to_load),
        os.path.join(tsc.PLATFORM_PATH, "sysinv", to_load),
        os.path.join(tsc.CGCS_PATH, "ceilometer", to_load),
        os.path.join(tsc.CONFIG_PATH, 'upgrades')
    ]

    for directory in upgrade_dirs:
        try:
            shutil.rmtree(directory)
        except OSError:
            LOG.exception("Failed to remove upgrade directory %s" % directory)

    simplex_backup_filename = get_upgrade_backup_filename(upgrade) + "*"
    simplex_backup_files = glob.glob(os.path.join(
        constants.BACKUPS_PATH, simplex_backup_filename))

    for backup_file in simplex_backup_files:
        try:
            LOG.info("Removing simplex upgrade file %s" % backup_file)
            os.remove(backup_file)
        except OSError:
            LOG.exception("Failed to remove %s" % backup_file)

    LOG.info("Finished upgrade abort")


def activate_upgrade(from_load, to_load, i_system):
    """ Executed on release N+1, activates the upgrade on all nodes. """
    LOG.info("Starting upgrade activate - from: %s, to: %s" %
             (from_load, to_load))
    devnull = open(os.devnull, 'w')

    shared_services = i_system.capabilities.get("shared_services", "")
    if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services:
        try:
            # Activate keystone
            #
            # CONTRACT - contract the previously expanded to_version DB
            # to remove the old schema and all data migration triggers.
            # When this process completes, the database will no longer
            # be able to support the previous release.
            # To avoid a deadlock during keystone contract we will use offline
            # migration for simplex upgrades. Since all db_sync operations are
            # done offline there is no need for the contract on SX systems.
            if tsc.system_mode != sysinv_constants.SYSTEM_MODE_SIMPLEX:
                keystone_cmd = ('keystone-manage db_sync --contract')
                subprocess.check_call([keystone_cmd], shell=True,
                                      stderr=devnull)

        except subprocess.CalledProcessError:
            LOG.exception("Failed to contract Keystone databases for upgrade.")
            raise
    utils.execute_migration_scripts(from_load, to_load, utils.ACTION_ACTIVATE)

    LOG.info("Finished upgrade activation")


def complete_upgrade(from_load, to_load):
    """ Executed on release N+1, cleans up data created for upgrade. """
    LOG.info("Starting upgrade complete - from: %s, to: %s" %
             (from_load, to_load))

    # Remove upgrade directories
    upgrade_dirs = [
        os.path.join(tsc.PLATFORM_PATH, "config", from_load),
        os.path.join(utils.POSTGRES_PATH, "upgrade"),
        os.path.join(utils.POSTGRES_PATH, from_load),
        os.path.join(utils.RABBIT_PATH, from_load),
        os.path.join(utils.MURANO_RABBIT_PATH, from_load),
        os.path.join(tsc.CGCS_PATH, "ironic", from_load),
        os.path.join(tsc.PLATFORM_PATH, "nfv/vim", from_load),
        os.path.join(tsc.PLATFORM_PATH, ".keyring", from_load),
        os.path.join(tsc.PLATFORM_PATH, "packstack", from_load),
        os.path.join(tsc.PLATFORM_PATH, "sysinv", from_load),
        os.path.join(tsc.CGCS_PATH, "ceilometer", from_load),
    ]

    for directory in upgrade_dirs:
        try:
            shutil.rmtree(directory)
        except OSError:
            LOG.exception("Failed to remove upgrade directory %s" % directory)

    LOG.info("Finished upgrade complete")
@@ -0,0 +1,756 @@
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

#
# This file contains common upgrades functions that can be used by both sysinv
# and during the upgrade of controller-1.
#

import os
import subprocess
import tempfile
import uuid
import yaml
import ConfigParser

# WARNING: The controller-1 upgrade is done before any packstack manifests
# have been applied, so only the static entries from tsconfig can be used.
# (the platform.conf file will not have been updated with dynamic values).
from tsconfig.tsconfig import (SW_VERSION, PLATFORM_PATH,
                               KEYRING_PATH, CONFIG_PATH)

from configutilities import DEFAULT_DOMAIN_NAME
from controllerconfig import utils as cutils
from controllerconfig.common import log, constants
from sysinv.common import constants as sysinv_constants


LOG = log.get_logger(__name__)

POSTGRES_PATH = '/var/lib/postgresql'
POSTGRES_DATA_DIR = os.path.join(POSTGRES_PATH, SW_VERSION)
RABBIT_PATH = '/var/lib/rabbitmq'
MURANO_RABBIT_PATH = '/var/lib/rabbitmq/murano'
CONTROLLER_1_HOSTNAME = "controller-1"
DB_CONNECTION = "postgresql://%s:%s@127.0.0.1/%s\n"

# Migration script actions
ACTION_START = "start"
ACTION_MIGRATE = "migrate"
ACTION_ACTIVATE = "activate"


def execute_migration_scripts(from_release, to_release, action):
    """ Execute migration scripts with an action:
          start: Prepare for upgrade on release N side. Called during
                 "system upgrade-start".
          migrate: Perform data migration on release N+1 side. Called while
                   controller-1 is performing its upgrade.
          activate: Activate the upgrade on release N+1 side. Called during
                    upgrade activation.
    """

    devnull = open(os.devnull, 'w')

    migration_script_dir = "/etc/upgrade.d"

    LOG.info("Executing migration scripts with from_release: %s, "
             "to_release: %s, action: %s" % (from_release, to_release, action))

    # Get a sorted list of all the migration scripts
    # Exclude any files that cannot be executed, including .pyc and .pyo files
    files = [f for f in os.listdir(migration_script_dir)
             if os.path.isfile(os.path.join(migration_script_dir, f)) and
             os.access(os.path.join(migration_script_dir, f), os.X_OK)]
    files.sort()

    # Execute each migration script
    for f in files:
        migration_script = os.path.join(migration_script_dir, f)
        try:
            LOG.info("Executing migration script %s" % migration_script)
            subprocess.check_call([migration_script,
                                   from_release,
                                   to_release,
                                   action],
                                  stdout=devnull, stderr=devnull)
        except subprocess.CalledProcessError as e:
            LOG.exception("Migration script %s failed with returncode %d" %
                          (migration_script, e.returncode))
            # Abort when a migration script fails
            raise e


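# Illustrative only: a migration script under /etc/upgrade.d is any
# executable invoked as "<script> <from_release> <to_release> <action>".
# A hypothetical script named 50-example-migration.sh would be called as:
#
#   /etc/upgrade.d/50-example-migration.sh 17.06 18.03 migrate
#
# (the script name and release labels above are placeholders)

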
def get_db_connection(hiera_db_records, database):
    username = hiera_db_records[database]['username']
    password = hiera_db_records[database]['password']
    return "postgresql://%s:%s@%s/%s" % (
        username, password, 'localhost', database)


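# For illustration (values are hypothetical):
#
#   records = {'keystone': {'username': 'admin-keystone',
#                           'password': 'db-secret'}}
#   get_db_connection(records, 'keystone')
#   # -> "postgresql://admin-keystone:db-secret@localhost/keystone"

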
def get_upgrade_token(hiera_db_records,
                      packstack_config,
                      config,
                      secure_config):
    # During a controller-1 upgrade, keystone is running
    # on the controller UNIT IP, however the service catalog
    # that was migrated from controller-0 still lists the
    # floating controller IP. Keystone operations that use
    # the AUTH URL will hit this service URL and fail,
    # therefore we have to issue an upgrade token for
    # all Keystone operations during an upgrade. This token
    # will allow us to circumvent the service catalog entry, by
    # providing a bypass endpoint.
    keystone_upgrade_url = "http://{}:5000/{}".format(
        '127.0.0.1',
        packstack_config.get('general', 'CONFIG_KEYSTONE_API_VERSION'))

    try:
        admin_user_domain = packstack_config.get(
            'general', 'CONFIG_ADMIN_USER_DOMAIN_NAME')
    except ConfigParser.NoOptionError:
        # This value wasn't present in R2, so it may be missing in upgrades
        # from that release
        LOG.info("CONFIG_ADMIN_USER_DOMAIN_NAME key not found. Using Default.")
        admin_user_domain = DEFAULT_DOMAIN_NAME

    try:
        admin_project_domain = packstack_config.get(
            'general', 'CONFIG_ADMIN_PROJECT_DOMAIN_NAME')
    except ConfigParser.NoOptionError:
        # This value wasn't present in R2, so it may be missing in upgrades
        # from that release
        LOG.info("CONFIG_ADMIN_PROJECT_DOMAIN_NAME key not found. Using "
                 "Default.")
        admin_project_domain = DEFAULT_DOMAIN_NAME

    # the upgrade token command
    keystone_upgrade_token = (
        "openstack "
        "--os-username {} "
        "--os-password {} "
        "--os-auth-url {} "
        "--os-project-name admin "
        "--os-user-domain-name {} "
        "--os-project-domain-name {} "
        "--os-interface internal "
        "--os-identity-api-version 3 "
        "token issue -c id -f value".format(
            packstack_config.get('general', 'CONFIG_KEYSTONE_ADMIN_USERNAME'),
            hiera_db_records['keystone']['ks_password'],
            keystone_upgrade_url,
            admin_user_domain,
            admin_project_domain
        ))

    config.update({
        'openstack::keystone::upgrade::upgrade_token_file':
            '/etc/keystone/upgrade_token',
        'openstack::keystone::upgrade::url': keystone_upgrade_url
    })

    secure_config.update({
        'openstack::keystone::upgrade::upgrade_token_cmd':
            keystone_upgrade_token,
    })


def get_platform_config(packstack_config,
                        to_release,
                        config,
                        secure_config):
    # TODO(TLIU): for now set the hiera option for puppet-keystone
    # Not sure whether it is better to use env instead
    config.update({
        'platform::params::software_version': to_release
    })

    amqp_passwd = packstack_config.get('general', 'CONFIG_AMQP_AUTH_PASSWORD')
    postgres_password = packstack_config.get('general', 'CONFIG_POSTGRESQL_PW')
    secure_config.update({
        'platform::amqp::params::auth_password': amqp_passwd,
        'platform::postgresql::params::password': postgres_password})

    wrsroot_password = packstack_config.get('general', 'CONFIG_WRSROOT_PW')
    try:
        wrsroot_password_age = packstack_config.get('general',
                                                    'CONFIG_WRSROOT_PW_AGE')
    except ConfigParser.NoOptionError:
        # This value wasn't present in R2, so it may be missing in upgrades
        # from that release
        LOG.info("CONFIG_WRSROOT_PW_AGE key not found. Setting value to 45")
        wrsroot_password_age = constants.WRSROOT_MAX_PASSWORD_AGE

    secure_config.update({
        'platform::users::params::wrsroot_password': wrsroot_password,
        'platform::users::params::wrsroot_password_max_age':
            wrsroot_password_age
    })

    ceph_cluster_id = packstack_config.get('general',
                                           'CONFIG_CEPH_CLUSTER_UUID')
    config.update({
        'platform::ceph::params::cluster_uuid': ceph_cluster_id
    })

    try:
        ceph_pwd = packstack_config.get('general',
                                        'CONFIG_CEPH_OBJECT_GATEWAY_KS_PW')
    except ConfigParser.NoOptionError:
        # This value wasn't present in R2, so it may be missing in upgrades
        # from that release
        LOG.info("CONFIG_CEPH_OBJECT_GATEWAY_KS_PW key not found. Generating "
                 "a new value")
        ceph_pwd = uuid.uuid4().hex[:10] + "TiC1*"

    secure_config.update({
        'platform::ceph::params::rgw_admin_password': ceph_pwd
    })

    ldap_hash = packstack_config.get('general',
                                     'CONFIG_LDAPADMIN_HASHED_PASSWORD')
    ldap_pwd = packstack_config.get('general',
                                    'CONFIG_LDAPADMIN_PASSWORD')
    secure_config.update({
        'platform::ldap::params::admin_hashed_pw': ldap_hash,
        'platform::ldap::params::admin_pw': ldap_pwd
    })


def get_service_user_config(hiera_db_records,
                            packstack_config,
                            config,
                            secure_config):
    # aodh user
    config.update({
        'aodh::db::postgresql::user': hiera_db_records['aodh']['username']
    })
    secure_config.update({
        'aodh::auth::auth_password': hiera_db_records['aodh']['ks_password'],
        'aodh::db::postgresql::password': hiera_db_records['aodh']['password'],
        'aodh::keystone::auth::password':
            hiera_db_records['aodh']['ks_password'],
        'aodh::keystone::authtoken::password':
            hiera_db_records['aodh']['ks_password']
    })

    # ceilometer user
    config.update({
        'ceilometer::db::postgresql::user':
            hiera_db_records['ceilometer']['username'],
    })
    secure_config.update({
        'ceilometer::agent::auth::auth_password':
            hiera_db_records['ceilometer']['ks_password'],
        'ceilometer::db::postgresql::password':
            hiera_db_records['ceilometer']['password'],
        'ceilometer::keystone::auth::password':
            hiera_db_records['ceilometer']['ks_password'],
        'ceilometer::keystone::authtoken::password':
            hiera_db_records['ceilometer']['ks_password']
    })

    # keystone user
    secure_config.update({
        'keystone::admin_password':
            hiera_db_records['keystone']['ks_password'],
        'keystone::admin_token':
            hiera_db_records['keystone']['admin_token'],
        'keystone::roles::admin::password':
            hiera_db_records['keystone']['ks_password']
    })
    if 'keystone' in hiera_db_records:
        config.update({
            'CONFIG_KEYSTONE_ADMIN_USERNAME':
                hiera_db_records['keystone']['ks_username'],
            'keystone::db::postgresql::user':
                hiera_db_records['keystone']['username']
        })
        secure_config.update({
            'CONFIG_KEYSTONE_ADMIN_PW':
                hiera_db_records['keystone']['ks_password'],
            'keystone::database_connection':
                get_db_connection(hiera_db_records, 'keystone'),
            'keystone::db::postgresql::password':
                hiera_db_records['keystone']['password']
        })

    if 'cinder' in hiera_db_records:
        # cinder user
        config.update({
            'cinder::db::postgresql::user':
                hiera_db_records['cinder']['username']
        })
        secure_config.update({
            'cinder::db::postgresql::password':
                hiera_db_records['cinder']['password'],
            'cinder::keystone::auth::password':
                hiera_db_records['cinder']['ks_password'],
            'cinder::keystone::authtoken::password':
                hiera_db_records['cinder']['ks_password']
        })

    if 'glance' in hiera_db_records:
        # glance user
        config.update({
            'glance::api::authtoken::username':
                hiera_db_records['glance']['ks_username'],
            'glance::db::postgresql::user':
                hiera_db_records['glance']['username'],
            'glance::registry::authtoken::username':
                hiera_db_records['glance']['ks_username']
        })
        secure_config.update({
            'glance::api::authtoken::password':
                hiera_db_records['glance']['ks_password'],
            'glance::db::postgresql::password':
                hiera_db_records['glance']['password'],
            'glance::keystone::auth::password':
                hiera_db_records['glance']['ks_password'],
            'glance::keystone::authtoken::password':
                hiera_db_records['glance']['ks_password'],
            'glance::registry::authtoken::password':
                hiera_db_records['glance']['ks_password']
        })

    # heat user
    config.update({
        'heat::db::postgresql::user':
            hiera_db_records['heat']['username']
    })
    secure_config.update({
        'heat::db::postgresql::password':
            hiera_db_records['heat']['password'],
        'heat::engine::auth_encryption_key':
            hiera_db_records['heat']['auth_key'],
        'heat::keystone::auth::password':
            hiera_db_records['heat']['ks_password'],
        'heat::keystone::auth_cfn::password':
            hiera_db_records['heat']['ks_password'],
        'heat::keystone::authtoken::password':
            hiera_db_records['heat']['ks_password'],
        'heat::keystone::domain::domain_password':
            hiera_db_records['heat']['domain_password']
    })

    # neutron
    config.update({
        'neutron::db::postgresql::user':
            hiera_db_records['neutron']['username']
    })
    secure_config.update({
        'neutron::agents::metadata::shared_secret':
            hiera_db_records['neutron']['metadata_passwd'],
        'neutron::db::postgresql::password':
            hiera_db_records['neutron']['password'],
        'neutron::keystone::auth::password':
            hiera_db_records['neutron']['ks_password'],
        'neutron::keystone::authtoken::password':
            hiera_db_records['neutron']['ks_password'],
        'neutron::server::notifications::password':
            hiera_db_records['nova']['ks_password']
    })

    # nova
    # In 18.xx the placement user is new, so additional config is needed to
    # set up its endpoint urls in keystone. This currently does not support
    # region mode.
    auth_region = packstack_config.get('general',
                                       'CONFIG_KEYSTONE_REGION')
    config.update({
        'nova::db::postgresql::user':
            hiera_db_records['nova']['username'],
        'nova::db::postgresql_api::user':
            hiera_db_records['nova_api']['username'],
        'nova::keystone::auth_placement::auth_name':
            hiera_db_records['placement']['ks_username'],
        'nova::keystone::auth_placement::admin_url':
            hiera_db_records['placement']['ks_admin_url'],
        'nova::keystone::auth_placement::internal_url':
            hiera_db_records['placement']['ks_internal_url'],
        'nova::keystone::auth_placement::public_url':
            hiera_db_records['placement']['ks_public_url'],
        'nova::keystone::auth_placement::region': auth_region
    })
    secure_config.update({
        'nova::api::neutron_metadata_proxy_shared_secret':
            hiera_db_records['neutron']['metadata_passwd'],
        'nova::db::postgresql::password':
            hiera_db_records['nova']['password'],
        'nova::db::postgresql_api::password':
            hiera_db_records['nova_api']['password'],
        'nova::keystone::auth::password':
            hiera_db_records['nova']['ks_password'],
        'nova::keystone::authtoken::password':
            hiera_db_records['nova']['ks_password'],
        'nova::network::neutron::neutron_password':
            hiera_db_records['neutron']['ks_password'],
        'nova_api_proxy::config::admin_password':
            hiera_db_records['nova']['ks_password'],
        'nova::keystone::auth_placement::password':
            hiera_db_records['placement']['ks_password'],
        'nova::placement::password':
            hiera_db_records['placement']['ks_password']
    })

    # patching user
    config.update({
        'patching::api::keystone_user':
            hiera_db_records['patching']['ks_username']
    })
    secure_config.update({
        'patching::api::keystone_password':
            hiera_db_records['patching']['ks_password'],
        'patching::keystone::auth::password':
            hiera_db_records['patching']['ks_password'],
        'patching::keystone::authtoken::password':
            hiera_db_records['patching']['ks_password']
    })

    # sysinv
    sysinv_database_connection = "postgresql://%s:%s@%s/%s" % (
        hiera_db_records['sysinv']['username'],
        hiera_db_records['sysinv']['password'],
        'localhost',
        'sysinv'
    )
    config.update({
        'sysinv::db::postgresql::user':
            hiera_db_records['sysinv']['username']
    })
    secure_config.update({
        'sysinv::api::keystone_password':
            hiera_db_records['sysinv']['ks_password'],
        'sysinv::database_connection': sysinv_database_connection,
        'sysinv::db::postgresql::password':
            hiera_db_records['sysinv']['password'],
        'sysinv::keystone::auth::password':
            hiera_db_records['sysinv']['ks_password']
    })

    # murano
    config.update({
        'murano::db::postgresql::user':
            hiera_db_records['murano']['username']
    })
    secure_config.update({
        'murano::db::postgresql::password':
            hiera_db_records['murano']['password'],
        'murano::keystone::auth::password':
            hiera_db_records['murano']['ks_password'],
        'murano::keystone::authtoken::password':
            hiera_db_records['murano']['ks_password'],
        'murano::admin_password':
            hiera_db_records['murano']['ks_password']
    })

    try:
        admin_user_domain = packstack_config.get(
            'general', 'CONFIG_ADMIN_USER_DOMAIN_NAME')
    except ConfigParser.NoOptionError:
        # This value wasn't present in R2, so it may be missing in upgrades
        # from that release
        LOG.info("CONFIG_ADMIN_USER_DOMAIN_NAME key not found. Using Default.")
        admin_user_domain = DEFAULT_DOMAIN_NAME

    try:
        admin_project_domain = packstack_config.get(
            'general', 'CONFIG_ADMIN_PROJECT_DOMAIN_NAME')
    except ConfigParser.NoOptionError:
        # This value wasn't present in R2, so it may be missing in upgrades
        # from that release
        LOG.info("CONFIG_ADMIN_PROJECT_DOMAIN_NAME key not found. Using "
                 "Default.")
        admin_project_domain = DEFAULT_DOMAIN_NAME

    config.update({
        'openstack::client::params::admin_username':
            hiera_db_records['keystone']['ks_username'],
        'openstack::client::params::admin_user_domain':
            admin_user_domain,
        'openstack::client::params::admin_project_domain':
            admin_project_domain,
    })
    secure_config.update({
        'openstack::murano::params::auth_password':
            hiera_db_records['murano']['ks_password']
    })

    # magnum
    config.update({
        'magnum::db::postgresql::user':
            hiera_db_records['magnum']['username']
    })
    secure_config.update({
        'magnum::db::postgresql::password':
            hiera_db_records['magnum']['password'],
        'magnum::keystone::auth::password':
            hiera_db_records['magnum']['ks_password'],
        'magnum::keystone::authtoken::password':
            hiera_db_records['magnum']['ks_password'],
        'magnum::keystone::domain::domain_password':
            hiera_db_records['magnum-domain']['ks_password']
    })

    # mtc
    # project and domains are also required for the manifest to create the
    # user
    auth_project = packstack_config.get('general',
                                        'CONFIG_SERVICE_TENANT_NAME')
    try:
        auth_user_domain = packstack_config.get(
            'general', 'CONFIG_SERVICE_USER_DOMAIN_NAME')
    except ConfigParser.NoOptionError:
        # This value wasn't present in R2, so it may be missing in upgrades
        # from that release
        LOG.info("CONFIG_SERVICE_USER_DOMAIN_NAME key not found. Using "
                 "Default.")
        auth_user_domain = DEFAULT_DOMAIN_NAME

    try:
        auth_project_domain = packstack_config.get(
            'general', 'CONFIG_SERVICE_PROJECT_DOMAIN_NAME')
    except ConfigParser.NoOptionError:
        # This value wasn't present in R2, so it may be missing in upgrades
        # from that release
        LOG.info("CONFIG_SERVICE_PROJECT_DOMAIN_NAME key not found. Using "
                 "Default.")
        auth_project_domain = DEFAULT_DOMAIN_NAME

    config.update({
        'platform::mtce::params::auth_username':
            hiera_db_records['mtce']['ks_username'],
        'platform::mtce::params::auth_project': auth_project,
        'platform::mtce::params::auth_user_domain': auth_user_domain,
        'platform::mtce::params::auth_project_domain': auth_project_domain
    })
    secure_config.update({
        'platform::mtce::params::auth_pw':
            hiera_db_records['mtce']['ks_password'],
    })

    # nfv
    secure_config.update({
        'nfv::keystone::auth::password':
            hiera_db_records['vim']['ks_password']
    })

    # ironic
    config.update({
        'ironic::db::postgresql::user':
            hiera_db_records['ironic']['username'],
    })
    secure_config.update({
        'ironic::db::postgresql::password':
            hiera_db_records['ironic']['password'],
        'ironic::keystone::auth::password':
            hiera_db_records['ironic']['ks_password'],
        'ironic::keystone::authtoken::password':
            hiera_db_records['ironic']['ks_password'],
        'ironic::api::authtoken::password':
            hiera_db_records['ironic']['ks_password']
    })

    # panko
    config.update({
        'panko::db::postgresql::user':
            hiera_db_records['panko']['username']
    })
    secure_config.update({
        'panko::db::postgresql::password':
            hiera_db_records['panko']['password'],
        'panko::keystone::auth::password':
            hiera_db_records['panko']['ks_password'],
        'panko::keystone::authtoken::password':
            hiera_db_records['panko']['ks_password']
    })


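# Shape of hiera_db_records assumed by get_service_user_config() above
# (sketch with placeholder values; exact keys vary per service, e.g.
# 'heat' also carries 'auth_key' and 'domain_password'):
#
#   hiera_db_records = {
#       'aodh': {'username': 'admin-aodh',     # postgres user (placeholder)
#                'password': 'db-secret',      # postgres password
#                'ks_username': 'aodh',        # keystone user
#                'ks_password': 'ks-secret'},  # keystone password
#       ...
#   }

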
def get_nova_ssh_keys(config, secure_config):
    # retrieve the nova ssh keys
    ssh_config_dir = os.path.join(CONFIG_PATH, 'ssh_config')
    migration_key = os.path.join(ssh_config_dir, 'nova_migration_key')
    system_host_key = os.path.join(ssh_config_dir, 'system_host_key')
    if not os.path.isdir(ssh_config_dir):
        LOG.error("ssh_config directory %s not found" % ssh_config_dir)
        return config

    # Read the public/private migration keys
    with open(migration_key) as fp:
        migration_private = fp.read().strip()
    with open('%s.pub' % migration_key) as fp:
        migration_public = fp.read().strip().split()[1]

    # Read the public/private host keys
    with open(system_host_key) as fp:
        host_private = fp.read().strip()
    with open('%s.pub' % system_host_key) as fp:
        host_header, host_public, _ = fp.read().strip().split()

    # Add our pre-generated system host key to /etc/ssh/ssh_known_hosts
    ssh_keys = {
        'system_host_key': {
            'ensure': 'present',
            'name': '*',
            'host_aliases': [],
            'type': host_header,
            'key': host_public
        }
    }
    migration_key_type = 'ssh-rsa'
    host_key_type = 'ssh-ecdsa'
    secure_config.update({
        'openstack::nova::compute::ssh_keys': ssh_keys,
        'openstack::nova::compute::host_key_type': host_key_type,
        'openstack::nova::compute::host_private_key': host_private,
        'openstack::nova::compute::host_public_key': host_public,
        'openstack::nova::compute::host_public_header': host_header,
        'openstack::nova::compute::migration_key_type': migration_key_type,
        'openstack::nova::compute::migration_private_key':
            migration_private,
        'openstack::nova::compute::migration_public_key':
            migration_public,
    })


def get_openstack_config(packstack_config, config, secure_config):
    horizon_key = packstack_config.get('general',
                                       'CONFIG_HORIZON_SECRET_KEY')
    config.update({
        'openstack::client::credentials::params::keyring_base':
            os.path.dirname(KEYRING_PATH),
        'openstack::client::credentials::params::keyring_directory':
            KEYRING_PATH,
        'openstack::client::credentials::params::keyring_file':
            os.path.join(KEYRING_PATH, '.CREDENTIAL'),
    })
    secure_config.update({
        'openstack::horizon::params::secret_key': horizon_key
    })

    get_nova_ssh_keys(config, secure_config)


def write_hieradata(config, secure_config):
    filename = 'static.yaml'
    secure_filename = 'secure_static.yaml'
    path = constants.HIERADATA_PERMDIR
    # Compute the target paths up front so the exception handlers below can
    # always reference them.
    filepath = os.path.join(path, filename)
    secure_filepath = os.path.join(path, secure_filename)
    try:
        os.makedirs(path)
        fd, tmppath = tempfile.mkstemp(dir=path, prefix=filename,
                                       text=True)
        with open(tmppath, 'w') as f:
            yaml.dump(config, f, default_flow_style=False)
        os.close(fd)
        os.rename(tmppath, filepath)
    except Exception:
        LOG.exception("failed to write config file: %s" % filepath)
        raise

    try:
        fd, tmppath = tempfile.mkstemp(dir=path, prefix=secure_filename,
                                       text=True)
        with open(tmppath, 'w') as f:
            yaml.dump(secure_config, f, default_flow_style=False)
        os.close(fd)
        os.rename(tmppath, secure_filepath)
    except Exception:
        LOG.exception("failed to write secure config: %s" % secure_filepath)
        raise


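# Minimal usage sketch (values are placeholders): the two dicts are
# persisted atomically via a tempfile + rename into the permanent
# hieradata directory:
#
#   config = {'platform::params::software_version': '18.03'}
#   secure_config = {'platform::amqp::params::auth_password': 'example-pw'}
#   write_hieradata(config, secure_config)
#
# producing static.yaml and secure_static.yaml under
# constants.HIERADATA_PERMDIR.

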
def generate_simplex_upgrade_hiera_record(to_release, hiera_db_records,
                                          packstack_config):
    """ Generate static records from the packstack config. """
    LOG.info("Migrating packstack answer file to hiera data")

    config = {}
    secure_config = {}
    get_platform_config(packstack_config,
                        to_release,
                        config,
                        secure_config)
    get_service_user_config(hiera_db_records,
                            packstack_config,
                            config,
                            secure_config)
    get_openstack_config(packstack_config,
                         config,
                         secure_config)

    write_hieradata(config, secure_config)


def generate_upgrade_hiera_record(to_release, hiera_db_records,
                                  packstack_config):
    """ Generate static records from the packstack config. """
    LOG.info("Migrating packstack answer file to hiera data")

    config = {}
    secure_config = {}
    config.update({'platform::params::controller_upgrade': True})
    get_platform_config(packstack_config,
                        to_release,
                        config,
                        secure_config)
    get_service_user_config(hiera_db_records,
                            packstack_config,
                            config,
                            secure_config)
    get_openstack_config(packstack_config,
                         config,
                         secure_config)
    get_upgrade_token(hiera_db_records,
                      packstack_config,
                      config,
                      secure_config)

    write_hieradata(config, secure_config)


def create_simplex_runtime_config(filename):
    """ Create any runtime parameters needed for simplex upgrades """
    config = {}
    # We need to disable nova cellv2 setup as this was done during the data
    # migration
    config.update({'nova::db::sync_api::cellv2_setup': False})
    cutils.create_manifest_runtime_config(filename, config)


def get_packstack_config(software_release):
|
||||
from_config = os.path.join(PLATFORM_PATH, "packstack", software_release,
|
||||
"config")
|
||||
answer_file = os.path.join(from_config, "packstack-answers.txt")
|
||||
|
||||
packstack_config = ConfigParser.RawConfigParser()
|
||||
# Preserve the case in the answer file
|
||||
packstack_config.optionxform = lambda option: option
|
||||
try:
|
||||
packstack_config.read(answer_file)
|
||||
except Exception:
|
||||
LOG.exception("Error parsing answer file %s" % answer_file)
|
||||
raise
|
||||
return packstack_config
|
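# Note: RawConfigParser applies optionxform to option names on both read and
# lookup; the default lowercases them, so iterating the parsed answer file
# would yield keys like 'config_horizon_secret_key'.  The identity lambda
# above keeps the original upper-case names intact when the options are read
# back or copied elsewhere.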

def apply_upgrade_manifest(controller_address):
    """Apply puppet upgrade manifest files."""

    cmd = [
        "/usr/local/bin/puppet-manifest-apply.sh",
        constants.HIERADATA_PERMDIR,
        str(controller_address),
        sysinv_constants.CONTROLLER,
        'upgrade'
    ]

    logfile = "/tmp/apply_manifest.log"
    try:
        with open(logfile, "w") as flog:
            subprocess.check_call(cmd, stdout=flog, stderr=flog)
    except subprocess.CalledProcessError:
        msg = "Failed to execute upgrade manifest"
        print msg
        raise Exception(msg)
885
controllerconfig/controllerconfig/controllerconfig/utils.py
Normal file
885
controllerconfig/controllerconfig/controllerconfig/utils.py
Normal file
@@ -0,0 +1,885 @@
#
# Copyright (c) 2014-2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
Utilities
"""

import collections
import errno
import glob
import os
import shutil
import socket
import subprocess
import time
import sys
import yaml

import netaddr
from tsconfig import tsconfig
from configutilities.common.utils import is_valid_mac
from sysinv.common import constants as sysinv_constants

from common import constants
from common import log

LOOPBACK_IFNAME = 'lo'

NETWORK_SCRIPTS_PATH = '/etc/sysconfig/network-scripts'
NETWORK_SCRIPTS_PREFIX = 'ifcfg'
NETWORK_SCRIPTS_LOOPBACK = '%s-%s' % (NETWORK_SCRIPTS_PREFIX, LOOPBACK_IFNAME)

BOND_MIIMON_DEFAULT = 100


LOG = log.get_logger(__name__)

DEVNULL = open(os.devnull, 'w')


def filesystem_get_free_space(path):
    """ Get Free space of directory """
    statvfs = os.statvfs(path)
    return (statvfs.f_frsize * statvfs.f_bavail)


def directory_get_size(start_dir, regex=None):
    """
    Get total size of a directory tree in bytes
    :param start_dir: top of tree
    :param regex: only include files matching this regex (if provided)
    :return: size in bytes
    """
    total_size = 0
    for dirpath, _, filenames in os.walk(start_dir):
        for filename in filenames:
            if regex is None or regex.match(filename):
                filep = os.path.join(dirpath, filename)
                try:
                    total_size += os.path.getsize(filep)
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        raise e
    return total_size


def print_bytes(sizeof):
    """ Pretty print bytes """
    for size in ['Bytes', 'KB', 'MB', 'GB', 'TB']:
        if abs(sizeof) < 1024.0:
            return "%3.1f %s" % (sizeof, size)
        sizeof /= 1024.0
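# Example: print_bytes(512) returns "512.0 Bytes" and
# print_bytes(3 * 1024 * 1024) returns "3.0 MB"; the running division by
# 1024.0 keeps fractional precision, e.g. print_bytes(1536) returns "1.5 KB".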

def modprobe_drbd():
    """Load DRBD module"""
    try:
        mod_parms = subprocess.check_output(['drbdadm', 'sh-mod-parms'],
                                            close_fds=True).rstrip()
        subprocess.call(["modprobe", "-s", "drbd", mod_parms], stdout=DEVNULL)

    except subprocess.CalledProcessError:
        LOG.error("Failed to load drbd module")
        raise


def drbd_start(resource):
    """Start drbd resource"""
    try:
        subprocess.check_call(["drbdadm", "up", resource],
                              stdout=DEVNULL)

        subprocess.check_call(["drbdadm", "primary", resource],
                              stdout=DEVNULL)

    except subprocess.CalledProcessError:
        LOG.error("Failed to start drbd %s" % resource)
        raise


def drbd_stop(resource):
    """Stop drbd resource"""
    try:
        subprocess.check_call(["drbdadm", "secondary", resource],
                              stdout=DEVNULL)
        # Allow time for demotion to be processed
        time.sleep(1)
        subprocess.check_call(["drbdadm", "down", resource], stdout=DEVNULL)

    except subprocess.CalledProcessError:
        LOG.error("Failed to stop drbd %s" % resource)
        raise


def mount(device, directory):
    """Mount a directory"""
    try:
        subprocess.check_call(["mount", device, directory], stdout=DEVNULL)

    except subprocess.CalledProcessError:
        LOG.error("Failed to mount %s filesystem" % directory)
        raise


def umount(directory):
    """Unmount a directory"""
    try:
        subprocess.check_call(["umount", directory], stdout=DEVNULL)

    except subprocess.CalledProcessError:
        LOG.error("Failed to umount %s filesystem" % directory)
        raise


def start_service(name):
    """ Start a systemd service """
    try:
        subprocess.check_call(["systemctl", "start", name], stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to start %s service" % name)
        raise


def stop_service(name):
    """ Stop a systemd service """
    try:
        subprocess.check_call(["systemctl", "stop", name], stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to stop %s service" % name)
        raise


def restart_service(name):
    """ Restart a systemd service """
    try:
        subprocess.check_call(["systemctl", "restart", name], stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to restart %s service" % name)
        raise


def start_lsb_service(name):
    """ Start a Linux Standard Base service """
    try:
        script = os.path.join("/etc/init.d", name)
        # Call the script with SYSTEMCTL_SKIP_REDIRECT=1 in the environment
        subprocess.check_call([script, "start"],
                              env=dict(os.environ,
                                       **{"SYSTEMCTL_SKIP_REDIRECT": "1"}),
                              stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to start %s service" % name)
        raise


def stop_lsb_service(name):
    """ Stop a Linux Standard Base service """
    try:
        script = os.path.join("/etc/init.d", name)
        # Call the script with SYSTEMCTL_SKIP_REDIRECT=1 in the environment
        subprocess.check_call([script, "stop"],
                              env=dict(os.environ,
                                       **{"SYSTEMCTL_SKIP_REDIRECT": "1"}),
                              stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to stop %s service" % name)
        raise


def restart_lsb_service(name):
    """ Restart a Linux Standard Base service """
    try:
        script = os.path.join("/etc/init.d", name)
        # Call the script with SYSTEMCTL_SKIP_REDIRECT=1 in the environment
        subprocess.check_call([script, "restart"],
                              env=dict(os.environ,
                                       **{"SYSTEMCTL_SKIP_REDIRECT": "1"}),
                              stdout=DEVNULL)
    except subprocess.CalledProcessError:
        LOG.error("Failed to restart %s service" % name)
        raise


def check_sm_service(service, state):
    """ Check whether an SM service has the supplied state """
    try:
        output = subprocess.check_output(["sm-query", "service", service])
        return state in output
    except subprocess.CalledProcessError:
        return False


def wait_sm_service(service, timeout=180):
    """ Check whether an SM service has been enabled.
    :param service: SM service name
    :param timeout: timeout in seconds
    :return True if the service is enabled, False otherwise
    """
    for _ in xrange(timeout):
        if check_sm_service(service, 'enabled-active'):
            return True
        time.sleep(1)
    return False


def is_active(service):
    """ Check whether an SM service is active """
    return check_sm_service(service, 'enabled-active')


def get_controller_hostname():
    """
    Get the hostname for this controller
    :return: controller hostname
    """
    return socket.gethostname()


def get_mate_controller_hostname():
    """
    Get the hostname for the mate controller
    :return: mate controller hostname
    """
    my_hostname = socket.gethostname()
    if my_hostname.endswith('-0'):
        postfix = '-1'
    elif my_hostname.endswith('-1'):
        postfix = '-0'
    else:
        raise Exception("Invalid controller hostname")
    return my_hostname.rsplit('-', 1)[0] + postfix
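# Example: on "controller-0" get_mate_controller_hostname() returns
# "controller-1" and vice versa; any hostname without a -0/-1 suffix raises
# "Invalid controller hostname".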

def get_address_from_hosts_file(hostname):
    """
    Get the IP address of a host from the /etc/hosts file
    :param hostname: hostname to look up
    :return: IP address of host
    """
    hosts = open('/etc/hosts')
    for line in hosts:
        if line.strip() and line.split()[1] == hostname:
            return line.split()[0]
    raise Exception("Hostname %s not found in /etc/hosts" % hostname)


def validate_and_normalize_mac(address):
    """Validate a MAC address and return the normalized form.

    Checks whether the supplied MAC address is formally correct and
    normalizes it to all lower case.

    :param address: MAC address to be validated and normalized.
    :returns: Normalized and validated MAC address.
    :raises: InvalidMAC If the MAC address is not valid.

    """
    if not is_valid_mac(address):
        raise Exception("InvalidMAC %s" % address)
    return address.lower()


def is_valid_ipv4(address):
    """Verify that address represents a valid IPv4 address."""
    try:
        return netaddr.valid_ipv4(address)
    except Exception:
        return False


def is_valid_ipv6(address):
    """Verify that address represents a valid IPv6 address."""
    try:
        return netaddr.valid_ipv6(address)
    except Exception:
        return False


def is_valid_ip(address):
    if not is_valid_ipv4(address):
        return is_valid_ipv6(address)
    return True


def lag_mode_to_str(lag_mode):
    if lag_mode == 0:
        return "balance-rr"
    elif lag_mode == 1:
        return "active-backup"
    elif lag_mode == 2:
        return "balance-xor"
    elif lag_mode == 3:
        return "broadcast"
    elif lag_mode == 4:
        return "802.3ad"
    elif lag_mode == 5:
        return "balance-tlb"
    elif lag_mode == 6:
        return "balance-alb"
    else:
        raise Exception(
            "Invalid LAG_MODE value of %d. Valid values: 0-6" % lag_mode)
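# The integer-to-string mapping above mirrors the Linux bonding driver's
# "mode" parameter; e.g. lag_mode_to_str(4) returns "802.3ad" (LACP link
# aggregation), which is also the only mode given a lacp_rate option below.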

def is_combined_load():
    return 'compute' in tsconfig.subfunctions


def get_system_type():
    if is_combined_load():
        return sysinv_constants.TIS_AIO_BUILD
    return sysinv_constants.TIS_STD_BUILD


def get_security_profile():
    eprofile = sysinv_constants.SYSTEM_SECURITY_PROFILE_EXTENDED
    if tsconfig.security_profile == eprofile:
        return eprofile
    return sysinv_constants.SYSTEM_SECURITY_PROFILE_STANDARD


def is_cpe():
    return get_system_type() == sysinv_constants.TIS_AIO_BUILD


def get_interface_config_common(device, mtu=None):
    """
    Return the interface configuration parameters that are common to all
    device types.
    """
    parameters = collections.OrderedDict()
    parameters['BOOTPROTO'] = 'none'
    parameters['ONBOOT'] = 'yes'
    parameters['DEVICE'] = device
    # Increased to accommodate devices that require more time to
    # complete link auto-negotiation
    parameters['LINKDELAY'] = '20'
    if mtu:
        parameters['MTU'] = mtu
    return parameters


def get_interface_config_ipv4(ip_address, ip_subnet, ip_gateway):
    """
    Return the interface configuration parameters for IPv4 static
    addressing.
    """
    parameters = collections.OrderedDict()
    parameters['IPADDR'] = ip_address
    parameters['NETMASK'] = ip_subnet.netmask
    parameters['BROADCAST'] = ip_subnet.broadcast
    if ip_gateway:
        parameters['GATEWAY'] = ip_gateway
    return parameters


def get_interface_config_ipv6(ip_address, ip_subnet, ip_gateway):
    """
    Return the interface configuration parameters for IPv6 static
    addressing.
    """
    parameters = collections.OrderedDict()
    parameters['IPV6INIT'] = 'yes'
    parameters['IPV6ADDR'] = netaddr.IPNetwork('%s/%u' % (ip_address,
                                                          ip_subnet.prefixlen))
    if ip_gateway:
        parameters['IPV6_DEFAULTGW'] = ip_gateway
    return parameters


def get_interface_config_static(ip_address, ip_subnet, ip_gateway=None):
    """
    Return the interface configuration parameters for IP static
    addressing, choosing IPv4 or IPv6 based on the address version.
    """
    if netaddr.IPAddress(ip_address).version == 4:
        return get_interface_config_ipv4(ip_address, ip_subnet, ip_gateway)
    else:
        return get_interface_config_ipv6(ip_address, ip_subnet, ip_gateway)


def write_interface_config_file(device, parameters):
    """
    Write interface configuration parameters to the network scripts
    directory, in a file named after the supplied device.

    :param device: device name as str
    :param parameters: dict of parameters
    """
    filename = os.path.join(NETWORK_SCRIPTS_PATH, "%s-%s" %
                            (NETWORK_SCRIPTS_PREFIX, device))
    try:
        with open(filename, 'w') as f:
            for parameter, value in parameters.items():
                f.write("%s=%s\n" % (parameter, str(value)))
    except IOError:
        LOG.error("Failed to create file: %s" % filename)
        raise


def write_interface_config_ethernet(device, mtu=None, parameters=None):
    """Write the interface configuration for an Ethernet device."""
    config = get_interface_config_common(device, mtu)
    if parameters:
        config.update(parameters)
    write_interface_config_file(device, config)


def write_interface_config_vlan(device, mtu, parameters=None):
    """Write the interface configuration for a VLAN device."""
    config = get_interface_config_vlan()
    if parameters:
        config.update(parameters)
    write_interface_config_ethernet(device, mtu, parameters=config)


def write_interface_config_slave(device, master, parameters=None):
    """Write the interface configuration for a bond slave device."""
    config = get_interface_config_slave(master)
    if parameters:
        config.update(parameters)
    write_interface_config_ethernet(device, parameters=config)


def write_interface_config_bond(device, mtu, mode, txhash, miimon,
                                member1, member2, parameters=None):
    """Write the interface configuration for a bond master device."""
    config = get_interface_config_bond(mode, txhash, miimon)
    if parameters:
        config.update(parameters)
    write_interface_config_ethernet(device, mtu, parameters=config)

    # create slave device configuration files
    if member1:
        write_interface_config_slave(member1, device)
    if member2:
        write_interface_config_slave(member2, device)


def get_interface_config_vlan():
    """
    Return the interface configuration parameters for a VLAN device.
    """
    parameters = collections.OrderedDict()
    parameters['VLAN'] = 'yes'
    return parameters


def get_interface_config_slave(master):
    """
    Return the interface configuration parameters for bond interface
    slave devices.
    """
    parameters = collections.OrderedDict()
    parameters['MASTER'] = master
    parameters['SLAVE'] = 'yes'
    parameters['PROMISC'] = 'yes'
    return parameters


def get_interface_config_bond(mode, txhash, miimon):
    """
    Return the interface configuration parameters for bond interface
    master devices.
    """
    options = "mode=%s miimon=%s" % (mode, miimon)

    if txhash:
        options += " xmit_hash_policy=%s" % txhash

    if mode == constants.LAG_MODE_8023AD:
        options += " lacp_rate=fast"

    parameters = collections.OrderedDict()
    parameters['BONDING_OPTS'] = "\"%s\"" % options
    return parameters
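# For illustration (interface names hypothetical), write_interface_config_bond(
# 'bond0', 1500, '802.3ad', 'layer2', 100, 'enp0s8', 'enp0s9') would write an
# ifcfg-bond0 of roughly:
#   BOOTPROTO=none
#   ONBOOT=yes
#   DEVICE=bond0
#   LINKDELAY=20
#   MTU=1500
#   BONDING_OPTS="mode=802.3ad miimon=100 xmit_hash_policy=layer2 lacp_rate=fast"
# (the lacp_rate option appears only when the mode matches
# constants.LAG_MODE_8023AD), plus ifcfg-enp0s8 and ifcfg-enp0s9 slave files
# pointing at MASTER=bond0.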

def remove_interface_config_files(stdout=None, stderr=None):
    """
    Remove all existing interface configuration files.
    """
    files = glob.glob1(NETWORK_SCRIPTS_PATH, "%s-*" % NETWORK_SCRIPTS_PREFIX)
    for file in [f for f in files if f != NETWORK_SCRIPTS_LOOPBACK]:
        ifname = file[len(NETWORK_SCRIPTS_PREFIX) + 1:]  # remove prefix
        subprocess.check_call(["ifdown", ifname],
                              stdout=stdout, stderr=stderr)
        os.remove(os.path.join(NETWORK_SCRIPTS_PATH, file))


def remove_interface_ip_address(device, ip_address, ip_subnet,
                                stdout=None, stderr=None):
    """Remove an IP address from an interface"""
    subprocess.check_call(
        ["ip", "addr", "del",
         str(ip_address) + "/" + str(ip_subnet.prefixlen),
         "dev", device],
        stdout=stdout, stderr=stderr)


def send_interface_garp(device, ip_address, stdout=None, stderr=None):
    """Send a GARP message for the supplied address"""
    subprocess.call(
        ["arping", "-c", "3", "-A", "-q", "-I",
         device, str(ip_address)],
        stdout=stdout, stderr=stderr)


def restart_networking(stdout=None, stderr=None):
    """
    Restart networking services.
    """
    # Kill any leftover dhclient process from the boot
    subprocess.call(["pkill", "dhclient"])

    # remove any existing IP addresses
    ifs = glob.glob1('/sys/class/net', "*")
    for i in [i for i in ifs if i != LOOPBACK_IFNAME]:
        subprocess.call(
            ["ip", "link", "set", "dev", i, "down"])
        subprocess.call(
            ["ip", "addr", "flush", "dev", i])
        subprocess.call(
            ["ip", "-6", "addr", "flush", "dev", i])

    subprocess.check_call(["systemctl", "restart", "network"],
                          stdout=stdout, stderr=stderr)


def output_to_dict(output):
    dict = {}
    output = filter(None, output.split('\n'))

    for row in output:
        values = row.split()
        if len(values) != 2:
            raise Exception("The following output does not respect the "
                            "format: %s" % row)
        dict[values[1]] = values[0]

    return dict
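# Example: output_to_dict("10.10.10.3 controller-0\n10.10.10.4 controller-1")
# returns {'controller-0': '10.10.10.3', 'controller-1': '10.10.10.4'}; any
# row that is not exactly two whitespace-separated fields raises an Exception.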

def get_install_uuid():
    """ Get the install uuid from the feed directory. """
    uuid_fname = None
    try:
        uuid_dir = '/www/pages/feed/rel-' + tsconfig.SW_VERSION
        uuid_fname = os.path.join(uuid_dir, 'install_uuid')
        with open(uuid_fname, 'r') as uuid_file:
            install_uuid = uuid_file.readline().rstrip()
    except IOError:
        LOG.error("Failed to open file: %s", uuid_fname)
        raise Exception("Failed to retrieve install UUID")

    return install_uuid


def write_simplex_flag():
    """ Write simplex flag. """
    simplex_flag = "/etc/platform/simplex"
    try:
        open(simplex_flag, 'w')
    except IOError:
        LOG.error("Failed to open file: %s", simplex_flag)
        raise Exception("Failed to write configuration file")


def create_manifest_runtime_config(filename, config):
    """Write the runtime Puppet configuration to a runtime file."""
    if not config:
        return
    try:
        with open(filename, 'w') as f:
            yaml.dump(config, f, default_flow_style=False)
    except Exception:
        LOG.exception("failed to write config file: %s" % filename)
        raise


def apply_manifest(controller_address_0, personality, manifest, hieradata,
                   stdout_progress=False, runtime_filename=None):
    """Apply puppet manifest files."""

    # FIXME(mpeters): remove once manifests and modules are not dependent
    # on checking the primary config condition
    os.environ["INITIAL_CONFIG_PRIMARY"] = "true"

    cmd = [
        "/usr/local/bin/puppet-manifest-apply.sh",
        hieradata,
        str(controller_address_0),
        personality,
        manifest
    ]

    if runtime_filename:
        cmd.append(runtime_filename)

    logfile = "/tmp/apply_manifest.log"
    try:
        with open(logfile, "w") as flog:
            subprocess.check_call(cmd, stdout=flog, stderr=flog)
    except subprocess.CalledProcessError:
        msg = "Failed to execute %s manifest" % manifest
        print msg
        raise Exception(msg)
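# For illustration (address and paths hypothetical), apply_manifest(
# '192.168.204.3', 'controller', 'bootstrap', '/tmp/hieradata') runs:
#   /usr/local/bin/puppet-manifest-apply.sh /tmp/hieradata 192.168.204.3 \
#       controller bootstrap
# with stdout/stderr captured in /tmp/apply_manifest.log.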

def create_system_controller_config(filename):
    """ Create any additional parameters needed for system controller"""
    # set keystone endpoint region name and sysinv keystone authtoken
    # region name
    config = {
        'keystone::endpoint::region':
            sysinv_constants.SYSTEM_CONTROLLER_REGION,
        'sysinv::region_name':
            sysinv_constants.SYSTEM_CONTROLLER_REGION,
    }
    try:
        with open(filename, 'w') as f:
            yaml.dump(config, f, default_flow_style=False)
    except Exception:
        LOG.exception("failed to write config file: %s" % filename)
        raise


def create_static_config():
    cmd = ["/usr/bin/sysinv-puppet",
           "create-static-config",
           constants.HIERADATA_WORKDIR]
    try:
        os.makedirs(constants.HIERADATA_WORKDIR)
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        msg = "Failed to create puppet hiera static config"
        print msg
        raise Exception(msg)


def create_system_config():
    cmd = ["/usr/bin/sysinv-puppet",
           "create-system-config",
           constants.HIERADATA_PERMDIR]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        msg = "Failed to update puppet hiera system config"
        print msg
        raise Exception(msg)


def create_host_config(hostname=None):
    cmd = ["/usr/bin/sysinv-puppet",
           "create-host-config",
           constants.HIERADATA_PERMDIR]
    if hostname:
        cmd.append(hostname)

    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        msg = "Failed to update puppet hiera host config"
        print msg
        raise Exception(msg)


def shutdown_file_systems():
    """ Shutdown filesystems """

    umount("/var/lib/postgresql")
    drbd_stop("drbd-pgsql")

    umount("/opt/platform")
    drbd_stop("drbd-platform")

    umount("/opt/cgcs")
    drbd_stop("drbd-cgcs")

    umount("/opt/extension")
    drbd_stop("drbd-extension")

    if os.path.exists("/opt/patch-vault"):
        umount("/opt/patch-vault")
        drbd_stop("drbd-patch-vault")


def persist_config():
    """Copy temporary config files into new DRBD filesystem"""

    # Persist temporary keyring
    try:
        if os.path.isdir(constants.KEYRING_WORKDIR):
            shutil.move(constants.KEYRING_WORKDIR, constants.KEYRING_PERMDIR)
    except IOError:
        LOG.error("Failed to persist temporary keyring")
        raise Exception("Failed to persist temporary keyring")

    # Move puppet working files into permanent directory
    try:
        # ensure parent directory is present
        subprocess.call(["mkdir", "-p", tsconfig.PUPPET_PATH])

        # move hiera data to puppet directory
        if os.path.isdir(constants.HIERADATA_WORKDIR):
            subprocess.check_call(["mv", constants.HIERADATA_WORKDIR,
                                   tsconfig.PUPPET_PATH])
    except subprocess.CalledProcessError:
        LOG.error("Failed to persist puppet config files")
        raise Exception("Failed to persist puppet config files")

    # Move config working files into permanent directory
    try:
        # ensure parent directory is present
        subprocess.call(["mkdir", "-p",
                         os.path.dirname(constants.CONFIG_PERMDIR)])

        if os.path.isdir(constants.CONFIG_WORKDIR):
            # Remove destination directory in case it was created previously
            subprocess.call(["rm", "-rf", constants.CONFIG_PERMDIR])

            # move working data to config directory
            subprocess.check_call(["mv", constants.CONFIG_WORKDIR,
                                   constants.CONFIG_PERMDIR])
    except subprocess.CalledProcessError:
        LOG.error("Failed to persist config files")
        raise Exception("Failed to persist config files")

    # Copy postgres config files for mate
    try:
        subprocess.check_call(["mkdir",
                               constants.CONFIG_PERMDIR + "/postgresql"])
    except subprocess.CalledProcessError:
        LOG.error("Failed to create postgresql dir")
        raise Exception("Failed to persist config files")

    try:
        for f in glob.glob("/etc/postgresql/*.conf"):
            subprocess.check_call([
                "cp", "-p", f, constants.CONFIG_PERMDIR + "/postgresql/"])
    except IOError:
        LOG.error("Failed to persist postgresql config files")
        raise Exception("Failed to persist config files")

    # Set up replicated directory for PXE config files
    try:
        subprocess.check_call([
            "mkdir", "-p", constants.CONFIG_PERMDIR + "/pxelinux.cfg"])
    except subprocess.CalledProcessError:
        LOG.error("Failed to create persistent pxelinux.cfg directory")
        raise Exception("Failed to persist config files")

    try:
        subprocess.check_call(["ln", "-s", constants.CONFIG_PERMDIR +
                               "/pxelinux.cfg", "/pxeboot/pxelinux.cfg"])
    except subprocess.CalledProcessError:
        LOG.error("Failed to create pxelinux.cfg symlink")
        raise Exception("Failed to persist config files")

    # Copy branding tarball for mate
    if os.listdir('/opt/branding'):
        try:
            subprocess.check_call([
                "mkdir", constants.CONFIG_PERMDIR + "/branding"])
        except subprocess.CalledProcessError:
            LOG.error("Failed to create branding dir")
            raise Exception("Failed to persist config files")

        try:
            if os.path.isfile(
                    '/opt/branding/horizon-region-exclusions.csv'):
                subprocess.check_call(
                    ["cp", "-p",
                     '/opt/branding/horizon-region-exclusions.csv',
                     constants.CONFIG_PERMDIR + "/branding/"])
        except IOError:
            LOG.error("Failed to persist horizon exclusion file")
            raise Exception("Failed to persist config files")

        try:
            for f in glob.glob("/opt/branding/*.tgz"):
                subprocess.check_call([
                    "cp", "-p", f, constants.CONFIG_PERMDIR + "/branding/"])
                break
        except IOError:
            LOG.error("Failed to persist branding config files")
            raise Exception("Failed to persist config files")


def apply_banner_customization():
    """ Apply and install banners provided by the user by executing
    /usr/sbin/apply_banner_customization
    """
    logfile = "/tmp/apply_banner_customization.log"
    try:
        with open(logfile, "w") as blog:
            subprocess.check_call(["/usr/sbin/apply_banner_customization",
                                   "/opt/banner"],
                                  stdout=blog, stderr=blog)
    except subprocess.CalledProcessError:
        error_text = "Failed to apply banner customization"
        print "%s; see %s for detail" % (error_text, logfile)


def mtce_restart():
    """Restart maintenance processes to handle interface changes"""
    restart_service("mtcClient")
    restart_service("hbsClient")
    restart_service("rmon")
    restart_service("pmon")


def mark_config_complete():
    """Signal initial configuration has been completed"""
    try:
        subprocess.check_call(["touch",
                               constants.INITIAL_CONFIG_COMPLETE_FILE])
        subprocess.call(["rm", "-rf", constants.KEYRING_WORKDIR])

    except subprocess.CalledProcessError:
        LOG.error("Failed to mark initial config complete")
        raise Exception("Failed to mark initial config complete")


def configure_hostname(hostname):
    """Configure hostname for this host."""

    hostname_file = '/etc/hostname'
    try:
        with open(hostname_file, 'w') as f:
            f.write(hostname + "\n")
    except IOError:
        LOG.error("Failed to update file: %s", hostname_file)
        raise Exception("Failed to configure hostname")

    try:
        subprocess.check_call(["hostname", hostname])
    except subprocess.CalledProcessError:
        LOG.error("Failed to update hostname %s" % hostname)
        raise Exception("Failed to configure hostname")


def progress(steps, step, action, result, newline=False):
    """Display progress."""
    if steps == 0:
        hashes = 45
        percentage = 100
    else:
        hashes = (step * 45) / steps
        percentage = (step * 100) / steps

    sys.stdout.write("\rStep {0:{width}d} of {1:d} [{2:45s}] "
                     "[{3:d}%]".format(min(step, steps), steps,
                                       '#' * hashes, percentage,
                                       width=len(str(steps))))
    if step == steps or newline:
        sys.stdout.write("\n")
    sys.stdout.flush()
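# Example: progress(4, 2, 'applying manifest', 'in progress') redraws the
# current line as "Step 2 of 4 [...] [50%]" with 22 of the 45 hash slots
# filled; passing newline=True forces a line break before completion.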

def touch(fname):
    with open(fname, 'a'):
        os.utime(fname, None)
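# Like the shell "touch": open(..., 'a') creates the file if it is missing,
# and os.utime(fname, None) resets the access/modification times to now.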
217
controllerconfig/controllerconfig/pylint.rc
Executable file
217
controllerconfig/controllerconfig/pylint.rc
Executable file
@@ -0,0 +1,217 @@
[MASTER]
# Specify a configuration file.
rcfile=pylint.rc

# Python code to execute, usually for sys.path manipulation such as pygtk.require().
#init-hook=

# Add files or directories to the blacklist. They should be base names, not paths.
ignore=tests

# Pickle collected data for later comparisons.
persistent=yes

# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=


[MESSAGES CONTROL]
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifiers separated by comma (,) or put this option
# multiple times.
#enable=

# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this option
# multiple times (only on the command line, not in the configuration file where
# it should appear only once).
# https://pylint.readthedocs.io/en/latest/user_guide/output.html#source-code-analysis-section
# We are disabling (C)onvention
# We are disabling (R)efactor
# We are probably disabling (W)arning
# We are not disabling (F)atal, (E)rror
disable=C, R, W


[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html
output-format=text

# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]".
files-output=no

# Tells whether to display a full report or only the messages
reports=no

# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors, warnings, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
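# For example, 2 errors and 3 warnings in 100 statements score
# 10.0 - ((5*2 + 3 + 0 + 0) / 100.0) * 10 = 8.7.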

[SIMILARITIES]
# Minimum lines number of a similarity.
min-similarity-lines=4

# Ignore comments when computing similarities.
ignore-comments=yes

# Ignore docstrings when computing similarities.
ignore-docstrings=yes


[FORMAT]
# Maximum number of characters on a single line.
max-line-length=85

# Maximum number of lines in a module
max-module-lines=1000

# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1 tab).
indent-string='    '


[TYPECHECK]
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes

# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set).
ignored-classes=SQLObject

# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E0201 when accessed. Python regular
# expressions are accepted.
generated-members=REQUEST,acl_users,aq_parent


[BASIC]
# List of builtins function names that should not be used, separated by a comma
bad-functions=map,filter,apply,input

# Regular expression which should only match correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

# Regular expression which should only match correct module level names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$

# Regular expression which should only match correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$

# Regular expression which should only match correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match correct method names
method-rgx=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match correct list comprehension /
# generator expression variable names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$

# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata

# Regular expression which should only match functions or classes name which do
# not require a docstring
no-docstring-rgx=__.*__


[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO


[VARIABLES]
# Tells whether we should check for unused import in __init__ files.
init-import=no

# A regular expression matching the beginning of the name of dummy variables
# (i.e. not used).
dummy-variables-rgx=_|dummy

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=


[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,string,TERMIOS,Bastion,rexec

# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=

# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=

# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=


[DESIGN]
# Maximum number of arguments for function / method
max-args=5

# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*

# Maximum number of locals for function / method body
max-locals=15

# Maximum number of return / yield for function / method body
max-returns=6

# Maximum number of branch for function / method body
max-branchs=12

# Maximum number of statements in function / method body
max-statements=50

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Minimum number of public methods for a class (see R0903).
min-public-methods=2

# Maximum number of public methods for a class (see R0904).
max-public-methods=20


[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls


[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception
11
controllerconfig/controllerconfig/requirements.txt
Normal file
11
controllerconfig/controllerconfig/requirements.txt
Normal file
@@ -0,0 +1,11 @@
# Getting values from https://github.com/openstack/requirements/blob/stable/pike/global-requirements.txt
netaddr>=0.7.13,!=0.7.16  # BSD
keyring>=5.5.1  # MIT/PSF
pyudev  # LGPLv2.1+
psycopg2>=2.5  # LGPL/ZPL
six>=1.9.0  # MIT
iso8601>=0.1.11  # MIT
netifaces>=0.10.4  # MIT
pycrypto>=2.6  # Public Domain
oslo.utils>=3.20.0  # Apache-2.0
PyYAML>=3.1.0
@@ -0,0 +1,88 @@
#!/usr/bin/env python
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Sample upgrade migration script. Important notes:
# - The script should exit 0 on success and exit non-0 on fail. Note that
#   failing will result in the upgrade of controller-1 failing, so don't fail
#   unless it is a real failure.
# - Your logic should only check the FROM_RELEASE to determine if migration is
#   required. Checking the TO_RELEASE is dangerous because we do not know
#   the exact value the TO_RELEASE will hold until we reach final compile.
#   The TO_RELEASE is here for logging reasons and in case of some unexpected
#   emergency where we may need it.
# - The script will be passed one of the following actions:
#     start: Prepare for upgrade on release N side. Called during
#            "system upgrade-start".
#     migrate: Perform data migration on release N+1 side. Called while
#              controller-1 is performing its upgrade. At this point in the
#              upgrade of controller-1, the databases have been migrated from
#              release N to release N+1 (data migration scripts have been
#              run). Postgres is running and is using the release N+1
#              databases. The platform filesystem is mounted at /opt/platform
#              and has data populated for both release N and release N+1.
# - We do the migration work here in the python script. This is the format we
#   use when we need to connect to the postgres database. This format makes
#   manipulating the data easier and gives more details when error handling.
# - The migration scripts are executed in alphabetical order. Please prefix
#   your script name with a two digit number (e.g. 01-my-script-name.sh). The
#   order of migrations usually shouldn't matter, so pick an unused number
#   near the middle of the range.

import sys

import psycopg2
from controllerconfig.common import log
from psycopg2.extras import RealDictCursor

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None  # noqa
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]  # noqa
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print ("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    log.configure()

    if from_release == "17.06" and action == "migrate":
        try:
            LOG.info("performing sample migration from release %s to %s with "
                     "action: %s" % (from_release, to_release, action))
            do_migration_work()
        except Exception as ex:
            LOG.exception(ex)
            print ex
            return 1


# Rename this function to something relevant
def do_migration_work():
    """ This is a sample upgrade action."""
    conn = psycopg2.connect("dbname='sysinv' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("select * from i_system;")
            row = cur.fetchone()
            if row is None:
                LOG.error("Failed to fetch i_system data")
                # A bare "raise" is only valid inside an except block, so
                # raise an explicit exception here.
                raise Exception("Failed to fetch i_system data")
            LOG.info("Got system version: %s during sample migration script"
                     % row.get('software_version'))


if __name__ == "__main__":
    sys.exit(main())
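# Invocation sketch (release values hypothetical): the upgrade framework runs
# these scripts as
#   ./NN-sample-migration-script.py 17.06 18.03 migrate
# passing FROM_RELEASE, TO_RELEASE and ACTION as positional arguments.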
@@ -0,0 +1,57 @@
#!/bin/bash
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# Sample upgrade migration script. Important notes:
# - The script should exit 0 on success and exit non-0 on fail. Note that
#   failing will result in the upgrade of controller-1 failing, so don't fail
#   unless it is a real failure.
# - Your logic should only check the FROM_RELEASE to determine if migration is
#   required. Checking the TO_RELEASE is dangerous because we do not know
#   the exact value the TO_RELEASE will hold until we reach final compile.
#   The TO_RELEASE is here for logging reasons and in case of some unexpected
#   emergency where we may need it.
# - The script will be passed one of the following actions:
#     start: Prepare for upgrade on release N side. Called during
#            "system upgrade-start".
#     migrate: Perform data migration on release N+1 side. Called while
#              controller-1 is performing its upgrade. At this point in the
#              upgrade of controller-1, the databases have been migrated from
#              release N to release N+1 (data migration scripts have been
#              run). Postgres is running and is using the release N+1
#              databases. The platform filesystem is mounted at /opt/platform
#              and has data populated for both release N and release N+1.
# - You can do the migration work here in a bash script. There are other
#   options:
#   - Invoke another binary from this script to do the migration work.
#   - Instead of using a bash script, create a symlink in this directory, to
#     a binary of your choice.
# - The migration scripts are executed in alphabetical order. Please prefix
#   your script name with a two digit number (e.g. 01-my-script-name.sh). The
#   order of migrations usually shouldn't matter, so pick an unused number
#   near the middle of the range.

NAME=$(basename $0)

# The migration scripts are passed these parameters:
FROM_RELEASE=$1
TO_RELEASE=$2
ACTION=$3

# This will log to /var/log/platform.log
function log {
    logger -p local1.info $1
}

log "$NAME: performing sample migration from release $FROM_RELEASE to $TO_RELEASE with action $ACTION"


if [ "$FROM_RELEASE" == "17.06" ] && [ "$ACTION" == "migrate" ]
then
    log "Sample migration from release $FROM_RELEASE"
fi

exit 0
@@ -0,0 +1,22 @@
#!/bin/bash
#
# Copyright (c) 2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# Configuration "goenabled" check.
# If configuration failed, prevent the node from going enabled.

NAME=$(basename $0)
VOLATILE_CONFIG_FAIL="/var/run/.config_fail"

logfile=/var/log/patching.log

if [ -f $VOLATILE_CONFIG_FAIL ]
then
    logger "$NAME: Node configuration has failed. Failing goenabled check."
    exit 1
fi

exit 0
461
controllerconfig/controllerconfig/scripts/controller_config
Executable file
461
controllerconfig/controllerconfig/scripts/controller_config
Executable file
@@ -0,0 +1,461 @@
#!/bin/bash
#
# Copyright (c) 2013-2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

#
# chkconfig: 2345 80 80
#

### BEGIN INIT INFO
# Provides:          controller_config
# Short-Description: Controller node config agent
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
### END INIT INFO

. /usr/bin/tsconfig
. /etc/platform/platform.conf

PLATFORM_DIR=/opt/platform
VAULT_DIR=$PLATFORM_DIR/.keyring/${SW_VERSION}/python_keyring
CONFIG_DIR=$CONFIG_PATH
VOLATILE_CONFIG_PASS="/var/run/.config_pass"
VOLATILE_CONFIG_FAIL="/var/run/.config_fail"
COMPLETED="/etc/platform/.initial_config_complete"
INITIAL_MANIFEST_APPLY_FAILED="/etc/platform/.initial_manifest_apply_failed"
DELAY_SEC=70
CONTROLLER_UPGRADE_STARTED_FILE="$(basename ${CONTROLLER_UPGRADE_STARTED_FLAG})"
PUPPET_DOWNLOAD=/tmp/puppet.download
IMA_POLICY=/etc/ima.policy

fatal_error()
{
    cat <<EOF
*****************************************************
*****************************************************
$1
*****************************************************
*****************************************************
EOF
    # Don't set the .config_fail flag if the config
    # complete flag is not set first.
    if [ -e $COMPLETED ]
    then
        touch $VOLATILE_CONFIG_FAIL
    fi

    if [ -e /usr/bin/logger ]
    then
        logger "Error: $1"
    fi

    echo "Pausing for 5 seconds..."
    sleep 5

    if [ -d ${PUPPET_DOWNLOAD} ]; then
        rm -rf ${PUPPET_DOWNLOAD}
    fi

    exit 1
}

exit_error()
{
    cat <<EOF
*****************************************************
*****************************************************
$1
*****************************************************
*****************************************************
EOF
    if [ -e /usr/bin/logger ]
    then
        logger "Exit error: $1"
    fi

    echo "Pausing for 5 seconds..."
    sleep 5
    exit 1
}

get_ip()
{
    local host=$1

    # Check /etc/hosts for the hostname
    local ipaddr=$(cat /etc/hosts | awk -v host=$host '$2 == host {print $1}')
    if [ -n "$ipaddr" ]
    then
        echo $ipaddr
        return
    fi

    # Try the DNS query
    # Because dnsmasq can resolve a hostname to both an IPv4 and an IPv6
    # address in certain situations, and the last address returned is the
    # IPv6 one (the management address), which is preferred over the IPv4
    # pxeboot address, take the last address only.
    ipaddr=$(dig +short ANY $host|tail -1)
    if [[ "$ipaddr" =~ ^[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*$ ]]
    then
        echo $ipaddr
        return
    fi
    if [[ "$ipaddr" =~ ^[0-9a-z]*\:[0-9a-z\:]*$ ]]
    then
        echo $ipaddr
        return
    fi
}
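# Example (address hypothetical): "get_ip controller-0" prints the matching
# /etc/hosts entry, e.g. 192.168.204.3; the dig fallback only runs when the
# hostname is absent from /etc/hosts, and an unmatched result prints nothing.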
||||
|
||||
mount_platform_dir()
|
||||
{
|
||||
if [ -e "${PLATFORM_SIMPLEX_FLAG}" ]
|
||||
then
|
||||
systemctl start drbd.service
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
fatal_error "Unable to start drbd.service"
|
||||
fi
|
||||
|
||||
# The drbd-platform FS may already be "up", so we won't check for errors
|
||||
drbdadm up drbd-platform 2>/dev/null
|
||||
|
||||
drbdadm primary drbd-platform
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
drbdadm down drbd-platform
|
||||
systemctl stop drbd.service
|
||||
fatal_error "Failed to make drbd-platform primary"
|
||||
fi
|
||||
|
||||
mount $PLATFORM_DIR
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
drbdadm secondary drbd-platform
|
||||
drbdadm down drbd-platform
|
||||
systemctl stop drbd.service
|
||||
fatal_error "Unable to mount $PLATFORM_DIR"
|
||||
fi
|
||||
else
|
||||
mkdir -p $PLATFORM_DIR
|
||||
nfs-mount controller-platform-nfs:$PLATFORM_DIR $PLATFORM_DIR
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
fatal_error "Unable to mount $PLATFORM_DIR"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
umount_platform_dir()
|
||||
{
|
||||
if [ -e "${PLATFORM_SIMPLEX_FLAG}" ]
|
||||
then
|
||||
umount $PLATFORM_DIR
|
||||
drbdadm secondary drbd-platform
|
||||
drbdadm down drbd-platform
|
||||
systemctl stop drbd.service
|
||||
else
|
||||
umount $PLATFORM_DIR
|
||||
fi
|
||||
}
|
||||
|
||||
start()
|
||||
{
|
||||
if [ -f /etc/platform/installation_failed ] ; then
|
||||
fatal_error "/etc/platform/installation_failed flag is set. Aborting."
|
||||
fi
|
||||
|
||||
###### SECURITY PROFILE (EXTENDED) #################
|
||||
# If we are in Extended Security Profile mode, #
|
||||
# then before anything else, we need to load the #
|
||||
# IMA Policy so that all configuration operations #
|
||||
# can be measured and appraised #
|
||||
#####################################################
|
||||
if [ "${security_profile}" = "extended" ]
|
||||
then
|
||||
IMA_LOAD_PATH=/sys/kernel/security/ima/policy
|
||||
if [ -f ${IMA_LOAD_PATH} ]; then
|
||||
echo "Loading IMA Policy"
|
||||
# Best effort operation only, if policy is
|
||||
# malformed then audit logs will indicate this,
|
||||
# and customer will need to load policy manually
|
||||
cat $IMA_POLICY > ${IMA_LOAD_PATH}
|
||||
[ $? -eq 0 ] || logger -t $0 -p warn "IMA Policy could not be loaded, see audit.log"
|
||||
else
|
||||
# the securityfs mount should have been
|
||||
# created had the IMA module loaded properly.
|
||||
# This is therefore a fatal error
|
||||
fatal_error "${IMA_LOAD_PATH} not available. Aborting."
|
||||
fi
|
||||
fi
|
||||
|
||||
# If hostname is undefined or localhost, something is wrong
|
||||
HOST=$(hostname)
|
||||
if [ -z "$HOST" -o "$HOST" = "localhost" ]
|
||||
then
|
||||
fatal_error "Host undefined. Unable to perform config"
|
||||
fi
|
||||
|
||||
if [ $HOST != "controller-0" -a $HOST != "controller-1" ]
|
||||
then
|
||||
fatal_error "Invalid hostname for controller node: $HOST"
|
||||
fi
|
||||
|
||||
IPADDR=$(get_ip $HOST)
|
||||
if [ -z "$IPADDR" ]
|
||||
then
|
||||
fatal_error "Unable to get IP from host: $HOST"
|
||||
fi
|
||||
|
||||
if [ -f ${INITIAL_MANIFEST_APPLY_FAILED} ]
|
||||
then
|
||||
fatal_error "Initial manifest application failed; Host must be re-installed."
|
||||
fi
|
||||
|
||||
echo "Configuring controller node..."
|
||||
|
||||
if [ ! -e "${PLATFORM_SIMPLEX_FLAG}" ]
|
||||
then
|
||||
# try for DELAY_SEC seconds to reach controller-platform-nfs
|
||||
/usr/local/bin/connectivity_test -t ${DELAY_SEC} -i ${IPADDR} controller-platform-nfs
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
# 'controller-platform-nfs' is not available, just exit
|
||||
exit_error "Unable to contact active controller (controller-platform-nfs). Boot will continue."
|
||||
fi
|
||||
|
||||
# Check whether our installed load matches the active controller
|
||||
CONTROLLER_UUID=`curl -sf http://controller/feed/rel-${SW_VERSION}/install_uuid`
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
fatal_error "Unable to retrieve installation uuid from active controller"
|
||||
fi
|
||||
INSTALL_UUID=`cat /www/pages/feed/rel-${SW_VERSION}/install_uuid`
|
||||
if [ "$INSTALL_UUID" != "$CONTROLLER_UUID" ]
|
||||
then
|
||||
fatal_error "This node is running a different load than the active controller and must be reinstalled"
|
||||
fi
|
||||
fi

    mount_platform_dir

    # Cleanup from any previous config runs
    if [ -e $VOLATILE_CONFIG_FAIL ]
    then
        rm -f $VOLATILE_CONFIG_FAIL
    fi
    if [ -e $VOLATILE_CONFIG_PASS ]
    then
        rm -f $VOLATILE_CONFIG_PASS
    fi

    if [ -e $CONFIG_DIR/server-cert.pem ]
    then
        cp $CONFIG_DIR/server-cert.pem /etc/ssl/private/server-cert.pem
        if [ $? -ne 0 ]
        then
            fatal_error "Unable to copy $CONFIG_DIR/server-cert.pem"
        fi
    fi

    if [ -e $CONFIG_DIR/iptables.rules ]
    then
        cp $CONFIG_DIR/iptables.rules /etc/platform/iptables.rules
        if [ $? -ne 0 ]
        then
            fatal_error "Unable to copy $CONFIG_DIR/iptables.rules"
        fi
    fi

    # Keep the /opt/branding directory to preserve any new files and
    # explicitly copy over any required files
    if [ -e $CONFIG_DIR/branding/horizon-region-exclusions.csv ]
    then
        cp $CONFIG_DIR/branding/horizon-region-exclusions.csv /opt/branding
    fi
    rm -rf /opt/branding/*.tgz
    cp $CONFIG_DIR/branding/*.tgz /opt/branding 2>/dev/null

    # banner customization always returns 0, success:
    /usr/sbin/install_banner_customization

    cp $CONFIG_DIR/hosts /etc/hosts
    if [ $? -ne 0 ]
    then
        fatal_error "Unable to copy $CONFIG_DIR/hosts"
    fi

    hostname > /etc/hostname
    if [ $? -ne 0 ]
    then
        fatal_error "Unable to write /etc/hostname"
    fi

    # Our PXE config files are located in the config directory. Create a
    # symbolic link if it is not already created.
    if [ ! -L /pxeboot/pxelinux.cfg ]
    then
        ln -sf $CONFIG_DIR/pxelinux.cfg /pxeboot/pxelinux.cfg
    fi

    # Upgrade related checks
    if [ ! -e "${PLATFORM_SIMPLEX_FLAG}" ]
    then
        VOLATILE_ETC_PLATFORM_MOUNT=$VOLATILE_PATH/etc_platform
        mkdir $VOLATILE_ETC_PLATFORM_MOUNT
        nfs-mount controller-platform-nfs:/etc/platform $VOLATILE_ETC_PLATFORM_MOUNT
        if [ $? -eq 0 ]
        then
            # Generate Rollback flag if necessary
            if [ -f $VOLATILE_ETC_PLATFORM_MOUNT/.upgrade_rollback ]
            then
                touch $UPGRADE_ROLLBACK_FLAG
            fi
            # Check whether we are upgrading controller-1.
            UPGRADE_CONTROLLER=0
            if [ -f $VOLATILE_ETC_PLATFORM_MOUNT/.upgrade_controller_1 ]
            then
                if [ -f $VOLATILE_ETC_PLATFORM_MOUNT/.upgrade_controller_1_fail ]
                then
                    exit_error "Controller-1 upgrade previously failed. Upgrade must be aborted."
                fi

                if [ -f $VOLATILE_ETC_PLATFORM_MOUNT/$CONTROLLER_UPGRADE_STARTED_FILE ]
                then
                    touch $VOLATILE_ETC_PLATFORM_MOUNT/.upgrade_controller_1_fail
                    exit_error "Controller-1 data migration already in progress. Upgrade must be aborted."
                fi

                touch $VOLATILE_ETC_PLATFORM_MOUNT/$CONTROLLER_UPGRADE_STARTED_FILE

                UPGRADE_CONTROLLER=1
            fi
            # Check whether software versions match on the two controllers
            MATE_SW_VERSION=`grep sw_version $VOLATILE_ETC_PLATFORM_MOUNT/platform.conf | awk -F\= '{print $2}'`
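            # The grep/awk pipeline above extracts the value from the
            # "sw_version=<version>" line (e.g. sw_version=18.03, a
            # hypothetical value) in the mate controller's platform.conf.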
            if [ $SW_VERSION != $MATE_SW_VERSION ]
            then
                echo "Controllers are running different software versions"
                echo "SW_VERSION: $SW_VERSION MATE_SW_VERSION: $MATE_SW_VERSION"
                # This environment variable allows puppet manifests to behave
                # differently when the controller software versions do not match.
                export CONTROLLER_SW_VERSIONS_MISMATCH=true
            fi
            umount $VOLATILE_ETC_PLATFORM_MOUNT
            rmdir $VOLATILE_ETC_PLATFORM_MOUNT

            if [ $UPGRADE_CONTROLLER -eq 1 ]
            then
                #R3 Removed
                umount_platform_dir
                echo "Upgrading controller-1. This will take some time..."
                /usr/bin/upgrade_controller $MATE_SW_VERSION $SW_VERSION
                exit $?
            fi
        else
            umount_platform_dir
            rmdir $VOLATILE_ETC_PLATFORM_MOUNT
            fatal_error "Unable to mount /etc/platform"
        fi
    fi

    mkdir -p /etc/postgresql/
    cp -p $CONFIG_DIR/postgresql/*.conf /etc/postgresql/
    if [ $? -ne 0 ]
    then
        fatal_error "Unable to copy .conf files to /etc/postgresql"
    fi

    # Copy the hieradata and the staging secured vault

    rm -rf ${PUPPET_DOWNLOAD}
    cp -R $PUPPET_PATH ${PUPPET_DOWNLOAD}
    if [ $? -ne 0 ]
    then
        umount_platform_dir
        fatal_error "Failed to copy puppet directory $PUPPET_PATH"
    fi

    cp -RL $VAULT_DIR /tmp
    if [ $? -ne 0 ]
    then
        umount_platform_dir
        fatal_error "Failed to copy vault directory $VAULT_DIR"
    fi

    # Unmount
    umount_platform_dir

    # Apply the puppet manifest
    HOST_HIERA=${PUPPET_DOWNLOAD}/hieradata/${IPADDR}.yaml
    if [ -f ${HOST_HIERA} ]; then
        echo "$0: Running puppet manifest apply"
        puppet-manifest-apply.sh ${PUPPET_DOWNLOAD}/hieradata ${IPADDR} controller
        RC=$?
        if [ $RC -ne 0 ]
        then
            if [ ! -f ${COMPLETED} ]
            then
                # The initial manifest application failed. We need to remember
                # this so we don't attempt to reapply the manifests after a
                # reboot. Many of our manifests do not support being run more
                # than once with the $COMPLETED flag unset.
                touch $INITIAL_MANIFEST_APPLY_FAILED
                fatal_error "Failed to run the puppet manifest (RC:$RC); Host must be re-installed."
            else
                fatal_error "Failed to run the puppet manifest (RC:$RC)"
            fi
        fi
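        # Note: the INITIAL_MANIFEST_APPLY_FAILED flag written above is the
        # same flag checked at the top of start(), so once an initial apply
        # fails the node refuses to reconfigure until it is re-installed.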
    else
        fatal_error "Host configuration not yet available for this node ($(hostname)=${IPADDR}); aborting configuration."
    fi

    # Cleanup ${PUPPET_DOWNLOAD} and the secured vault
    rm -rf ${PUPPET_DOWNLOAD}
    rm -rf /tmp/python_keyring

    if [ ! -e "${PLATFORM_SIMPLEX_FLAG}" ]
    then
        # The second controller is now configured - remove the simplex flag on
        # the mate controller.
        mkdir /tmp/mateflag
        nfs-mount controller-platform-nfs:/etc/platform /tmp/mateflag
        if [ $? -eq 0 ]
        then
            rm -f /tmp/mateflag/simplex
            umount /tmp/mateflag
            rmdir /tmp/mateflag
        else
            echo "Unable to mount /etc/platform"
        fi
    fi

    touch $COMPLETED
    touch $VOLATILE_CONFIG_PASS
}

stop ()
{
    # Nothing to do
    return
}

case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    *)
        echo "Usage: $0 {start|stop}"
        exit 1
        ;;
esac

exit 0

@@ -0,0 +1,17 @@
[Unit]
Description=controllerconfig service
After=syslog.target network.target remote-fs.target sw-patch.service sysinv-agent.service
After=network-online.target
Before=config.service

[Service]
Type=simple
ExecStart=/etc/init.d/controller_config start
ExecStop=
ExecReload=
StandardOutput=syslog+console
StandardError=syslog+console
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
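
# Assuming this unit is installed as controllerconfig.service, it is enabled
# with "systemctl enable controllerconfig" and pulled in by multi-user.target;
# RemainAfterExit=yes keeps it reported as active after the init script exits.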

@@ -0,0 +1,42 @@
#! /bin/bash
########################################################################
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
########################################################################

NOVAOPENRC="/etc/nova/openrc"
if [ -e ${NOVAOPENRC} ] ; then
    source ${NOVAOPENRC} &>/dev/null
else
    echo "Admin credentials not found"
    exit
fi

# Delete all the servers
echo "Deleting all servers [`openstack server list --all`]"
found=false
for i in $(openstack server list --all -c ID -f value); do
    openstack server delete $i &> /dev/null
    echo $i deleted
    found=true
done
if $found; then
    sleep 30
fi
echo "Deleted all servers [`openstack server list --all`]"
# Delete all the volumes
echo "Deleting all volumes [`openstack volume list --all`]"
found=false
for i in $(openstack volume list --all -c ID -f value); do
    openstack volume delete $i &> /dev/null
    echo $i deleted
    found=true
done
if $found; then
    sleep 30
fi
echo "Deleted all volumes [`openstack volume list --all`]"

321
controllerconfig/controllerconfig/scripts/install_clone.py
Executable file
@@ -0,0 +1,321 @@
#!/usr/bin/env python
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import os
import time
import uuid
import shutil
import tempfile
import subprocess
import ConfigParser

import tsconfig.tsconfig as tsconfig
from controllerconfig.common import log
import controllerconfig.utils as utils
import controllerconfig.sysinv_api as sysinv
import controllerconfig.backup_restore as backup_restore
import controllerconfig.clone as clone
from controllerconfig.common.exceptions import CloneFail
from sysinv.common import constants as si_const

LOG = log.get_logger("cloning")
DEVNULL = open(os.devnull, 'w')
INI_FILE = os.path.join("/", clone.CLONE_ARCHIVE_DIR, clone.CLONE_ISO_INI)
SECTION = "clone_iso"
parser = ConfigParser.SafeConfigParser()
clone_name = ""


def console_log(str, err=False):
    """ Log onto console also """
    if err:
        str = "Failed to install clone-image. " + str
        LOG.error(str)
    else:
        LOG.info(str)
    print("\n" + str)


def persist(key, value):
    """ Write into ini file """
    parser.set(SECTION, key, value)
    with open(INI_FILE, 'w') as f:
        parser.write(f)


def set_result(value):
    """ Set the result of installation of clone image """
    persist(clone.RESULT, value)
    persist(clone.INSTALLED, time.strftime("%Y-%m-%d %H:%M:%S %Z"))
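
# The RESULT/INSTALLED keys turn the ini file into a small state machine:
# the main flow below reads RESULT on every boot and resumes accordingly
# (no RESULT = first boot, IN_PROGRESS = resume the install, OK = clean up,
# anything else = fail).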


def validate_hardware_compatibility():
    """ validate if cloned-image can be installed on this h/w """
    valid = True
    disk_paths = ""
    if parser.has_option(SECTION, "disks"):
        disk_paths = parser.get(SECTION, "disks")
    if not disk_paths:
        console_log("Missing value [disks] in ini file")
        valid = False
    for d in disk_paths.split():
        disk_path, size = d.split('#')
        if os.path.exists('/dev/disk/by-path/' + disk_path):
            LOG.info("Disk [{}] exists".format(disk_path))
            disk_size = clone.get_disk_size('/dev/disk/by-path/' +
                                            disk_path)
            if int(disk_size) >= int(size):
                LOG.info("Disk size is good: {} >= {}"
                         .format(utils.print_bytes(int(disk_size)),
                                 utils.print_bytes(int(size))))
            else:
                console_log("Not enough disk size[{}], "
                            "found:{} looking_for:{}".format(
                                disk_path, utils.print_bytes(int(disk_size)),
                                utils.print_bytes(int(size))), err=True)
                valid = False
        else:
            console_log("Disk [{}] does not exist!"
                        .format(disk_path), err=True)
            valid = False

    interfaces = ""
    if parser.has_option(SECTION, "interfaces"):
        interfaces = parser.get(SECTION, "interfaces")
    if not interfaces:
        console_log("Missing value [interfaces] in ini file")
        valid = False
    for f in interfaces.split():
        if os.path.exists('/sys/class/net/' + f):
            LOG.info("Interface [{}] exists".format(f))
        else:
            console_log("Interface [{}] does not exist!"
                        .format(f), err=True)
            valid = False

    maxcpuid = ""
    if parser.has_option(SECTION, "cpus"):
        maxcpuid = parser.get(SECTION, "cpus")
    if not maxcpuid:
        console_log("Missing value [cpus] in ini file")
        valid = False
    else:
        my_maxcpuid = clone.get_online_cpus()
        if int(maxcpuid) <= int(my_maxcpuid):
            LOG.info("Got enough cpus {},{}".format(
                maxcpuid, my_maxcpuid))
        else:
            console_log("Not enough CPUs, found:{} looking_for:{}"
                        .format(my_maxcpuid, maxcpuid), err=True)
            valid = False

    mem_total = ""
    if parser.has_option(SECTION, "mem"):
        mem_total = parser.get(SECTION, "mem")
    if not mem_total:
        console_log("Missing value [mem] in ini file")
        valid = False
    else:
        my_mem_total = clone.get_total_mem()
        # relaxed RAM check: allow up to 1 GiB less than the source machine
        if (int(mem_total) - (1024 * 1024)) <= int(my_mem_total):
            LOG.info("Got enough memory {},{}".format(
                mem_total, my_mem_total))
        else:
            console_log("Not enough memory; found:{} kB, "
                        "looking for a minimum of {} kB"
                        .format(my_mem_total, mem_total), err=True)
            valid = False

    if not valid:
        console_log("Validation failure!")
        set_result(clone.FAIL)
        time.sleep(20)
        exit(1)

    console_log("Successful validation")


def update_sysuuid_in_archive(tmpdir):
    """Update system uuid in system archive file."""
    sysuuid = str(uuid.uuid4())
    clone.find_and_replace(
        [os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
        "CLONEISO_SYSTEM_UUID", sysuuid)
    LOG.info("System uuid updated [%s]" % sysuuid)


def update_db(archive_dir, backup_name):
    """ Update DB before restore """
    path_to_archive = os.path.join(archive_dir, backup_name)
    LOG.info("Updating system archive [%s] DB." % path_to_archive)
    tmpdir = tempfile.mkdtemp(dir=archive_dir)
    try:
        subprocess.check_call(
            ['gunzip', path_to_archive + '.tgz'],
            stdout=DEVNULL, stderr=DEVNULL)
        # Extract only postgres dir to update system uuid
        subprocess.check_call(
            ['tar', '-x',
             '--directory=' + tmpdir,
             '-f', path_to_archive + '.tar',
             'postgres'],
            stdout=DEVNULL, stderr=DEVNULL)
        update_sysuuid_in_archive(tmpdir)
        subprocess.check_call(
            ['tar', '--update',
             '--directory=' + tmpdir,
             '-f', path_to_archive + '.tar',
             'postgres'],
            stdout=DEVNULL, stderr=DEVNULL)
        subprocess.check_call(['gzip', path_to_archive + '.tar'])
        shutil.move(path_to_archive + '.tar.gz', path_to_archive + '.tgz')

    except Exception as e:
        LOG.error("Update of system archive {} failed {}".format(
            path_to_archive, str(e)))
        raise CloneFail("Failed to update system archive")

    finally:
        shutil.rmtree(tmpdir, ignore_errors=True)
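
# Note: tar cannot modify members of a compressed archive in place, hence
# the gunzip / "tar --update" / gzip round-trip above ("gunzip foo.tgz"
# leaves foo.tar behind, which is re-gzipped and renamed back to .tgz).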


def config_compute():
    """
    Enable compute functionality for AIO system.
    :return: True if compute-config-complete is executed
    """
    if utils.get_system_type() == si_const.TIS_AIO_BUILD:
        console_log("Applying compute manifests for {}. "
                    "Node will reboot on completion."
                    .format(utils.get_controller_hostname()))
        sysinv.do_compute_config_complete(utils.get_controller_hostname())
        time.sleep(30)
        # compute-config-complete has no logs to console. So, wait
        # for some time before showing the login prompt.
        for i in range(1, 10):
            console_log("compute-config in progress..")
            time.sleep(30)
        console_log("Timed out on do_compute_config_complete")
        raise CloneFail("Timed out on do_compute_config_complete")
        return True
    else:
        # compute_config_complete is not needed.
        return False
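
# do_compute_config_complete() normally ends in a reboot, so the node is
# expected to go down mid-loop; reaching the CloneFail raise (and the
# unreachable "return True" after it) means the reboot never happened.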


def finalize_install():
    """ Complete the installation """
    subprocess.call(["rm", "-f", tsconfig.CONFIG_PATH + '/dnsmasq.leases'])
    console_log("Updating system parameters...")
    i = 1
    system_update = False
    # Retries if sysinv is not yet ready
    while i < 10:
        time.sleep(20)
        LOG.info("Attempt %d to update system parameters..." % i)
        try:
            if sysinv.update_clone_system('Cloned_from_' + clone_name,
                                          utils.get_controller_hostname()):
                system_update = True
                break
        except Exception:
            # Sysinv might not be ready yet
            pass
        i += 1
    if not system_update:
        LOG.error("System update failed")
        raise CloneFail("System update failed")

    try:
        output = subprocess.check_output(["finish_install_clone.sh"],
                                         stderr=subprocess.STDOUT)
        LOG.info("finish_install_clone out: {}".format(output))
    except Exception:
        console_log("Failed to cleanup stale OpenStack resources. "
                    "Manually delete the Volumes and Instances.")


def cleanup():
    """ Cleanup after installation """
    LOG.info("Cleaning up...")
    subprocess.call(['systemctl', 'disable', 'install-clone'], stderr=DEVNULL)
    OLD_FILE = os.path.join(tsconfig.PLATFORM_CONF_PATH, clone.CLONE_ISO_INI)
    if os.path.exists(OLD_FILE):
        os.remove(OLD_FILE)
    if os.path.exists(INI_FILE):
        os.chmod(INI_FILE, 0400)
        shutil.move(INI_FILE, tsconfig.PLATFORM_CONF_PATH)
    shutil.rmtree(os.path.join("/", clone.CLONE_ARCHIVE_DIR),
                  ignore_errors=True)


log.configure()
if os.path.exists(INI_FILE):
    try:
        parser.read(INI_FILE)
        if parser.has_section(SECTION):
            clone_name = parser.get(SECTION, clone.NAME)
            LOG.info("System archive [%s] to be installed." % clone_name)

            first_boot = False
            last_result = clone.IN_PROGRESS
            if not parser.has_option(SECTION, clone.RESULT):
                # first boot after cloning
                first_boot = True
            else:
                last_result = parser.get(SECTION, clone.RESULT)
                LOG.info("Last attempt to install clone was [{}]"
                         .format(last_result))

            if last_result == clone.IN_PROGRESS:
                if first_boot:
                    update_db(os.path.join("/", clone.CLONE_ARCHIVE_DIR),
                              clone_name + '_system')
                else:
                    # Booting up after patch application, do validation
                    validate_hardware_compatibility()

                console_log("+++++ Starting to install clone-image [{}] +++++"
                            .format(clone_name))
                set_result(clone.IN_PROGRESS)
                clone_arch_path = os.path.join("/", clone.CLONE_ARCHIVE_DIR,
                                               clone_name)
                if (backup_restore.RESTORE_RERUN_REQUIRED ==
                        backup_restore.restore_system(
                            clone_arch_path + "_system.tgz",
                            clone=True)):
                    # If there are no patches to be applied, run validation
                    # code and resume restore. If patches were applied, the
                    # node will be rebooted and validation runs after the
                    # reboot.
                    validate_hardware_compatibility()
                    LOG.info("validate passed, resuming restore...")
                    backup_restore.restore_system(
                        clone_arch_path + "_system.tgz", clone=True)
                console_log("System archive installed from [%s]" % clone_name)
                backup_restore.restore_images(clone_arch_path + "_images.tgz",
                                              clone=True)
                console_log("Images archive installed from [%s]" % clone_name)
                finalize_install()
                set_result(clone.OK)
                if not config_compute():
                    # do cleanup if compute_config_complete is not required
                    cleanup()
            elif last_result == clone.OK:
                # Installation completed successfully before last reboot
                cleanup()
        else:
            LOG.error("Bad file: {}".format(INI_FILE))
            set_result(clone.FAIL)
            exit(1)
    except Exception as e:
        console_log("Clone [%s] installation failed" % clone_name)
        LOG.exception("install failed")
        set_result(clone.FAIL)
        exit(1)
else:
    console_log("Nothing to do. Not installing a clone?")

30
controllerconfig/controllerconfig/scripts/keyringstaging
Executable file
@@ -0,0 +1,30 @@
#!/usr/bin/env python

#
# Copyright (c) 2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import keyring
import os
import sys


def get_stealth_password():
    """Get the stealth password vault for manifest to run"""
    orig_root = os.environ.get('XDG_DATA_HOME', None)
    os.environ["XDG_DATA_HOME"] = "/tmp"

    stealth_pw = keyring.get_password("CGCS", "admin")

    if orig_root is not None:
        os.environ["XDG_DATA_HOME"] = orig_root
    else:
        del os.environ["XDG_DATA_HOME"]
    return stealth_pw
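
# keyring's file backend resolves its store relative to $XDG_DATA_HOME, so
# pointing it at /tmp reads the staged /tmp/python_keyring vault copied in
# by controller_config (and removed again once the manifests have run).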


if __name__ == "__main__":
    sys.stdout.write(get_stealth_password())
    sys.stdout.flush()
    sys.exit(0)

114
controllerconfig/controllerconfig/scripts/openstack_update_admin_password
Executable file
@@ -0,0 +1,114 @@
#!/bin/bash
#
# Copyright (c) 2016-2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# This script is used to change the OpenStack 'admin' user's password
# on Secondary Titanium Cloud Regions

# This script logs to user.log

PASSWORD_INPUT=$1

function set_admin_password()
{
    local SET_PASSWD_CMD="keyring set CGCS admin"
    /usr/bin/expect << EOD
set loguser_save [ log_user ]
log_user 0
set timeout_save \$timeout
set timeout 60
spawn $SET_PASSWD_CMD
expect {
    "Password*" {
        send "$PASSWORD_INPUT\r"
        expect eof
    }
    timeout {
        puts "ERROR: Timed out"
        exit 1
    }
}
set timeout \$timeout_save
log_user \$loguser_save
EOD

    local PASSWORD=$(keyring get CGCS admin)

    if [ "${PASSWORD}" == "${PASSWORD_INPUT}" ]; then
        return 0
    fi
    return 1
}

function validate_exec_environment()
{
    local TS_CONF_FILE="/usr/bin/tsconfig"
    if [ -f "$TS_CONF_FILE" ]; then
        source $TS_CONF_FILE
    else
        echo "ERROR: Missing $TS_CONF_FILE."
        exit 1
    fi

    local CONFIG_DIR=$CONFIG_PATH

    # check if it is running on a secondary region
    if [ -f "$PLATFORM_CONF_FILE" ]; then
        source $PLATFORM_CONF_FILE
        if [ "$region_config" = "no" ]; then
            echo "ERROR: This command is only applicable to a Secondary Region."
            exit 1
        fi
    else
        echo "ERROR: Missing $PLATFORM_CONF_FILE."
        exit 1
    fi

    # check if it is running on the active controller
    if [ ! -d $CONFIG_DIR ]; then
        echo "ERROR: Command must be run from the active controller."
        exit 1
    fi
    return 0
}

function validate_input()
{
    if [ -z "$PASSWORD_INPUT" ]; then
        echo "ERROR: Missing password input."
        echo "USAGE: $0 <password>"
        exit 1
    fi

    # check for a space in the password
    if [[ "$PASSWORD_INPUT" =~ ( |\') ]]; then
        echo "ERROR: Space is not allowed in the password."
        exit 1
    fi

    echo ""
    read -p "This command will update this Secondary Region's internal copy of the OpenStack Admin Password.
Are you sure you want to proceed (y/n)? " -n 1 -r

    echo ""
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        echo "cancelled"
        exit 1
    fi
}

validate_exec_environment
validate_input
logger -p info -t $0 "Updating OpenStack Admin Password locally"
set_admin_password
if [ $? -eq 0 ]; then
    echo "The OpenStack Admin Password has been updated on this Secondary Region."
    echo "Please swact the controllers to allow certain services to resync the Admin password."
else
    echo "ERROR: Failed to update the Admin Password."
    exit 1
fi
exit 0
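
# Usage example: the script takes the new password as its only argument,
# prompts for confirmation, and verifies by reading the value back:
#   openstack_update_admin_password <new-password>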

29
controllerconfig/controllerconfig/setup.py
Normal file
@@ -0,0 +1,29 @@
#
# Copyright (c) 2015-2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from setuptools import setup, find_packages

setup(
    name='controllerconfig',
    description='Controller Configuration',
    version='1.0.0',
    license='Apache-2.0',
    platforms=['any'],
    provides=['controllerconfig'],
    packages=find_packages(),
    package_data={},
    include_package_data=False,
    entry_points={
        'console_scripts': [
            'config_controller = controllerconfig.systemconfig:main',
            'config_region = controllerconfig.regionconfig:region_main',
            'config_subcloud = controllerconfig.regionconfig:subcloud_main',
            'config_management = controllerconfig.config_management:main',
            'upgrade_controller = controllerconfig.upgrades.controller:main',
            'upgrade_controller_simplex = '
            'controllerconfig.upgrades.controller:simplex_main'
        ],
    }
)
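
# Each console_scripts entry above becomes an executable on $PATH at install
# time; e.g. running "config_controller" dispatches to
# controllerconfig.systemconfig:main.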

9
controllerconfig/controllerconfig/test-requirements.txt
Normal file
@@ -0,0 +1,9 @@
pylint
pytest
mock
coverage>=3.6
PyYAML>=3.10.0 # MIT
os-testr>=0.8.0 # Apache-2.0
testresources>=0.2.4 # Apache-2.0/BSD
testrepository>=0.0.18 # Apache-2.0/BSD

51
controllerconfig/controllerconfig/tox.ini
Normal file
@@ -0,0 +1,51 @@
# Tox (http://tox.testrun.org/) is a tool for running tests
# in multiple virtualenvs. This configuration file will run the
# test suite on all supported python versions. To use it, "pip install tox"
# and then run "tox" from this directory.

[tox]
envlist = flake8, py27, pylint
# Tox does not work if the path to the workdir is too long, so move it to /tmp
toxworkdir = /tmp/{env:USER}_cctox
wrsdir = {toxinidir}/../../../../../../../../..

[testenv]
whitelist_externals = find
install_command = pip install --no-cache-dir -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/pike} {opts} {packages}
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
       -e{[tox]wrsdir}/addons/wr-cgcs/layers/cgcs/middleware/config/recipes-control/configutilities/configutilities
       -e{[tox]wrsdir}/addons/wr-cgcs/layers/cgcs/middleware/fault/recipes-common/fm-api
       -e{[tox]wrsdir}/addons/wr-cgcs/layers/cgcs/middleware/config/recipes-common/tsconfig/tsconfig
       -e{[tox]wrsdir}/addons/wr-cgcs/layers/cgcs/middleware/sysinv/recipes-common/sysinv/sysinv
       -e{[tox]wrsdir}/addons/wr-cgcs/layers/cgcs/middleware/sysinv/recipes-common/cgts-client/cgts-client

[testenv:pylint]
basepython = python2.7
deps = {[testenv]deps}
       pylint
commands = pylint {posargs} controllerconfig --rcfile=./pylint.rc --extension-pkg-whitelist=netifaces

[testenv:flake8]
basepython = python2.7
deps = flake8
commands = flake8 {posargs}

[flake8]
ignore = W503

[testenv:py27]
basepython = python2.7
commands =
    find . -type f -name "*.pyc" -delete
    py.test {posargs}

[testenv:cover]
basepython = python2.7
deps = {[testenv]deps}

commands =
    coverage erase
    python setup.py testr --coverage --testr-args='{posargs}'
    coverage xml

@@ -0,0 +1,92 @@
#!/usr/bin/env python
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will add neutron hosts for each controller

import psycopg2
import sys

from sysinv.common import constants

from psycopg2.extras import RealDictCursor

from controllerconfig.common import log

from tsconfig.tsconfig import system_mode

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None  # noqa
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]  # noqa
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print ("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    log.configure()

    if from_release == "17.06" and action == "migrate":
        try:
            neutron_create_controller_hosts()
        except Exception as ex:
            LOG.exception(ex)
            print ex
            return 1


def get_controller(conn, hostname):
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("SELECT * FROM i_host WHERE hostname=%s;",
                        (hostname,))
            row = cur.fetchone()
            if row is None:
                LOG.exception("Failed to fetch %s host_id" % hostname)
                raise
            return row


def create_neutron_host_if_not_exists(conn, sysinv_host):
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("SELECT * FROM hosts WHERE name=%s;",
                        (sysinv_host['hostname'],))
            row = cur.fetchone()
            if row is None:
                cur.execute("INSERT INTO hosts "
                            "(id, name, availability, created_at) "
                            "VALUES (%s, %s, %s, %s);",
                            (sysinv_host['uuid'], sysinv_host['hostname'],
                             "down", sysinv_host['created_at']))


def neutron_create_controller_hosts():
    simplex = (system_mode == constants.SYSTEM_MODE_SIMPLEX)

    sysinv_conn = psycopg2.connect("dbname=sysinv user=postgres")
    controller_0 = get_controller(sysinv_conn, constants.CONTROLLER_0_HOSTNAME)
    if not simplex:
        controller_1 = get_controller(sysinv_conn,
                                      constants.CONTROLLER_1_HOSTNAME)

    neutron_conn = psycopg2.connect("dbname=neutron user=postgres")
    create_neutron_host_if_not_exists(neutron_conn, controller_0)
    if not simplex:
        create_neutron_host_if_not_exists(neutron_conn, controller_1)


if __name__ == "__main__":
    sys.exit(main())

@@ -0,0 +1,210 @@
#!/usr/bin/env python
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will update the controller_fs extension in the sysinv database.

import sys
import os
import subprocess
import math
import uuid
from datetime import datetime

import psycopg2
from controllerconfig import utils
from controllerconfig.common import log
from controllerconfig.common import constants
from psycopg2.extras import RealDictCursor
from sysinv.common import utils as sutils

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None  # noqa
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]  # noqa
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print ("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    log.configure()
    if from_release == "17.06" and action == "migrate":
        try:
            update_extension()
        except Exception as ex:
            LOG.exception(ex)
            print ex
            return 1


def get_temp_sizes():
    """ Get the temporary filesystems sizes setup during upgrades.
    """
    total_temp_sizes = 0

    args = ["lvdisplay",
            "--columns",
            "--options",
            "lv_size,lv_name",
            "--units",
            "g",
            "--noheading",
            "--nosuffix",
            "/dev/cgts-vg/dbdump-temp-lv",
            "/dev/cgts-vg/postgres-temp-lv"]

    with open(os.devnull, "w") as fnull:
        try:
            lvdisplay_output = subprocess.check_output(args,
                                                       stderr=fnull)
        except Exception:
            LOG.info("migrate extension, total_temp_size=%s" %
                     total_temp_sizes)
            return total_temp_sizes

    lvdisplay_dict = utils.output_to_dict(lvdisplay_output)

    if lvdisplay_dict.get('dbdump-temp-lv'):
        total_temp_sizes = int(math.ceil(float(
            lvdisplay_dict.get('dbdump-temp-lv'))))

    if lvdisplay_dict.get('postgres-temp-lv'):
        total_temp_sizes += int(math.ceil(float(
            lvdisplay_dict.get('postgres-temp-lv'))))

    LOG.info("migrate extension, total_temp_sizes=%s" % total_temp_sizes)
    return total_temp_sizes


def update_extension():
    """ Update sysinv db controller_fs extension size on upgrade."""
    try:
        vg_free = sutils.get_cgts_vg_free_space()
        LOG.info("migrate extension, get_cgts_vg_free_space=%s" % vg_free)

        # Add back the temporary sizes
        vg_free += get_temp_sizes()
        LOG.info("migrate extension, vg_free=%s" % vg_free)

    except Exception as e:
        LOG.exception(e)
        print e
        return 1

    conn = psycopg2.connect("dbname='sysinv' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("select id from i_system;")
            row = cur.fetchone()
            if row is None:
                LOG.exception("migrate extension, failed to fetch "
                              "i_system data")
                raise

            controller_fs_uuid = str(uuid.uuid4())
            forisystemid = row.get('id')
            values = {'created_at': datetime.now(),
                      'updated_at': None,
                      'deleted_at': None,
                      'uuid': controller_fs_uuid,
                      'name': 'extension',
                      'size': 1,
                      'replicated': True,
                      'logical_volume': 'extension-lv',
                      'forisystemid': forisystemid}

            cur.execute("INSERT INTO controller_fs "
                        "(created_at, updated_at, deleted_at, "
                        "uuid, name, size, replicated, logical_volume, "
                        "forisystemid) "
                        "VALUES (%(created_at)s, %(updated_at)s, "
                        "%(deleted_at)s, %(uuid)s, %(name)s, %(size)s, "
                        "%(replicated)s, %(logical_volume)s, "
                        "%(forisystemid)s)",
                        values)

            LOG.info("migrate extension, controller_fs, insert new row with "
                     "data %s" % values)
            conn.commit()

            # If there is not enough space to add the new extension filesystem
            # then decrease the backup filesystem by the amount required (1G)

            cur.execute("select size from controller_fs where name='backup';")
            row = cur.fetchone()
            LOG.info("migrate extension, backup = %s" % row)
            if row is None:
                LOG.exception("migrate extension, failed to fetch "
                              "controller_fs data")
                raise
            backup_size = row.get('size')

            cur.execute(
                "select size from controller_fs where name='database';")
            row = cur.fetchone()
            LOG.info("migrate extension, database = %s" % row)
            if row is None:
                LOG.exception("migrate extension, failed to fetch "
                              "controller_fs data")
                raise
            database_size = row.get('size')

            cur.execute("select size from controller_fs where name='cgcs';")
            row = cur.fetchone()
            LOG.info("migrate extension, cgcs = %s" % row)
            if row is None:
                LOG.exception("migrate extension, failed to fetch "
                              "controller_fs data")
                raise
            cgcs_size = row.get('size')

            cur.execute(
                "select size from controller_fs where name='img-conversions';")
            row = cur.fetchone()
            LOG.info("migrate extension, img-conversions = %s" % row)
            if row is None:
                LOG.exception("migrate extension, failed to fetch "
                              "controller_fs data")
                raise
            img_conversions_size = row.get('size')

            cur.execute(
                "select size from controller_fs where name='extension';")
            row = cur.fetchone()
            LOG.info("migrate extension, extension = %s" % row)
            if row is None:
                LOG.exception("migrate extension, failed to fetch "
                              "controller_fs data")
                raise
            extension_size = row.get('size')

            total_size = backup_size + (database_size * 2) + \
                cgcs_size + img_conversions_size + extension_size

            if vg_free < total_size:
                LOG.info("migrate extension, we have less than 1G free")
                new_backup_size = \
                    backup_size - constants.DEFAULT_EXTENSION_STOR_SIZE

                LOG.info("migrate extension, reduce the backup size by 1G. "
                         "new_backup_size = %s" % new_backup_size)
                cur.execute(
                    "UPDATE controller_fs SET size=%s where name='backup';",
                    (new_backup_size,))
                conn.commit()


if __name__ == "__main__":
    sys.exit(main())

@@ -0,0 +1,708 @@
#!/usr/bin/env python
# Copyright (c) 2017-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will update the partition schema for controller-1.

import collections
import json
import math
import psycopg2
import re
import sys
import subprocess
import parted
from sysinv.openstack.common import uuidutils

from sysinv.common import constants
from psycopg2.extras import RealDictCursor
from controllerconfig.common import log
from controllerconfig import utils

from tsconfig.tsconfig import system_mode

LOG = log.get_logger(__name__)

Partition_Tuple = collections.namedtuple(
    'partition', 'uuid idisk_id idisk_uuid size_mib device_node device_path '
    'status type_guid forihostid foripvid start_mib end_mib')
uefi_cgts_pv_1_partition_number = 4
bios_cgts_pv_1_partition_number = 5


def main():
    action = None
    from_release = None
    to_release = None  # noqa
    arg = 1

    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]  # noqa
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print ("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    log.configure()

    if from_release == "17.06" and action == "migrate":
        try:
            create_user_partitions()
        except Exception as ex:
            LOG.exception(ex)
            return 1


def get_partitions(device_path):
    """Obtain existing partitions from a disk."""
    try:
        device = parted.getDevice(device_path)
        disk = parted.newDisk(device)
    except Exception as e:
        LOG.info("No partition info for disk %s - %s" % (device_path, e))
        return None

    ipartitions = []

    partitions = disk.partitions

    for partition in partitions:
        part_size_mib = partition.getSize()
        part_device_node = partition.path
        part_device_path = '{}-part{}'.format(device_path,
                                              partition.number)
        start_mib = math.ceil(float(partition.geometry.start) / 2048)
        end_mib = math.ceil(float(partition.geometry.end) / 2048)

        part_attrs = {
            'size_mib': part_size_mib,
            'device_node': part_device_node,
            'device_path': part_device_path,
            'start_mib': start_mib,
            'end_mib': end_mib
        }
        ipartitions.append(part_attrs)

    return ipartitions
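
# parted reports geometry in 512-byte sectors, so dividing the start/end
# sector by 2048 converts to MiB (2048 * 512 bytes = 1 MiB).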


def get_disk_available_mib(device_node):
    # Get sector size command.
    sector_size_bytes_cmd = '{} {}'.format('blockdev --getss', device_node)

    # Get total free space in sectors command.
    avail_space_sectors_cmd = '{} {} {}'.format(
        'sgdisk -p', device_node, "| grep \"Total free space\"")

    # Get the sector size.
    sector_size_bytes_process = subprocess.Popen(
        sector_size_bytes_cmd, stdout=subprocess.PIPE, shell=True)
    sector_size_bytes = sector_size_bytes_process.stdout.read().rstrip()

    # Get the free space.
    avail_space_sectors_process = subprocess.Popen(
        avail_space_sectors_cmd, stdout=subprocess.PIPE, shell=True)
    avail_space_sectors_output = avail_space_sectors_process.stdout.read()
    avail_space_sectors = re.findall('\d+',
                                     avail_space_sectors_output)[0].rstrip()

    # Free space in MiB.
    avail_space_mib = (int(sector_size_bytes) * int(avail_space_sectors) /
                       (1024 ** 2))

    # Keep 2 MiB for partition table.
    if avail_space_mib >= 2:
        avail_space_mib = avail_space_mib - 2

    return avail_space_mib
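
# Worked example (hypothetical numbers): 512-byte sectors and 4194304 free
# sectors give 512 * 4194304 / 1024**2 = 2048 MiB, minus the 2 MiB kept for
# the partition table = 2046 MiB available.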


def build_partition_device_node(disk_device_node, partition_number):
    if constants.DEVICE_NAME_NVME in disk_device_node:
        partition_device_node = '{}p{}'.format(
            disk_device_node, partition_number)
    else:
        partition_device_node = '{}{}'.format(
            disk_device_node, partition_number)

    LOG.info("partition_device_node: %s" % partition_device_node)

    return partition_device_node
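
# e.g. /dev/sda with partition number 4 yields /dev/sda4, while NVMe device
# nodes take a "p" separator: /dev/nvme0n1 with 4 yields /dev/nvme0n1p4.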
|
||||
|
||||
|
||||
def update_db_pv(cur, part_device_path, part_device_node, part_uuid,
|
||||
lvm_pv_name, pv_id):
|
||||
cur.execute("update i_pv set disk_or_part_device_path=%s,"
|
||||
"disk_or_part_device_node=%s, disk_or_part_uuid=%s,"
|
||||
"lvm_pv_name=%s where id=%s",
|
||||
(part_device_path, part_device_node, part_uuid,
|
||||
lvm_pv_name, pv_id))
|
||||
|
||||
|
||||
def create_partition(cur, partition):
|
||||
cur.execute(
|
||||
"insert into partition(uuid, idisk_id, idisk_uuid, size_mib,"
|
||||
"device_node, device_path, status, type_guid, "
|
||||
"forihostid, foripvid, start_mib, end_mib) "
|
||||
"values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
|
||||
partition)
|
||||
|
||||
|
||||
def get_storage_backend(cur):
|
||||
cur.execute("select storage_backend.id, storage_backend.backend, "
|
||||
"storage_backend.state, "
|
||||
"storage_backend.forisystemid, storage_backend.services, "
|
||||
"storage_backend.capabilities from storage_backend")
|
||||
storage_backend = cur.fetchone()
|
||||
if not storage_backend:
|
||||
LOG.exception("No storage backend present, exiting.")
|
||||
raise
|
||||
|
||||
backend = storage_backend['backend']
|
||||
LOG.info("storage_backend: %s" % str(storage_backend))
|
||||
|
||||
return backend
|
||||
|
||||
|
||||
def cgts_vg_extend(cur, disk, partition4, pv_cgts_vg, partition_number,
|
||||
part_size_mib):
|
||||
part_device_node = '{}{}'.format(disk.get('device_node'),
|
||||
partition_number)
|
||||
part_device_path = '{}-part{}'.format(disk.get('device_path'),
|
||||
partition_number)
|
||||
|
||||
LOG.info("Extra cgts-vg partition size: %s device node: %s "
|
||||
"device path: %s" %
|
||||
(part_size_mib, part_device_node, part_device_path))
|
||||
|
||||
part_uuid = uuidutils.generate_uuid()
|
||||
|
||||
new_partition = Partition_Tuple(
|
||||
uuid=part_uuid, idisk_id=disk.get('id'),
|
||||
idisk_uuid=disk.get('uuid'), size_mib=part_size_mib,
|
||||
device_node=part_device_node, device_path=part_device_path,
|
||||
status=constants.PARTITION_CREATE_ON_UNLOCK_STATUS,
|
||||
type_guid=constants.USER_PARTITION_PHYSICAL_VOLUME,
|
||||
forihostid=disk['forihostid'], foripvid=None,
|
||||
start_mib=None, end_mib=None)
|
||||
|
||||
create_partition(cur, new_partition)
|
||||
|
||||
pv_uuid = uuidutils.generate_uuid()
|
||||
cur.execute(
|
||||
"insert into i_pv(uuid, pv_state, pv_type, disk_or_part_uuid, "
|
||||
"disk_or_part_device_node, disk_or_part_device_path, lvm_pv_name, "
|
||||
"lvm_vg_name, forihostid, forilvgid) "
|
||||
"values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
|
||||
(pv_uuid, constants.PV_ADD, constants.PV_TYPE_PARTITION, part_uuid,
|
||||
part_device_node, part_device_path, part_device_node,
|
||||
constants.LVG_CGTS_VG, disk.get('forihostid'),
|
||||
pv_cgts_vg.get('forilvgid')))
|
||||
|
||||
# Get the PV.
|
||||
cur.execute("select i_pv.id from i_pv where uuid=%s",
|
||||
(pv_uuid,))
|
||||
pv = cur.fetchone()
|
||||
|
||||
# Update partition.
|
||||
cur.execute(
|
||||
"update partition set foripvid=%s where uuid=%s",
|
||||
(pv.get('id'), part_uuid))
|
||||
|
||||
|
||||
def update_ctrl0_cinder_partition_pv(cur):
|
||||
# Get controller-0 id.
|
||||
hostname = constants.CONTROLLER_0_HOSTNAME
|
||||
cur.execute("select i_host.id, i_host.rootfs_device from i_host "
|
||||
"where hostname=%s;", (hostname,))
|
||||
row = cur.fetchone()
|
||||
if row is None:
|
||||
LOG.exception("Failed to fetch %s host_id" % hostname)
|
||||
raise
|
||||
ctrl0_id = row['id']
|
||||
|
||||
# Controller-0 has only one partition added, the cinder partition.
|
||||
cur.execute("select partition.id, partition.uuid, "
|
||||
"partition.status, partition.device_node, "
|
||||
"partition.device_path, partition.size_mib,"
|
||||
"partition.idisk_uuid, partition.foripvid "
|
||||
"from partition where forihostid = %s",
|
||||
(ctrl0_id,))
|
||||
ctrl0_cinder_partition = cur.fetchone()
|
||||
if not ctrl0_cinder_partition:
|
||||
LOG.exception("Failed to get ctrl0 cinder volumes partition")
|
||||
raise
|
||||
|
||||
# Obtain the cinder PV for controller-0.
|
||||
cur.execute("select i_pv.id, i_pv.disk_or_part_uuid, "
|
||||
"i_pv.disk_or_part_device_node, "
|
||||
"i_pv.disk_or_part_device_path, i_pv.lvm_pv_size,"
|
||||
"i_pv.lvm_pv_name, i_pv.lvm_vg_name, i_pv.forilvgid,"
|
||||
"i_pv.pv_type from i_pv where forihostid=%s and "
|
||||
"lvm_vg_name=%s",
|
||||
(ctrl0_id, constants.LVG_CINDER_VOLUMES))
|
||||
ctrl0_cinder_pv = cur.fetchone()
|
||||
if not ctrl0_cinder_pv:
|
||||
LOG.exception("Failed to get ctrl0 cinder physical volume")
|
||||
raise
|
||||
|
||||
# Update the cinder PV with the partition info.
|
||||
update_db_pv(cur, ctrl0_cinder_partition['device_path'],
|
||||
ctrl0_cinder_partition['device_node'],
|
||||
ctrl0_cinder_partition['uuid'],
|
||||
ctrl0_cinder_partition['device_node'],
|
||||
ctrl0_cinder_pv['id'])
|
||||
|
||||
# Mark the cinder partition in use.
|
||||
cur.execute("update partition set foripvid=%s, status=%s "
|
||||
"where id=%s",
|
||||
(ctrl0_cinder_pv['id'], constants.PARTITION_IN_USE_STATUS,
|
||||
ctrl0_cinder_partition['id']))
|
||||
|
||||
|
||||
def update_partition_pv(cur, pvs, partitions, disks):
|
||||
backend = get_storage_backend(cur)
|
||||
if system_mode != constants.SYSTEM_MODE_SIMPLEX and backend != "ceph":
|
||||
update_ctrl0_cinder_partition_pv(cur)
|
||||
|
||||
for pv in pvs:
|
||||
if (pv['pv_type'] == constants.PV_TYPE_PARTITION and
|
||||
'-part' not in pv['disk_or_part_device_path']):
|
||||
if "drbd" in pv['lvm_pv_name']:
|
||||
partition_number = '1'
|
||||
else:
|
||||
partition_number = (
|
||||
re.match('.*?([0-9]+)$', pv['lvm_pv_name']).group(1))
|
||||
# Update disk foripvid to null.
|
||||
disk = next((
|
||||
d for d in disks
|
||||
if d['device_path'] == pv['disk_or_part_device_path']), None)
|
||||
if disk:
|
||||
LOG.info("Set foripvid to null for disk %s" % disk['id'])
|
||||
cur.execute(
|
||||
"update i_idisk set foripvid=null where id=%s",
|
||||
(disk['id'],))
|
||||
|
||||
# Update partition device path and device path for the current PV.
|
||||
part_device_path = "{}{}{}".format(
|
||||
pv['disk_or_part_device_path'],
|
||||
'-part',
|
||||
partition_number)
|
||||
|
||||
if constants.DEVICE_NAME_NVME in pv['disk_or_part_device_node']:
|
||||
part_device_node = "{}p{}".format(
|
||||
pv['disk_or_part_device_node'],
|
||||
partition_number)
|
||||
else:
|
||||
part_device_node = "{}{}".format(
|
||||
pv['disk_or_part_device_node'],
|
||||
partition_number)
|
||||
|
||||
LOG.info("Old PV device path: %s New PV device path: %s" %
|
||||
(pv['disk_or_part_device_path'], part_device_path))
|
||||
LOG.info("Old PV device node: %s New PV device node: %s" %
|
||||
(pv['disk_or_part_device_node'], part_device_node))
|
||||
|
||||
lvm_pv_name = part_device_node
|
||||
# Do not use constant here yet since this may change due to
|
||||
# cinder removal from cfg ctrl US.
|
||||
if "drbd" in pv['lvm_pv_name']:
|
||||
lvm_pv_name = pv['lvm_pv_name']
|
||||
|
||||
part = next((
|
||||
p for p in partitions
|
||||
if p['device_path'] == part_device_path), None)
|
||||
|
||||
if not part:
|
||||
LOG.info("No %s partition, returning" % part_device_path)
|
||||
continue
|
||||
|
||||
# Update the PV DB entry.
|
||||
update_db_pv(cur, part_device_path, part_device_node,
|
||||
part['uuid'], lvm_pv_name, pv['id'])
|
||||
|
||||
# Update the PV DB entry.
|
||||
cur.execute(
|
||||
"update partition set foripvid=%s, status=%s "
|
||||
"where id=%s",
|
||||
(pv['id'], constants.PARTITION_IN_USE_STATUS,
|
||||
part['id']))
|
||||
|
||||
|
||||
def create_ctrl0_cinder_partition(cur, stors, part_size):
|
||||
hostname = constants.CONTROLLER_0_HOSTNAME
|
||||
cur.execute("select i_host.id, i_host.rootfs_device from i_host "
|
||||
"where hostname=%s;", (hostname,))
|
||||
row = cur.fetchone()
|
||||
if row is None:
|
||||
LOG.exception("Failed to fetch %s host_id" % hostname)
|
||||
raise
|
||||
|
||||
controller_id = row['id']
|
||||
|
||||
# Get the disks for controller-0.
|
||||
cur.execute("select i_idisk.forihostid, i_idisk.uuid, "
|
||||
"i_idisk.device_node, i_idisk.device_path, "
|
||||
"i_idisk.id, i_idisk.size_mib from i_idisk where "
|
||||
"forihostid = %s", (controller_id,))
|
||||
|
||||
disks_ctrl0 = cur.fetchall()
|
||||
|
||||
# Obtain the cinder disk for controller-0.
|
||||
cinder_disk_ctrl0 = next((
|
||||
d for d in disks_ctrl0
|
||||
if d['uuid'] in [s['idisk_uuid'] for s in stors]), None)
|
||||
LOG.info("cinder_disk_ctrl0: %s" % str(cinder_disk_ctrl0))
|
||||
if not cinder_disk_ctrl0:
|
||||
LOG.exception("Failed to get cinder disk for host %s" %
|
||||
controller_id)
|
||||
raise
|
||||
|
||||
# Fill in partition info.
|
||||
new_part_size = part_size
|
||||
new_part_device_node = "%s1" % cinder_disk_ctrl0['device_node']
|
||||
new_part_device_path = ('%s-part1' %
|
||||
cinder_disk_ctrl0['device_path'])
|
||||
LOG.info("New partition: %s - %s" %
|
||||
(new_part_device_node, new_part_device_path))
|
||||
new_part_uuid = uuidutils.generate_uuid()
|
||||
|
||||
new_partition = Partition_Tuple(
|
||||
uuid=new_part_uuid,
|
||||
idisk_id=cinder_disk_ctrl0.get('id'),
|
||||
idisk_uuid=cinder_disk_ctrl0.get('uuid'),
|
||||
size_mib=new_part_size,
|
||||
device_node=new_part_device_node,
|
||||
device_path=new_part_device_path,
|
||||
status=constants.PARTITION_IN_USE_STATUS,
|
||||
type_guid=constants.USER_PARTITION_PHYSICAL_VOLUME,
|
||||
forihostid=controller_id,
|
||||
foripvid=None,
|
||||
start_mib=None,
|
||||
end_mib=None)
|
||||
|
||||
create_partition(cur, new_partition)
|
||||
|
||||
|
||||
def create_db_partition_entries(cur, disks):
|
||||
# Get the stors with the cinder function.
|
||||
cur.execute("select i_istor.id, i_istor.idisk_uuid, "
|
||||
"i_istor.function, i_istor.forihostid "
|
||||
"from i_istor where function = %s",
|
||||
(constants.STOR_FUNCTION_CINDER,))
|
||||
stors = cur.fetchall()
|
||||
|
||||
cinder_partition = False
|
||||
for disk in disks:
|
||||
partitions = get_partitions(disk['device_path'])
|
||||
|
||||
LOG.info("partitions: %s" % str(partitions))
|
||||
# Create the DB entries for all disk partitions on controller-1.
|
||||
# For controller-0 we will only create the cinder partition, as the
|
||||
# rest will be reported by sysinv-agent once the host is upgraded.
|
||||
if not partitions:
|
||||
continue
|
||||
|
||||
for part in partitions:
|
||||
part_disk = next((
|
||||
d for d in disks if d['device_path'] in part['device_path']
|
||||
))
|
||||
|
||||
crt_stor = next((s for s in stors
|
||||
if s['idisk_uuid'] == part_disk['uuid']), None)
|
||||
|
||||
part_type_guid = constants.LINUX_LVM_PARTITION
|
||||
if crt_stor:
|
||||
part_type_guid = constants.USER_PARTITION_PHYSICAL_VOLUME
|
||||
|
||||
part_size = part['size_mib']
|
||||
part_device_node = part['device_node']
|
||||
part_device_path = part['device_path']
|
||||
|
||||
LOG.info("New partition size: %s part device node: %s "
|
||||
"part device path: %s" %
|
||||
(part_size, part_device_node, part_device_path))
|
||||
|
||||
part_uuid = uuidutils.generate_uuid()
|
||||
new_partition = Partition_Tuple(
|
||||
uuid=part_uuid, idisk_id=part_disk.get('id'),
|
||||
idisk_uuid=part_disk.get('uuid'), size_mib=part_size,
|
||||
device_node=part_device_node, device_path=part_device_path,
|
||||
status=constants.PARTITION_IN_USE_STATUS,
|
||||
type_guid=part_type_guid,
|
||||
forihostid=disk['forihostid'], foripvid=None,
|
||||
start_mib=part['start_mib'], end_mib=part['end_mib'])
|
||||
|
||||
create_partition(cur, new_partition)
|
||||
|
||||
# If this is the cinder disk, also create partition for the other
|
||||
# controller.
|
||||
if not crt_stor:
|
||||
LOG.info("Disk %s is not a cinder disk for host %s" %
|
||||
(part_disk['device_path'], part_disk['forihostid']))
|
||||
continue
|
||||
|
||||
if system_mode == constants.SYSTEM_MODE_SIMPLEX:
|
||||
cinder_partition = True
|
||||
continue
|
||||
|
||||
# Also create the cinder partition for controller-0.
|
||||
create_ctrl0_cinder_partition(cur, stors, part_size)
|
||||
cinder_partition = True
|
||||
|
||||
# If somehow the cinder disk was also wiped and the partition was lost,
|
||||
# we need to retrieve it in another way.
|
||||
if not cinder_partition:
|
||||
LOG.info("Cinder partition was wiped so we need to create it")
|
||||
for disk in disks:
|
||||
d_json_dict = json.loads(disk['capabilities'])
|
||||
if (constants.IDISK_DEV_FUNCTION in d_json_dict and
|
||||
d_json_dict['device_function'] == 'cinder_device'):
|
||||
if 'cinder_gib' in d_json_dict:
|
||||
LOG.info("cinder_gib: %s" % d_json_dict['cinder_gib'])
|
||||
|
||||
# Partition size calculated from the size of cinder_gib.
|
||||
part_size = int(d_json_dict['cinder_gib'])
|
||||
|
||||
# Actual disk size in MiB.
|
||||
device = parted.getDevice(disk['device_path'])
|
||||
disk_size = device.length * device.sectorSize / (1024 ** 2)
|
||||
|
||||
part_size = min(part_size, disk_size - 2)
|
||||
|
||||
if constants.DEVICE_NAME_NVME in disk['device_node']:
|
||||
part_device_node = "%sp1" % disk['device_node']
|
||||
else:
|
||||
part_device_node = "%s1" % disk['device_node']
|
||||
part_device_path = "%s-part1" % disk['device_path']
|
||||
part_start_mib = 2
|
||||
part_end_mib = 2 + part_size
|
||||
|
||||
LOG.info("New partition size: %s part device node: %s "
|
||||
"part device path: %s part_end_mib: %s" %
|
||||
(part_size, part_device_node, part_device_path,
|
||||
part_end_mib))
|
||||
|
||||
part_uuid = uuidutils.generate_uuid()
|
||||
new_partition = Partition_Tuple(
|
||||
uuid=part_uuid,
|
||||
idisk_id=disk.get('id'),
|
||||
idisk_uuid=disk.get('uuid'), size_mib=part_size,
|
||||
device_node=part_device_node,
|
||||
device_path=part_device_path,
|
||||
status=constants.PARTITION_IN_USE_STATUS,
|
||||
type_guid=constants.USER_PARTITION_PHYSICAL_VOLUME,
|
||||
forihostid=disk['forihostid'], foripvid=None,
|
||||
start_mib=part_start_mib, end_mib=part_end_mib)
|
||||
create_partition(cur, new_partition)
|
||||
if system_mode != constants.SYSTEM_MODE_SIMPLEX:
|
||||
create_ctrl0_cinder_partition(cur, stors, part_size)
|
||||
break
|
||||
|
||||
|
||||
def create_user_partitions():
    conn = psycopg2.connect("dbname=sysinv user=postgres")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            hostname = constants.CONTROLLER_1_HOSTNAME
            if system_mode == constants.SYSTEM_MODE_SIMPLEX:
                hostname = constants.CONTROLLER_0_HOSTNAME

            cur.execute("select i_host.id, i_host.rootfs_device from i_host "
                        "where hostname=%s;", (hostname,))
            row = cur.fetchone()
            if row is None:
                LOG.exception("Failed to fetch %s host_id" % hostname)
                raise

            controller_id = row['id']
            controller_rootfs = row['rootfs_device']

            # Get the disks for the controller.
            cur.execute("select i_idisk.forihostid, i_idisk.uuid, "
                        "i_idisk.device_node, i_idisk.device_path, "
                        "i_idisk.capabilities, "
                        "i_idisk.id, i_idisk.size_mib from i_idisk where "
                        "forihostid = %s", (controller_id,))

            disks = cur.fetchall()

            # Get the PVs for the controller.
            cur.execute(
                "select i_pv.id, i_pv.disk_or_part_uuid, "
                "i_pv.disk_or_part_device_node, "
                "i_pv.disk_or_part_device_path, i_pv.lvm_pv_size,"
                "i_pv.lvm_pv_name, i_pv.lvm_vg_name, i_pv.forilvgid,"
                "i_pv.pv_type from i_pv where forihostid = %s",
                (controller_id,))
            pvs = cur.fetchall()

            # Obtain the rootfs disk. This is for handling the case when
            # rootfs is not on /dev/sda.
            controller_rootfs_disk = next((
                d for d in disks
                if (d.get('device_path') == controller_rootfs or
                    controller_rootfs in d.get('device_node'))), None)
            LOG.info("controller_rootfs_disk: %s" % controller_rootfs_disk)

            create_db_partition_entries(cur, disks)

            # Get the partitions for the controller.
            cur.execute(
                "select partition.id, partition.uuid, "
                "partition.status, partition.device_node, "
                "partition.device_path, partition.size_mib,"
                "partition.idisk_uuid, partition.foripvid "
                "from partition where forihostid = %s",
                (controller_id,))
            partitions = cur.fetchall()

            update_partition_pv(cur, pvs, partitions, disks)

            # If this is not an AIO setup, we must return, as we already have
            # all the needed information.
            if utils.get_system_type() != constants.TIS_AIO_BUILD:
                LOG.info("This is not an AIO setup, nothing to do here.")
                return

            # Get the PVs for cgts-vg from the root fs disk, present in the
            # DB. This list can have max 2 elements.
            cgts_vg_pvs = [pv for pv in pvs
                           if pv['lvm_vg_name'] == constants.LVG_CGTS_VG and
                           (controller_rootfs_disk['device_path'] in
                            pv['disk_or_part_device_path'])]

            LOG.info("cgts-vg pvs: %s" % str(cgts_vg_pvs))

            # Build the PV name of the initial PV for cgts-vg.
            R5_cgts_pv_1_name = build_partition_device_node(
                controller_rootfs_disk['device_node'],
                uefi_cgts_pv_1_partition_number)

            # Get the initial PV of cgts-vg. If it's not present with the
            # provided name, then we're probably on a BIOS setup.
            R5_cgts_pv_1 = next((
                pv for pv in cgts_vg_pvs
                if pv['lvm_pv_name'] == R5_cgts_pv_1_name), None)

            # Get the device used by R5_cgts_pv_1.
            R5_cgts_pv_1_part = next((
                p for p in partitions
                if p['device_node'] == R5_cgts_pv_1_name),
                None)

            # On an R4 AIO installed with BIOS, we won't have 6 partitions
            # right after install, but only 4.
            # R4 PV /dev/sda5 thus should become PV /dev/sda4 in R5.
            if not R5_cgts_pv_1:
                LOG.info("Probably bios here, we need to update the DB for "
                         "cgts-vg partitions and pv")
                R4_cgts_pv_1_name = build_partition_device_node(
                    controller_rootfs_disk['device_node'],
                    bios_cgts_pv_1_partition_number)
                R5_cgts_pv_1 = next((
                    pv for pv in pvs
                    if pv['lvm_pv_name'] == R4_cgts_pv_1_name),
                    None)

                cur.execute(
                    "update partition set foripvid=%s, status=%s "
                    "where device_path=%s and forihostid=%s",
                    (R5_cgts_pv_1.get('id'),
                     constants.PARTITION_IN_USE_STATUS,
                     R5_cgts_pv_1_part['device_path'], controller_id))

                update_db_pv(cur, R5_cgts_pv_1_part['device_path'],
                             R5_cgts_pv_1_part['device_node'],
                             R5_cgts_pv_1_part['uuid'],
                             R5_cgts_pv_1_part['device_node'],
                             R5_cgts_pv_1.get('id'))

            cgts_vg_pvs.remove(R5_cgts_pv_1)

            # There is a high chance that the current R5 /dev/sda4 partition
            # is too small for the R4 cgts-vg. In this case, we need to
            # create an extra partition & PV for cgts-vg.
            part_number = 5

            extra_cgts_part_size = math.ceil(
                float(R5_cgts_pv_1.get('lvm_pv_size')) / (1024 ** 2) -
                R5_cgts_pv_1_part.get('size_mib'))
            if extra_cgts_part_size > 0:
                LOG.info("/dev/sda4 is not enough for R4 cgts-vg")
                cgts_vg_extend(cur, controller_rootfs_disk,
                               R5_cgts_pv_1_part,
                               R5_cgts_pv_1,
                               part_number, extra_cgts_part_size)
                part_number = part_number + 1
            else:
                extra_cgts_part_size = 0

            # If the remaining space was used by either nova-local or
            # cgts-vg, then the R4 partition must be specifically created.
            if cgts_vg_pvs:
                last_rootfs_pv = cgts_vg_pvs[0]
                LOG.info("Extra rootfs disk space used by cgts-vg")
            else:
                # Get the nova-local PV from the rootfs disk.
                last_rootfs_pv = next((
                    pv for pv in pvs
                    if (pv['lvm_vg_name'] == constants.LVG_NOVA_LOCAL and
                        controller_rootfs_disk['device_node'] in
                        pv['lvm_pv_name'])),
                    None)

                if last_rootfs_pv:
                    LOG.info("Extra rootfs disk space used by nova-local")

            # If the remaining space is not used, return.
            if not last_rootfs_pv:
                LOG.info("Extra rootfs disk space not used, return")
                return

            # Create the partition DB entry and update the associated
            # physical volume.
            disk_available_mib = get_disk_available_mib(
                controller_rootfs_disk['device_node']) - extra_cgts_part_size
            LOG.info("Available mib: %s" % disk_available_mib)

            part_size = disk_available_mib
            part_device_node = '{}{}'.format(
                controller_rootfs_disk.get('device_node'),
                part_number)
            part_device_path = '{}-part{}'.format(
                controller_rootfs_disk.get('device_path'),
                part_number)

            LOG.info("Partition size: %s part device node: %s "
                     "part device path: %s" %
                     (part_size, part_device_node, part_device_path))

            part_uuid = uuidutils.generate_uuid()

            new_partition = Partition_Tuple(
                uuid=part_uuid,
                idisk_id=controller_rootfs_disk.get('id'),
                idisk_uuid=controller_rootfs_disk.get('uuid'),
                size_mib=part_size,
                device_node=part_device_node,
                device_path=part_device_path,
                status=constants.PARTITION_CREATE_ON_UNLOCK_STATUS,
                type_guid=constants.USER_PARTITION_PHYSICAL_VOLUME,
                forihostid=controller_id,
                foripvid=last_rootfs_pv.get('id'),
                start_mib=None,
                end_mib=None)

            create_partition(cur, new_partition)

            update_db_pv(cur, part_device_path, part_device_node,
                         part_uuid, part_device_node,
                         last_rootfs_pv.get('id'))


if __name__ == "__main__":
    sys.exit(main())
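Note: Partition_Tuple is defined earlier in this file, outside this hunk. The callers above pass every field by keyword, so any definition consistent with that usage works; a minimal sketch (a reconstruction, not the committed definition):

    import collections

    # Field set inferred from the keyword arguments used above.
    Partition_Tuple = collections.namedtuple(
        'partition',
        'uuid idisk_id idisk_uuid size_mib device_node device_path '
        'status type_guid forihostid foripvid start_mib end_mib')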
@@ -0,0 +1,411 @@
#!/usr/bin/env python
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will migrate away from using vlan-tagged subnets,
# to using separate networks with their compute ports trunked
# from the network the vlan-tagged subnet was on.
# Once all of the compute nodes are updated, the old vlan-tagged
# subnets, as well as all of the ports on them, will be deleted.
import os
import psycopg2
import subprocess
import sys
import uuid

from psycopg2.extras import RealDictCursor

from controllerconfig.common import log

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None  # noqa
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]  # noqa
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print ("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    log.configure()

    if from_release == "17.06" and action == "migrate":
        try:
            migrate_vlan()
        except Exception as ex:
            LOG.exception(ex)
            print ex
            return 1

    if from_release == "17.06" and action == "activate":
        try:
            cleanup_neutron_vlan_subnets()
        except Exception as ex:
            LOG.exception(ex)
            print ex
            return 1


def run_cmd(cur, cmd):
    cur.execute(cmd)


def run_cmd_postgres(sub_cmd):
    """
    This executes the given command as user postgres. This is necessary when
    this script is run as root, which is the case on an upgrade activation.
    """
    error_output = open(os.devnull, 'w')
    cmd = ("sudo -u postgres psql -d neutron -c \"%s\"" % sub_cmd)
    LOG.info("Executing '%s'" % cmd)
    subprocess.check_call([cmd], shell=True, stderr=error_output)


def migrate_vlan():
    conn = psycopg2.connect("dbname=neutron user=postgres")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            create_new_networks(cur)


def cleanup_neutron_vlan_subnets():
    """
    This function cleans up data leftover from migrating away from using
    vlan-tagged subnets. Specifically, it deletes all non-compute ports
    on vlan-tagged subnets, as well as all vlan-tagged subnets.
    """
    cmd = ("DELETE FROM ports WHERE id in"
           " (SELECT port_id FROM ipallocations AS ipa"
           " JOIN subnets AS s ON ipa.subnet_id = s.id"
           " where s.vlan_id!=0)"
           " AND device_owner not like 'compute:%';")
    run_cmd_postgres(cmd)

    cmd = "DELETE FROM subnets WHERE vlan_id != 0;"
    run_cmd_postgres(cmd)


def create_new_networks(cur):
    """
    This function creates new networks for each network segment belonging to
    a vlan-tagged subnet, and clones those subnets minus the vlan ID.
    For each of those cloned subnets, it also clones all of the ports on
    them, as well as all of the IP allocations, and the bindings.
    """
    cmd = ("SELECT s.vlan_id, s.network_id, m2ss.network_type,"
           " m2ss.physical_network, m2ss.segmentation_id FROM subnets AS s"
           " JOIN ml2_subnet_segments AS m2ss ON s.id = m2ss.subnet_id"
           " WHERE s.vlan_id != 0 GROUP BY s.vlan_id, s.network_id,"
           " m2ss.network_type, m2ss.physical_network, m2ss.segmentation_id;")
    run_cmd(cur, cmd)
    networks_to_create = []
    while True:
        network = cur.fetchone()
        if network is None:
            break
        networks_to_create.append(network)

    for network in networks_to_create:
        create_and_populate_network(cur, network)


def create_standard_attribute(cur, name):
    """
    This function creates new standard attribute entries to be used by copied
    data.
    """
    cmd = ("INSERT INTO standardattributes (resource_type)"
           " VALUES ('%s') RETURNING id") %\
        (name,)
    run_cmd(cur, cmd)
    return cur.fetchone()['id']


def create_and_populate_network(cur, network):
    """
    This function takes a network segment, and copies all the data on that
    network segment to a newly-created network. For each compute port on the
    original network, a port trunk should be created from the original port
    as a parent, to the new port as a subport. This replaces the vlan id
    being set on an individual subnet.
    """
    vlan_id = network['vlan_id']
    network_type = network['network_type']
    old_network_id = network['network_id']
    # This new network ID should be the same as neutron passes to vswitch for
    # the network-uuid of the network segment for the vlan-tagged subnet.
    network_suffix = "vlan%s" % vlan_id
    new_network_id = uuid.uuid5(uuid.UUID(old_network_id), network_suffix)
    new_networksegment_id = uuid.uuid4()
    cmd = ("INSERT INTO networks (project_id, id, name, status,"
           "admin_state_up, vlan_transparent, standard_attr_id,"
           " availability_zone_hints)"
           " (SELECT project_id, '%s',"
           " CONCAT_WS('-VLAN%d', NULLIF(name,''), ''), status,"
           " admin_state_up, vlan_transparent, '%s', availability_zone_hints"
           " FROM networks WHERE id = '%s') RETURNING id;") %\
        (new_network_id, vlan_id,
         create_standard_attribute(cur, 'networks'), old_network_id)
    run_cmd(cur, cmd)
    old_network_id = network['network_id']
    new_network_id = cur.fetchone()['id']

    cmd = ("INSERT INTO networksegments (id, network_id, network_type,"
           " physical_network, segmentation_id, is_dynamic, segment_index,"
           " standard_attr_id, name)"
           " VALUES('%s','%s','%s','%s','%s','%s','%s','%s','%s')") %\
        (new_networksegment_id, new_network_id, network_type,
         network['physical_network'], network['segmentation_id'],
         'f', '0', create_standard_attribute(cur, 'networksegments'), '')
    run_cmd(cur, cmd)

    # Get a list of vlan-tagged subnets on the network we are copying.
    # For each of these subnets, we loop through and copy them, and then loop
    # through the ip allocations on them and copy those ip allocations, along
    # with the ports that are in those ip allocations.
    sub_cmd = ("SELECT id FROM subnets"
               " WHERE vlan_id = '%s' AND network_id='%s'") %\
        (vlan_id, old_network_id)

    # Copy the subnets to the new network
    run_cmd(cur, sub_cmd)
    subnets = cur.fetchall()
    subnet_copies = {}
    for subnet in subnets:
        old_subnet_id = subnet['id']
        new_subnet_id = uuid.uuid4()
        new_ml2_subnet_segment_id = uuid.uuid4()
        subnet_copies[old_subnet_id] = new_subnet_id
        cmd = ("INSERT INTO subnets"
               " (project_id, id, name, network_id, ip_version, cidr,"
               " gateway_ip, enable_dhcp, ipv6_ra_mode, ipv6_address_mode,"
               " subnetpool_id, vlan_id, standard_attr_id, segment_id)"
               " (SELECT project_id, '%s', name, '%s', ip_version, cidr,"
               " gateway_ip, enable_dhcp, ipv6_ra_mode, ipv6_address_mode,"
               " subnetpool_id, 0, '%s', segment_id"
               " FROM subnets WHERE id='%s')") %\
            (new_subnet_id, new_network_id,
             create_standard_attribute(cur, 'subnets'), old_subnet_id)
        run_cmd(cur, cmd)
        cmd = ("INSERT INTO ml2_subnet_segments"
               " (id, subnet_id, network_type, physical_network,"
               " segmentation_id, is_dynamic, segment_index)"
               " (SELECT '%s', '%s', network_type, physical_network,"
               " segmentation_id, is_dynamic, segment_index"
               " FROM ml2_subnet_segments WHERE subnet_id='%s')") %\
            (new_ml2_subnet_segment_id, new_subnet_id, old_subnet_id)
        run_cmd(cur, cmd)
        duplicate_ipam_subnets(cur, old_subnet_id, new_subnet_id)
        duplicate_ipallocationpools(cur, old_subnet_id, new_subnet_id)

    # Copy the ports that are related to vlan subnets such that those new
    # ports are directly attached to the network that was created to replace
    # the vlan subnet. We ignore DHCP ports because both the vlan subnet and
    # the new network will share the same provider network, and we do not
    # want 2 ports with the same IP to exist simultaneously. Instead, we let
    # the DHCP server allocate this port when it notices that it is missing,
    # which will result in a new IP allocation and should not interfere with
    # any existing allocations because they have all been cloned onto the
    # new network.
    cmd = ("SELECT DISTINCT port_id FROM ipallocations"
           " LEFT JOIN ports AS p ON p.id = ipallocations.port_id"
           " WHERE p.device_owner != 'network:dhcp'"
           " AND subnet_id IN (%s)") % sub_cmd
    run_cmd(cur, cmd)
    ports_to_copy = cur.fetchall()
    port_copies = {}
    for port in ports_to_copy:
        old_port_id = port['port_id']
        new_port_id = uuid.uuid4()
        port_copies[old_port_id] = new_port_id
        cmd = ("INSERT INTO ports (project_id, id, name, network_id,"
               " mac_address, admin_state_up, status, device_id, device_owner,"
               " standard_attr_id, ip_allocation)"
               " (SELECT project_id, '%s',"
               " CONCAT_WS('-VLAN%d', NULLIF(name,''), ''), '%s',"
               " mac_address, admin_state_up, status, device_id, device_owner,"
               "'%s', ip_allocation FROM ports WHERE id = '%s')"
               " RETURNING id, device_owner") %\
            (new_port_id, vlan_id, new_network_id,
             create_standard_attribute(cur, 'ports'), old_port_id)
        run_cmd(cur, cmd)
        new_port = cur.fetchone()
        new_port_owner = new_port['device_owner']
        cmd = ("INSERT INTO ml2_port_bindings"
               " (port_id, host, vif_type, vnic_type, profile,"
               " vif_details, vif_model, mac_filtering, mtu)"
               " (SELECT '%s', host, vif_type, vnic_type, profile,"
               " vif_details, vif_model, mac_filtering, mtu"
               " FROM ml2_port_bindings where port_id='%s')") %\
            (new_port_id, old_port_id)
        run_cmd(cur, cmd)
        cmd = ("INSERT INTO ml2_port_binding_levels"
               " (port_id, host, level, driver, segment_id)"
               " (SELECT '%s', host, level, driver, '%s'"
               " FROM ml2_port_binding_levels WHERE port_id='%s')") %\
            (new_port_id, new_networksegment_id, old_port_id)
        run_cmd(cur, cmd)
        if new_port_owner.startswith('compute:'):
            trunk_id = create_port_trunk(cur, old_port_id)
            create_subport(cur, trunk_id, new_port_id, 'vlan', vlan_id)
        elif new_port_owner.startswith('network:router'):
            cmd = ("INSERT INTO routerports (router_id, port_id, port_type)"
                   " (SELECT router_id, '%s', port_type FROM routerports"
                   " WHERE port_id = '%s')") %\
                (new_port_id, old_port_id)
            run_cmd(cur, cmd)
        elif new_port_owner == 'network:dhcp':
            # Set new port's device_id to DEVICE_ID_RESERVED_DHCP_PORT,
            # so that it is used by dhcp agent for new subnet.
            cmd = ("UPDATE ports SET device_id='reserved_dhcp_port'"
                   " WHERE id='%s'") %\
                (new_port_id,)
            run_cmd(cur, cmd)

    # Copy the ipallocations
    cmd = ("SELECT * FROM ipallocations WHERE network_id='%s'") %\
        (old_network_id)
    run_cmd(cur, cmd)
    ipallocations = cur.fetchall()
    for ipallocation in ipallocations:
        old_ip_address = ipallocation['ip_address']
        old_port_id = ipallocation['port_id']
        old_subnet_id = ipallocation['subnet_id']
        new_port_id = port_copies.get(old_port_id)
        new_subnet_id = subnet_copies.get(old_subnet_id)
        if not new_port_id or not new_subnet_id:
            continue
        cmd = ("INSERT INTO ipallocations"
               " (port_id, ip_address, subnet_id, network_id)"
               " VALUES ('%s', '%s', '%s', '%s')") %\
            (new_port_id, old_ip_address, new_subnet_id, new_network_id)
        run_cmd(cur, cmd)

    # Copy the DHCP network agent bindings so that the new networks are
    # initially scheduled to the same agents as the vlan subnets they are
    # replacing. The alternative is that all new networks are initially
    # unscheduled and they may all get scheduled to the same agent when any
    # of the agents query for new networks to service.
    cmd = ("SELECT * FROM networkdhcpagentbindings WHERE network_id='%s'" %
           old_network_id)
    run_cmd(cur, cmd)
    bindings = cur.fetchall()
    for binding in bindings:
        agent_id = binding['dhcp_agent_id']
        cmd = ("INSERT INTO networkdhcpagentbindings"
               " (network_id, dhcp_agent_id)"
               " VALUES ('%s', '%s')" %
               (new_network_id, agent_id))
        run_cmd(cur, cmd)


def duplicate_ipam_subnets(cur, old_neutron_subnet_id, new_neutron_subnet_id):
    cmd = ("SELECT id from ipamsubnets WHERE neutron_subnet_id='%s'") %\
        (old_neutron_subnet_id)
    run_cmd(cur, cmd)
    ipamsubnets = cur.fetchall()
    for ipamsubnet in ipamsubnets:
        old_ipamsubnet_id = ipamsubnet['id']
        new_ipamsubnet_id = uuid.uuid4()
        cmd = ("INSERT INTO ipamsubnets (id, neutron_subnet_id)"
               " VALUES ('%s', '%s')") %\
            (new_ipamsubnet_id, new_neutron_subnet_id)
        run_cmd(cur, cmd)
        cmd = ("SELECT * from ipamallocationpools"
               " WHERE ipam_subnet_id='%s'") %\
            (old_ipamsubnet_id)
        run_cmd(cur, cmd)
        ipamallocationpools = cur.fetchall()
        for ipamallocationpool in ipamallocationpools:
            new_ipamallocationpool_id = uuid.uuid4()
            first_ip = ipamallocationpool['first_ip']
            last_ip = ipamallocationpool['last_ip']
            cmd = ("INSERT INTO ipamallocationpools"
                   " (id, ipam_subnet_id, first_ip, last_ip)"
                   " VALUES ('%s', '%s', '%s', '%s')") %\
                (new_ipamallocationpool_id, new_ipamsubnet_id,
                 first_ip, last_ip)
            run_cmd(cur, cmd)
        cmd = ("INSERT INTO ipamallocations"
               " (ip_address, status, ipam_subnet_id)"
               " (SELECT ip_address, status, '%s' FROM ipamallocations"
               " WHERE ipam_subnet_id='%s')") %\
            (new_ipamsubnet_id, old_ipamsubnet_id)
        run_cmd(cur, cmd)


def duplicate_ipallocationpools(cur, old_subnet_id, new_subnet_id):
    cmd = ("SELECT * from ipallocationpools WHERE subnet_id='%s'") %\
        (old_subnet_id)
    run_cmd(cur, cmd)
    ipallocationpools = cur.fetchall()
    for ipallocationpool in ipallocationpools:
        new_ipallocationpool_id = uuid.uuid4()
        first_ip = ipallocationpool['first_ip']
        last_ip = ipallocationpool['last_ip']
        cmd = ("INSERT INTO ipallocationpools"
               " (id, subnet_id, first_ip, last_ip)"
               " VALUES ('%s', '%s', '%s', '%s')") %\
            (new_ipallocationpool_id, new_subnet_id,
             first_ip, last_ip)
        run_cmd(cur, cmd)


def create_port_trunk(cur, port_id):
    """
    This function will create a trunk off of a given port if there doesn't
    already exist a trunk off of that port. This port should be a compute
    port, where this is to replace a vlan-tagged subnet on that port.
    """
    # create trunk if not exists
    cmd = ("SELECT id FROM trunks WHERE port_id = '%s'") %\
        (port_id)
    run_cmd(cur, cmd)
    trunk = cur.fetchone()
    if trunk:
        return trunk['id']

    cmd = ("INSERT INTO trunks (admin_state_up, project_id, id, name, port_id,"
           " status, standard_attr_id)"
           " (SELECT admin_state_up, project_id, '%s', name, id, status, '%s'"
           " FROM ports WHERE id = '%s') RETURNING id") %\
        (uuid.uuid4(), create_standard_attribute(cur, 'trunks'), port_id)
    run_cmd(cur, cmd)
    trunk = cur.fetchone()
    return trunk['id']


def create_subport(cur, trunk_id, subport_id, segmentation_type,
                   segmentation_id):
    """
    Create a subport off of a given network trunk.
    The segmentation_id should be the vlan id as visible to the guest,
    not the segmentation id of the network segment.
    """
    cmd = ("INSERT INTO subports"
           " (port_id, trunk_id, segmentation_type, segmentation_id)"
           " VALUES ('%s', '%s','%s','%s')") %\
        (subport_id, trunk_id, segmentation_type, segmentation_id)
    run_cmd(cur, cmd)
    cmd = ("UPDATE ports SET device_id='', device_owner='trunk:subport'"
           " WHERE id='%s'") % subport_id
    run_cmd(cur, cmd)
    vif_details = '{\"port_filter\": true, \"vhostuser_enabled\": false}'
    cmd = ("UPDATE ml2_port_bindings SET vif_model='',vif_details='%s'"
           " WHERE port_id='%s'" % (vif_details, subport_id))
    run_cmd(cur, cmd)


if __name__ == "__main__":
    sys.exit(main())
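The comment in create_and_populate_network relies on uuid.uuid5() being deterministic: deriving from the same parent UUID and the same "vlan<N>" suffix always yields the same child UUID, so neutron and vswitch can compute the network ID independently and agree. A standalone illustration (UUID value made up):

    import uuid

    old_network_id = '2f0c9757-5b38-43bc-8f93-2a3449802a2a'  # example only
    a = uuid.uuid5(uuid.UUID(old_network_id), 'vlan10')
    b = uuid.uuid5(uuid.UUID(old_network_id), 'vlan10')
    assert a == b  # same inputs, same derived network ID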
@@ -0,0 +1,297 @@
#!/usr/bin/env python
# Copyright (c) 2017-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will update the storage backends for controller-1.

import psycopg2
import sys
import json

from sysinv.openstack.common import uuidutils
from sysinv.common import constants
from psycopg2.extras import RealDictCursor
from controllerconfig.common import log
from controllerconfig.upgrades import utils

LOG = log.get_logger(__name__)

CINDER_BACKEND = None
CONFIG_CINDER_LVM_TYPE = "CONFIG_CINDER_LVM_TYPE"


def main():
    action = None
    from_release = None
    to_release = None  # noqa
    arg = 1

    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]  # noqa
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print ("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    log.configure()

    if from_release == "17.06" and action == "migrate":
        try:
            set_backends(from_release)
        except Exception as ex:
            LOG.exception(ex)
            return 1


def update_capabilities(cur):
    # Update i_idisk capabilities.
    cur.execute("select i_idisk.forihostid, i_idisk.uuid, "
                "i_idisk.device_node, i_idisk.device_path, "
                "i_idisk.id, i_idisk.capabilities from i_idisk")

    disks = cur.fetchall()
    for d in disks:
        d_json_dict = json.loads(d['capabilities'])
        if constants.IDISK_DEV_FUNCTION in d_json_dict:
            del d_json_dict[constants.IDISK_DEV_FUNCTION]
            d_new_capab = json.dumps(d_json_dict)

            try:
                cur.execute(
                    "update i_idisk set capabilities=%s "
                    "where id=%s",
                    (d_new_capab, d['id']))
            except Exception as e:
                LOG.exception("Error: %s" % str(e))
                raise

    # Update i_system capabilities.
    cur.execute("select i_system.id, i_system.capabilities "
                "from i_system")
    systems = cur.fetchall()
    for s in systems:
        s_json_dict = json.loads(s['capabilities'])
        if 'cinder_backend' in s_json_dict:
            del s_json_dict['cinder_backend']
            s_new_capab = json.dumps(s_json_dict)
            cur.execute(
                "update i_system set capabilities=%s "
                "where id=%s",
                (s_new_capab, s['id']))


def update_stors(cur):
    # Get the stors
    cur.execute("select i_istor.id, i_istor.idisk_uuid, "
                "i_istor.function, i_istor.forihostid "
                "from i_istor ")
    stors = cur.fetchall()

    for stor in stors:
        if stor['function'] == constants.STOR_FUNCTION_CINDER:
            # remove cinder stors
            try:
                cur.execute(
                    "update i_idisk set foristorid=null where uuid=%s",
                    (stor['idisk_uuid'],))
                cur.execute(
                    "delete from i_istor where id=%s",
                    (stor['id'],))
            except Exception as e:
                LOG.exception("Error: %s" % str(e))
                raise
        elif stor['function'] == constants.STOR_FUNCTION_OSD:
            # link OSDs to the primary storage tier
            try:
                cur.execute(
                    "update i_istor set fortierid=1 where id=%s",
                    (stor['id'],))
            except Exception as e:
                LOG.exception("Error: %s" % str(e))
                raise


def add_primary_storage_tier(cur):
    # A cluster and a primary tier are always present even if we don't have
    # a ceph backend currently enabled. So make sure on upgrade we add the
    # tier referencing the existing cluster.
    new_storage_tier_uuid = uuidutils.generate_uuid()
    try:
        # Currently only 1 cluster ever defined, id must be 1
        cur.execute("insert into storage_tiers(uuid, id, name, type, status, "
                    "capabilities, forclusterid) "
                    "values(%s, %s, %s, %s, %s, %s, %s)",
                    (new_storage_tier_uuid, '1',
                     constants.SB_TIER_DEFAULT_NAMES[
                         constants.SB_TIER_TYPE_CEPH],
                     constants.SB_TIER_TYPE_CEPH,
                     constants.SB_TIER_STATUS_DEFINED,
                     '{}', '1'))
    except Exception as e:
        LOG.exception("Error inserting into storage_tiers: %s" % str(e))

    LOG.info("Primary Storage Tier added.")


def update_storage_backends(cur):
    global CINDER_BACKEND
    cur.execute("select storage_backend.id, storage_backend.backend, "
                "storage_backend.state, "
                "storage_backend.forisystemid, storage_backend.services, "
                "storage_backend.capabilities from storage_backend")
    storage_backend = cur.fetchone()
    LOG.info("storage_backend: %s" % str(storage_backend))
    if not storage_backend:
        LOG.exception("No storage backend present, exiting.")
        raise

    backend = storage_backend['backend']

    if backend == "ceph":
        CINDER_BACKEND = constants.SB_TYPE_CEPH
        LOG.info("Ceph backend")
        cur.execute(
            "select storage_ceph.id, storage_ceph.object_gateway "
            "from storage_ceph")
        storage_ceph = cur.fetchone()
        if not storage_ceph:
            LOG.exception("No storage_ceph entry, exiting.")
            raise

        services = "{0}, {1}".format(constants.SB_SVC_CINDER,
                                     constants.SB_SVC_GLANCE)
        if storage_ceph['object_gateway'] == "t":
            services = "cinder, glance, swift"
        LOG.info("Services running on ceph: %s" % services)

        try:
            cur.execute(
                "update storage_backend set name=%s, state=%s, services=%s, "
                "capabilities=%s where id=%s",
                (constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH],
                 constants.SB_STATE_CONFIGURED, services,
                 '{"replication":"2", "min_replication":"1"}',
                 storage_backend['id']))

            cur.execute(
                "update storage_ceph set tier_id=%s where id=%s",
                ('1', storage_backend['id']))
        except Exception as e:
            LOG.exception("Error: %s" % str(e))
            raise

    elif backend == "lvm":
        CINDER_BACKEND = constants.SB_TYPE_LVM
        LOG.info("LVM backend")
        cur.execute(
            "update storage_backend set name=%s, state=%s, services=%s, "
            "capabilities=%s where id=%s",
            (constants.SB_DEFAULT_NAMES[constants.SB_TYPE_LVM],
             constants.SB_STATE_CONFIGURED, constants.SB_SVC_CINDER, '{}',
             storage_backend['id']))
    else:
        LOG.info("Other backend present: %s" % backend)
        return

    new_storage_backend_uuid = uuidutils.generate_uuid()
    cur.execute(
        "insert into storage_backend(uuid, name, backend, state, "
        "forisystemid, services, capabilities) "
        "values(%s, %s, %s, %s, %s, %s, %s)",
        (new_storage_backend_uuid,
         constants.SB_DEFAULT_NAMES[constants.SB_TYPE_FILE],
         constants.SB_TYPE_FILE, constants.SB_STATE_CONFIGURED,
         storage_backend['forisystemid'], constants.SB_SVC_GLANCE, '{}'))
    try:
        cur.execute(
            "select storage_backend.id, storage_backend.name, "
            "storage_backend.backend, storage_backend.state, "
            "storage_backend.forisystemid, storage_backend.services, "
            "storage_backend.capabilities from storage_backend where "
            "services=%s", (constants.SB_SVC_GLANCE,))
    except Exception as e:
        LOG.exception("Error selecting the storage backend for glance: %s"
                      % str(e))
    storage_backend_glance = cur.fetchone()

    try:
        cur.execute("insert into storage_file(id) values(%s)",
                    (storage_backend_glance['id'],))
    except Exception as e:
        LOG.exception("Error inserting into storage file: %s" % str(e))

    LOG.info("Backends updated")


def update_legacy_cache_tier(cur):
    feature_enabled = constants.SERVICE_PARAM_CEPH_CACHE_TIER_FEATURE_ENABLED
    cur.execute("select * from service_parameter where service=%s and "
                "name=%s", (constants.SERVICE_TYPE_CEPH, feature_enabled,))
    parameters = cur.fetchall()
    if parameters is None or len(parameters) == 0:
        LOG.exception("Failed to fetch ceph service_parameter data")
        raise

    # Make sure that cache tiering is disabled: Not supported but not removed
    LOG.info("Updating ceph service parameters")
    cur.execute("update service_parameter set value='false' where "
                "service=%s and name=%s",
                (constants.SERVICE_TYPE_CEPH, feature_enabled,))


def update_lvm_type(cur, from_release):
    lvm_type = None
    packstack_config = utils.get_packstack_config(from_release)

    try:
        config_cinder_lvm_type = packstack_config.get(
            'general', CONFIG_CINDER_LVM_TYPE)
    except Exception:
        # For upgrades from R2, this value may be missing.
        # If so, we log and use the default value of thin.
        LOG.info("No %s option. Using default thin." % CONFIG_CINDER_LVM_TYPE)
        config_cinder_lvm_type = constants.CINDER_LVM_TYPE_THIN

    # Determine the lvm_type from the packstack-answers.txt file.
    # If this information is missing, just give a warning and continue
    # with the upgrade since this is not critical.
    if constants.CINDER_LVM_TYPE_THIN in config_cinder_lvm_type.lower():
        lvm_type = constants.CINDER_LVM_TYPE_THIN
    elif constants.CINDER_LVM_TYPE_THICK in config_cinder_lvm_type.lower():
        lvm_type = constants.CINDER_LVM_TYPE_THICK
    else:
        LOG.warning("No %s or %s LVM type" % (constants.CINDER_LVM_TYPE_THIN,
                                              constants.CINDER_LVM_TYPE_THICK))

    if not lvm_type:
        LOG.warning("No %s option" % CONFIG_CINDER_LVM_TYPE)
        lvm_type = constants.CINDER_LVM_TYPE_THIN

    LOG.info("lvm_type: %s" % lvm_type)
    capabilities = '{"lvm_type": "%s"}' % lvm_type
    cur.execute("update i_lvg set capabilities=%s where lvm_vg_name=%s",
                (capabilities, constants.LVG_CINDER_VOLUMES))


def set_backends(from_release):
    conn = psycopg2.connect("dbname=sysinv user=postgres")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            update_stors(cur)
            update_capabilities(cur)
            add_primary_storage_tier(cur)
            update_storage_backends(cur)
            if CINDER_BACKEND == constants.SB_TYPE_CEPH:
                update_legacy_cache_tier(cur)
            if CINDER_BACKEND == constants.SB_TYPE_LVM:
                update_lvm_type(cur, from_release)


if __name__ == "__main__":
    sys.exit(main())
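update_capabilities follows a load/delete/dump round-trip on the JSON capabilities columns; in isolation the pattern looks like this (key name and sample document invented for illustration):

    import json

    caps = '{"device_function": "cinder_device", "model_num": "ST1000"}'
    fields = json.loads(caps)
    fields.pop('device_function', None)  # drop the key if present
    print(json.dumps(fields))            # {"model_num": "ST1000"}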
@@ -0,0 +1,78 @@
#!/usr/bin/env python
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This migration script converts the sdn_enabled field in the system table
# from y/n to True/False

import json
import sys

import psycopg2
from controllerconfig.common import log
from psycopg2.extras import RealDictCursor

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None  # noqa
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]  # noqa
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print ("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    log.configure()

    if from_release == "17.06" and action == "migrate":
        try:
            LOG.info("performing system migration from release %s to %s with "
                     "action: %s" % (from_release, to_release, action))
            update_system_capabilities()
        except Exception as ex:
            LOG.exception(ex)
            print ex
            return 1


def update_system_capabilities():
    conn = psycopg2.connect("dbname='sysinv' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("select capabilities from i_system WHERE id = 1;")
            capabilities = cur.fetchone()
            if capabilities is None:
                LOG.exception("Failed to fetch i_system data")
                raise

            fields_str = capabilities.get('capabilities')
            fields_dict = json.loads(fields_str)

            if fields_dict.get('sdn_enabled') == 'y':
                new_vals = {'sdn_enabled': True}
            else:
                new_vals = {'sdn_enabled': False}
            fields_dict.update(new_vals)

            new_cap = json.dumps(fields_dict)

            LOG.info("Updating system capabilities %s to %s" %
                     (capabilities, new_cap))
            upgrade_vals = {'C': new_cap}
            cur.execute("update i_system set capabilities=%(C)s WHERE id=1",
                        upgrade_vals)


if __name__ == "__main__":
    sys.exit(main())
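The conversion above turns the legacy 'y'/'n' string into a real JSON boolean before writing the column back; a self-contained sketch of the same transformation (sample capabilities made up):

    import json

    fields = json.loads('{"sdn_enabled": "y", "https_enabled": "n"}')
    fields['sdn_enabled'] = fields.get('sdn_enabled') == 'y'
    print(json.dumps(fields))  # sdn_enabled is now true, not "y"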
@@ -0,0 +1,67 @@
#!/usr/bin/env python
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This migration script converts the identity and assignment driver
# values in the service parameter table from their fully qualified
# paths to a relative path as required by Pike

import sys

import psycopg2
from controllerconfig.common import log
from psycopg2.extras import RealDictCursor

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None  # noqa
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]  # noqa
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print ("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    log.configure()

    if from_release == "17.06" and action == "migrate":
        try:
            LOG.info("performing system migration from release %s to %s with "
                     "action: %s" % (from_release, to_release, action))
            update_identity_service_parameters()
        except Exception as ex:
            LOG.exception(ex)
            print ex
            return 1


def update_identity_service_parameters():
    conn = psycopg2.connect("dbname='sysinv' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("select * from service_parameter "
                        "where service='identity' and name='driver';")
            parameters = cur.fetchall()
            if parameters is None or len(parameters) == 0:
                LOG.exception(
                    "Failed to fetch identity service_parameter data")
                raise

            LOG.info("Updating identity service parameters to 'sql'")
            cur.execute("update service_parameter set value='sql' "
                        "where service='identity' and name='driver';")


if __name__ == "__main__":
    sys.exit(main())
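One way to confirm the result after this script has run is to re-read the rows it touched; a sketch (run as a user that can reach the sysinv database):

    import psycopg2
    from psycopg2.extras import RealDictCursor

    conn = psycopg2.connect("dbname='sysinv' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("select name, value from service_parameter "
                        "where service='identity' and name='driver';")
            print(cur.fetchall())  # every row should now have value 'sql'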
@@ -0,0 +1,83 @@
#!/usr/bin/env python
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This migration script converts the admin URL in the Keystone
# service catalog to be equivalent to the internal URL

import sys

import psycopg2
from controllerconfig.common import log
from psycopg2.extras import RealDictCursor

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None  # noqa
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]  # noqa
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print ("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    log.configure()

    if from_release == "17.06" and action == "migrate":
        try:
            LOG.info("performing keystone migration from release %s to %s "
                     "with action: %s" % (from_release, to_release, action))
            update_identity_admin_url()
        except Exception as ex:
            LOG.exception(ex)
            print ex
            return 1


# We will update for all Regions and not just the primary Region,
# otherwise we'd break non-Primary Regions once Primary Region
# gets upgraded
def update_identity_admin_url():
    conn = psycopg2.connect("dbname='keystone' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("SELECT service_id, url, region_id FROM "
                        "endpoint INNER JOIN service "
                        "ON endpoint.service_id = service.id WHERE "
                        "type='identity' and interface='internal';")
            records = cur.fetchall()
            if records is None or len(records) == 0:
                LOG.exception(
                    "Failed to fetch identity endpoint and service data")
                raise
            for record in records:
                service_id = record['service_id']
                internal_url = record['url']
                region_id = record['region_id']
                if not service_id or not internal_url or not region_id:
                    LOG.exception(
                        "Fetched an entry %s with essential data missing" %
                        record)
                    raise
                LOG.info("Updating identity admin URL to '%s' for "
                         "service_id '%s' and region '%s'" %
                         (internal_url, service_id, region_id))
                cur.execute("UPDATE endpoint SET url='%s' "
                            "WHERE interface='admin' and service_id='%s' "
                            "and region_id='%s' ;" %
                            (internal_url, service_id, region_id))


if __name__ == "__main__":
    sys.exit(main())
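The UPDATE above interpolates values into the SQL text with %; the values come straight out of the same catalog, but psycopg2 can also bind them as parameters, which avoids any quoting concerns. An equivalent sketch (connection boilerplate and sample values included for completeness):

    import psycopg2
    from psycopg2.extras import RealDictCursor

    conn = psycopg2.connect("dbname='keystone' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            internal_url = 'http://192.168.204.2:5000/v3'  # example values
            service_id, region_id = 'abc123', 'RegionOne'
            cur.execute("UPDATE endpoint SET url=%s "
                        "WHERE interface='admin' and service_id=%s "
                        "and region_id=%s;",
                        (internal_url, service_id, region_id))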
@@ -0,0 +1,58 @@
#!/bin/bash
#
# Copyright (c) 2016-2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# Migrates ceilometer pipeline file.

. /usr/bin/tsconfig

NAME=$(basename $0)

# The migration scripts are passed these parameters:
FROM_RELEASE=$1
TO_RELEASE=$2
ACTION=$3

# This will log to /var/log/platform.log
function log {
    logger -p local1.info $1
}

OLD_PIPELINE_FILE="${CGCS_PATH}/ceilometer/${FROM_RELEASE}/pipeline.yaml"
NEW_PIPELINE_DIR="${CGCS_PATH}/ceilometer/${TO_RELEASE}"
NEW_PIPELINE_FILE="${NEW_PIPELINE_DIR}/pipeline.yaml"
PIPELINE_SOURCE_FILE=/etc/ceilometer/controller.yaml

function do_escape {
    local val=$1
    local val_escaped="${val//\//\\/}"
    val_escaped="${val_escaped//\&/\\&}"
    echo $val_escaped
}

if [ "$ACTION" == "migrate" ]
then
    log "Creating new $NEW_PIPELINE_FILE file for release $TO_RELEASE"
    if [ ! -d "$NEW_PIPELINE_DIR" ]
    then
        mkdir $NEW_PIPELINE_DIR
    fi
    cp $PIPELINE_SOURCE_FILE $NEW_PIPELINE_FILE

    # Currently, the user can only modify the vswitch.csv and pm.csv paths.
    default_value=$(do_escape "$(awk '/vswitch.csv/ {print $0}' $NEW_PIPELINE_FILE)")
    custom_value=$(do_escape "$(awk '/vswitch.csv/ {print $0}' $OLD_PIPELINE_FILE)")
    sed -i "s/$default_value/$custom_value/" $NEW_PIPELINE_FILE

    default_value=$(do_escape "$(awk '/pm.csv/ {print $0}' $NEW_PIPELINE_FILE)")
    custom_value=$(do_escape "$(awk '/pm.csv/ {print $0}' $OLD_PIPELINE_FILE)")
    sed -i "s/$default_value/$custom_value/" $NEW_PIPELINE_FILE

    chmod 640 $NEW_PIPELINE_FILE

fi

exit 0
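do_escape exists because the awk-extracted line is spliced into a sed s/// expression, where an unescaped '/' would end the pattern early and an unescaped '&' would expand to the whole match. The same transformation expressed in Python, purely for illustration:

    def do_escape(val):
        # Escape the sed delimiter '/' and the whole-match marker '&'.
        return val.replace('/', r'\/').replace('&', r'\&')

    print(do_escape('- csv: /opt/cgcs/vswitch.csv & more'))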
@@ -0,0 +1,197 @@
#!/usr/bin/env python
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This migration script updates the region configuration in the system
# table and populates the multicast and pxeboot network address records.

import json
import sys
import uuid

import psycopg2
from netaddr import IPNetwork
from controllerconfig.common import log
from psycopg2.extras import RealDictCursor, DictCursor
from controllerconfig.upgrades import utils
from sysinv.common import constants


LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None  # noqa
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]  # noqa
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print ("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    log.configure()

    if from_release == "17.06" and action == "migrate":
        try:
            LOG.info("Performing system migration from release %s to %s with "
                     "action: %s" % (from_release, to_release, action))
            packstack_config = utils.get_packstack_config(from_release)
            config_region = packstack_config.get('general', 'CONFIG_REGION')
            if config_region == 'y':
                region_name = packstack_config.get('general',
                                                   'CONFIG_REGION_2_NAME')
            else:
                region_name = packstack_config.get('general',
                                                   'CONFIG_KEYSTONE_REGION')
            project_name = packstack_config.get('general',
                                                'CONFIG_SERVICE_TENANT_NAME')
            multicast_subnet = IPNetwork(packstack_config.get(
                'general', 'CONFIG_MULTICAST_MGMT_SUBNET'))
            pxeboot_subnet = IPNetwork(packstack_config.get(
                'general', 'CONFIG_PLATFORM_PXEBOOT_SUBNET'))
            mtu = packstack_config.get('general', 'CONFIG_PLATFORM_MGMT_MTU')
            conn = psycopg2.connect("dbname='sysinv' user='postgres'")
            with conn:
                update_system_table(conn, region_name, project_name)
                populate_multicast_address_records(conn, multicast_subnet, mtu)
                populate_pxeboot_address_records(conn, pxeboot_subnet, mtu)
        except Exception as ex:
            LOG.exception(ex)
            print ex
            return 1


def update_system_table(conn, region_name, project_name):
    with conn.cursor(cursor_factory=RealDictCursor) as cur:
        cur.execute("select capabilities from i_system WHERE id = 1;")
        capabilities = cur.fetchone()
        if capabilities is None:
            LOG.exception("Failed to fetch i_system data")
            raise

        fields_str = capabilities.get('capabilities')
        fields_dict = json.loads(fields_str)

        if fields_dict.get('region_config') == 'True':
            new_vals = {'region_config': True}
        else:
            new_vals = {'region_config': False}
        fields_dict.update(new_vals)

        new_cap = json.dumps(fields_dict)

        LOG.info("Updating system capabilities %s to %s"
                 % (capabilities, new_cap))
        cur.execute("update i_system set capabilities=%s, "
                    "region_name=%s, service_project_name=%s WHERE id=1",
                    (new_cap, region_name, project_name))


def populate_multicast_address_records(conn, multicast_subnet, mtu):
    pool_name = 'multicast-subnet'
    with conn.cursor(cursor_factory=DictCursor) as cur:
        cur.execute('insert into address_pools(uuid,name,family,network,'
                    'prefix,"order") VALUES(%s, %s, %s, %s, %s, %s)',
                    (str(uuid.uuid4()), pool_name, multicast_subnet.version,
                     str(multicast_subnet.network), multicast_subnet.prefixlen,
                     'random'))
        cur.execute("select id from address_pools WHERE name=%s;",
                    (pool_name,))
        pool_row = cur.fetchone()
        if pool_row is None:
            LOG.exception("Failed to fetch pool id for %s", pool_name)
            raise

        pool_id = pool_row['id']
        cur.execute('insert into address_pool_ranges(address_pool_id,uuid,'
                    'start,"end") VALUES(%s, %s, %s, %s)',
                    (pool_id, str(uuid.uuid4()),
                     str(multicast_subnet[1]),
                     str(multicast_subnet[-2])))
        cur.execute("insert into networks(id, address_pool_id, uuid,"
                    "type, mtu, dynamic) values(%s, %s, %s, %s, %s, False)",
                    (pool_id, pool_id, str(uuid.uuid4()),
                     constants.NETWORK_TYPE_MULTICAST, mtu))
        addresses = {
            constants.SM_MULTICAST_MGMT_IP_NAME:
                str(multicast_subnet[1]),
            constants.MTCE_MULTICAST_MGMT_IP_NAME:
                str(multicast_subnet[2]),
            constants.PATCH_CONTROLLER_MULTICAST_MGMT_IP_NAME:
                str(multicast_subnet[3]),
            constants.PATCH_AGENT_MULTICAST_MGMT_IP_NAME:
                str(multicast_subnet[4]),
        }
        for name, address in addresses.iteritems():
            address_name = "%s-%s" % (name, constants.NETWORK_TYPE_MULTICAST)
            cur.execute("insert into addresses(uuid, address_pool_id, address,"
                        "prefix, name, family, enable_dad) values(%s, %s, %s,"
                        "%s, %s, %s, False)",
                        (str(uuid.uuid4()), pool_id, str(address),
                         multicast_subnet.prefixlen, address_name,
                         multicast_subnet.version))


def populate_pxeboot_address_records(conn, pxeboot_subnet, mtu):
    pool_name = 'pxeboot'
    with conn.cursor(cursor_factory=DictCursor) as cur:
        cur.execute('select id from address_pools where name=%s;',
                    (pool_name,))
        pool_row = cur.fetchone()
        if pool_row:
            LOG.info("existing pxeboot pool found, skip adding pxeboot "
                     "network. pool id = (%s)" % pool_row['id'])
            return

        cur.execute('insert into address_pools(uuid,name,family,network,'
                    'prefix,"order") VALUES(%s, %s, %s, %s, %s, %s)',
                    (str(uuid.uuid4()), pool_name, pxeboot_subnet.version,
                     str(pxeboot_subnet.network), pxeboot_subnet.prefixlen,
                     'random'))
        cur.execute("select id from address_pools WHERE name=%s;",
                    (pool_name,))
        pool_row = cur.fetchone()
        if pool_row is None:
            LOG.exception("Failed to fetch pool id for %s", pool_name)
            raise

        pool_id = pool_row['id']
        cur.execute('insert into address_pool_ranges(address_pool_id,uuid,'
                    'start,"end") VALUES(%s, %s, %s, %s)',
                    (pool_id, str(uuid.uuid4()),
                     str(pxeboot_subnet[1]),
                     str(pxeboot_subnet[-2])))
        cur.execute("insert into networks(id, address_pool_id, uuid,"
                    "type, mtu, dynamic) values(%s, %s, %s, %s, %s, False)",
                    (pool_id, pool_id, str(uuid.uuid4()),
                     constants.NETWORK_TYPE_PXEBOOT, mtu))
        addresses = {
            constants.CONTROLLER_HOSTNAME:
                str(pxeboot_subnet[2]),
            constants.CONTROLLER_0_HOSTNAME:
                str(pxeboot_subnet[3]),
            constants.CONTROLLER_1_HOSTNAME:
                str(pxeboot_subnet[4]),
        }
        for name, address in addresses.iteritems():
            address_name = "%s-%s" % (name, constants.NETWORK_TYPE_PXEBOOT)
            cur.execute("insert into addresses(uuid, address_pool_id, address,"
                        "prefix, name, family, enable_dad) values(%s, %s, %s,"
                        "%s, %s, %s, False)",
                        (str(uuid.uuid4()), pool_id, str(address),
                         pxeboot_subnet.prefixlen, address_name,
                         pxeboot_subnet.version))


if __name__ == "__main__":
    sys.exit(main())
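The address records above lean on netaddr's IPNetwork indexing: subnet[1] is the first host offset after the network address and subnet[-2] the last one before the broadcast address, which is exactly the range inserted into address_pool_ranges. A standalone illustration (subnet value made up):

    from netaddr import IPNetwork

    pxeboot_subnet = IPNetwork('192.168.202.0/24')  # example subnet
    print("%s %s" % (pxeboot_subnet[1], pxeboot_subnet[-2]))
    # 192.168.202.1 192.168.202.254
    print("%s %s" % (pxeboot_subnet.network, pxeboot_subnet.prefixlen))
    # 192.168.202.0 24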