Enable duplex platform upgrades
Enable the mechanism to upgrade the platform components on a running
StarlingX system with duplex controllers. This includes upgrade support
for:
  - generation of the kubernetes join_cmd so the N+1 controller can
    join the cluster
  - migration of the kubernetes config
  - migration of etcd on host-swact
  - migration of the Distributed Cloud dcmanager and dcorch databases

A maintenance release for stx3.x is required to upgrade to stx4.0.

Tests performed with duplex controllers (AIO-DX and Standard):
  - system load-import
  - system health-query-upgrade
  - system upgrade-start
  - system host-upgrade controller-0
  - system host-lock/unlock host N while controller N, N+1
  - system host-lock/unlock controller-0 while controller N+1
  - system host-upgrade controller-1
  - system host-upgrade storage
  - system host-upgrade worker
  - system upgrade-activate
  - system upgrade-abort
  - system host-downgrade
  - system upgrade-complete
  - verified application (e.g. stx-monitor) over upgrade

Change-Id: I4267c7b32b2e7b59b5ffdd8146288698962da1e0
Story: 2007403
Task: 39243
Task: 39244
Task: 39245
Signed-off-by: John Kung <john.kung@windriver.com>
parent 462c768080
commit 4e0b2acdfe
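
For orientation, a minimal sketch of the swact hand-off this change introduces. The release identifiers are hypothetical, and the trigger that runs the migrate_etcd action on host-swact is outside this diff and is assumed here:

    import subprocess

    FROM_RELEASE = "19.12"   # hypothetical stx3.x load id
    TO_RELEASE = "20.06"     # hypothetical stx4.0 load id

    # On controller-0, near the end of the data migration: record the pending
    # swact migration (writes /etc/platform/.upgrade_swact_controller_1).
    subprocess.check_call(['/usr/bin/upgrade_swact_migration.py',
                           'prepare_swact', FROM_RELEASE, TO_RELEASE])

    # On controller-1, when it takes over on host-swact (assumed trigger):
    # copy /opt/etcd/<from_release> to /opt/etcd/<to_release> and remove the
    # flag file. The release values are read back from the flag file.
    subprocess.check_call(['/usr/bin/upgrade_swact_migration.py',
                           'migrate_etcd'])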
@@ -58,6 +58,7 @@ install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/

install -d -m 755 %{buildroot}%{local_bindir}
install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
install -p -D -m 700 scripts/upgrade_swact_migration.py %{buildroot}%{local_bindir}/upgrade_swact_migration.py

install -d -m 755 %{buildroot}%{local_goenabledd}
install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh
@@ -23,7 +23,6 @@ import tempfile
import time
import yaml

from sysinv.common import constants as sysinv_constants

@@ -104,7 +103,8 @@ def get_db_credentials(shared_services, from_release):


def get_shared_services():
    """ Get the list of shared services from the sysinv database """
    """ Get the list of shared services from the sysinv database"""

    shared_services = []
    DEFAULT_SHARED_SERVICES = []

@@ -115,6 +115,7 @@ def get_shared_services():
    if row is None:
        LOG.error("Failed to fetch i_system data")
        raise psycopg2.ProgrammingError("Failed to fetch i_system data")

    cap_obj = json.loads(row[0])
    region_config = cap_obj.get('region_config', None)
    if region_config:

@@ -264,6 +265,50 @@ def migrate_pxeboot_config(from_release, to_release):
        raise


def migrate_armada_config(from_release, to_release):
    """ Migrates armada configuration. """

    LOG.info("Migrating armada config")
    devnull = open(os.devnull, 'w')

    # Copy the entire armada.cfg directory to pick up any changes made
    # after the data was migrated (i.e. updates to the controller-1 load).
    source_armada = os.path.join(PLATFORM_PATH, "armada", from_release)
    dest_armada = os.path.join(PLATFORM_PATH, "armada", to_release)
    try:
        subprocess.check_call(
            ["cp",
             "-a",
             os.path.join(source_armada),
             os.path.join(dest_armada)],
            stdout=devnull)
    except subprocess.CalledProcessError:
        LOG.exception("Failed to migrate %s" % source_armada)
        raise


def migrate_helm_config(from_release, to_release):
    """ Migrates helm configuration. """

    LOG.info("Migrating helm config")
    devnull = open(os.devnull, 'w')

    # Copy the entire helm.cfg directory to pick up any changes made
    # after the data was migrated (i.e. updates to the controller-1 load).
    source_helm = os.path.join(PLATFORM_PATH, "helm", from_release)
    dest_helm = os.path.join(PLATFORM_PATH, "helm", to_release)
    try:
        subprocess.check_call(
            ["cp",
             "-a",
             os.path.join(source_helm),
             os.path.join(dest_helm)],
            stdout=devnull)
    except subprocess.CalledProcessError:
        LOG.exception("Failed to migrate %s" % source_helm)
        raise


def migrate_sysinv_data(from_release, to_release):
    """ Migrates sysinv data. """
    devnull = open(os.devnull, 'w')

@@ -665,6 +710,14 @@ def upgrade_controller(from_release, to_release):
    print("Migrating pxeboot configuration...")
    migrate_pxeboot_config(from_release, to_release)

    # Migrate armada config
    print("Migrating armada configuration...")
    migrate_armada_config(from_release, to_release)

    # Migrate helm config
    print("Migrating helm configuration...")
    migrate_helm_config(from_release, to_release)

    # Migrate sysinv data.
    print("Migrating sysinv configuration...")
    migrate_sysinv_data(from_release, to_release)

@@ -766,6 +819,18 @@ def upgrade_controller(from_release, to_release):
        LOG.info("Failed to update hiera configuration")
        raise

    # Prepare for swact
    LOG.info("Prepare for swact to controller-1")
    try:
        subprocess.check_call(['/usr/bin/upgrade_swact_migration.py',
                               'prepare_swact',
                               from_release,
                               to_release],
                              stdout=devnull)
    except subprocess.CalledProcessError:
        LOG.exception("Failed upgrade_swact_migration prepare_swact")
        raise

    print("Shutting down upgrade processes...")

    # Stop postgres service
@@ -24,7 +24,7 @@ from oslo_log import log

LOG = log.getLogger(__name__)


def get_upgrade_databases(shared_services):
def get_upgrade_databases(system_role, shared_services):

    UPGRADE_DATABASES = ('postgres', 'template1', 'sysinv',
                         'barbican')

@@ -33,6 +33,13 @@ def get_upgrade_databases(shared_services):
                                     'sysinv': ('i_alarm',),
                                     'barbican': ()}

    if system_role == sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:
        UPGRADE_DATABASES += ('dcmanager', 'dcorch',)
        UPGRADE_DATABASE_SKIP_TABLES.update({
            'dcmanager': ('subcloud_alarms',),
            'dcorch': ()
        })

    if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services:
        UPGRADE_DATABASES += ('keystone',)
        UPGRADE_DATABASE_SKIP_TABLES.update({'keystone': ('token',)})

@@ -40,12 +47,12 @@ def get_upgrade_databases(shared_services):
    return UPGRADE_DATABASES, UPGRADE_DATABASE_SKIP_TABLES


def export_postgres(dest_dir, shared_services):
def export_postgres(dest_dir, system_role, shared_services):
    """ Export postgres databases """
    devnull = open(os.devnull, 'w')
    try:
        upgrade_databases, upgrade_database_skip_tables = \
            get_upgrade_databases(shared_services)
            get_upgrade_databases(system_role, shared_services)
        # Dump roles, table spaces and schemas for databases.
        subprocess.check_call([('sudo -u postgres pg_dumpall --clean ' +
                                '--schema-only > %s/%s' %

@@ -101,7 +108,7 @@ def prepare_upgrade(from_load, to_load, i_system):

    # Export databases
    shared_services = i_system.capabilities.get("shared_services", "")
    export_postgres(dest_dir, shared_services)
    export_postgres(dest_dir, i_system.distributed_cloud_role, shared_services)
    export_vim(dest_dir)

    # Export filesystems so controller-1 can access them

@@ -234,6 +241,9 @@ def abort_upgrade(from_load, to_load, upgrade):
    # Remove upgrade directories
    upgrade_dirs = [
        os.path.join(tsc.PLATFORM_PATH, "config", to_load),
        os.path.join(tsc.PLATFORM_PATH, "armada", to_load),
        os.path.join(tsc.PLATFORM_PATH, "helm", to_load),
        os.path.join(tsc.ETCD_PATH, to_load),
        os.path.join(utils.POSTGRES_PATH, "upgrade"),
        os.path.join(utils.POSTGRES_PATH, to_load),
        os.path.join(utils.RABBIT_PATH, to_load),
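
A hedged usage sketch of the new system_role parameter, assuming get_upgrade_databases from the module above is in scope for the caller; it shows the additional Distributed Cloud databases the export picks up on a system controller:

    from sysinv.common import constants as sysinv_constants

    # On a Distributed Cloud system controller the export set now also
    # covers dcmanager and dcorch; subcloud_alarms is skipped so stale
    # subcloud alarm state is not carried across the upgrade.
    databases, skip_tables = get_upgrade_databases(
        sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER,
        shared_services=[])

    assert 'dcmanager' in databases and 'dcorch' in databases
    assert skip_tables['dcmanager'] == ('subcloud_alarms',)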
@@ -0,0 +1,95 @@
#!/usr/bin/python
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will perform upgrade preparation and migration operations for
# host-swact to controller-1.
#

import os
import shutil
import subprocess
import sys
import yaml

from oslo_log import log

LOG = log.getLogger(__name__)

ETCD_PATH = "/opt/etcd"
UPGRADE_CONTROLLER_1_FILE = "/etc/platform/.upgrade_swact_controller_1"


def main():
    action = None
    from_release = None
    to_release = None
    arg = 1

    while arg < len(sys.argv):
        if arg == 1:
            action = sys.argv[arg]
        elif arg == 2:
            from_release = sys.argv[arg]
        elif arg == 3:
            to_release = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    if action == "migrate_etcd":
        try:
            migrate_etcd_on_swact()
        except Exception as ex:
            LOG.exception(ex)
            return 1
    elif action == "prepare_swact":
        upgrade_prepare_swact(from_release, to_release)
    return 0


def upgrade_prepare_swact(from_release, to_release):
    migrate_data = {
        'from_release': from_release,
        'to_release': to_release
    }
    with open(UPGRADE_CONTROLLER_1_FILE, 'w') as f:
        yaml.dump(migrate_data, f, default_flow_style=False)


def migrate_etcd_on_swact():
    with open(UPGRADE_CONTROLLER_1_FILE, 'r') as f:
        document = yaml.safe_load(f)

    from_release = document.get('from_release')
    to_release = document.get('to_release')

    dest_etcd = os.path.join(ETCD_PATH, to_release)

    if os.path.exists(dest_etcd):
        # The dest_etcd must not have already been created,
        # however this can occur on a forced host-swact
        LOG.info("skipping etcd migration %s already exists" %
                 dest_etcd)
        return

    if not os.path.isfile(UPGRADE_CONTROLLER_1_FILE):
        LOG.info("skipping etcd migration, no request %s" %
                 UPGRADE_CONTROLLER_1_FILE)
        return

    source_etcd = os.path.join(ETCD_PATH, from_release)
    try:
        shutil.copytree(os.path.join(source_etcd),
                        os.path.join(dest_etcd))
        os.remove(UPGRADE_CONTROLLER_1_FILE)
    except subprocess.CalledProcessError:
        LOG.exception("Failed to migrate %s" % source_etcd)
        raise


if __name__ == "__main__":
    sys.exit(main())
@@ -56,6 +56,7 @@ Configuration for the Controller node.

install -d -m 755 %{buildroot}%{local_bindir}
install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
install -p -D -m 700 scripts/upgrade_swact_migration.py %{buildroot}%{local_bindir}/upgrade_swact_migration.py

install -d -m 755 %{buildroot}%{local_goenabledd}
install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh
@@ -1312,9 +1312,9 @@ class ConductorManager(service.PeriodicService):
        :param context: request context
        :param host: host object
        """
        # Only update the config if the host is running the same version as
        # the active controller.
        if self.host_load_matches_sw_version(host):
            # update the config if the host is running the same version as
            # the active controller.
            if (host.administrative == constants.ADMIN_UNLOCKED or
                    host.action == constants.FORCE_UNLOCK_ACTION or
                    host.action == constants.UNLOCK_ACTION):

@@ -1322,8 +1322,20 @@ class ConductorManager(service.PeriodicService):
                # Update host configuration
                self._puppet.update_host_config(host)
        else:
            LOG.info("Host %s is not running active load. "
                     "Skipping manifest generation" % host.hostname)
            # from active controller, update hieradata for upgrade
            host_uuids = [host.uuid]
            config_uuid = self._config_update_hosts(
                context,
                [constants.CONTROLLER],
                host_uuids,
                reboot=True)
            host_upgrade = self.dbapi.host_upgrade_get_by_host(host.id)
            target_load = self.dbapi.load_get(host_upgrade.target_load)
            self._puppet.update_host_config_upgrade(
                host,
                target_load.software_version,
                config_uuid
            )

        self._allocate_addresses_for_host(context, host)
        # Set up the PXE config file for this host so it can run the installer

@@ -1580,7 +1592,6 @@ class ConductorManager(service.PeriodicService):
        if (host.administrative == constants.ADMIN_UNLOCKED or
                host.action == constants.FORCE_UNLOCK_ACTION or
                host.action == constants.UNLOCK_ACTION):

            # Generate host configuration files
            self._puppet.update_host_config(host)
        else:

@@ -8792,7 +8803,8 @@ class ConductorManager(service.PeriodicService):

        for upgrade_element in upgrade_paths:
            valid_from_version = upgrade_element.findtext('version')
            if valid_from_version == current_version:
            valid_from_versions = valid_from_version.split(",")
            if current_version in valid_from_versions:
                path_found = True
                upgrade_path = upgrade_element
                break

@@ -9403,7 +9415,7 @@ class ConductorManager(service.PeriodicService):
        """
        Checks if the host is running the same load as the active controller
        :param host: a host object
        :return: true if host target load matches active sw_version
        :return: True if host target load matches active sw_version
        """
        host_upgrade = self.dbapi.host_upgrade_get_by_host(host.id)
        target_load = self.dbapi.load_get(host_upgrade.target_load)
@@ -66,6 +66,9 @@ class BasePuppet(object):
    def get_host_config(self, host):
        return {}

    def get_host_config_upgrade(self, host):
        return {}

    @staticmethod
    def quoted_str(value):
        return quoted_str(value)
@@ -1,5 +1,5 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
# Copyright (c) 2018-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

@@ -73,11 +73,53 @@ class KubernetesPuppet(base.BasePuppet):

        return config

    def get_host_config_upgrade(self, host):
        """Updates the config for upgrade with updated kubernetes params

        :param host: host object
        """
        config = {}

        # Generate the join command for this host
        config.update(self._get_host_join_command(host))

        # Get the kubernetes version
        config.update(self._get_active_kubernetes_version())

        LOG.info("get_host_config_upgrade kubernetes config=%s" % config)

        return config

    @staticmethod
    def _get_active_kubernetes_version():
        """Get the active kubernetes version
        """
        # During a platform upgrade, the version is still None
        # when N+1 controller-1 is creating hieradata.
        # The version is updated from the running kubernetes version.
        config = {}

        kube_operator = kubernetes.KubeOperator()
        kube_version = kube_operator.kube_get_kubernetes_version()

        config.update({
            'platform::kubernetes::params::version': kube_version,
        })

        return config

    def _get_host_join_command(self, host):
        config = {}
        if not utils.is_initial_config_complete():
            return config

        join_cmd = self._get_kubernetes_join_cmd(host)
        config.update({'platform::kubernetes::params::join_cmd': join_cmd})

        return config

    @staticmethod
    def _get_kubernetes_join_cmd(host):
        # The token expires after 24 hours and is needed for a reinstall.
        # The puppet manifest handles the case where the node already exists.
        try:

@@ -85,33 +127,41 @@ class KubernetesPuppet(base.BasePuppet):
            if host.personality == constants.CONTROLLER:
                # Upload the certificates used during kubeadm join
                # The cert key will be printed in the last line of the output

                # We will create a temp file with the kubeadm config
                # We need this because the kubeadm config could have changed
                # since bootstrap. Reading the kubeadm config each time
                # it is needed ensures we are not using stale data
                fd, temp_kubeadm_config_view = tempfile.mkstemp(dir='/tmp', suffix='.yaml')
                fd, temp_kubeadm_config_view = tempfile.mkstemp(
                    dir='/tmp', suffix='.yaml')
                with os.fdopen(fd, 'w') as f:
                    cmd = ['kubeadm', 'config', 'view']
                    subprocess.check_call(cmd, stdout=f)
                cmd = ['kubeadm', 'init', 'phase', 'upload-certs', '--upload-certs', '--config',
                cmd = ['kubeadm', 'init', 'phase', 'upload-certs',
                       '--upload-certs', '--config',
                       temp_kubeadm_config_view]

                cmd_output = subprocess.check_output(cmd)
                cert_key = cmd_output.strip().split('\n')[-1]
                join_cmd_additions = " --control-plane --certificate-key %s" % cert_key
                join_cmd_additions = \
                    " --control-plane --certificate-key %s" % cert_key
                os.unlink(temp_kubeadm_config_view)

            cmd = ['kubeadm', 'token', 'create', '--print-join-command',
                   '--description', 'Bootstrap token for %s' % host.hostname]
            join_cmd = subprocess.check_output(cmd)
            join_cmd_additions += " --cri-socket /var/run/containerd/containerd.sock"
            join_cmd_additions += \
                " --cri-socket /var/run/containerd/containerd.sock"
            join_cmd = join_cmd.strip() + join_cmd_additions
            LOG.info('get_kubernetes_join_cmd join_cmd=%s' % join_cmd)
        except Exception:
            LOG.exception("Exception generating bootstrap token")
            raise exception.SysinvException('Failed to generate bootstrap token')
            raise exception.SysinvException(
                'Failed to generate bootstrap token')

        config.update({'platform::kubernetes::params::join_cmd': join_cmd})

        return config
        return join_cmd

    def _get_etcd_endpoint(self):
        addr = self._format_url_address(self._get_cluster_host_address())
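
For reference, an illustrative example (all values hypothetical) of the hieradata fragment get_host_config_upgrade contributes for the N+1 controller; the join command comes from "kubeadm token create --print-join-command" plus the control-plane additions above, and the version from the running cluster:

    # Hypothetical values, shown only to illustrate the generated keys.
    config = {
        'platform::kubernetes::params::join_cmd': (
            'kubeadm join 192.168.206.1:6443 --token <token> '
            '--discovery-token-ca-cert-hash sha256:<hash> '
            '--control-plane --certificate-key <key> '
            '--cri-socket /var/run/containerd/containerd.sock'),
        'platform::kubernetes::params::version': 'v1.18.1',
    }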
@@ -1,4 +1,4 @@
# Copyright (c) 2017-2019 Wind River Systems, Inc.
# Copyright (c) 2017-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

@@ -68,6 +68,11 @@ class PlatformPuppet(base.BasePuppet):
        config.update(self._get_host_lldp_config(host))
        return config

    def get_host_config_upgrade(self, host):
        config = {}
        config.update(self._get_host_platform_config_upgrade(host, self.config_uuid))
        return config

    def _get_static_software_config(self):
        return {
            'platform::params::software_version': self.quoted_str(tsconfig.SW_VERSION),

@@ -328,6 +333,17 @@ class PlatformPuppet(base.BasePuppet):

        return config

    def _get_host_platform_config_upgrade(self, host, config_uuid):
        config = {}
        if not config_uuid:
            config_uuid = host.config_target

        if config_uuid:
            config.update({
                'platform::config::params::config_uuid': config_uuid
            })
        return config

    def _get_host_ntp_config(self, host):
        ntp = self.dbapi.intp_get_one()
        if host.personality == constants.CONTROLLER:
@@ -1,5 +1,5 @@
#
# Copyright (c) 2017-2018 Wind River Systems, Inc.
# Copyright (c) 2017-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

@@ -14,6 +14,7 @@ import tempfile
import yaml

from stevedore import extension
from tsconfig import tsconfig

from oslo_log import log as logging
from sysinv.puppet import common

@@ -148,6 +149,36 @@ class PuppetOperator(object):

        self._write_host_config(host, config)

    @puppet_context
    def update_host_config_upgrade(self, host, target_load, config_uuid):
        """Update the host hiera configuration files for the supplied host
        and upgrade target load
        """
        self.config_uuid = config_uuid
        self.context['config_upgrade'] = config = {}
        for puppet_plugin in self.puppet_plugins:
            config.update(puppet_plugin.obj.get_host_config_upgrade(host))

        self._merge_host_config(host, target_load, config)
        LOG.info("Updating hiera for host: %s with config_uuid: %s "
                 "target_load: %s config: %s" %
                 (host.hostname, config_uuid, target_load, config))

    def _merge_host_config(self, host, target_load, config):
        filename = host.mgmt_ip + '.yaml'
        path = os.path.join(
            tsconfig.PLATFORM_PATH,
            'puppet',
            target_load,
            'hieradata')

        with open(os.path.join(path, filename), 'r') as yaml_file:
            host_config = yaml.load(yaml_file)

        host_config.update(config)

        self._write_host_config(host, host_config, path)

    def remove_host_config(self, host):
        """Remove the configuration for the supplied host"""
        try:

@@ -156,15 +187,17 @@ class PuppetOperator(object):
        except Exception:
            LOG.exception("failed to remove host config: %s" % host.uuid)

    def _write_host_config(self, host, config):
    def _write_host_config(self, host, config, path=None):
        """Update the configuration for a specific host"""
        filename = "%s.yaml" % host.mgmt_ip
        self._write_config(filename, config)
        self._write_config(filename, config, path)

    def _write_config(self, filename, config):
        filepath = os.path.join(self.path, filename)
    def _write_config(self, filename, config, path=None):
        if path is None:
            path = self.path
        filepath = os.path.join(path, filename)
        try:
            fd, tmppath = tempfile.mkstemp(dir=self.path, prefix=filename,
            fd, tmppath = tempfile.mkstemp(dir=path, prefix=filename,
                                           text=True)
            with open(tmppath, 'w') as f:
                yaml.dump(config, f, default_flow_style=False)
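
A condensed, standalone sketch (hypothetical values; written as a plain script rather than the class method above) of what _merge_host_config does: the upgrade keys are layered on top of the per-host hieradata already generated for the target load, and the merged file is written back under that load's hieradata directory.

    import os
    import yaml

    target_load = '20.06'        # hypothetical target software version
    mgmt_ip = '192.168.204.2'    # hypothetical controller-1 mgmt IP
    upgrade_config = {           # as produced by the plugins' get_host_config_upgrade()
        'platform::kubernetes::params::join_cmd': '<join command>',
        'platform::config::params::config_uuid': '<config uuid>',
    }

    path = os.path.join('/opt/platform/puppet', target_load, 'hieradata')
    filename = mgmt_ip + '.yaml'

    with open(os.path.join(path, filename), 'r') as yaml_file:
        host_config = yaml.safe_load(yaml_file)

    host_config.update(upgrade_config)   # upgrade keys override existing entries

    with open(os.path.join(path, filename), 'w') as yaml_file:
        yaml.dump(host_config, yaml_file, default_flow_style=False)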
@@ -103,6 +103,14 @@ class ManagerTestCase(base.DbTestCase):
        self.service.fm_api.set_fault.side_effect = self._raise_alarm
        self.service.fm_api.clear_fault.side_effect = self._clear_alarm

        # Mock sw_version check since tox tsc.SW_VERSION is "TEST.SW_VERSION"
        self.host_load_matches_sw_version_patcher = mock.patch.object(
            manager.ConductorManager, 'host_load_matches_sw_version')
        self.mock_host_load_matches_sw_version = \
            self.host_load_matches_sw_version_patcher.start()
        self.mock_host_load_matches_sw_version.return_value = True
        self.addCleanup(self.host_load_matches_sw_version_patcher.stop)

        self.fail_config_apply_runtime_manifest = False

        def mock_config_apply_runtime_manifest(obj, context, config_uuid,

@@ -1196,26 +1204,57 @@ class ManagerTestCase(base.DbTestCase):
        self.assertEqual(updated_upgrade.state,
                         kubernetes.KUBE_UPGRADING_NETWORKING_FAILED)

    def test_configure_out_of_date(self):
    def _create_test_controller_config_out_of_date(self, hostname):
        config_applied = self.service._config_set_reboot_required(uuid.uuid4())
        config_target = self.service._config_set_reboot_required(uuid.uuid4())
        ihost = self._create_test_ihost(config_applied=config_applied,
                                        config_target=config_target)
        os.path.isfile = mock.Mock(return_value=True)
        cutils.is_aio_system = mock.Mock(return_value=True)
        ihost = self._create_test_ihost(
            config_applied=config_applied,
            config_target=config_target)
        ihost['mgmt_mac'] = '00:11:22:33:44:55'
        ihost['mgmt_ip'] = '1.2.3.42'
        ihost['hostname'] = 'controller-0'
        ihost['hostname'] = hostname
        ihost['invprovision'] = 'provisioned'
        ihost['personality'] = 'controller'
        ihost['administrative'] = 'unlocked'
        ihost['operational'] = 'available'
        ihost['operational'] = 'disabled'
        ihost['availability'] = 'online'
        ihost['serialid'] = '1234567890abc'
        ihost['boot_device'] = 'sda'
        ihost['rootfs_device'] = 'sda'
        ihost['install_output'] = 'text'
        ihost['console'] = 'ttyS0,115200'

        return ihost

    def test_configure_out_of_date(self):
        os.path.isfile = mock.Mock(return_value=True)
        cutils.is_aio_system = mock.Mock(return_value=True)
        ihost = self._create_test_controller_config_out_of_date('controller-0')
        self.service.configure_ihost(self.context, ihost)
        res = self.dbapi.ihost_get(ihost['uuid'])
        imsg_dict = {'config_applied': res['config_target']}
        self.service.iconfig_update_by_ihost(self.context, ihost['uuid'], imsg_dict)
        self.assertEqual(self.alarm_raised, False)

        personalities = [constants.CONTROLLER]
        self.service._config_update_hosts(self.context, personalities, reboot=True)
        res = self.dbapi.ihost_get(ihost['uuid'])

        personalities = [constants.CONTROLLER]
        self.service._config_update_hosts(self.context, personalities, reboot=False)
        res = self.dbapi.ihost_get(ihost['uuid'])
        config_uuid = self.service._config_clear_reboot_required(res['config_target'])
        imsg_dict = {'config_applied': config_uuid}
        self.service.iconfig_update_by_ihost(self.context, ihost['uuid'], imsg_dict)
        self.assertEqual(self.alarm_raised, True)

    def test_configure_out_of_date_upgrade(self):
        os.path.isfile = mock.Mock(return_value=True)
        cutils.is_aio_system = mock.Mock(return_value=True)

        # Check upgrade where the target sw_version does not match
        self.mock_host_load_matches_sw_version.return_value = False
        ihost = self._create_test_controller_config_out_of_date('controller-1')
        self.service.configure_ihost(self.context, ihost)
        res = self.dbapi.ihost_get(ihost['uuid'])
        imsg_dict = {'config_applied': res['config_target']}
@@ -1,2 +1,2 @@
SRC_DIR="tsconfig"
TIS_PATCH_VER=11
TIS_PATCH_VER=12
@@ -171,6 +171,7 @@ ARMADA_PATH = PLATFORM_PATH + "/armada/" + SW_VERSION
HELM_OVERRIDES_PATH = PLATFORM_PATH + "/helm/" + SW_VERSION
KEYRING_PATH = PLATFORM_PATH + "/.keyring/" + SW_VERSION
DEPLOY_PATH = PLATFORM_PATH + "/deploy/" + SW_VERSION
ETCD_PATH = "/opt/etcd"
EXTENSION_PATH = "/opt/extension"
PLATFORM_CEPH_CONF_PATH = CONFIG_PATH + 'ceph-config'