Remove unused upgrade code

Remove upgrade code specific to StX4 -> StX5 upgrades.

Story: 2008966
Task: 42724
Change-Id: Id0c31f3f2017f9acf3253740b83be9bb2a243e00
Signed-off-by: David Sullivan <david.sullivan@windriver.com>
David Sullivan 2021-06-20 19:14:56 -05:00
parent 15b62819eb
commit d10c69b4dc
20 changed files with 7 additions and 1943 deletions


@@ -31,7 +31,6 @@ from sysinv.puppet import common as puppet_common
 # have been applied, so only the static entries from tsconfig can be used
 # (the platform.conf file will not have been updated with dynamic values).
 from tsconfig.tsconfig import SW_VERSION
-from tsconfig.tsconfig import SW_VERSION_20_06
 from tsconfig.tsconfig import PLATFORM_PATH
 from tsconfig.tsconfig import KEYRING_PATH
 from tsconfig.tsconfig import PLATFORM_CONF_FILE
@@ -456,10 +455,8 @@ def import_databases(from_release, to_release, from_path=None, simplex=False):
     LOG.info("Importing databases")
     try:
-        # Backups and upgrade use different names during pg_dump
-        # This code is only needed for 20.06 and can be removed in the StX5
-        postgres_config_path = \
-            glob.glob(from_dir + '/postgres.*[Ss]ql.config')[0]
+        postgres_config_path = os.path.join(
+            from_dir, 'postgres.postgreSql.config')
         # Do postgres schema import (suppress stderr due to noise)
         subprocess.check_call(['sudo -u postgres psql -f ' +
                                postgres_config_path + ' postgres'],
@@ -473,7 +470,7 @@ def import_databases(from_release, to_release, from_path=None, simplex=False):
     import_commands = []

     # Do postgres data import
-    for data in glob.glob(from_dir + '/*.*[Ss]ql.data'):
+    for data in glob.glob(from_dir + '/*.*Sql.data'):
         db_elem = data.split('/')[-1].split('.')[0]
         import_commands.append((db_elem,
                                 "sudo -u postgres psql -f " + data +
@@ -755,72 +752,6 @@ def migrate_hiera_data(from_release, to_release, role=None):
         'platform::client::credentials::params::keyring_file':
             os.path.join(KEYRING_PATH, '.CREDENTIAL'),
     })
-    # Add dcmanager and sysinv user id as well as service project id to
-    # the static.yaml on subclouds
-    if (to_release == SW_VERSION_20_06 and
-            role == sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD):
-        dm_user_id = utils.get_keystone_user_id('dcmanager')
-        sysinv_user_id = utils.get_keystone_user_id('sysinv')
-        service_project_id = utils.get_keystone_project_id('services')
-        if dm_user_id:
-            static_config.update({
-                'platform::dcmanager::bootstrap::dc_dcmanager_user_id':
-                    dm_user_id
-            })
-        if sysinv_user_id:
-            static_config.update({
-                'platform::sysinv::bootstrap::dc_sysinv_user_id':
-                    sysinv_user_id
-            })
-        if service_project_id:
-            static_config.update({
-                'openstack::keystone::bootstrap::dc_services_project_id':
-                    service_project_id
-            })
-
-    # Just for upgrade from STX4.0 to STX5.0
-    if (from_release == SW_VERSION_20_06 and etcd_security_config):
-        static_config.update(etcd_security_config)
-
-    if from_release == SW_VERSION_20_06:
-        # The helm db is new in the release stx5.0 and requires
-        # a password to be generated and a new user to access the DB.
-        # This is required for all types of system upgrade. Should be
-        # removed in the release that follows stx5.0
-        static_config.update({
-            'platform::helm::v2::db::postgresql::user': 'admin-helmv2'
-        })
-        helmv2_db_pw = utils.get_password_from_keyring('helmv2', 'database')
-        if not helmv2_db_pw:
-            helmv2_db_pw = utils.set_password_in_keyring('helmv2', 'database')
-
-        secure_static_file = os.path.join(
-            constants.HIERADATA_PERMDIR, "secure_static.yaml")
-        with open(secure_static_file, 'r') as yaml_file:
-            secure_static_config = yaml.load(yaml_file)
-        secure_static_config.update({
-            'platform::helm::v2::db::postgresql::password': helmv2_db_pw
-        })
-
-        # update below static secure config
-        # sysinv::certmon::local_keystone_password
-        # sysinv::certmon::dc_keystone_password
-        sysinv_pass = utils.get_password_from_keyring('sysinv', 'services')
-        secure_static_config.update({
-            'sysinv::certmon::local_keystone_password': sysinv_pass
-        })
-
-        dc_pass = ''
-        if role == sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:
-            dc_pass = utils.get_password_from_keyring('dcmanager', 'services')
-        secure_static_config.update({
-            'sysinv::certmon::dc_keystone_password': dc_pass
-        })
-
-        with open(secure_static_file, 'w') as yaml_file:
-            yaml.dump(secure_static_config, yaml_file,
-                      default_flow_style=False)

     with open(static_file, 'w') as yaml_file:
         yaml.dump(static_config, yaml_file, default_flow_style=False)


@@ -1,463 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will update the partition schema for partitions on the root disk
# of AIO controllers. This is required as the default LVM partition grew in the
# N+1 release.

import psycopg2
import sys
import six
import subprocess

from oslo_utils import uuidutils
from sysinv.common import constants
from psycopg2.extras import RealDictCursor
from controllerconfig.common import log

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None  # noqa
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]  # noqa
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1
    log.configure()
    LOG.debug("%s invoked with from_release = %s to_release = %s action = %s"
              % (sys.argv[0], from_release, to_release, action))
    if from_release == "20.06" and action == "migrate":
        try:
            adjust_user_partitions()
        except Exception as ex:
            LOG.exception(ex)
            return 1


def _command(arguments1, arguments2=None):
    """Execute a command and capture stdout, stderr & return code."""
    LOG.debug("Executing command: '%s'" % " ".join(arguments1))
    process = subprocess.Popen(
        arguments1,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    if arguments2:
        process2 = subprocess.Popen(
            arguments2,
            stdin=process.stdout,
            stdout=subprocess.PIPE,
            shell=False)
        process.stdout.close()
        process = process2
    out, err = process.communicate()
    return out, err, process.returncode


def build_partition_device_node(disk_device_node, partition_number):
    if constants.DEVICE_NAME_NVME in disk_device_node:
        partition_device_node = '{}p{}'.format(
            disk_device_node, partition_number)
    else:
        partition_device_node = '{}{}'.format(
            disk_device_node, partition_number)
    LOG.debug("partition_device_node: %s" % partition_device_node)
    return partition_device_node


def build_partition_device_path(disk_device_path, partition_number):
    partition_device_path = '{}-part{}'.format(
        disk_device_path, partition_number)
    LOG.debug("partition_device_path: %s" % partition_device_path)
    return partition_device_path


def get_sgdisk_info(device_path):
    """Obtain partition info: type GUID, type name, UUID, start, end, size.

    :param:   device_path: the disk's device path
    :returns: list of partition info
    """
    sgdisk_part_info = []
    fields = ['part_number', 'device_node', 'type_guid', 'type_name', 'uuid',
              'start_mib', 'end_mib', 'size_mib']
    sgdisk_command = '{} {}'.format('/usr/bin/partition_info.sh',
                                    device_path)
    try:
        sgdisk_process = subprocess.Popen(sgdisk_command,
                                          stdout=subprocess.PIPE,
                                          shell=True)
    except Exception as e:
        LOG.exception("Could not retrieve partition information: %s" % e)
        raise
    sgdisk_output = sgdisk_process.stdout.read()
    rows = [row for row in sgdisk_output.split(';') if row.strip()]
    for row in rows:
        values = row.split()
        partition = dict(zip(fields, values))
        if 'part_number' in partition.keys():
            partition['part_number'] = int(partition['part_number'])
        sgdisk_part_info.append(partition)
    return sgdisk_part_info


def get_partitions(device_path, device_node):
    """Obtain existing partitions from a disk."""
    partitions = []
    sgdisk_part_info = get_sgdisk_info(device_path)
    for partition in sgdisk_part_info:
        partition_number = partition.get('part_number')
        type_name = partition.get('type_name')
        part_size_mib = partition.get('size_mib')
        part_device_node = build_partition_device_node(
            device_node, partition_number)
        part_device_path = build_partition_device_path(
            device_path, partition_number)
        start_mib = partition.get('start_mib')
        end_mib = partition.get('end_mib')

        part_attrs = {
            'partition_number': partition_number,
            'device_path': part_device_path,
            'device_node': part_device_node,
            'type_name': type_name,
            'start_mib': start_mib,
            'end_mib': end_mib,
            'size_mib': part_size_mib,
        }
        partitions.append(part_attrs)
    return partitions


def is_aio_system_type():
    conn = psycopg2.connect("dbname='sysinv' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("SELECT * from i_system")
            system = cur.fetchone()
            return system['system_type'] == 'All-in-one'


def cgts_vg_extend(cur, disk, pv_cgts_vg, partition_number,
                   part_size_mib, part_start_mib, part_end_mib):
    part_device_node = build_partition_device_node(
        disk.get('device_node'), partition_number)
    part_device_path = build_partition_device_path(
        disk.get('device_path'), partition_number)
    LOG.info("Extra cgts-vg partition size: %s start: %s "
             "end: %s device node: %s device path: %s" %
             (part_size_mib, part_start_mib, part_end_mib,
              part_device_node, part_device_path))

    # Create a new partition
    part_uuid = uuidutils.generate_uuid()
    cur.execute(
        "insert into partition(uuid, idisk_id, idisk_uuid, size_mib,"
        "device_node, device_path, status, type_guid, type_name, "
        "forihostid, foripvid, start_mib, end_mib) "
        "values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
        (part_uuid, disk.get('id'), disk.get('uuid'), part_size_mib,
         part_device_node, part_device_path,
         constants.PARTITION_READY_STATUS,
         constants.USER_PARTITION_PHYSICAL_VOLUME,
         constants.PARTITION_NAME_PV, disk.get('forihostid'),
         None, part_start_mib, part_end_mib))

    # Create a new pv
    pv_uuid = uuidutils.generate_uuid()
    cur.execute(
        "insert into i_pv(uuid, pv_state, pv_type, disk_or_part_uuid, "
        "disk_or_part_device_node, disk_or_part_device_path, lvm_pv_name, "
        "lvm_vg_name, forihostid, forilvgid) "
        "values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
        (pv_uuid, constants.PV_ADD, constants.PV_TYPE_PARTITION,
         part_uuid, part_device_node, part_device_path, part_device_node,
         constants.LVG_CGTS_VG, disk.get('forihostid'),
         pv_cgts_vg.get('forilvgid')))

    # Get the PV.
    cur.execute("select i_pv.id from i_pv where uuid=%s",
                (pv_uuid,))
    pv = cur.fetchone()

    # Update the partition to associate with the pv id.
    cur.execute(
        "update partition set foripvid=%s where uuid=%s",
        (pv.get('id'), part_uuid))


def adjust_user_partitions():
    if not is_aio_system_type():
        LOG.info("This is not an AIO system. No partition changes required.")
        return

    conn = psycopg2.connect("dbname=sysinv user=postgres")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("select i_host.id, i_host.rootfs_device from i_host "
                        "where personality='controller'")
            controllers = cur.fetchall()
            if not controllers:
                LOG.exception("Failed to fetch controller host information")
                raise

            for controller in controllers:
                # Get the root disk for the controller.
                cur.execute(
                    "select * from i_idisk where forihostid=%s and "
                    "capabilities like %s", (controller['id'], '%rootfs%',))
                controller_rootfs_disk = cur.fetchone()
                if not controller_rootfs_disk:
                    LOG.exception("Could not locate controller root disk.")
                    raise
                LOG.debug("controller_rootfs_disk: %s" %
                          controller_rootfs_disk)

                # Get the partitions for the controller root disk.
                cur.execute(
                    "select partition.id, partition.device_node, "
                    "partition.device_path, partition.start_mib, "
                    "partition.end_mib, partition.size_mib "
                    "from partition where forihostid = %s and "
                    "idisk_uuid = %s",
                    (controller['id'], controller_rootfs_disk['uuid'],))
                db_partitions = cur.fetchall()
                LOG.debug("DB partitions: %s" % db_partitions)

                # Create a db partition map
                partition_map = {p['device_node']: p for p in db_partitions}
                LOG.debug("DB partition map: %s" % partition_map)

                installed_partitions = get_partitions(
                    controller_rootfs_disk['device_path'],
                    controller_rootfs_disk['device_node'])
                LOG.debug("installed partitions: %s" % installed_partitions)

                # Get the PVs for the controller.
                cur.execute(
                    "select i_pv.id, i_pv.disk_or_part_uuid, "
                    "i_pv.disk_or_part_device_node, "
                    "i_pv.disk_or_part_device_path, i_pv.lvm_pv_size,"
                    "i_pv.lvm_pv_name, i_pv.lvm_vg_name, i_pv.forilvgid,"
                    "i_pv.pv_type from i_pv where forihostid = %s",
                    (controller['id'],))
                db_pvs = cur.fetchall()
                LOG.debug("DB pvs: %s" % db_pvs)

                # Create a db pv map for the controller root disk
                pv_map = {pv['disk_or_part_device_node']: pv for pv in db_pvs
                          if controller_rootfs_disk['device_node'] in
                          pv['disk_or_part_device_node']}
                LOG.info("DB pv map: %s" % pv_map)

                # cgts-vg physical volumes
                cgts_vg_pvs = [pv for pv in db_pvs
                               if ((controller_rootfs_disk['device_path'] in
                                    pv['disk_or_part_device_path']) and
                                   pv['lvm_vg_name'] == constants.LVG_CGTS_VG)]

                update_db_partitions = []    # Requires DB updates for partitions
                update_db_pvs = []           # Requires DB updates for pvs
                installed_lvm_device = None  # LVM device that needs adjusting
                adjustments = {}             # LVM device partition adjustments
                extend_cgts_vg = False       # Flag to determine cgts-vg extension

                # Go through the installed partitions and determine any changes
                for i in installed_partitions:
                    # Grab the partition from the db map
                    d = partition_map[i['device_node']]
                    if ((int(i['start_mib']) != int(d['start_mib'])) or
                            (int(i['end_mib']) != int(d['end_mib'])) or
                            (int(i['size_mib']) != int(d['size_mib']))):
                        LOG.info("MISMATCH: installed part: %s %s %s %s" % (
                            i['device_node'], i['start_mib'],
                            i['end_mib'], i['size_mib']))
                        LOG.info("MISMATCH: db part: %s %s %s %s" % (
                            d['device_node'], d['start_mib'],
                            d['end_mib'], d['size_mib']))
                        if i['type_name'] == 'Linux.LVM':
                            # This is the key partition that will be used to
                            # adjust any additional user-created partitions;
                            # identify and save the adjustments
                            installed_lvm_device = i
                            adjustments['start_mib'] = (int(i['start_mib']) -
                                                        int(d['start_mib']))
                            adjustments['end_mib'] = (int(i['end_mib']) -
                                                      int(d['end_mib']))
                            adjustments['size_mib'] = (int(i['size_mib']) -
                                                       int(d['size_mib']))
                        else:
                            # Adjust the non-LVM partitions to match what is
                            # installed
                            d['start_mib'] = i['start_mib']
                            d['end_mib'] = i['end_mib']
                            d['size_mib'] = i['size_mib']
                            # Save the new partition for updating
                            update_db_partitions.append(d)
                            # Remove the partition from the db map
                            del partition_map[i['device_node']]
                    else:
                        # Partition is the same. No changes needed.
                        # Remove the partition from the db map
                        del partition_map[i['device_node']]

                if installed_lvm_device:
                    # Found a difference in the installed partition map for the
                    # primary LVM partition
                    LOG.debug("DB unhandled part map: %s" % partition_map)

                    # Update the primary installed LVM partition based on
                    # calculated adjustments.
                    d = partition_map[installed_lvm_device['device_node']]
                    d['start_mib'] = (int(d['start_mib']) +
                                      adjustments['start_mib'])
                    d['end_mib'] = (int(d['end_mib']) +
                                    adjustments['end_mib'])
                    d['size_mib'] = (int(d['size_mib']) +
                                     adjustments['size_mib'])
                    update_db_partitions.append(d)
                    del partition_map[installed_lvm_device['device_node']]

                    if adjustments['size_mib'] < 0:
                        # The primary LVM partition for cgts-vg in the new
                        # release is smaller than in the old release. Creating
                        # a new partition and pv to extend cgts-vg is needed.
                        # The device node/path of the new LVM partition and pv
                        # is after the primary LVM partition, and the device
                        # node/path of the user-created partitions and pvs
                        # need to be moved backwards.
                        # i.e.
                        #   primary LVM partition
                        #     device node - /dev/sda5
                        #     device path - /dev/disk/by-path/xxx-part5
                        #   new LVM partition
                        #     device node - /dev/sda6
                        #     device path - /dev/disk/by-path/xxx-part6
                        #   user created LVM partitions
                        #     device node - /dev/sda7
                        #     device path - /dev/disk/by-path/xxx-part7
                        #   ...
                        extend_cgts_vg = True
                        increased_partition_number = \
                            installed_lvm_device['partition_number'] + 2
                        # partition_map is not an ordered dictionary; we
                        # need to sort partition_map by its key (device node)
                        # to ensure the adjustments for user-created
                        # partitions are correct
                        for device, partition in sorted(
                                six.iteritems(partition_map)):
                            # Adjust the device node/path of user-created
                            # partitions. The start/end/size of the partitions
                            # will not be changed.
                            partition['device_node'] = \
                                build_partition_device_node(
                                    controller_rootfs_disk['device_node'],
                                    increased_partition_number)
                            partition['device_path'] = \
                                build_partition_device_path(
                                    controller_rootfs_disk['device_path'],
                                    increased_partition_number)
                            update_db_partitions.append(partition)
                            increased_partition_number += 1

                            # Adjust the device node/path of the user-created pv.
                            pv = pv_map.get(device)
                            if pv:
                                pv['disk_or_part_device_node'] = \
                                    partition['device_node']
                                pv['disk_or_part_device_path'] = \
                                    partition['device_path']
                                pv['lvm_pv_name'] = partition['device_node']
                                update_db_pvs.append(pv)

                        # Reverse the updating order. The partitions that are
                        # moving backwards need to be updated first because of
                        # the UniqueConstraint "u_partition_path_host_id"
                        # on the partition table
                        update_db_partitions = update_db_partitions[::-1]
                    else:
                        # The primary LVM partition for cgts-vg in the new
                        # release is equal to or larger than in the old
                        # release. Adjust the start/end of user-created
                        # partitions. Size will not be changed.
                        for device, partition in six.iteritems(partition_map):
                            partition['start_mib'] = \
                                (int(partition['start_mib']) +
                                 adjustments['end_mib'])
                            partition['end_mib'] = \
                                (int(partition['end_mib']) +
                                 adjustments['end_mib'])
                            update_db_partitions.append(partition)

                if update_db_partitions:
                    # Found partitions that need updating
                    LOG.info("Required partition adjustments: %s" %
                             update_db_partitions)
                    for partition in update_db_partitions:
                        cur.execute(
                            "update partition set start_mib=%s, end_mib=%s, "
                            "size_mib=%s, device_node=%s, device_path=%s "
                            "where id=%s",
                            (partition['start_mib'], partition['end_mib'],
                             partition['size_mib'], partition['device_node'],
                             partition['device_path'], partition['id']),)

                if update_db_pvs:
                    LOG.info("Required pv adjustments: %s" % update_db_pvs)
                    for pv in update_db_pvs:
                        cur.execute(
                            "update i_pv set disk_or_part_device_node=%s, "
                            "disk_or_part_device_path=%s, lvm_pv_name=%s "
                            "where id=%s",
                            (pv['disk_or_part_device_node'],
                             pv['disk_or_part_device_path'],
                             pv['lvm_pv_name'], pv['id']))

                if extend_cgts_vg:
                    part_number = \
                        installed_lvm_device['partition_number'] + 1
                    part_size_mib = abs(adjustments['size_mib'])
                    part_start_mib = int(installed_lvm_device['end_mib'])
                    part_end_mib = part_start_mib + part_size_mib
                    cgts_vg_extend(
                        cur, controller_rootfs_disk,
                        cgts_vg_pvs[0], part_number, part_size_mib,
                        part_start_mib, part_end_mib)


if __name__ == "__main__":
    sys.exit(main())
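
Every migration script in this commit parses its arguments with the same
positional while-loop over sys.argv. A minimal argparse equivalent (an
illustrative sketch, not part of the removed code) makes the shared calling
convention explicit:

import argparse

# Each script is invoked as: <script> FROM_RELEASE TO_RELEASE ACTION,
# e.g. from_release "20.06" with action "migrate" or "activate".
parser = argparse.ArgumentParser()
parser.add_argument('from_release')
parser.add_argument('to_release')
parser.add_argument('action')
args = parser.parse_args()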


@@ -1,62 +0,0 @@
#!/bin/bash
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will call the keystone user PATCH API to set the
# "ignore_lockout_failure_attempts" option for the admin user, so that the
# admin user is exempted from failed auth lockout.
#
# This script can be removed in the release that follows 20.06.
#

NAME=$(basename $0)

# The migration scripts are passed these parameters:
FROM_RELEASE=$1
TO_RELEASE=$2
ACTION=$3

user_name="admin"
option="ignore_lockout_failure_attempts"
option_value="true"

source /etc/platform/openrc

# This will log to /var/log/platform.log
function log {
    logger -p local1.info $1
}

log "$NAME: Setting ${option} option for $user_name to exempt it from fail auth lockout."

if [ "$TO_RELEASE" == "20.06" ] && [ "$ACTION" == "activate" ]; then
    token=$(openstack token issue -c id -f value)
    if [ $? -ne 0 ]; then
        log "$NAME: Get admin token failed."
        exit 1
    fi

    user_id=$(openstack user show ${user_name} -c id -f value)
    if [ $? -ne 0 ]; then
        log "$NAME: Get user id for user ${user_name} failed."
        exit 1
    fi

    req_url="${OS_AUTH_URL}/users/${user_id}"
    data_json="{\"user\": {\"options\": {\"${option}\": ${option_value}}}}"

    ret=$(/usr/bin/curl -g -X PATCH -H "X-Auth-Token: ${token}" \
        -H "Content-Type: application/json" -d "${data_json}" "${req_url}")
    if [ $? -ne 0 ]; then
        log "$NAME: Set ${option} option for user ${user_name} failed."
        exit 1
    fi

    if echo ${ret} | grep '"error"'; then
        log "$NAME: Set ${option} option for user ${user_name} failed: ${ret}"
        exit 1
    fi
fi

exit 0
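
The curl invocation above uses the standard Keystone user-options update API.
A sketch of the same PATCH request in Python (assuming the python-requests
package is available; token and user_id would be obtained as in the script):

import requests

def exempt_admin_from_lockout(os_auth_url, token, user_id):
    # PATCH {OS_AUTH_URL}/users/{user_id} to set the
    # ignore_lockout_failure_attempts user option to true.
    body = {"user": {"options": {"ignore_lockout_failure_attempts": True}}}
    resp = requests.patch(
        "%s/users/%s" % (os_auth_url, user_id),
        headers={"X-Auth-Token": token},
        json=body)
    resp.raise_for_status()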


@@ -1,89 +0,0 @@
#!/bin/bash
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script creates and applies an updated CoreDNS ConfigMap during the
# activate stage of an upgrade to release 20.06. It hardcodes dns_domain to
# cluster.local and changes the forward policy from the default (random) to
# sequential, so that domain names are resolved via dnsmasq first.
#
# This script can be removed in the release that follows 20.06.
#

NAME=$(basename $0)

# The migration scripts are passed these parameters:
FROM_RELEASE=$1
TO_RELEASE=$2
ACTION=$3

# This will log to /var/log/platform.log
function log {
    logger -p local1.info $1
}

function create_configmap {
    cat > /etc/kubernetes/coredns.yaml <<EOF
# Based on https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/coredns/coredns.yaml.sed#L54-L82
# Hardcode dns_domain to cluster.local.
# Change policy from default of random to sequential, in order to attempt to
# resolve domain names with dnsmasq first.
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            policy sequential
        }
        cache 30
        loop
        reload
        loadbalance
    }
EOF
}

function apply_configmap {
    kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/coredns.yaml
    ret=$?
    return $ret
}

if [ "$TO_RELEASE" == "20.06" ] && [ "$ACTION" == "activate" ]; then
    log "$NAME: Migrating FROM release $FROM_RELEASE"
    create_configmap
    apply_configmap
    ret=$?
    if [ $ret -ne 0 ]; then
        log "$NAME: Applying CoreDNS ConfigMap failed"
        exit 1
    fi
fi

exit 0


@@ -1,39 +0,0 @@
#!/bin/bash
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will create the directory /opt/platform/device_images
# if it does not exist.
#
# This script is needed for upgrade from release 20.06.
#

NAME=$(basename $0)

# The migration scripts are passed these parameters:
FROM_RELEASE=$1
TO_RELEASE=$2
ACTION=$3

source /etc/platform/openrc

# This will log to /var/log/platform.log
function log {
    logger -p local1.info $1
}

DIR_NAME='/opt/platform/device_images'

if [ "$FROM_RELEASE" == "20.06" ] && [ "$ACTION" == "migrate" ]; then
    if [ ! -d $DIR_NAME ]; then
        log "$NAME: Create directory $DIR_NAME."
        mkdir $DIR_NAME
        if [ $? -ne 0 ]; then
            log "$NAME: Failed to create directory $DIR_NAME"
            exit 1
        fi
    fi
fi

exit 0


@@ -1,99 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will update i_system table in sysinv database
# in preparation for upgrade.
#
# The 'i_system' table in sysinv DB has capabilities attribute
# which lists 'identity' as a shared service. However, identity
# is no longer a shared service in DC. The script takes care of
# this by removing identity entry on upgrade.
#
# This script can be removed in the release that follows stx.5.0.
#

import json
import psycopg2
import sys

from controllerconfig.common import log
from psycopg2.extras import RealDictCursor

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1
    log.configure()
    LOG.debug("%s invoked with from_release = %s to_release = %s action = %s"
              % (sys.argv[0], from_release, to_release, action))
    if from_release == "20.06" and action == "migrate":
        try:
            if is_subcloud():
                LOG.info("Removing identity shared service...")
                remove_identity_shared_service()
        except Exception:
            LOG.exception("Failed to remove identity entry during upgrade.")
            return 1


def is_subcloud():
    conn = psycopg2.connect("dbname='sysinv' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("SELECT * from i_system")
            system = cur.fetchone()
            return system['distributed_cloud_role'] == 'subcloud'


def remove_identity_shared_service():
    conn = psycopg2.connect("dbname='sysinv' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("SELECT * from i_system")
            system = cur.fetchone()
            # Fetch the capabilities attribute and convert it into a dict
            capabilities = json.loads(system['capabilities'])
            # Fetch shared services.
            # It is of type unicode initially and we convert it into
            # a list for further processing.
            shared_services = str(capabilities["shared_services"])
            shared_service_list = shared_services.strip('][').split(', ')
            # Create a new list which removes 'identity' shared service
            # and any empty string elements from the list
            new_shared_services = [item.strip("'")
                                   for item in shared_service_list
                                   if "identity" not in item and item != '']
            if len(shared_service_list) != len(new_shared_services):
                capabilities["shared_services"] = str(new_shared_services)
                LOG.info("Old shared service list: %s, "
                         "New shared service list: %s"
                         % (shared_services, new_shared_services))
                cur.execute("UPDATE i_system SET capabilities='%s' where id=%s"
                            % (json.dumps(capabilities), system["id"]))
            LOG.info("Removed identity from shared service list on subcloud.")


if __name__ == "__main__":
    sys.exit(main())
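
The strip('][').split(', ') handling above exists because shared_services is
stored as the string form of a Python list inside the JSON capabilities blob.
Assuming that stored string is always a valid list literal (an assumption, not
verified here), ast.literal_eval would express the same cleanup more directly:

import ast

shared_services = ast.literal_eval(capabilities["shared_services"])
# Drop the 'identity' entry; literal_eval cannot produce stray empty strings.
new_shared_services = [s for s in shared_services if "identity" not in s]
if len(new_shared_services) != len(shared_services):
    capabilities["shared_services"] = str(new_shared_services)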


@@ -1,119 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will update subcloud_status table in dcmanager database
# in preparation for upgrade to release 20.06.
#
# Subcloud load audit, introduced in release 20.06, entails creating a
# load status record when a subcloud is added to the database and
# having the subcloud load status updated by the dcmanager audit task.
# The script adds a load status record for each of the existing
# subclouds to ensure successful startup and operation of dcmanager
# when the system controller hosts are upgraded to 20.06.
#
# It also adds a dc-cert subcloud status endpoint, to indicate admin endpoint
# certificate sync status. This starts as in-sync, because subclouds
# bootstrap with an in-sync certificate.
#
# This script can be removed in the release that follows 20.06.
#

import psycopg2
import sys

from controllerconfig.common import log
from psycopg2.extras import RealDictCursor

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1
    log.configure()
    LOG.debug("%s invoked with from_release = %s to_release = %s action = %s"
              % (sys.argv[0], from_release, to_release, action))
    if to_release == "20.06" and action == "migrate":
        try:
            if is_system_controller():
                LOG.info("Performing subcloud status data migration...")
                update_subcloud_status()
        except Exception as ex:
            LOG.exception(ex)
            print(ex)
            return 1


def is_system_controller():
    conn = psycopg2.connect("dbname='sysinv' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("SELECT * from i_system")
            system = cur.fetchone()
            return system['distributed_cloud_role'] == 'systemcontroller'


def update_subcloud_status():
    conn = psycopg2.connect("dbname='dcmanager' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            # Check if there are any subclouds
            cur.execute("SELECT * from subclouds")
            subcloud_records = cur.fetchall()
            if not subcloud_records:
                LOG.info("Nothing to do - there are no subclouds.")
                return

            # Check if load status records already exist
            cur.execute("SELECT * from subcloud_status where "
                        "endpoint_type = 'load'")
            load_status_records = cur.fetchall()
            if not load_status_records:
                cur.execute("SELECT * from subcloud_status where "
                            "endpoint_type = 'patching'")
                patching_status_records = cur.fetchall()
                if not patching_status_records:
                    LOG.exception("Failed to fetch subcloud status data.")
                    raise
                for record in patching_status_records:
                    # Insert a record for the load endpoint type for each
                    # subcloud based on data of the patching record.
                    cur.execute("INSERT into subcloud_status (subcloud_id, "
                                "endpoint_type, sync_status, created_at, "
                                "deleted) values (%d, 'load', "
                                "'%s', '%s', 0)"
                                % (record['subcloud_id'],
                                   record['sync_status'],
                                   record['created_at']))
            else:
                LOG.info("Nothing to do - load status records already exist.")

            cur.execute("INSERT into subcloud_status("
                        "subcloud_id, endpoint_type, sync_status, deleted) "
                        "select id, 'dc-cert', 'in-sync', 0 "
                        "from subclouds where id not in "
                        "(select subcloud_id from subcloud_status "
                        "where endpoint_type = 'dc-cert')")
            LOG.info("Subcloud status data migration completed.")


if __name__ == "__main__":
    sys.exit(main())


@@ -1,99 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script updates the subcloud_sync table in dcorch database
# in preparation for upgrade from release 20.06.
#
# This script can be removed in the release that follows.
#

import json
import psycopg2
import sys

from controllerconfig.common import log
from psycopg2.extras import RealDictCursor

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1
    log.configure()
    LOG.debug("%s invoked with from_release = %s to_release = %s action = %s"
              % (sys.argv[0], from_release, to_release, action))
    if from_release == "20.06" and action == "migrate":
        try:
            if is_system_controller():
                LOG.info("Performing dcorch subcloud sync data migration...")
                update_subcloud_sync()
        except Exception as ex:
            LOG.exception(ex)
            print(ex)
            return 1


def is_system_controller():
    conn = psycopg2.connect("dbname='sysinv' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("SELECT * from i_system")
            system = cur.fetchone()
            return system['distributed_cloud_role'] == 'systemcontroller'


def update_subcloud_sync():
    conn = psycopg2.connect("dbname='dcorch' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            # Check if there are any subclouds
            cur.execute("SELECT * from subcloud")
            subcloud_records = cur.fetchall()
            if not subcloud_records:
                LOG.info("dcorch subcloud_sync data migration not required")
                return
            for record in subcloud_records:
                capabilities = json.loads(record['capabilities'])
                endpoint_types = capabilities.get('endpoint_types')
                for ept in endpoint_types:
                    # Insert a record into subcloud_sync for each of the
                    # endpoint types supported for each subcloud
                    cur.execute("INSERT into subcloud_sync (subcloud_id, "
                                "subcloud_name, endpoint_type, "
                                "audit_status, created_at, "
                                "deleted) values (%d, '%s', "
                                "'%s', '%s', '%s', 0)"
                                % (record['id'],
                                   record['region_name'],
                                   ept,
                                   'none',
                                   record['created_at']))
            LOG.info("dcorch subcloud_sync data migration completed.")


if __name__ == "__main__":
    sys.exit(main())
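
As an aside, the interpolated INSERT above can also be written with psycopg2
bound parameters, which avoids manual quoting of the values; a sketch of the
equivalent statement:

cur.execute(
    "INSERT into subcloud_sync (subcloud_id, subcloud_name, "
    "endpoint_type, audit_status, created_at, deleted) "
    "values (%s, %s, %s, %s, %s, 0)",
    (record['id'], record['region_name'], ept, 'none',
     record['created_at']))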


@@ -1,80 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script updates armada to the containerized version
# based on Helm v3. It also cleans up the previous
# tiller deployment.
#
# This script can be removed in the release that follows stx5.0

import json
import subprocess
import sys

from sysinv.common.kubernetes import KUBERNETES_ADMIN_CONF
from controllerconfig.common import log

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1
    log.configure()
    if action == 'activate' and not is_containerized_armada_installed():
        LOG.info("%s invoked with from_release = %s to_release = %s "
                 "action = %s"
                 % (sys.argv[0], from_release, to_release, action))
        update_armada_helmv3()


def is_containerized_armada_installed():
    """Check if containerized armada is installed by helmv3"""
    try:
        cmd = "/usr/sbin/helm list " \
              "--namespace armada --filter armada --output json " \
              "--kubeconfig {} ".format(KUBERNETES_ADMIN_CONF)
        result = subprocess.check_output(cmd, shell=True,
                                         stderr=subprocess.STDOUT)
        if not json.loads(result):
            return False
        return True
    except subprocess.CalledProcessError as e:
        LOG.exception("Unable to query armada helmv3 release: %s" % e.output)
        raise


def update_armada_helmv3():
    playbooks_root = '/usr/share/ansible/stx-ansible/playbooks'
    upgrade_script = 'upgrade-k8s-armada-helm.yml'
    cmd = 'ansible-playbook {}/{}'.format(playbooks_root, upgrade_script)
    sub = subprocess.Popen(cmd, shell=True,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = sub.communicate()
    if sub.returncode != 0:
        LOG.error('Command failed:\n %s\n. %s\n%s' % (cmd, stdout, stderr))
        raise Exception('Cannot update armada')
    LOG.info('armada helm v3 updated successfully')


if __name__ == "__main__":
    sys.exit(main())


@@ -1,170 +0,0 @@
#!/usr/bin/python
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This migration script is used for migrating all helm releases
# from configmaps to postgresql during the activate stage of
# a platform upgrade.
#
# This script can be removed in the release that follows stx5.0
#

import collections
from datetime import datetime
import psycopg2
import subprocess
import sys
import json
import keyring

from controllerconfig.common import log

LOG = log.get_logger(__name__)

Release = collections.namedtuple(
    'release', 'key body name version status owner created_at modified_at')


def main():
    action = None
    from_release = None
    to_release = None
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1
    log.configure()
    if from_release == '20.06' and action == 'activate':
        LOG.info("%s invoked with from_release = %s to_release = %s "
                 "action = %s"
                 % (sys.argv[0], from_release, to_release, action))
        migrate_helm_releases()
        LOG.info("Complete helm releases migration for release %s "
                 "to %s with action %s."
                 % (from_release, to_release, action))


def execute_command(cmd):
    sub = subprocess.Popen(cmd, shell=True,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = sub.communicate()
    if sub.returncode != 0:
        LOG.error("Command failed:\n %s\n%s\n%s" % (cmd, stdout, stderr))
        raise Exception("Failed to execute command: %s" % cmd)
    return stdout


def get_helm_releases():
    # Get all configmaps that store helm releases
    cmd = "kubectl --kubeconfig=/etc/kubernetes/admin.conf get configmaps " \
          "-n kube-system -l OWNER=TILLER --sort-by '{.metadata.name}' " \
          "--template '{{range .items}}{{.metadata.name}}{{\"\\n\"}}{{end}}'"
    releases = execute_command(cmd)
    releases_list = [r for r in releases.split('\n') if r]
    return releases_list


def delete_helm_releases():
    # Delete all configmaps that store helm releases
    cmd = "kubectl --kubeconfig=/etc/kubernetes/admin.conf delete configmaps " \
          "-n kube-system -l OWNER=TILLER"
    execute_command(cmd)


def get_helm_release_from_configmap(release_name):
    # Get the content of a specific helm release from its configmap
    cmd = "kubectl --kubeconfig=/etc/kubernetes/admin.conf get configmaps " \
          "-n kube-system {} -o json".format(release_name)
    release_data = execute_command(cmd)
    return json.loads(release_data)


def map_helm_release(release):
    # Map the format of a helm release from configmap to postgresql
    try:
        key = str(release['metadata']['name'])
        body = str(release['data']['release'])
        name = str(release['metadata']['labels']['NAME'])
        version = int(release['metadata']['labels']['VERSION'])
        status = str(release['metadata']['labels']['STATUS'])
        owner = str(release['metadata']['labels']['OWNER'])
        created_at = int(datetime.strftime(datetime.strptime(
            release['metadata']['creationTimestamp'],
            "%Y-%m-%dT%H:%M:%SZ"), "%s"))
        modified_at = int(release['metadata']['labels']['MODIFIED_AT'])
        mapped_release = Release(
            key=key, body=body, name=name, version=version, status=status,
            owner=owner, created_at=created_at, modified_at=modified_at)
    except Exception as e:
        LOG.exception("Failed to convert helm release: %s" % e)
        raise
    return mapped_release


def create_helm_release_in_db(conn, release):
    with conn:
        with conn.cursor() as cur:
            try:
                cur.execute(
                    "insert into releases(key, body, name, version,"
                    "status, owner, created_at, modified_at) "
                    "values(%s, %s, %s, %s, %s, %s, %s, %s)",
                    release)
            except psycopg2.IntegrityError:
                # release already exists
                pass
            except Exception as e:
                LOG.exception("Failed to create release in db:\n%s" % e)
                raise


def migrate_helm_releases():
    releases = get_helm_releases()
    if not releases:
        LOG.info("No helm releases need to be migrated.")
        return
    LOG.info("Start migrating helm releases:\n%s" % releases)
    helmv2_db_pw = keyring.get_password("helmv2", "database")
    if not helmv2_db_pw:
        raise Exception("Unable to get password to access helmv2 database.")
    try:
        conn = psycopg2.connect(user="admin-helmv2",
                                password=helmv2_db_pw,
                                host="localhost",
                                database="helmv2")
    except Exception as e:
        LOG.exception("Failed to connect helmv2 database: %s" % e)
        raise
    for release in releases:
        release_data = get_helm_release_from_configmap(release)
        mapped_release = map_helm_release(release_data)
        create_helm_release_in_db(conn, mapped_release)
        LOG.info("Migrated release: %s" % release)
    delete_helm_releases()


if __name__ == "__main__":
    sys.exit(main())
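
A quick way to spot-check the result is to read back the releases table with
the same credentials the script uses; a small sketch (not part of the original
script):

import keyring
import psycopg2

conn = psycopg2.connect(user="admin-helmv2",
                        password=keyring.get_password("helmv2", "database"),
                        host="localhost",
                        database="helmv2")
with conn:
    with conn.cursor() as cur:
        cur.execute("select name, version, status from releases")
        for name, version, status in cur.fetchall():
            print(name, version, status)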


@@ -60,7 +60,7 @@ function verify_apps_are_not_recovering {
 log "$NAME: Starting Kubernetes application updates from release $FROM_RELEASE to $TO_RELEASE with action $ACTION"

-if [ "$FROM_RELEASE" == "20.06" ] && [ "$ACTION" == "activate" ]; then
+if [ "$ACTION" == "activate" ]; then
     for tries in $(seq 1 $RECOVER_RESULT_ATTEMPTS); do
         if verify_apps_are_not_recovering; then
             break

@@ -1,89 +0,0 @@
#!/bin/bash
#
# Copyright (c) 2020 Intel Corporation.
#
# SPDX-License-Identifier: Apache-2.0
#
# Activate secured etcd after upgrade.
#
# Note: this can be removed in the release after STX5.0

. /etc/platform/platform.conf

FROM_REL=$1
TO_REL=$2
ACTION=$3

function log {
    logger -p local1.info $1
}

# below function is cloned from ../scripts/controller_config
get_ip()
{
    HOST_NAME=$1

    # Check /etc/hosts for the hostname
    HOST_IP=$(cat /etc/hosts | grep "${HOST_NAME}" | awk '{print $1}')
    if [ -n "${HOST_IP}" ]; then
        echo ${HOST_IP}
        return
    fi

    # Try the DNS query
    # Because dnsmasq can resolve a hostname to both an IPv4 and an IPv6
    # address in certain situations, and the last address is the IPv6, which
    # would be the management address, this is preferred over the IPv4
    # pxeboot address, so take the last address only.
    HOST_IP=$(dig +short ANY ${HOST_NAME} | tail -1)
    if [[ "${HOST_IP}" =~ ^[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*$ ]]; then
        echo ${HOST_IP}
        return
    fi
    if [[ "${HOST_IP}" =~ ^[0-9a-z]*\:[0-9a-z\:]*$ ]]; then
        echo ${HOST_IP}
        return
    fi
}

enable_secured_etcd()
{
    STATIC_YAML="/opt/platform/puppet/${sw_version}/hieradata/static.yaml"
    SYSTEM_YAML="/opt/platform/puppet/${sw_version}/hieradata/system.yaml"
    if [[ ! -f ${STATIC_YAML} ]] || [[ ! -f ${SYSTEM_YAML} ]]; then
        log "Could not find specific static/system yaml files in \
/opt/platform/puppet/${sw_version}/hieradata!"
        exit 1
    fi
    ETCD_SEC_ENABLED=$(grep "platform::etcd::params::security_enabled" ${STATIC_YAML} | awk '{print $2}')
    CLUSTER_HOST_ADDRESS=$(grep "platform::network::cluster_host::params::controller_address" ${SYSTEM_YAML} | awk '{print $2}')
    CLUSTER_HOST_ADDRESS_VERSION=$(grep "platform::network::cluster_host::params::subnet_version" ${SYSTEM_YAML} | awk '{print $2}')
    HOST_ADDR=$(get_ip $(hostname))
    if [ "$ETCD_SEC_ENABLED" != "true" ]; then
        ANSIBLE_LOG_PATH=/root/enable_secured_etcd.log \
        ansible-playbook /usr/share/ansible/stx-ansible/playbooks/enable_secured_etcd.yml \
            -e "cluster_floating_address=${CLUSTER_HOST_ADDRESS}" \
            -e "etcd_listen_address_version=${CLUSTER_HOST_ADDRESS_VERSION}" \
            -e "puppet_permdir=/opt/platform/puppet/${sw_version}" \
            -e "config_permdir=/opt/platform/config/${sw_version}" \
            -e "ipaddress=${HOST_ADDR}" \
            -e "k8s_root_ca_cert=''" \
            -e "k8s_root_ca_key=''"
        if [ $? -ne 0 ]; then
            log "Failed to run ansible playbook!"
            exit 1
        fi
    fi
}

log "${0} invoked with from_release = ${FROM_REL} to_release = ${TO_REL} action = ${ACTION}"

if [ "${FROM_REL}" == "20.06" -a "${ACTION}" == "activate" ]; then
    enable_secured_etcd
fi

exit 0


@@ -1,70 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script creates the "deployment" namespace during the activate
# stage of an upgrade from release 20.06.
#
# This script can be removed in the release that follows 20.06.
#

import subprocess
import sys

from controllerconfig.common import log

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1
    log.configure()
    LOG.debug("%s invoked with from_release = %s to_release = %s action = %s"
              % (sys.argv[0], from_release, to_release, action))
    if from_release == '20.06' and action == 'activate':
        create_deployment_ns()


deployment_ns_yaml = """
---
apiVersion: v1
kind: Namespace
metadata:
  name: deployment
"""


def create_deployment_ns():
    cmd = "echo '%s' | " \
          "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f -" % \
          deployment_ns_yaml
    sub = subprocess.Popen(cmd, shell=True,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = sub.communicate()
    if sub.returncode != 0:
        LOG.error('Command failed:\n %s\n. %s\n%s' % (cmd, stdout, stderr))
        raise Exception('Cannot create deployment namespace')
    LOG.info('Deployment namespace created successfully')


if __name__ == "__main__":
    sys.exit(main())


@@ -1,116 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script updates the dc root ca certificate to include more
# DN information and adds a separate admin endpoint certificate.
# This is in preparation for the future certificate renewal.
#
# This script can be removed in the release that follows 20.06.
#

import subprocess
import socket
import sys

from controllerconfig.common import log

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1
    log.configure()
    LOG.debug("%s invoked with from_release = %s to_release = %s action = %s"
              % (sys.argv[0], from_release, to_release, action))
    if to_release == '20.06' and action == 'activate':
        if is_system_controller():
            update_dc_root_ca()


def is_system_controller():
    with open('/etc/platform/platform.conf', 'r') as f:
        lines = f.readlines()
    for line in lines:
        if line.strip() == 'distributed_cloud_role=systemcontroller':
            return True
    return False


dc_root_cert_yaml = """
---
apiVersion: cert-manager.io/v1alpha3
kind: Certificate
metadata:
  name: dc-adminep-root-ca-certificate
  namespace: dc-cert
spec:
  commonName: %s
  duration: 43800h
  isCA: true
  issuerRef:
    kind: Issuer
    name: dc-selfsigning-issuer
  renewBefore: 720h
  secretName: dc-adminep-root-ca-certificate
  subject:
    organizationalUnits:
    - 'StarlingX DC Root CA'
    organizations:
    - StarlingX
---
apiVersion: cert-manager.io/v1alpha3
kind: Certificate
metadata:
  name: dc-adminep-certificate
  namespace: dc-cert
spec:
  commonName: %s
  duration: 4320h
  isCA: false
  issuerRef:
    kind: Issuer
    name: dc-adminep-root-ca-issuer
  renewBefore: 30h
  secretName: dc-adminep-certificate
"""


def update_dc_root_ca():
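    # 'controller' is expected to resolve to the management floating IP
    # (it is kept in /etc/hosts on StarlingX hosts).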
    mgmt_ip = socket.getaddrinfo('controller', None)[0][4][0]
    resource = dc_root_cert_yaml % (mgmt_ip, mgmt_ip)
    cmd = "echo '%s' | " \
          "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f -" % \
          resource
    sub = subprocess.Popen(cmd, shell=True,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = sub.communicate()
    if sub.returncode != 0:
        LOG.error('Command failed:\n %s\n. %s\n%s' % (cmd, stdout, stderr))
        raise Exception('Cannot update certificates')
    LOG.info('DC certificates updated successfully')


if __name__ == "__main__":
    sys.exit(main())


@@ -1,118 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2020-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script updates the subcloud admin endpoint certificate
# to include the mgmt floating IP as a subjectAltName.
#
# This script can be removed in the release that follows stx.5.0
#

import socket
import subprocess
import sys

from controllerconfig.common import log

LOG = log.get_logger(__name__)

sc_admin_endpoint_cert_yaml = """
---
apiVersion: cert-manager.io/v1alpha3
kind: Certificate
metadata:
  name: sc-adminep-certificate
  namespace: sc-cert
spec:
  commonName: %s
  duration: 4320h
  renewBefore: 30h
  ipAddresses:
  - "%s"
  issuerRef:
    name: sc-intermediate-ca-adminep-issuer
    kind: Issuer
  secretName: sc-adminep-certificate
"""


def main():
    action = None
    from_release = None
    to_release = None
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1
    log.configure()
    LOG.debug("%s invoked with from_release = %s to_release = %s action = %s"
              % (sys.argv[0], from_release, to_release, action))
    if from_release == '20.06' and action == 'activate':
        if is_subcloud():
            update_sc_admin_endpoint_cert(to_release)


def is_subcloud():
    with open('/etc/platform/platform.conf', 'r') as f:
        lines = f.readlines()
    for line in lines:
        if line.strip() == 'distributed_cloud_role=subcloud':
            return True
    return False


def execute_command(cmd):
    sub = subprocess.Popen(cmd, shell=True,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = sub.communicate()
    if sub.returncode != 0:
        LOG.error('Command failed:\n %s\n. %s\n%s' % (cmd, stdout, stderr))
        raise Exception('Failed to execute command: %s' % cmd)
    return stdout


def update_sc_admin_endpoint_cert(to_release):
    mgmt_ip = socket.getaddrinfo('controller', None)[0][4][0]
    resource = sc_admin_endpoint_cert_yaml % (mgmt_ip, mgmt_ip)

    # Update certificate in cert manager and the secret in k8s
    cmd = "echo '%s' | " \
          "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f -" % \
          resource
    execute_command(cmd)

    # Wait up to 30s for the admin endpoint certificate to be ready;
    # retry if the certificate is not ready yet.
    cmd = "kubectl --kubeconfig=/etc/kubernetes/admin.conf -n sc-cert " \
          "wait --for=condition=ready certificate sc-adminep-certificate " \
          "--timeout=30s"
    for attempt in range(3):
        try:
            execute_command(cmd)
        except Exception:
            LOG.info("Retry command: %s" % cmd)
            continue
        break
    else:
        raise Exception('Command failed after retries: %s' % cmd)
    LOG.info('Subcloud admin endpoint certificate updated successfully')


if __name__ == "__main__":
    sys.exit(main())


@@ -1,60 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script applies mandatory pod security policies to a system
# after upgrades. These are usually applied by ansible, which is
# not run during an upgrade.
#

import subprocess
import sys

from controllerconfig.common import log

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1
    log.configure()
    if from_release == '20.06' and action == 'activate':
        LOG.info("%s invoked from_release = %s to_release = %s action = %s"
                 % (sys.argv[0], from_release, to_release, action))
        apply_mandatory_psp_policies()


def apply_mandatory_psp_policies():
    cmd = "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f " \
          "/usr/share/ansible/stx-ansible/playbooks/roles/bootstrap/" \
          "bringup-essential-services/files/psp-policies.yaml"
    sub = subprocess.Popen(cmd, shell=True,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = sub.communicate()
    if sub.returncode != 0:
        LOG.error('Command failed:\n %s\n. %s\n%s' % (cmd, stdout, stderr))
        raise Exception('Cannot apply pod security policies')
    LOG.info('Mandatory pod security policies applied successfully')


if __name__ == "__main__":
    sys.exit(main())


@@ -1,128 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will remove snmp related data (icommunity and
# itrapdest) in the dcorch database, following the host based
# SNMP removal, in preparation for upgrade from release 20.06.
#

import psycopg2
import sys

from controllerconfig.common import log
from psycopg2.extras import RealDictCursor

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1
    log.configure()
    LOG.debug("%s invoked with from_release = %s to_release = %s action = %s"
              % (sys.argv[0], from_release, to_release, action))
    if from_release == "20.06" and action == "migrate":
        try:
            if is_system_controller():
                LOG.info("Performing dcorch snmp data removal...")
                remove_snmp_record()
        except Exception as ex:
            LOG.exception(ex)
            print(ex)
            return 1


def is_system_controller():
    with open('/etc/platform/platform.conf', 'r') as f:
        lines = f.readlines()
    for line in lines:
        if line.strip() == 'distributed_cloud_role=systemcontroller':
            return True
    return False


def remove_snmp_in_orch_request(cur, job_id):
    # Check if the record exists in orch_request
    cur.execute("select * from orch_request where orch_job_id = '%d'" %
                job_id)
    orch_request = cur.fetchall()
    if orch_request:
        cur.execute("delete from orch_request where orch_job_id = '%d'" %
                    job_id)
        LOG.info("icommunity/itrapdest is removed in orch_request.")
    else:
        LOG.info("There is no icommunity/itrapdest in orch_request.")


def remove_snmp_in_orch_job(cur, master_id):
    # Check if the record exists in orch_job
    cur.execute("select * from orch_job where source_resource_id = '%s'" %
                master_id)
    orch_job = cur.fetchall()
    if orch_job:
        for orch_job_record in orch_job:
            remove_id = orch_job_record['id']
            remove_snmp_in_orch_request(cur, remove_id)
            cur.execute("delete from orch_job where id = %d" % (remove_id))
            LOG.info("icommunity is removed in orch_job.")
    else:
        LOG.info("There is no icommunity/itrapdest in orch_job.")


def remove_snmp_in_subcloud_resource(cur, master_id):
    # Check if the record exists in subcloud_resource
    cur.execute("select * from subcloud_resource "
                "where subcloud_resource_id = '%s'" % (master_id))
    resource_subcloud = cur.fetchall()
    if resource_subcloud:
        cur.execute("delete from subcloud_resource "
                    "where subcloud_resource_id = '%s'" % (master_id))
        LOG.info("icommunity is removed in subcloud_resource.")
    else:
        LOG.info("There is no icommunity/itrapdest in subcloud_resource.")


def remove_snmp_record():
    conn = psycopg2.connect("dbname='dcorch' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            # Check if any icommunity or itrapdest record exists
            cur.execute("select * from resource where resource_type in "
                        "('icommunity','itrapdest')")
            resource_records = cur.fetchall()
            if not resource_records:
                LOG.info("Nothing to do - "
                         "there is no icommunity/itrapdest in resource.")
                return
            for data_resource in resource_records:
                master_id = data_resource['master_id']
                remove_snmp_in_subcloud_resource(cur, master_id)
                remove_snmp_in_orch_job(cur, master_id)
                cur.execute("delete from resource "
                            "where master_id = '%s'" % (master_id))
                LOG.info("icommunity/itrapdest is removed from resource.")
            LOG.info("snmp community and trapdest data removal completed.")


if __name__ == "__main__":
    sys.exit(main())


@@ -1524,33 +1524,6 @@ class AppOperator(object):
         return rc

-    def _old_app_is_non_decoupled(self, old_app):
-        """Special case application upgrade check for STX 5.0
-
-        This is a special case identifier for platform application recovery
-        of a non-decoupled application during application upgrade.
-
-        Helm plugins were removed and delivered as part of the application
-        tarball. During application update, in the case of an apply failure
-        the application is recovered to the old version to restore
-        application functionality.
-
-        The current decoupled app framework relies on the existence of a
-        plugin directory to signify that it is a system knowledgeable
-        application. The prior non-decoupled applications do not have this
-        structure (i.e. the portieris and nginx-ingress-controller
-        applications are not decoupled in stx4.0). This function will
-        identify them so their saved overrides can be used during recovery
-        and plugins/operators can be reloaded after recovery is completed.
-
-        NOTE: This and its call should be removed from master after branching
-        for STX 5.0 is complete. All applications post STX 5.0 will be
-        decoupled and future application upgrades do not require this.
-        """
-        if (not old_app.system_app and
-                old_app.name in [constants.HELM_APP_NGINX_IC,
-                                 constants.HELM_APP_PORTIERIS]):
-            return True
-        return False
-
     def _perform_app_recover(self, old_app, new_app, armada_process_required=True):
         """Perform application recover
@@ -1568,14 +1541,8 @@ class AppOperator(object):
         """

         def _activate_old_app_plugins(old_app):
-            # Enable the old app plugins. Only reload the operators for the
-            # apps decoupled in stx5.0 but not decoupled in stx4.0, this is
-            # to make sure the correct information is loaded. This particular
-            # handling for non-decoupled apps can be removed in the stx6.0
-            if self._old_app_is_non_decoupled(old_app):
-                self._helm.discover_plugins()
-            else:
-                self._plugins.activate_plugins(old_app)
+            # Enable the old app plugins.
+            self._plugins.activate_plugins(old_app)

         LOG.info("Starting recover Application %s from version: %s to version: %s" %
                  (old_app.name, new_app.version, old_app.version))
@@ -1609,7 +1576,7 @@ class AppOperator(object):
         if armada_process_required:
             overrides_str = ''
             old_app.charts = self._get_list_of_charts(old_app.sync_armada_mfile)
-            if old_app.system_app or self._old_app_is_non_decoupled(old_app):
+            if old_app.system_app:
                 (helm_files, armada_files) = self._get_overrides_files(
                     old_app.sync_overrides_dir, old_app.charts, old_app.name, mode=None)


@@ -1188,35 +1188,6 @@ class ConductorManager(service.PeriodicService):
             raise exception.SysinvException(_(
                 "Failed to create pxelinux.cfg file"))

-    def _enable_etcd_security_config(self, context):
-        """Update the manifests for etcd security
-
-        Note: this can be removed in the release after STX5.0
-        returns True if runtime manifests were applied
-        """
-        controllers = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
-        for host in controllers:
-            if not utils.is_host_active_controller(host):
-                # Just enable etcd security on the standby controller.
-                # Etcd security was enabled on the active controller with a
-                # migration script.
-                personalities = [constants.CONTROLLER]
-                host_uuids = [host.uuid]
-                config_uuid = self._config_update_hosts(
-                    context, personalities, host_uuids)
-                config_dict = {
-                    "personalities": personalities,
-                    "host_uuids": host_uuids,
-                    "classes": ['platform::etcd::upgrade::runtime'],
-                    puppet_common.REPORT_STATUS_CFG:
-                        puppet_common.REPORT_UPGRADE_ACTIONS
-                }
-                self._config_apply_runtime_manifest(context,
-                                                    config_uuid=config_uuid,
-                                                    config_dict=config_dict)
-                return True
-        return False
-
     def _remove_pxe_config(self, host):
         """Delete the PXE config file for this host.
@@ -10678,9 +10649,6 @@ class ConductorManager(service.PeriodicService):
                 {'state': constants.UPGRADE_ACTIVATION_FAILED})

         manifests_applied = False
-        if from_version == tsc.SW_VERSION_20_06:
-            # Apply etcd security puppet manifest to the standby controller.
-            manifests_applied = self._enable_etcd_security_config(context)

         if manifests_applied:
             LOG.info("Running upgrade activation manifests")


@@ -11,7 +11,6 @@ import io
 import logging

 SW_VERSION = ""
-SW_VERSION_20_06 = "20.06"

 nodetype = None
 subfunctions = []