Include missing data migration steps on deploy start
There are additional steps needed to enable the "deploy host" operation
for major release upgrades. This commit adds those steps:

1. Create the TO release platform config directory, done during
   upgrade-start in the legacy upgrade procedure
2. Create the TO release rabbitmq directory, done by the upgrade
   manifest after controller-1 reinstall in the legacy upgrade

The commit also fixes some issues:

1. shell-utils logging functions logged nowhere after being sourced
   by other scripts
2. the sync-controllers-feed script synced only the ostree_repo
   directory instead of the full feed content
3. major release deployment scripts were executed from different
   places; now all scripts run from the TO release feed directory,
   or from the checked-out TO release ostree repo in the chroot case
4. the umount command in chroot_mounts was changed to a lazy umount

Note: since the "deploy host" endpoint for major release deployment
is not yet implemented, the test plan uses test cases that simulate
the "deploy host" operation.

Test Plan
PASS: simulate "deploy host" successfully for AIO-SX
PASS: simulate "deploy host" successfully for AIO-DX

Depends-on: https://review.opendev.org/c/starlingx/config/+/913715

Story: 2010676
Task: 49703

Change-Id: Ib6ae49b3590a1e50acb305ac7482e28bcc4de403
Signed-off-by: Heitor Matsui <heitorvieira.matsui@windriver.com>
commit f88638a374
parent 306ea5f631
@@ -75,7 +75,7 @@ umount_all() {
         dst=${src_dst[1]}

         info "Unmounting $dst"
-        umount_output=$(sudo umount $dst 2>&1)
+        umount_output=$(sudo umount -l $dst 2>&1)
         if [ $? -ne 0 ]; then
             # ignore messages that are not harmful
             if [[ ! $umount_output =~ ("not mounted"|"no mount point specified") ]]; then
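The -l flag requests a lazy unmount: the mount point is detached from the hierarchy immediately and cleaned up once it is no longer busy, presumably to keep umount_all() from aborting on busy chroot mounts. A minimal illustration of the difference, with made-up paths that are not taken from this change:

    # hypothetical example: a bind mount that another process may still be using
    rootdir=/tmp/example-rootdir
    mkdir -p ${rootdir}/dev
    sudo mount --bind /dev ${rootdir}/dev

    sudo umount ${rootdir}/dev      # fails with "target is busy" while something uses it
    sudo umount -l ${rootdir}/dev   # detaches now, releases once the last user exits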
@@ -14,6 +14,7 @@ Run platform upgrade prep data migration as a standalone executable
 import logging as LOG
 import os
+import shutil
 import subprocess
 import sys

@@ -231,6 +232,31 @@ class DataMigration(object):
         finally:
             devnull.close()

+    def create_platform_config(self):
+        """
+        Create platform config for target release
+        """
+        try:
+            platform_config_dir = os.path.join(PLATFORM_PATH, "config")
+            from_config_dir = os.path.join(platform_config_dir, self.from_release)
+            to_config_dir = os.path.join(platform_config_dir, self.to_release)
+            shutil.copytree(from_config_dir, to_config_dir)
+        except Exception as e:
+            LOG.exception("Failed to create platform config for release %s. "
+                          "Error: %s" % (self.to_release, str(e)))
+            raise
+
+    def create_rabbitmq_directory(self):
+        """
+        Create the target release rabbitmq directory
+        """
+        try:
+            rabbit_dir = os.path.join("/var/lib/rabbitmq", self.to_release, "mnesia")
+            os.makedirs(rabbit_dir, exist_ok=True)
+        except Exception as e:
+            LOG.exception("Failed to create rabbitmq directory. Error: %s" % str(e))
+            raise
+

 def main(sys_argv):
     args = upgrade_utils.parse_arguments(sys_argv)
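For reference, the two new methods boil down to copying the FROM release platform config directory to the TO release path and creating the TO release rabbitmq mnesia directory. A rough shell equivalent, assuming PLATFORM_PATH resolves to /opt/platform (an assumption here) and using hypothetical release values:

    from_release=22.12    # hypothetical FROM release
    to_release=24.09      # hypothetical TO release
    sudo cp -a /opt/platform/config/${from_release} /opt/platform/config/${to_release}
    sudo mkdir -p /var/lib/rabbitmq/${to_release}/mnesia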
@@ -285,6 +311,12 @@ def main(sys_argv):
         # Export /etc directory to $rootdir/etc
         data_migration.export_etc()

+        # Create platform config
+        data_migration.create_platform_config()
+
+        # Create rabbitmq directory
+        data_migration.create_rabbitmq_directory()
+
         LOG.info("Data migration preparation completed successfully.")

     except Exception as e:
@@ -8,12 +8,18 @@
 # can be sourced and used by other shell scripts.
 #

+# If not specified by the importing
+# script defaults to USM main log file
+if [ -z $LOG_FILE ]; then
+    LOG_FILE="/var/log/software.log"
+fi
+
 log()
 {
     script_name=$(basename $0)
     log_type=$1
     shift
-    echo "$(date -Iseconds | cut -d'+' -f1): ${script_name}[${$}]: ${log_type}: ${@}"
+    echo "$(date -Iseconds | cut -d'+' -f1): ${script_name}[${$}]: ${log_type}: ${@}" 2>&1 >> $LOG_FILE
 }

 info() {
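With this change a consumer only needs to set LOG_FILE before sourcing shell-utils, or accept the /var/log/software.log default; the next hunk makes deploy-start set LOG_FILE the same way. A minimal usage sketch, assuming the helpers are pulled in with source as other scripts do:

    LOG_FILE="/var/log/software.log"   # optional override; must be set before sourcing

    script_dir=$(dirname $0)
    source ${script_dir}/shell-utils

    info "starting work"               # appended to ${LOG_FILE}
    error "something went wrong"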
@@ -13,7 +13,8 @@
 # 5. perform data migration
 #

-exec 2>&1 >> /var/log/software.log
+# Used by shell-utils as the log file path
+LOG_FILE="/var/log/software.log"

 script_dir=$(dirname $0)
 shell_utils=${script_dir}/shell-utils
@@ -55,7 +56,7 @@ instbr="starlingx"
 report_agent="deploy-start"

 deploy_cleanup() {
-    sudo ${rootdir}/usr/sbin/software-deploy/deploy-cleanup ${repo} ${rootdir} all
+    sudo ${script_dir}/deploy-cleanup ${repo} ${rootdir} all
 }

 deploy_update_state() {
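These call sites implement item 3 of the commit message: instead of hard-coded /usr/sbin/software-deploy paths, helpers are resolved relative to the running script, so everything executes from the TO release feed directory (or from the checked-out TO release ostree repo in the chroot case). The pattern, in short:

    script_dir=$(dirname $0)                           # directory the running script came from
    sudo ${script_dir}/deploy-cleanup ${repo} ${rootdir} all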
@@ -70,10 +71,10 @@ handle_error() {
     local error_message="$2"
     local state="start-failed"

-    error "${error_message}" >&2
-    error "Please check the error details and take appropriate action for recovery." >&2
+    error "${error_message}"
+    error "Please check the error details and take appropriate action for recovery."

-    error "Update deploy state ${state}." >&2
+    error "Update deploy state ${state}."
     deploy_update_state ${state}

     # cleanup before exiting
@@ -84,7 +85,7 @@ handle_error() {

 for dir in $rootdir $repo; do
     if [ -e ${dir} ]; then
-        error "${dir} already exists. Please ensure to clean up environment to continue." >&2
+        error "${dir} already exists. Please ensure to clean up environment to continue."
         exit 1
     fi
 done
@@ -113,7 +114,7 @@ sudo ostree --repo=${repo} checkout ${commit_id} ${rootdir} || handle_error $? "

 # create proper mounts on deploy file system
 info "Creating mount points..."
-sudo ${rootdir}/usr/sbin/software-deploy/chroot_mounts.sh ${rootdir} || handle_error $? "Failed to mount required mount points"
+sudo ${script_dir}/chroot_mounts.sh ${rootdir} || handle_error $? "Failed to mount required mount points"

 sudo mount --bind ${rootdir}/usr/local/kubernetes/${k8s_ver} ${rootdir}/usr/local/kubernetes/current
 sudo cp /etc/kubernetes/admin.conf ${rootdir}/etc/kubernetes/
@@ -141,11 +142,16 @@ sudo chroot ${rootdir} /usr/bin/software-migrate ${from_ver} ${to_ver} ${port} |
 info "Data migration completed."

 info "Syncing feed between controllers..."
-SYNC_CONTROLLERS_SCRIPT="/usr/sbin/software-deploy/sync-controllers-feed"
-sync_controllers_cmd="${SYNC_CONTROLLERS_SCRIPT} ${cmd_line} --feed=${feed}"
+SYNC_CONTROLLERS_SCRIPT="${script_dir}/sync-controllers-feed"
+sync_controllers_cmd="${SYNC_CONTROLLERS_SCRIPT} ${cmd_line} --feed=$(dirname $feed)"
 ${sync_controllers_cmd} || handle_error $? "Failed to sync feeds"
 info "Feed sync complete."

+# TODO(heitormatsui) remove once sysinv upgrade tables are deprecated
+info "Creating ${to_ver} load entry in legacy upgrade table..."
+sudo -u postgres psql -d sysinv -c "insert into loads(id, uuid, state, software_version) values (nextval('loads_id_seq'), 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee', 'available', '${to_ver}');"
+info "Load created successfully."
+
 state="start-done"
 deploy_update_state $state
 info "Update deploy state ${state}."
@@ -29,7 +29,7 @@ def sync_controllers(to_release, feed, controller):
         "--delete",
         "--exclude", "tmp",
         feed,
-        f"rsync://{controller}/feed/rel-{to_release}/"
+        f"rsync://{controller}/feed"
     ]
     subprocess.run(cmd)

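Together with the --feed=$(dirname $feed) change in deploy-start above, the new rsync destination means the whole rel-<to_release> directory is pushed into the peer's feed rsync module rather than only the ostree_repo subdirectory. A rough sketch of the resulting transfer, assuming a feed layout under /var/www/pages/feed and with the option list abbreviated (-a is assumed here; the script may pass different flags):

    to_release=24.09                                         # hypothetical TO release
    feed=/var/www/pages/feed/rel-${to_release}/ostree_repo   # assumed feed layout
    controller=controller-1

    # before: only ostree_repo landed under rel-${to_release}/ on the peer
    rsync -a --delete --exclude tmp ${feed} rsync://${controller}/feed/rel-${to_release}/

    # after: the parent rel-${to_release} directory is synced into the feed module root,
    # so the full feed content is carried over
    rsync -a --delete --exclude tmp $(dirname ${feed}) rsync://${controller}/feed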