#!/usr/bin/env bash
# Copyright 2015, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## Pre-flight Check ----------------------------------------------------------
# Clear the screen and make sure the user understands what's happening.
clear

# NOTICE: To run this in an automated fashion run the script via
#   root@HOSTNAME:/opt/os-ansible-deployment# echo "YES" | bash scripts/run-upgrade.sh

# Notify the user.
echo -e "
This script will perform a v10.x to v11.x upgrade.
Once you start the upgrade there's no going back.

Note that this is an online upgrade: running VMs will not be impacted while it
is in progress. However, you can expect some hiccups with the OpenStack API
services while the upgrade is running.

Are you ready to perform this upgrade now?
"

# Confirm the user is ready to upgrade.
read -p 'Enter "YES" to continue or anything else to quit: ' UPGRADE
if [ "${UPGRADE}" == "YES" ]; then
  echo "Running Upgrade from v10.x to v11.x"
else
  exit 99
fi

## Shell Opts ----------------------------------------------------------------
set -e -u -v

## Library Check -------------------------------------------------------------
info_block "Checking for required libraries." 2> /dev/null || source $(dirname ${0})/scripts-library.sh

## Functions -----------------------------------------------------------------
# List inventory items matching the given pattern.
function get_inv_items(){
  ./scripts/inventory-manage.py -f /etc/openstack_deploy/openstack_inventory.json -l | grep -w ".*$1"
}

# Remove the given item from the inventory.
function remove_inv_items(){
  ./scripts/inventory-manage.py -f /etc/openstack_deploy/openstack_inventory.json -r "$1"
}

# Run a task once, dropping a marker file on success so that a rerun of the
# script skips every task that has already completed.
function run_lock() {
  set +e
  run_item="${RUN_TASKS[$1]}"

  # A task string may carry extra arguments (e.g. "-e '...' play.yml"), so
  # name the marker file after the playbook found within it.
  upgrade_marker_file="$run_item"
  for part in $run_item; do
    if [[ "$part" == *.yml ]]; then
      upgrade_marker_file="$part"
      break
    fi
  done
  upgrade_marker_file=$(basename "$upgrade_marker_file" .yml)
  upgrade_marker="/etc/openstack_deploy/upgrade-juno/$upgrade_marker_file.complete"

  if [ ! -f "$upgrade_marker" ];then
    # Task strings may contain shell operators ("|| true") and quoted
    # arguments, so the command is evaluated as a whole.
    eval "openstack-ansible ${2}"
    # Capture the exit code immediately; any command run in between would
    # clobber it.
    playbook_status="$?"
    echo "ran $run_item"
    if [ "$playbook_status" == "0" ];then
      RUN_TASKS=("${RUN_TASKS[@]/$run_item}")
      touch "$upgrade_marker"
      echo "$run_item has been marked as success"
    else
      echo "******************** FAILURE ********************"
      echo "The upgrade script has failed. Please rerun the failed task to continue."
      echo "Failed on task $run_item"
      echo "Do NOT rerun the upgrade script!"
      echo "Please execute the remaining tasks:"
      # Print the remaining tasks in order
      for item in ${!RUN_TASKS[@]}; do
        echo "${RUN_TASKS[$item]}"
      done
      echo "******************** FAILURE ********************"
      exit 99
    fi
  else
    RUN_TASKS=("${RUN_TASKS[@]/$run_item.*}")
  fi
  set -e
}

## Main ----------------------------------------------------------------------
# Tasks are appended to this array and executed, in order, via run_lock.
RUN_TASKS=()

# Create new openstack_deploy directory.
if [ -d "/etc/rpc_deploy" ];then
  # Create an archive of the old deployment directory
  tar -czf ~/pre-upgrade-backup.tgz /etc/rpc_deploy
  # Move the new deployment directory bits into place
  mkdir -p /etc/openstack_deploy/
  cp -R /etc/rpc_deploy/* /etc/openstack_deploy/
  mv /etc/rpc_deploy /etc/rpc_deploy.OLD
else
  echo "No /etc/rpc_deploy directory found, thus nothing to upgrade."
  exit 1
fi
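# A hedged addition, not part of the original flow: one could verify here that
# the freshly written backup archive is readable before anything else changes,
# since it is the only rollback path once /etc/rpc_deploy has been moved aside.
tar -tzf ~/pre-upgrade-backup.tgz > /dev/null || {
  echo "Backup archive ~/pre-upgrade-backup.tgz is unreadable; aborting upgrade."
  exit 1
}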
if [ ! -d "/etc/openstack_deploy/upgrade-juno" ];then
  mkdir -p "/etc/openstack_deploy/upgrade-juno"
fi

# Drop deprecation file.
cat > /etc/rpc_deploy.OLD/DEPRECATED.txt <<EOF
This directory has been deprecated; please navigate to "/etc/openstack_deploy".
EOF

# Append the new v11.x entries to the user secrets file.
cat >> /etc/openstack_deploy/user_secrets.yml <<EOF
# New v11.x secrets (the appended keys did not survive in this copy of the script)
EOF

# Create a play to fix minor adjustments.
cat > /tmp/fix_minor_adjustments.yml <<EOF
---
- name: Fix horizon things
  hosts: "horizon_all"
  gather_facts: false
  user: root
  tasks:
    - name: Fix horizon permissions
      command: >
        chown -R "{{ horizon_system_user_name }}":"{{ horizon_system_group_name }}"
        "/usr/local/lib/python2.7/dist-packages/static"
      register: horizon_cmd_chown
      failed_when: false
      changed_when: horizon_cmd_chown.rc == 0
  vars:
    horizon_system_user_name: "horizon"
    horizon_system_group_name: "www-data"
    horizon_system_shell: "/bin/false"
    horizon_system_comment: "horizon system user"
    horizon_system_user_home: "/var/lib/{{ horizon_system_user_name }}"
- name: Fix keystone things
  hosts: "keystone_all"
  gather_facts: false
  user: root
  tasks:
    - name: Fix keystone permissions
      command: >
        chown -R "keystone":"keystone" "/var/log/keystone"
      register: keystone_cmd_chown
      failed_when: false
      changed_when: keystone_cmd_chown.rc == 0
EOF

# Create a play to fix host things.
cat > /tmp/fix_host_things.yml <<EOF
---
# NOTE: the body of this play did not survive in this copy of the script.
EOF

# Create a play to fix container interfaces.
cat > /tmp/fix_container_interfaces.yml <<EOF
---
# NOTE: the body of this play did not survive in this copy of the script.
EOF

# Create a play to ensure container networking is up.
cat > /tmp/ensure_container_networking.yml <<EOF
---
# NOTE: the body of this play did not survive in this copy of the script.
EOF

# Create a play to put the swift rings into place on the first swift host.
cat > /tmp/fix_swift_rings_locations.yml <<EOF
---
# NOTE: the task list of this play did not survive in this copy of the script;
# what remains shows a ring-sync task gated on the first swift host via
# "when: inventory_hostname == groups['swift_hosts'][0]", plus the play vars
# below.
- name: Fix swift rings locations
  hosts: "swift_hosts"
  gather_facts: false
  user: root
  vars:
    swift_system_user_name: swift
    swift_system_group_name: swift
    swift_system_shell: /bin/bash
    swift_system_comment: swift system user
    swift_system_home_folder: "/var/lib/{{ swift_system_user_name }}"
EOF

pushd playbooks
  # Reconfigure haproxy if it is set up.
  if grep '^haproxy_hosts\:' /etc/openstack_deploy/openstack_user_config.yml;then
    ansible haproxy_hosts \
            -m shell \
            -a 'rm /etc/haproxy/conf.d/nova_api_ec2 /etc/haproxy/conf.d/nova_spice_console'
    RUN_TASKS+=("haproxy-install.yml")
  fi

  # Hunt for and remove any rpc_release link files from pip; forced to "true"
  # as the containers may not exist at this point.
  ansible "hosts:all_containers" \
          -m "file" \
          -a "path=/root/.pip/links.d/rpc_release.link state=absent" || true

  # The galera monitoring user now defaults to 'monitoring'; clean up the old
  # 'haproxy' user.
  ansible "galera_all[0]" \
          -m "mysql_user" \
          -a "name=haproxy host='%' password='' priv='*.*:USAGE' state=absent"

  # Run the fix adjustments play.
  RUN_TASKS+=("/tmp/fix_minor_adjustments.yml")

  # Run the fix host things play.
  RUN_TASKS+=("/tmp/fix_host_things.yml")

  # Run the fix for container networks; forced to "true" as the containers may
  # not exist at this point.
  RUN_TASKS+=("/tmp/fix_container_interfaces.yml || true")

  # Send the swift rings to the first swift host if swift was installed in "v10.x".
  if [ "$(ansible 'swift_hosts' --list-hosts)" != "No hosts matched" ] && [ -d "/etc/swift/rings" ];then
    RUN_TASKS+=("/tmp/fix_swift_rings_locations.yml")
  else
    # No swift install found; remove the fix file.
    rm /tmp/fix_swift_rings_locations.yml
  fi

  # Ensure that the host is set up correctly to support lxc.
  RUN_TASKS+=("lxc-hosts-setup.yml")

  # Rerun container create, which will update all running containers with the
  # new bits.
  RUN_TASKS+=("lxc-containers-create.yml")

  # Run the container network ensure play.
  RUN_TASKS+=("/tmp/ensure_container_networking.yml")

  # With the inventory and containers upgraded, run the remaining host setup.
  RUN_TASKS+=("openstack-hosts-setup.yml")

  # Now run the infrastructure setup.
  RUN_TASKS+=("-e 'rabbitmq_upgrade=true' setup-infrastructure.yml")

  # Now upgrade the rest of OpenStack.
  RUN_TASKS+=("setup-openstack.yml")

  # Run the tasks in order.
  for item in ${!RUN_TASKS[@]}; do
    run_lock $item "${RUN_TASKS[$item]}"
  done
popd
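# Illustrative recovery note (comments only, nothing executed): when a playbook
# fails, run_lock prints the remaining task strings and exits. Each completed
# step leaves a marker such as
# /etc/openstack_deploy/upgrade-juno/setup-openstack.complete, so the remaining
# plays can be run by hand from the playbooks directory, e.g.:
#   cd playbooks && openstack-ansible setup-openstack.yml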