diff --git a/scripts/run-upgrade.sh b/scripts/run-upgrade.sh index 05c1b86b11..ddf9af4115 100755 --- a/scripts/run-upgrade.sh +++ b/scripts/run-upgrade.sh @@ -13,636 +13,70 @@ # See the License for the specific language governing permissions and # limitations under the License. -## Pre-flight Check ---------------------------------------------------------- -# Clear the screen and make sure the user understands whats happening. -clear - # NOTICE: To run this in an automated fashion run the script via # root@HOSTNAME:/opt/openstack-ansible# echo "YES" | bash scripts/run-upgrade.sh -# Notify the user. -echo -e " -This script will perform a v10.x to v11.x upgrade. -Once you start the upgrade there's no going back. - -Note, this is an online upgrade and while the -in progress running VMs will not be impacted. -However, you can expect some hiccups with OpenStack -API services while the upgrade is running. - -Are you ready to perform this upgrade now? -" - -# Confirm the user is ready to upgrade. -read -p 'Enter "YES" to continue or anything else to quit: ' UPGRADE -if [ "${UPGRADE}" == "YES" ]; then - echo "Running Upgrade from v10.x to v11.x" -else - exit 99 -fi ## Shell Opts ---------------------------------------------------------------- set -e -u -v -## Library Check ------------------------------------------------------------- -info_block "Checking for required libraries." 
2> /dev/null || source $(dirname ${0})/scripts-library.sh - ## Functions ----------------------------------------------------------------- -function get_inv_items { - ./scripts/inventory-manage.py -f /etc/openstack_deploy/openstack_inventory.json -l | grep -w ".*$1" -} -function remove_inv_items { - ./scripts/inventory-manage.py -f /etc/openstack_deploy/openstack_inventory.json -r "$1" -} - -function run_lock { - set +e - run_item="${RUN_TASKS[$1]}" - file_part="${run_item}" - - # NOTE(sigmavirus24): This handles tasks like: - # "-e 'rabbitmq_upgrade=true' setup-infrastructure.yml" - # "/tmp/fix_container_interfaces.yml || true" - # So we can get the appropriate basename for the upgrade_marker - for part in $run_item; do - if [[ "$part" == *.yml ]];then - file_part="$part" - break +function check_for_juno { + if [ -d "/etc/rpc_deploy" ];then + echo "--------------ERROR--------------" + echo "/etc/rpc_deploy directory found, which looks like you're trying to upgrade from Juno." + echo "Please upgrade your environment to Kilo before proceeding." + exit 1 fi - done +} - upgrade_marker_file=$(basename ${file_part} .yml) - upgrade_marker="/etc/openstack_deploy/upgrade-juno/$upgrade_marker_file.complete" - if [ ! -f "$upgrade_marker" ];then - # NOTE(sigmavirus24): Use eval so that we properly turn strings like - # "/tmp/fix_container_interfaces.yml || true" - # Into a command, otherwise we'll get an error that there's no playbook - # named || - eval "openstack-ansible $2" - playbook_status="$?" - echo "ran $run_item" +function check_for_kilo { + if [[ ! -d "/etc/openstack_deploy" ]]; then + echo "--------------ERROR--------------" + echo "/etc/openstack_deploy directory not found." + echo "It appears you do not have a Kilo environment installed." 
+    exit 2
+  fi
+}

-  if [ "$playbook_status" == "0" ];then
-    RUN_TASKS=("${RUN_TASKS[@]/$run_item}")
-    touch "$upgrade_marker"
-    echo "$run_item has been marked as success"
+function pre_flight {
+    ## Library Check -------------------------------------------------------------
+    echo "Checking for required libraries." 2> /dev/null || source $(dirname ${0})/scripts-library.sh
+    ## Pre-flight Check ----------------------------------------------------------
+    # Clear the screen and make sure the user understands what's happening.
+    clear
+
+    # Notify the user.
+    echo -e "
+    This script will perform a v11.x to v12.x upgrade.
+    Once you start the upgrade there's no going back.
+
+    Note, this is an online upgrade and while the
+    in progress running VMs will not be impacted.
+    However, you can expect some hiccups with OpenStack
+    API services while the upgrade is running.
+
+    Are you ready to perform this upgrade now?
+    "
+
+    # Confirm the user is ready to upgrade.
+    read -p 'Enter "YES" to continue or anything else to quit: ' UPGRADE
+    if [ "${UPGRADE}" == "YES" ]; then
+      echo "Running Upgrade from v11.x to v12.x"
     else
-      echo "******************** FAILURE ********************"
-      echo "The upgrade script has failed please rerun the following task to continue"
-      echo "Failed on task $run_item"
-      echo "Do NOT rerun the upgrade script!"
-      echo "Please execute the remaining tasks:"
-      # Run the tasks in order
-      for item in ${!RUN_TASKS[@]}; do
-        echo "${RUN_TASKS[$item]}"
-      done
-      echo "******************** FAILURE ********************"
       exit 99
     fi
-  else
-    RUN_TASKS=("${RUN_TASKS[@]/$run_item.*}")
-  fi
-  set -e
 }
-if [ -d "/etc/rpc_deploy" ];then - # Create an archive of the old deployment directory - tar -czf ~/pre-upgrade-backup.tgz /etc/rpc_deploy - # Move the new deployment directory bits into place - mkdir -p /etc/openstack_deploy/ - cp -R /etc/rpc_deploy/* /etc/openstack_deploy/ - mv /etc/rpc_deploy /etc/rpc_deploy.OLD -else - echo "No /etc/rpc_deploy directory found, thus nothing to upgrade." - exit 1 -fi +function main { + pre_flight + check_for_juno + check_for_kilo +} -if [ ! -d "/etc/openstack_deploy/upgrade-juno" ];then - mkdir -p "/etc/openstack_deploy/upgrade-juno" -fi - -# Drop deprecation file. -cat > /etc/rpc_deploy.OLD/DEPRECATED.txt <> /etc/openstack_deploy/user_secrets.yml < /tmp/fix_minor_adjustments.yml < - chown -R "{{ horizon_system_user_name }}":"{{ horizon_system_group_name }}" "/usr/local/lib/python2.7/dist-packages/static" - register: horizon_cmd_chown - failed_when: false - changed_when: horizon_cmd_chown.rc == 0 - vars: - horizon_system_user_name: "horizon" - horizon_system_group_name: "www-data" - horizon_system_shell: "/bin/false" - horizon_system_comment: "horizon system user" - horizon_system_user_home: "/var/lib/{{ horizon_system_user_name }}" -- name: Fix keystone things - hosts: "keystone_all" - gather_facts: false - user: root - tasks: - - name: Fix keystone permissions - command: > - chown -R "keystone":"keystone" "/var/log/keystone" - register: keystone_cmd_chown - failed_when: false - changed_when: keystone_cmd_chown.rc == 0 -EOF - -# Create a play to fix host things -cat > /tmp/fix_host_things.yml < /tmp/fix_container_interfaces.yml < /tmp/ensure_container_networking.yml < /tmp/fix_swift_rings_locations.yml < - inventory_hostname == groups['swift_hosts'][0] - vars: - swift_system_user_name: swift - swift_system_group_name: swift - swift_system_shell: /bin/bash - swift_system_comment: swift system user - swift_system_home_folder: "/var/lib/{{ swift_system_user_name }}" -EOF - - -pushd playbooks - # Reconfig haproxy if setup. 
- if grep '^haproxy_hosts\:' /etc/openstack_deploy/openstack_user_config.yml;then - ansible haproxy_hosts \ - -m shell \ - -a 'rm /etc/haproxy/conf.d/nova_api_ec2 /etc/haproxy/conf.d/nova_spice_console' - RUN_TASKS+=("haproxy-install.yml") - fi - - # Hunt for and remove any rpc_release link files from pip, forces True as - # containers may not exist at this point. - ansible "hosts:all_containers" \ - -m "file" \ - -a "path=/root/.pip/links.d/rpc_release.link state=absent" || true - - # The galera monitoring user now defaults to 'monitoring', cleaning up old 'haproxy' user. - ansible "galera_all[0]" -m "mysql_user" -a "name=haproxy host='%' password='' priv='*.*:USAGE' state=absent" - - # Run the fix adjustments play. - RUN_TASKS+=("/tmp/fix_minor_adjustments.yml") - - # Run the fix host things play - RUN_TASKS+=("/tmp/fix_host_things.yml") - - # Run the fix for container networks. Forces True as containers may not exist at this point - RUN_TASKS+=("/tmp/fix_container_interfaces.yml || true") - - # Send the swift rings to the first swift host if swift was installed in "v10.x". 
- if [ "$(ansible 'swift_hosts' --list-hosts)" != "No hosts matched" ] && [ -d "/etc/swift/rings" ];then - RUN_TASKS+=("/tmp/fix_swift_rings_locations.yml") - else - # No swift install found removing the fix file - rm /tmp/fix_swift_rings_locations.yml - fi - - # Ensure that the host is setup correctly to support lxc - RUN_TASKS+=("lxc-hosts-setup.yml") - - # Rerun create containers that will update all running containers with the new bits - RUN_TASKS+=("lxc-containers-create.yml") - - # Run the container network ensure play - RUN_TASKS+=("/tmp/ensure_container_networking.yml") - - # With inventory and containers upgraded run the remaining host setup - RUN_TASKS+=("openstack-hosts-setup.yml") - - # Now run the infrastructure setup - RUN_TASKS+=("-e 'rabbitmq_upgrade=true' setup-infrastructure.yml") - - # Now upgrade the rest of OpenStack - RUN_TASKS+=("setup-openstack.yml") - - # Run the tasks in order - for item in ${!RUN_TASKS[@]}; do - run_lock $item "${RUN_TASKS[$item]}" - done -popd +main