diff --git a/labs/osbash/config/scripts.ubuntu_cluster b/labs/osbash/config/scripts.ubuntu_cluster index effd51f2..99918959 100644 --- a/labs/osbash/config/scripts.ubuntu_cluster +++ b/labs/osbash/config/scripts.ubuntu_cluster @@ -70,15 +70,13 @@ cmd snapshot_cycle -n compute1 cinder-volume_installed cmd queue ubuntu/setup_telemetry_compute.sh cmd snapshot_cycle -n compute1 telemetry-compute_installed +cmd boot -n compute1 # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Take snapshot of database changes on controller VM, too -cmd queue shutdown_controller.sh -cmd boot -n compute1 +cmd shutdown -n controller -cmd wait_for_shutdown -n controller cmd snapshot -n controller controller_-_compute1_node_installed -#cmd boot -n controller #============================================================================== cmd queue config_public_network.sh cmd queue config_private_network.sh diff --git a/labs/osbash/lib/osbash/functions-host.sh b/labs/osbash/lib/osbash/functions-host.sh index e7996b79..fd5478e5 100644 --- a/labs/osbash/lib/osbash/functions-host.sh +++ b/labs/osbash/lib/osbash/functions-host.sh @@ -424,6 +424,15 @@ function command_from_config { echo >&2 vm_conditional_snapshot "$vm_name" "$shot_name" vm_conditional_snapshot "$vm_name" "$shot_name" ;; + shutdown) + # Format: shutdown [-n <node_name>] + get_cmd_options $args + echo >&2 "vm_acpi_shutdown $vm_name" + vm_acpi_shutdown "$vm_name" + echo >&2 vm_wait_for_shutdown "$vm_name" + vm_wait_for_shutdown "$vm_name" + conditional_sleep 1 + ;; wait_for_shutdown) # Format: wait_for_shutdown [-n <node_name>] get_cmd_options $args diff --git a/labs/osbash/scripts/shutdown_controller.sh b/labs/osbash/scripts/shutdown_controller.sh deleted file mode 100755 index 8a4887a5..00000000 --- a/labs/osbash/scripts/shutdown_controller.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash -set -o errexit -o nounset -TOP_DIR=$(cd "$(dirname "$0")/.." && pwd) -source "$TOP_DIR/config/paths" -source "$LIB_DIR/functions.guest.sh" - -indicate_current_auto - -exec_logfile - -# At this point in the cluster build, we just rebooted the compute VM to take -# a snapshot, and we are about to reboot the controller node for the same -# purpose. -# -# About a minute after we reboot the controller, the status of nova-compute -# (according to nova-manage service list) becomes "XXX". -# -# If we sleep for 2 seconds now, before rebooting the controller, the -# nova-compute service on the compute node will keep running and the status -# will automatically return to ":-)" after some time (may take several -# minutes). If we don't sleep here, the nova-compute service on compute will -# die within a few minutes (needs manual service restart or a compute node -# reboot). -sleep 2 - -echo "Shutting down the controller node." -ssh \ -o "UserKnownHostsFile /dev/null" \ -o "StrictHostKeyChecking no" \ -i "$HOME/.ssh/osbash_key" \ controller \ sudo /sbin/shutdown -P now