From 1342ab708faae3082e8ff4be5e5369ca15f60afc Mon Sep 17 00:00:00 2001
From: James Gu
Date: Fri, 19 Mar 2021 13:16:46 -0700
Subject: [PATCH] Enhanced deployment script to support baremetal multi-node

Additionally, aligned Treasuremap and Airshipctl to use the same
environment variable "SITE" for the site name.

Change-Id: I50494b50691a40efe68996cd11ccede6517b23d6
Signed-off-by: James Gu
---
 roles/airshipctl-run-script/tasks/main.yaml |  2 +-
 tools/deployment/24_build_images.sh         |  2 +-
 tools/deployment/30_deploy_controlplane.sh  | 18 ++++----
 tools/deployment/34_deploy_worker_node.sh   | 50 ++++++++++++---------
 tools/deployment/azure/31_create_configs.sh |  2 +-
 5 files changed, 43 insertions(+), 31 deletions(-)

diff --git a/roles/airshipctl-run-script/tasks/main.yaml b/roles/airshipctl-run-script/tasks/main.yaml
index d34a89ea2..ff46adb14 100644
--- a/roles/airshipctl-run-script/tasks/main.yaml
+++ b/roles/airshipctl-run-script/tasks/main.yaml
@@ -20,4 +20,4 @@
     default_zuul_dir: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}"
   environment:
     AIRSHIP_CONFIG_PHASE_REPO_URL: "{{ remote_work_dir | default(local_src_dir) | default(default_zuul_dir) }}"
-    SITE_NAME: "{{ site | default('test-site') }}"
+    SITE: "{{ site | default('test-site') }}"
diff --git a/tools/deployment/24_build_images.sh b/tools/deployment/24_build_images.sh
index 12cc9a731..5856a731a 100755
--- a/tools/deployment/24_build_images.sh
+++ b/tools/deployment/24_build_images.sh
@@ -18,7 +18,7 @@ export USER_NAME=${USER:-"ubuntu"}
 
 IMAGE_DIR=${IMAGE_DIR:-"/srv/images"}
 CLEANUP_SERVE_DIR=${CLEANUP_SERVE_DIR:-"false"}
-SITE_NAME=${SITE_NAME:-test-site}
+SITE=${SITE:-test-site}
 
 # List of phases to run to build images.
 IMAGE_PHASE_PLANS=${IMAGE_PHASE_PLANS:-"iso"}
diff --git a/tools/deployment/30_deploy_controlplane.sh b/tools/deployment/30_deploy_controlplane.sh
index 9e774d50d..e3d75ee36 100755
--- a/tools/deployment/30_deploy_controlplane.sh
+++ b/tools/deployment/30_deploy_controlplane.sh
@@ -20,14 +20,16 @@ export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
 
 # TODO (dukov) this is needed due to sushy tools inserts cdrom image to
 # all vms. This can be removed once sushy tool is fixed
-echo "Ensure all cdrom images are ejected."
-for vm in $(sudo virsh list --all --name |grep -v ${EPHEMERAL_DOMAIN_NAME})
-do
-  sudo virsh domblklist $vm |
-    awk 'NF==2 {print $1}' |
-      grep -v Target |
-        xargs -I{} sudo virsh change-media $vm {} --eject || :
-done
+if type "virsh" > /dev/null; then
+  echo "Ensure all cdrom images are ejected."
+  for vm in $(sudo virsh list --all --name |grep -v ${EPHEMERAL_DOMAIN_NAME})
+  do
+    sudo virsh domblklist $vm |
+      awk 'NF==2 {print $1}' |
+        grep -v Target |
+          xargs -I{} sudo virsh change-media $vm {} --eject || :
+  done
+fi
 
 echo "Create target k8s cluster resources"
 airshipctl phase run controlplane-ephemeral --debug
diff --git a/tools/deployment/34_deploy_worker_node.sh b/tools/deployment/34_deploy_worker_node.sh
index 5221ab75a..b77225e4c 100755
--- a/tools/deployment/34_deploy_worker_node.sh
+++ b/tools/deployment/34_deploy_worker_node.sh
@@ -18,32 +18,42 @@ set -e
 export TIMEOUT=${TIMEOUT:-3600}
 export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
 export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
-WORKER_NODE="node03"
+WORKER_NODE=${WORKER_NODE:-"node03"}
+EPHEMERAL_DOMAIN_NAME="air-ephemeral"
 
-echo "Stop ephemeral node"
-sudo virsh destroy air-ephemeral
+# all vms. This can be removed once sushy tool is fixed
+if type "virsh" > /dev/null; then
+  for vm in $(sudo virsh list --all --name --state-running |grep ${EPHEMERAL_DOMAIN_NAME})
+  do
+    echo "Stop ephemeral node '$vm'"
+    sudo virsh destroy $vm
+  done
+fi
 
 node_timeout () {
-  end=$(($(date +%s) + $TIMEOUT))
+  end=$(($(date +%s) + $TIMEOUT))
+  for worker in $WORKER_NODE
+  do
   while true; do
-    if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get $1 $WORKER_NODE | grep -qw $2) ; then
-      if [ "$1" = "node" ]; then
-        kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT label nodes $WORKER_NODE node-role.kubernetes.io/worker=""
-      fi
-
-      echo -e "\nGet $1 status"
-      kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get $1
-      break
-    else
-      now=$(date +%s)
-      if [ $now -gt $end ]; then
-        echo -e "\n$1 is not ready before TIMEOUT."
-        exit 1
-      fi
-      echo -n .
-      sleep 15
+    if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get $1 $worker | grep -qw $2) ; then
+      if [ "$1" = "node" ]; then
+        kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT label nodes $worker node-role.kubernetes.io/worker=""
       fi
+
+      echo -e "\nGet $1 status"
+      kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get $1
+      break
+    else
+      now=$(date +%s)
+      if [ $now -gt $end ]; then
+        echo -e "\n$1 is not ready before TIMEOUT."
+        exit 1
+      fi
+      echo -n .
+      sleep 15
+    fi
     done
+  done
 }
 
 echo "Deploy worker node"
diff --git a/tools/deployment/azure/31_create_configs.sh b/tools/deployment/azure/31_create_configs.sh
index 46ab093a6..3f92ec8a9 100755
--- a/tools/deployment/azure/31_create_configs.sh
+++ b/tools/deployment/azure/31_create_configs.sh
@@ -36,7 +36,7 @@ export SYSTEM_REBOOT_DELAY=30
 export AIRSHIP_CONFIG_PRIMARY_REPO_BRANCH=${BRANCH:-"master"}
 # the git repo url or local file system path to a cloned repo, e.g., /home/stack/airshipctl
 export AIRSHIP_CONFIG_PRIMARY_REPO_URL=${REPO:-"https://review.opendev.org/airship/airshipctl"}
-export AIRSHIP_SITE_NAME="airshipctl/manifests/site/az-test-site"
+export SITE="airshipctl/manifests/site/az-test-site"
 export AIRSHIP_CONFIG_MANIFEST_DIRECTORY=${remote_work_dir}
 export AIRSHIP_CONFIG_CA_DATA=$(cat tools/deployment/certificates/airship_config_ca_data| base64 -w0)
 export AIRSHIP_CONFIG_EPHEMERAL_IP=${IP_Ephemeral:-"10.23.25.101"}
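
For context, a minimal usage sketch (not part of the commit): after this change the deployment scripts read the site from the SITE environment variable (previously SITE_NAME or AIRSHIP_SITE_NAME), and 34_deploy_worker_node.sh iterates over a space-separated WORKER_NODE list, which is what enables baremetal multi-node deployment. The two-node list below is illustrative and assumes a cloned airshipctl tree with the control plane deployment preceding the worker step.

# Hypothetical invocation; "node03 node04" is an example, the default is "node03".
export SITE="test-site"             # replaces the former SITE_NAME variable
export WORKER_NODE="node03 node04"  # space-separated worker node names
./tools/deployment/30_deploy_controlplane.sh
./tools/deployment/34_deploy_worker_node.sh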