Enhanced deployment scripts to support baremetal multi-node deployments

Additionally, aligned Treasuremap and airshipctl to use the same
environment variable "SITE" for the site name.

Change-Id: I50494b50691a40efe68996cd11ccede6517b23d6
Signed-off-by: James Gu <james.gu@att.com>
James Gu 2021-03-19 13:16:46 -07:00
parent edd0b84d84
commit 1342ab708f
5 changed files with 43 additions and 31 deletions
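
The diffs below standardize every script and playbook on a single SITE environment variable. A minimal sketch of the resulting convention (the echo line is illustrative and not part of this change):

# All deployment scripts now read the same variable with the same default,
# so a caller only needs to export SITE once for an entire run.
SITE=${SITE:-test-site}
echo "Deploying site: ${SITE}"   # illustrative consumer of the variable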


@@ -20,4 +20,4 @@
 default_zuul_dir: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}"
 environment:
   AIRSHIP_CONFIG_PHASE_REPO_URL: "{{ remote_work_dir | default(local_src_dir) | default(default_zuul_dir) }}"
-  SITE_NAME: "{{ site | default('test-site') }}"
+  SITE: "{{ site | default('test-site') }}"


@@ -18,7 +18,7 @@ export USER_NAME=${USER:-"ubuntu"}
 IMAGE_DIR=${IMAGE_DIR:-"/srv/images"}
 CLEANUP_SERVE_DIR=${CLEANUP_SERVE_DIR:-"false"}
-SITE_NAME=${SITE_NAME:-test-site}
+SITE=${SITE:-test-site}
 
 # List of phases to run to build images.
 IMAGE_PHASE_PLANS=${IMAGE_PHASE_PLANS:-"iso"}


@@ -20,14 +20,16 @@ export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
 # TODO (dukov) this is needed due to sushy tools inserts cdrom image to
 # all vms. This can be removed once sushy tool is fixed
-echo "Ensure all cdrom images are ejected."
-for vm in $(sudo virsh list --all --name |grep -v ${EPHEMERAL_DOMAIN_NAME})
-do
-  sudo virsh domblklist $vm |
-    awk 'NF==2 {print $1}' |
-    grep -v Target |
-    xargs -I{} sudo virsh change-media $vm {} --eject || :
-done
+if type "virsh" > /dev/null; then
+  echo "Ensure all cdrom images are ejected."
+  for vm in $(sudo virsh list --all --name |grep -v ${EPHEMERAL_DOMAIN_NAME})
+  do
+    sudo virsh domblklist $vm |
+      awk 'NF==2 {print $1}' |
+      grep -v Target |
+      xargs -I{} sudo virsh change-media $vm {} --eject || :
+  done
+fi
 
 echo "Create target k8s cluster resources"
 airshipctl phase run controlplane-ephemeral --debug
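
The new guard above skips the virsh cleanup when libvirt is absent, which is what lets the same script run on baremetal hosts. A standalone sketch of that guard pattern (messages are illustrative; 2>&1 is added here to also silence the shell's "not found" error):

# Run libvirt-specific cleanup only when the virsh binary exists (the
# VM-based CI case); on a baremetal host without libvirt, skip it.
if type "virsh" > /dev/null 2>&1; then
  echo "virsh found: ejecting cdrom images from VMs"
else
  echo "virsh not found: assuming baremetal, nothing to eject"
fi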


@@ -18,17 +18,26 @@ set -e
 export TIMEOUT=${TIMEOUT:-3600}
 export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
 export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
-WORKER_NODE="node03"
-echo "Stop ephemeral node"
-sudo virsh destroy air-ephemeral
+WORKER_NODE=${WORKER_NODE:-"node03"}
+EPHEMERAL_DOMAIN_NAME="air-ephemeral"
+
+# all vms. This can be removed once sushy tool is fixed
+if type "virsh" > /dev/null; then
+  for vm in $(sudo virsh list --all --name --state-running |grep ${EPHEMERAL_DOMAIN_NAME})
+  do
+    echo "Stop ephemeral node '$vm'"
+    sudo virsh destroy $vm
+  done
+fi
 
 node_timeout () {
   end=$(($(date +%s) + $TIMEOUT))
+  for worker in $WORKER_NODE
+  do
   while true; do
-    if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get $1 $WORKER_NODE | grep -qw $2) ; then
+    if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get $1 $worker | grep -qw $2) ; then
       if [ "$1" = "node" ]; then
-        kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT label nodes $WORKER_NODE node-role.kubernetes.io/worker=""
+        kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT label nodes $worker node-role.kubernetes.io/worker=""
       fi
       echo -e "\nGet $1 status"
@@ -44,6 +53,7 @@ node_timeout () {
       sleep 15
     fi
   done
+  done
 }
 
 echo "Deploy worker node"
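
With the extra loop added in the two hunks above, node_timeout now waits on every entry in WORKER_NODE instead of a single hard-coded node. A simplified, hedged reconstruction of the resulting flow (the timeout handling is condensed, the node list is an example, and the real function also labels and prints each resource):

WORKER_NODE=${WORKER_NODE:-"node03 node04"}   # space-separated list; node04 is an example
TIMEOUT=${TIMEOUT:-3600}

node_timeout () {
  end=$(($(date +%s) + $TIMEOUT))
  for worker in $WORKER_NODE
  do
    while true; do
      # $1 = resource kind (e.g. node), $2 = expected status (e.g. Ready)
      if kubectl --request-timeout 20s --kubeconfig "$KUBECONFIG" \
           --context "$KUBECONFIG_TARGET_CONTEXT" get "$1" "$worker" | grep -qw "$2"; then
        break   # this worker reached the expected state; check the next one
      fi
      if [ "$(date +%s)" -gt "$end" ]; then
        echo "Timed out waiting for $1 $worker to report $2"
        return 1
      fi
      sleep 15
    done
  done
}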


@@ -36,7 +36,7 @@ export SYSTEM_REBOOT_DELAY=30
 export AIRSHIP_CONFIG_PRIMARY_REPO_BRANCH=${BRANCH:-"master"}
 # the git repo url or local file system path to a cloned repo, e.g., /home/stack/airshipctl
 export AIRSHIP_CONFIG_PRIMARY_REPO_URL=${REPO:-"https://review.opendev.org/airship/airshipctl"}
-export AIRSHIP_SITE_NAME="airshipctl/manifests/site/az-test-site"
+export SITE="airshipctl/manifests/site/az-test-site"
 export AIRSHIP_CONFIG_MANIFEST_DIRECTORY=${remote_work_dir}
 export AIRSHIP_CONFIG_CA_DATA=$(cat tools/deployment/certificates/airship_config_ca_data| base64 -w0)
 export AIRSHIP_CONFIG_EPHEMERAL_IP=${IP_Ephemeral:-"10.23.25.101"}
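
Because WORKER_NODE and SITE are now plain environment overrides, a multi-node baremetal run can be configured entirely from the shell. A hedged usage sketch (every value and the script name are placeholders, not taken from this change):

export SITE="my-baremetal-site"            # replaces SITE_NAME / AIRSHIP_SITE_NAME
export WORKER_NODE="node03 node04 node05"  # space-separated list of worker hosts
export TIMEOUT=7200                        # give physical hosts more time to provision
./deploy_worker_nodes.sh                   # stand-in name for the actual deployment script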