Cleanup CAPI deployment script

* Removed the duplicate script for CAPI ephemeral node deployment
* Updated the generic script to support all providers
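The consolidation hinges on a single provider check in the generic script, visible in the hunk below. A minimal sketch of the pattern (the non-metal3 branch is truncated in this diff, so its body here is an assumption):

if [ "$PROVIDER" = "metal3" ]; then
    # metal3 needs networking (calico) and the baremetal operator
    # deployed before cluster-api can be initialized
    airshipctl phase run initinfra-networking-ephemeral --debug
    airshipctl phase run initinfra-ephemeral --debug
    airshipctl phase run clusterctl-init-ephemeral --debug
else
    # assumed: other providers only need the cluster-api components
    airshipctl phase run clusterctl-init-ephemeral --debug
fi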

Change-Id: Icc1bed5c1b62662109b43ec94ee2fdb5de6de09b
Author: Sirajudeen 2021-01-27 16:33:00 +00:00
parent 71e038a5c5
commit 00130a9b53
5 changed files with 16 additions and 45 deletions

View File

@@ -210,7 +210,7 @@ Make sure the following conditions are met:
6. sudo -E ./tools/deployment/23_pull_documents.sh
7. sudo -E ./tools/deployment/24_build_ephemeral_iso.sh
8. sudo -E ./tools/deployment/25_deploy_ephemeral_node.sh
-9. sudo -E ./tools/deployment/26_deploy_metal3_capi_ephemeral_node.sh
+9. sudo -E ./tools/deployment/26_deploy_capi_ephemeral_node.sh
10. sudo -E ./tools/deployment/30_deploy_controlplane.sh
11. sudo -E ./tools/deployment/31_deploy_initinfra_target_node.sh
12. sudo -E ./tools/deployment/32_cluster_init_target_node.sh
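Step 9 is driven by environment variables; the defaults below are taken from the script's own hunk further down and can be overridden before invocation (PROVIDER=default selects the non-metal3 path used by the docker test site):

export PROVIDER=${PROVIDER:-"metal3"}
export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
export KUBECONFIG_EPHEMERAL_CONTEXT=${KUBECONFIG_EPHEMERAL_CONTEXT:-"ephemeral-cluster"}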

View File

@@ -25,7 +25,7 @@
- ./tools/deployment/23_pull_documents.sh
- ./tools/deployment/24_build_images.sh
- ./tools/deployment/25_deploy_ephemeral_node.sh
-- ./tools/deployment/26_deploy_metal3_capi_ephemeral_node.sh
+- ./tools/deployment/26_deploy_capi_ephemeral_node.sh
- ./tools/deployment/30_deploy_controlplane.sh
- ./tools/deployment/31_deploy_initinfra_target_node.sh
- ./tools/deployment/32_cluster_init_target_node.sh

View File

@@ -17,14 +17,25 @@ export PROVIDER=${PROVIDER:-"metal3"}
export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
export KUBECONFIG_EPHEMERAL_CONTEXT=${KUBECONFIG_EPHEMERAL_CONTEXT:-"ephemeral-cluster"}
airshipctl document pull -n --debug
if [ "$PROVIDER" = "metal3" ]; then
echo "Deploy calico using tigera operator"
airshipctl phase run initinfra-networking-ephemeral --debug
echo "Wait for Calico to be deployed using tigera"
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=1000s
# Skipping this check due to a race condition until a work-around is identified.
#echo "Wait for Tigerastatus to be Available"
#kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT wait --for=condition=Available tigerastatus --all --timeout=1000s -A
echo "Deploy metal3.io components to ephemeral node"
airshipctl phase run initinfra-ephemeral --debug
echo "Getting metal3 pods as debug information"
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT --namespace ${PROVIDER} get pods
echo "Deploy cluster-api components to ephemeral node"
airshipctl phase run clusterctl-init-ephemeral --debug
else
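The hunk cuts off at the else. Assuming the non-metal3 branch reuses the cluster-api bootstrap that the deleted script (below) performed for metal3, the remainder of the script plausibly reads:

else
    echo "Deploy cluster-api components to ephemeral node"
    airshipctl phase run clusterctl-init-ephemeral --debug
fi
echo "Waiting for clusterapi pods to come up"
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT wait --for=condition=available deploy --all --timeout=1000s -A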

View File

@@ -1,41 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xe
export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
export KUBECONFIG_EPHEMERAL_CONTEXT=${KUBECONFIG_EPHEMERAL_CONTEXT:-"ephemeral-cluster"}
echo "Deploy calico using tigera operator"
airshipctl phase run initinfra-networking-ephemeral --debug
echo "Wait for Calico to be deployed using tigera"
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=1000s
# Skipping this check due a race condition till a work-around is identified.
#echo "Wait for Tigerastatus to be Available"
#kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT wait --for=condition=Available tigerastatus --all --timeout=1000s -A
echo "Deploy metal3.io components to ephemeral node"
airshipctl phase run initinfra-ephemeral --debug
echo "Getting metal3 pods as debug information"
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT --namespace metal3 get pods
echo "Deploy cluster-api components to ephemeral node"
airshipctl phase run clusterctl-init-ephemeral --debug
echo "Waiting for clusterapi pods to come up"
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT wait --for=condition=available deploy --all --timeout=1000s -A
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT get pods --all-namespaces
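The blanket wait above blocks until every deployment in every namespace reports Available. To wait on a single cluster-api component instead, the same flag can target a named deployment; the deployment and namespace names here are the upstream clusterctl defaults, not taken from this repo:

kubectl --kubeconfig "$KUBECONFIG" --context "$KUBECONFIG_EPHEMERAL_CONTEXT" \
    wait --for=condition=Available deploy/capi-controller-manager \
    -n capi-system --timeout=1000s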

View File

@@ -131,7 +131,7 @@
- ./tools/deployment/23_pull_documents.sh
- ./tools/deployment/24_build_images.sh
- ./tools/deployment/25_deploy_ephemeral_node.sh
-- ./tools/deployment/26_deploy_metal3_capi_ephemeral_node.sh
+- ./tools/deployment/26_deploy_capi_ephemeral_node.sh
- ./tools/deployment/30_deploy_controlplane.sh
- ./tools/deployment/31_deploy_initinfra_target_node.sh
- ./tools/deployment/32_cluster_init_target_node.sh
@@ -181,6 +181,7 @@
- ./tools/deployment/provider_common/04_install_yq.sh
- CLUSTER=ephemeral-cluster KIND_CONFIG=./tools/deployment/templates/kind-cluster-with-extramounts ./tools/document/start_kind.sh
- AIRSHIP_CONFIG_METADATA_PATH=manifests/site/docker-test-site/metadata.yaml SITE=docker-test-site EXTERNAL_KUBECONFIG="true" ./tools/deployment/22_test_configs.sh
- ./tools/deployment/23_pull_documents.sh
- PROVIDER=default SITE=docker-test-site ./tools/deployment/26_deploy_capi_ephemeral_node.sh
- CONTROLPLANE_COUNT=1 SITE=docker-test-site ./tools/deployment/provider_common/30_deploy_controlplane.sh
- KUBECONFIG=/tmp/target-cluster.kubeconfig ./tools/deployment/provider_common/32_cluster_init_target_node.sh
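
Mirroring the job steps above, the docker-provider flow can be reproduced locally; this is a sketch assuming a kind cluster and airshipctl config are already in place, with the PROVIDER=default override steering the generic script past the metal3-only phases:

# pull documents, then bootstrap cluster-api on the ephemeral kind cluster
./tools/deployment/23_pull_documents.sh
PROVIDER=default SITE=docker-test-site ./tools/deployment/26_deploy_capi_ephemeral_node.sh
# stand up the target control plane and initialize it
CONTROLPLANE_COUNT=1 SITE=docker-test-site ./tools/deployment/provider_common/30_deploy_controlplane.sh
KUBECONFIG=/tmp/target-cluster.kubeconfig ./tools/deployment/provider_common/32_cluster_init_target_node.sh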