Fix CAPD deployment
This commit fixes the CAPD deployment and removes redundant scripts that check certificate expiration for the CAPD site; those scripts must be tested separately, outside the CAPD pipeline.

Related-To: #482
Closes: #482
Change-Id: I60ffd76a4f3f08bd7bd198a0c2b15483dfbdd6a6
parent f24bf00d17
commit 3fbf865048
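For reference, the dropped checks can still be exercised by hand against a deployed CAPD site. A minimal sketch, mirroring the pipeline entries removed below; the kubeconfig path is an assumption (it is the path the old pipeline used) and should point at the deployed target cluster:

    # Assumption: /tmp/target-cluster.kubeconfig reaches the target cluster
    KUBECONFIG=/tmp/target-cluster.kubeconfig ./tools/deployment/provider_common/41_check_certificate_expiration.sh
    KUBECONFIG=/tmp/target-cluster.kubeconfig ./tools/deployment/provider_common/42_rotate_sa_token.sh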
@@ -13,58 +13,34 @@
# limitations under the License.

# Example Usage
# CONTROLPLANE_COUNT=1 \
# SITE=docker-test-site \
# ./tools/deployment/provider_common/30_deploy_controlplane.sh

export AIRSHIP_SRC=${AIRSHIP_SRC:-"/tmp/airship"}
set -xe

export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
export CONTROLPLANE_COUNT=${CONTROLPLANE_COUNT:-"1"}
export SITE=${SITE:-"docker-test-site"}
export TARGET_CLUSTER_NAME=${TARGET_CLUSTER_NAME:-"target-cluster"}

# Adjust Control Plane Count (default 1)
# No. of control plane can be changed using
# CONTROLPLANE_COUNT=<replicas> tools/deployment/docker/30_deploy_controlplane.sh

sed -i "/value.*/s//value\": $CONTROLPLANE_COUNT }/g" \
${AIRSHIP_SRC}/airshipctl/manifests/site/${SITE}/ephemeral/controlplane/machine_count.json
export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
export KUBECONFIG_EPHEMERAL_CONTEXT=${KUBECONFIG_EPHEMERAL_CONTEXT:-"ephemeral-cluster"}

echo "create control plane"
airshipctl phase run controlplane-ephemeral --debug --kubeconfig ${KUBECONFIG} --wait-timeout 1000s
airshipctl phase run controlplane-ephemeral --debug --wait-timeout 1000s

TARGET_KUBECONFIG=""
TARGET_KUBECONFIG=$(kubectl --kubeconfig "${KUBECONFIG}" --namespace=default get secret/"${TARGET_CLUSTER_NAME}"-kubeconfig -o jsonpath={.data.value} || true)
airshipctl cluster get-kubeconfig > ~/.airship/kubeconfig-tmp

if [[ -z "$TARGET_KUBECONFIG" ]]; then
echo "Error: Could not get kubeconfig from secret."
exit 1
fi

echo "Generate kubeconfig"
echo ${TARGET_KUBECONFIG} | base64 -d > /tmp/${TARGET_CLUSTER_NAME}.kubeconfig
echo "Generate kubeconfig: /tmp/${TARGET_CLUSTER_NAME}.kubeconfig"

echo "add context target-cluster"
kubectl config set-context ${TARGET_CLUSTER_NAME} --user ${TARGET_CLUSTER_NAME}-admin --cluster ${TARGET_CLUSTER_NAME} \
--kubeconfig "/tmp/${TARGET_CLUSTER_NAME}.kubeconfig"
mv ~/.airship/kubeconfig-tmp "${KUBECONFIG}"

echo "apply cni as a part of initinfra-networking"
airshipctl phase run initinfra-networking-target --debug --kubeconfig "/tmp/${TARGET_CLUSTER_NAME}.kubeconfig"
airshipctl phase run initinfra-networking-target --debug

echo "Check nodes status"
kubectl --kubeconfig /tmp/"${TARGET_CLUSTER_NAME}".kubeconfig wait --for=condition=Ready nodes --all --timeout 4000s
kubectl get nodes --kubeconfig /tmp/"${TARGET_CLUSTER_NAME}".kubeconfig
kubectl --kubeconfig "${KUBECONFIG}" --context "${KUBECONFIG_TARGET_CONTEXT}" wait --for=condition=Ready nodes --all --timeout 4000s
kubectl get nodes --kubeconfig "${KUBECONFIG}" --context "${KUBECONFIG_TARGET_CONTEXT}"

echo "Waiting for pods to come up"
kubectl --kubeconfig /tmp/${TARGET_CLUSTER_NAME}.kubeconfig wait --for=condition=ready pods --all --timeout=4000s -A
kubectl --kubeconfig /tmp/${TARGET_CLUSTER_NAME}.kubeconfig get pods -A
kubectl --kubeconfig "${KUBECONFIG}" --context "${KUBECONFIG_TARGET_CONTEXT}" wait --for=condition=ready pods --all --timeout=4000s -A
kubectl --kubeconfig "${KUBECONFIG}" --context "${KUBECONFIG_TARGET_CONTEXT}" get pods -A

echo "Check machine status"
kubectl get machines --kubeconfig ${KUBECONFIG}
kubectl get machines --kubeconfig ${KUBECONFIG} --context "${KUBECONFIG_EPHEMERAL_CONTEXT}"

echo "Get cluster state for target workload cluster"
kubectl --kubeconfig ${KUBECONFIG} get cluster

echo "Target Cluster Kubeconfig"
echo "/tmp/${TARGET_CLUSTER_NAME}.kubeconfig"
kubectl --kubeconfig ${KUBECONFIG} --context "${KUBECONFIG_EPHEMERAL_CONTEXT}" get cluster
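With the change above, the target cluster's credentials are written by airshipctl cluster get-kubeconfig into the default KUBECONFIG ($HOME/.airship/kubeconfig) instead of being extracted into /tmp/target-cluster.kubeconfig, so ad-hoc checks now select a cluster by context. A short usage sketch, assuming the default paths and context names exported in this script:

    # Assumes the default KUBECONFIG and the context names set above
    kubectl --kubeconfig "$HOME/.airship/kubeconfig" --context target-cluster get nodes
    kubectl --kubeconfig "$HOME/.airship/kubeconfig" --context ephemeral-cluster get machines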
@@ -22,17 +22,17 @@ export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}

# Get control plane node
CONTROL_PLANE_NODES=( $(kubectl --kubeconfig $KUBECONFIG get --no-headers=true nodes \
CONTROL_PLANE_NODES=( $(kubectl --context $KUBECONFIG_TARGET_CONTEXT --kubeconfig $KUBECONFIG get --no-headers=true nodes \
| grep cluster-control-plane | awk '{print $1}') )

# Remove noschedule taint to prevent cluster init from timing out
for i in "${CONTROL_PLANE_NODES}"; do
echo untainting node $i
kubectl taint node $i node-role.kubernetes.io/master- --kubeconfig $KUBECONFIG --request-timeout 10s
kubectl taint node $i node-role.kubernetes.io/master- --context $KUBECONFIG_TARGET_CONTEXT --kubeconfig $KUBECONFIG --request-timeout 10s
done

echo "Deploy CAPI components to target cluster"
airshipctl phase run clusterctl-init-target --debug --kubeconfig "$KUBECONFIG"
airshipctl phase run clusterctl-init-target --debug

echo "Waiting for pods to be ready"
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=600s
@@ -18,18 +18,18 @@ set -xe
export TIMEOUT=${TIMEOUT:-3600}
export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
export TARGET_KUBECONFIG="/tmp/target-cluster.kubeconfig"
export KUBECONFIG_EPHEMERAL_CONTEXT=${KUBECONFIG_EPHEMERAL_CONTEXT:-"ephemeral-cluster"}

echo "Waiting for machines to come up"
kubectl --kubeconfig ${KUBECONFIG} wait --for=condition=Ready machines --all --timeout 4000s
kubectl --kubeconfig ${KUBECONFIG} --context $KUBECONFIG_EPHEMERAL_CONTEXT wait --for=condition=Ready machines --all --timeout 4000s

#add wait condition
end=$(($(date +%s) + $TIMEOUT))
echo "Waiting $TIMEOUT seconds for Machine to be Running."
while true; do
if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG get machines -o json | jq '.items[0].status.phase' | grep -q "Running") ; then
if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT get machines -o json | jq '.items[0].status.phase' | grep -q "Running") ; then
echo -e "\nMachine is Running"
kubectl --kubeconfig $KUBECONFIG get machines
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT get machines
break
else
now=$(date +%s)
@@ -42,22 +42,20 @@ while true; do
fi
done

echo "Move Cluster Object to Target Cluster"
KUBECONFIG=$KUBECONFIG:$TARGET_KUBECONFIG kubectl config view --merge --flatten > "/tmp/merged_target_ephemeral.kubeconfig"
airshipctl phase run clusterctl-move --kubeconfig "/tmp/merged_target_ephemeral.kubeconfig"
airshipctl phase run clusterctl-move

echo "Waiting for pods to be ready"
kubectl --kubeconfig $TARGET_KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=3000s
kubectl --kubeconfig $TARGET_KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get pods --all-namespaces
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=3000s
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get pods --all-namespaces

#Wait till crds are created
end=$(($(date +%s) + $TIMEOUT))
echo "Waiting $TIMEOUT seconds for crds to be created."
while true; do
if (kubectl --request-timeout 20s --kubeconfig $TARGET_KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get cluster target-cluster -o json | jq '.status.controlPlaneReady' | grep -q true) ; then
if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get cluster target-cluster -o json | jq '.status.controlPlaneReady' | grep -q true) ; then
echo -e "\nGet CRD status"
kubectl --kubeconfig $TARGET_KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get machines
kubectl --kubeconfig $TARGET_KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get clusters
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get machines
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get clusters
break
else
now=$(date +%s)
@@ -19,21 +19,13 @@ set -xe
# WORKERS_COUNT=3 ./tools/deployment/provider_common/34_deploy_worker_node.sh

export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
export TARGET_KUBECONFIG=${TARGET_KUBECONFIG:-"/tmp/target-cluster.kubeconfig"}
export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
export SITE=${SITE:-"docker-test-site"}
export WORKERS_COUNT=${WORKERS_COUNT:-"1"}
export AIRSHIP_SRC=${AIRSHIP_SRC:-"/tmp/airship"}

# Adjust workers replicas, default - 1
sed -i "/value.*/s//value\": $WORKERS_COUNT }/g" \
${AIRSHIP_SRC}/airshipctl/manifests/site/${SITE}/target/workers/machine_count.json

echo "Stop/Delete ephemeral node"
kind delete cluster --name "ephemeral-cluster"

echo "Deploy worker node"
airshipctl phase run workers-target --debug --kubeconfig "$TARGET_KUBECONFIG"
airshipctl phase run workers-target --debug

#Wait till node is created
kubectl wait --for=condition=ready node --all --timeout=1000s --context $KUBECONFIG_TARGET_CONTEXT --kubeconfig $TARGET_KUBECONFIG -A
kubectl wait --for=condition=ready node --all --timeout=1000s --context $KUBECONFIG_TARGET_CONTEXT --kubeconfig $KUBECONFIG -A
@@ -187,12 +187,10 @@
- AIRSHIP_CONFIG_METADATA_PATH=manifests/site/docker-test-site/metadata.yaml SITE=docker-test-site EXTERNAL_KUBECONFIG="true" ./tools/deployment/22_test_configs.sh
- ./tools/deployment/23_pull_documents.sh
- PROVIDER=default SITE=docker-test-site ./tools/deployment/26_deploy_capi_ephemeral_node.sh
- CONTROLPLANE_COUNT=1 SITE=docker-test-site ./tools/deployment/provider_common/30_deploy_controlplane.sh
- KUBECONFIG=/tmp/target-cluster.kubeconfig ./tools/deployment/provider_common/32_cluster_init_target_node.sh
- ./tools/deployment/provider_common/30_deploy_controlplane.sh
- ./tools/deployment/provider_common/32_cluster_init_target_node.sh
- ./tools/deployment/provider_common/33_cluster_move_target_node.sh
- WORKERS_COUNT=2 KUBECONFIG=/tmp/target-cluster.kubeconfig SITE=docker-test-site ./tools/deployment/provider_common/34_deploy_worker_node.sh
- KUBECONFIG=/tmp/target-cluster.kubeconfig ./tools/deployment/provider_common/41_check_certificate_expiration.sh
- KUBECONFIG=/tmp/target-cluster.kubeconfig ./tools/deployment/provider_common/42_rotate_sa_token.sh
- ./tools/deployment/provider_common/34_deploy_worker_node.sh
voting: false
- job:
name: airship-airshipctl-docker-kubebench-conformance
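The pipeline entries above now rely on the scripts' own defaults (CONTROLPLANE_COUNT=1, WORKERS_COUNT=1, SITE=docker-test-site, KUBECONFIG=$HOME/.airship/kubeconfig). When running the steps by hand, the counts can still be overridden as the usage comments in the scripts describe, for example:

    # Override replica counts when invoking the scripts directly
    CONTROLPLANE_COUNT=1 SITE=docker-test-site ./tools/deployment/provider_common/30_deploy_controlplane.sh
    WORKERS_COUNT=3 ./tools/deployment/provider_common/34_deploy_worker_node.sh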