Use executor with timeout for initinfra-networking phases

The initinfra-networking phases should use an executor with a timeout
so that all resources are properly reconciled during the apply
process. The wait logic in the corresponding phase helper was
modified and simplified accordingly.

Change-Id: Ia4d57462c619fe4cc4ec3db86bb372374c1ff0d4
Signed-off-by: Ruslan Aliev <raliev@mirantis.com>
Relates-To: #595
Closes: #595
Ruslan Aliev committed on 2021-06-28 17:20:13 -05:00
parent df55f50cb6, commit f38b38d705
3 changed files with 23 additions and 18 deletions
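
The net effect of the change: the initinfra-networking phases now reference a dedicated KubernetesApply executor whose waitOptions carry an explicit timeout, so the executor itself waits for resource reconciliation instead of the phase-helper script polling pods. Below is a minimal sketch of the resulting wiring; the executor name, timeout, and document entry point are taken from the diff below, while the phase name shown here is illustrative only.

apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
metadata:
  name: kubernetes-apply-networking
config:
  waitOptions:
    timeout: 1000  # seconds the executor waits for applied resources to reconcile
  pruneOptions:
    prune: false
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
  name: initinfra-networking-target  # illustrative phase name
config:
  executorRef:
    apiVersion: airshipit.org/v1alpha1
    kind: KubernetesApply
    name: kubernetes-apply-networking
  documentEntryPoint: target/initinfra-networking

With this wiring, running the phase (for example, airshipctl phase run <phase-name>) applies the documents and blocks until they reach a ready state or the configured 1000-second timeout expires.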


@@ -14,23 +14,16 @@
 set -xe
 echo "Wait for Calico to be deployed using tigera" 1>&2
-kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=1000s 1>&2
+export TIMEOUT=${TIMEOUT:-1000}
 echo "Wait for Established condition of tigerastatus(CRD) to be true for tigerastatus(CR) to show up" 1>&2
 kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT wait --for=condition=Established crd/tigerastatuses.operator.tigera.io --timeout=300s 1>&2
+echo "Wait $TIMEOUT seconds for tigera status to be in Available state." 1>&2
+end=$(($(date +%s) + $TIMEOUT))
-# Wait till CR(tigerastatus) shows up to query
-count=0
-max_retry_attempts=150
-until [ "$(kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT get tigerastatus 2>/dev/null)" ]; do
-  count=$((count + 1))
-  if [[ ${count} -eq "${max_retry_attempts}" ]]; then
-    echo 'Timed out waiting for tigerastatus' 1>&2
+until [ "$(kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT wait --for=condition=Available --all tigerastatus 2>/dev/null)" ]; do
+  now=$(date +%s)
+  if [ $now -gt $end ]; then
+    echo "Tigera status is not ready before TIMEOUT=$TIMEOUT" 1>&2
     exit 1
   fi
-  sleep 2
+  sleep 10
 done
-# Wait till condition is available for tigerastatus
-kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT wait --for=condition=Available tigerastatus --all --timeout=1000s 1>&2


@@ -23,11 +23,23 @@ config:
   pruneOptions:
     prune: false
 ---
+apiVersion: airshipit.org/v1alpha1
+kind: KubernetesApply
+metadata:
+  labels:
+    airshipit.org/deploy-k8s: "false"
+  name: kubernetes-apply-networking
+config:
+  waitOptions:
+    timeout: 1000
+  pruneOptions:
+    prune: false
+---
 # This is added to support phase with no-wait
 # When there is a wait, then it does status-check and fails
 # if the resource status(condition) is not met.
 # There are cases where the resource do not have status
-# field implemeneted. So a wait will fail with status check
+# field implemented. So a wait will fail with status check
 apiVersion: airshipit.org/v1alpha1
 kind: KubernetesApply
 metadata:


@@ -19,7 +19,7 @@ config:
   executorRef:
     apiVersion: airshipit.org/v1alpha1
     kind: KubernetesApply
-    name: kubernetes-apply-nowait
+    name: kubernetes-apply-networking
   documentEntryPoint: ephemeral/initinfra-networking
 ---
 apiVersion: airshipit.org/v1alpha1
@@ -57,7 +57,7 @@ config:
   executorRef:
     apiVersion: airshipit.org/v1alpha1
     kind: KubernetesApply
-    name: kubernetes-apply-nowait
+    name: kubernetes-apply-networking
   documentEntryPoint: target/initinfra-networking
 ---
 apiVersion: airshipit.org/v1alpha1