diff --git a/manifests/function/phase-helpers/wait_tigera/kubectl_wait_tigera.sh b/manifests/function/phase-helpers/wait_tigera/kubectl_wait_tigera.sh
index da20d6390..b9b42ef63 100644
--- a/manifests/function/phase-helpers/wait_tigera/kubectl_wait_tigera.sh
+++ b/manifests/function/phase-helpers/wait_tigera/kubectl_wait_tigera.sh
@@ -14,23 +14,16 @@
 
 set -xe
 
-echo "Wait for Calico to be deployed using tigera" 1>&2
-kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=1000s 1>&2
+export TIMEOUT=${TIMEOUT:-1000}
 
-echo "Wait for Established condition of tigerastatus(CRD) to be true for tigerastatus(CR) to show up" 1>&2
-kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT wait --for=condition=Established crd/tigerastatuses.operator.tigera.io --timeout=300s 1>&2
+echo "Wait $TIMEOUT seconds for tigera status to be in Available state." 1>&2
+end=$(($(date +%s) + $TIMEOUT))
 
-# Wait till CR(tigerastatus) shows up to query
-count=0
-max_retry_attempts=150
-until [ "$(kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT get tigerastatus 2>/dev/null)" ]; do
-  count=$((count + 1))
-  if [[ ${count} -eq "${max_retry_attempts}" ]]; then
-    echo 'Timed out waiting for tigerastatus' 1>&2
+until [ "$(kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT wait --for=condition=Available --all tigerastatus 2>/dev/null)" ]; do
+  now=$(date +%s)
+  if [ $now -gt $end ]; then
+    echo "Tigera status is not ready before TIMEOUT=$TIMEOUT" 1>&2
     exit 1
   fi
-  sleep 2
+  sleep 10
 done
-
-# Wait till condition is available for tigerastatus
-kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT wait --for=condition=Available tigerastatus --all --timeout=1000s 1>&2
diff --git a/manifests/phases/executors.yaml b/manifests/phases/executors.yaml
index f2f3ee619..06e764b61 100644
--- a/manifests/phases/executors.yaml
+++ b/manifests/phases/executors.yaml
@@ -23,11 +23,23 @@ config:
   pruneOptions:
     prune: false
 ---
+apiVersion: airshipit.org/v1alpha1
+kind: KubernetesApply
+metadata:
+  labels:
+    airshipit.org/deploy-k8s: "false"
+  name: kubernetes-apply-networking
+config:
+  waitOptions:
+    timeout: 1000
+  pruneOptions:
+    prune: false
+---
 # This is added to support phase with no-wait
 # When there is a wait, then it does status-check and fails
 # if the resource status(condition) is not met.
 # There are cases where the resource do not have status
-# field implemeneted. So a wait will fail with status check
+# field implemented. So a wait will fail with status check
 apiVersion: airshipit.org/v1alpha1
 kind: KubernetesApply
 metadata:
diff --git a/manifests/phases/phases.yaml b/manifests/phases/phases.yaml
index 693f0e046..87f83246e 100644
--- a/manifests/phases/phases.yaml
+++ b/manifests/phases/phases.yaml
@@ -19,7 +19,7 @@ config:
   executorRef:
     apiVersion: airshipit.org/v1alpha1
     kind: KubernetesApply
-    name: kubernetes-apply-nowait
+    name: kubernetes-apply-networking
   documentEntryPoint: ephemeral/initinfra-networking
 ---
 apiVersion: airshipit.org/v1alpha1
@@ -57,7 +57,7 @@ config:
   executorRef:
     apiVersion: airshipit.org/v1alpha1
     kind: KubernetesApply
-    name: kubernetes-apply-nowait
+    name: kubernetes-apply-networking
   documentEntryPoint: target/initinfra-networking
 ---
 apiVersion: airshipit.org/v1alpha1
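
Note on the new wait logic: the patch replaces a fixed-count retry loop (150 attempts x 2s sleep) plus separate kubectl wait calls with a single wall-clock deadline loop, so the total wait is bounded by one tunable TIMEOUT. Below is a minimal standalone sketch of that deadline-polling pattern; the wait_for helper and the usage example are illustrative only and are not part of the patch, which inlines the loop directly around kubectl wait.

#!/bin/bash
# Sketch of the deadline-polling pattern used by kubectl_wait_tigera.sh.
# The wait_for helper is hypothetical; the real script inlines this logic.
set -xe

wait_for() {
  # $1: timeout in seconds; remaining args: the probe command to retry.
  local timeout=$1; shift
  local end=$(($(date +%s) + timeout))
  # Retry until the probe prints output (success) or the deadline passes.
  until [ "$("$@" 2>/dev/null)" ]; do
    if [ "$(date +%s)" -gt "$end" ]; then
      echo "Timed out after ${timeout}s waiting for: $*" 1>&2
      return 1
    fi
    sleep 10
  done
}

# Example usage (hypothetical): wait up to TIMEOUT seconds for all
# tigerastatus resources to report the Available condition.
wait_for "${TIMEOUT:-1000}" \
  kubectl --kubeconfig "$KUBECONFIG" --context "$KCTL_CONTEXT" \
  wait --for=condition=Available --all tigerastatus

Checking elapsed wall-clock time rather than counting iterations keeps the bound accurate even when the probe command itself is slow, and it tolerates the window before any tigerastatus resource exists, since kubectl wait simply prints nothing and the loop retries.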