Deploy Calico via function.

* Deploy Calico to ephemeral and target via function
* Removed the node-ready status check from
  25_deploy_ephemeral_node.sh and 30_deploy_controlplane.sh.
  Since Calico is applied at a later stage, the requirement to wait
  for node-ready status has been relaxed.
* Updated composite/infra with the Calico manifest

Change-Id: Id582aec3ca402eac02d9baa2305b562c410c1bea
Relates-To: #303
Relates-To: #304
changes/63/743763/40
Sirajudeen 2 years ago
parent 66a5dbd574
commit b2c059087e
  1. 8
      manifests/composite/infra/kustomization.yaml
  2. 1
      manifests/function/ephemeral/secret.yaml
  3. 2
      manifests/function/k8scontrol/controlplane.yaml
  4. 1
      manifests/site/test-site/ephemeral/initinfra/kustomization.yaml
  5. 1
      manifests/site/test-site/target/initinfra/kustomization.yaml
  6. 38
      tools/deployment/25_deploy_ephemeral_node.sh
  7. 4
      tools/deployment/30_deploy_controlplane.sh
  8. 5
      tools/deployment/31_deploy_initinfra_target_node.sh

@ -1,10 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../function/baremetal-operator
- ../../function/capm3
- ../../function/capi
- ../../function/cabpk
- ../../function/cacpk
commonLabels:
airshipit.org/stage: initinfra
- ../../function/cni/calico/v3.15

@ -40,7 +40,6 @@ stringData:
- apt install -y kubelet=1.18.6-00 kubeadm=1.18.6-00 kubectl=1.18.6-00
- apt-mark hold docker-ce docker-ce-cli containerd.io kubelet kubeadm kubectl
- kubeadm init --config /tmp/kubeadm.yaml
- kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f https://docs.projectcalico.org/v3.15/manifests/calico.yaml
- mkdir -p /opt/metal3-dev-env/ironic/html/images
write_files:
- content: |

@ -41,8 +41,6 @@ spec:
name: '{{ ds.meta_data.local_hostname }}'
kubeletExtraArgs:
node-labels: 'metal3.io/uuid={{ ds.meta_data.uuid }}'
postKubeadmCommands:
- kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f https://docs.projectcalico.org/v3.15/manifests/calico.yaml
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: Metal3MachineTemplate

@ -1,4 +1,5 @@
resources:
- ../../../../composite/infra
- ../../shared/clusterctl
- ../../../../function/baremetal-operator
patchesStrategicMerge:

@ -1,4 +1,5 @@
resources:
- ../../../../composite/infra
- ../../shared/clusterctl
- ../../../../function/baremetal-operator
patchesStrategicMerge:

@ -21,21 +21,25 @@ export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
echo "Deploy ephemeral node using redfish with iso"
airshipctl baremetal remotedirect --debug
#Wait till ephemeral node is ready
end=$(($(date +%s) + $TIMEOUT))
echo "Waiting $TIMEOUT seconds for ephemeral node to be ready."
while true; do
if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG get nodes ephemeral -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' | grep -q True) ; then
echo -e "\nEphemeral node is ready."
kubectl --request-timeout 20s --kubeconfig $KUBECONFIG get nodes
break
else
now=$(date +%s)
if [ $now -gt $end ]; then
echo -e "\nEphemeral node was not ready before TIMEOUT."
exit 1
fi
echo -n .
sleep 15
fi
# Poll the target apiserver until it responds, retrying up to
# MAX_RETRY times with DELAY seconds between attempts.
echo "Wait for apiserver to become available"
N=0
MAX_RETRY=30
DELAY=60
until [ "$N" -ge "${MAX_RETRY}" ]
do
  # Bound each probe with a 20s timeout so a hung apiserver
  # cannot stall the loop; success breaks out early.
  if timeout 20 kubectl --kubeconfig "${KUBECONFIG}" get node; then
    break
  fi
  N=$((N+1))
  echo "$N: Retrying to reach the apiserver"
  sleep "${DELAY}"
done
# Exhausting all retries means the apiserver never came up — fail the job.
if [ "$N" -ge "${MAX_RETRY}" ]; then
  echo "Could not reach the apiserver"
  exit 1
fi
echo "List all pods"
kubectl --kubeconfig "${KUBECONFIG}" get pods --all-namespaces

@ -98,8 +98,8 @@ if [ "$N" -ge ${MAX_RETRY} ]; then
exit 1
fi
# Block until every node in the target cluster reports Ready; the 900s
# timeout allows time for Calico (applied earlier via initinfra) to
# bring networking up.
echo "Wait for nodes to become Ready"
kubectl --kubeconfig /tmp/targetkubeconfig wait --for=condition=Ready node --all --timeout 900s
echo "List all pods"
kubectl --kubeconfig /tmp/targetkubeconfig get pods --all-namespaces
# Cluster objects live on the ephemeral cluster, hence the other kubeconfig.
echo "Get cluster state"
kubectl --kubeconfig "${HOME}/.airship/kubeconfig" get cluster

@ -21,14 +21,11 @@ echo "Switch context to target cluster and set manifest"
# Point airshipctl at the freshly provisioned target cluster.
airshipctl config use-context target-cluster-admin@target-cluster
airshipctl config set-context target-cluster-admin@target-cluster --manifest dummy_manifest
# Control-plane pods must be Ready before workloads can be scheduled.
echo "Waiting for all control pods to come up"
kubectl --kubeconfig "${KUBECONFIG}" wait -n kube-system --for=condition=Ready pods --selector tier=control-plane --timeout=600s
kubectl --kubeconfig "${KUBECONFIG}" --namespace metal3 get pods --selector tier=control-plane "--output=jsonpath={.items[*].metadata.name}"
# TODO remove taint
kubectl --kubeconfig "${KUBECONFIG}" taint node node01 node-role.kubernetes.io/master-
# Apply the initinfra phase (includes the Calico manifest via composite/infra).
echo "Deploy infra to cluster"
airshipctl phase apply initinfra --debug --wait-timeout 1000s
echo "List all pods"
kubectl --kubeconfig "${KUBECONFIG}" get pods --all-namespaces

Loading…
Cancel
Save