Merge "Move kubectl calls to KRM toolbox pt.5"

This commit is contained in:
Zuul 2021-05-24 12:42:53 +00:00 committed by Gerrit Code Review
commit 89e81b54a7
8 changed files with 181 additions and 38 deletions

View File

@ -9,3 +9,5 @@ resources:
- wait_cluster
- virsh-eject-cdrom-images
- virsh-destroy-vms
- wait_bmh
- wait_label_node

View File

@ -0,0 +1,46 @@
#!/bin/sh
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Wait until every BareMetalHost named in the rendered bundle reports
# provisioning state "ready", polling every 15 seconds for up to TIMEOUT
# seconds overall; exit 1 if the deadline passes first.
#
# Required environment:
#   KCTL_CONTEXT         - kubectl context to operate against
#   RENDERED_BUNDLE_PATH - file containing the rendered BMH documents
# Optional:
#   TIMEOUT              - total wait budget in seconds (default: 3600)
set -xe
export TIMEOUT=${TIMEOUT:-3600}
# Fail fast with a clear diagnostic instead of an opaque kubectl error.
: "${KCTL_CONTEXT:?KCTL_CONTEXT must be set}"
: "${RENDERED_BUNDLE_PATH:?RENDERED_BUNDLE_PATH must be set}"
# Collect the names of all documents in the rendered bundle.
WORKER_NODE=$(kubectl --context "$KCTL_CONTEXT" \
              get -f "$RENDERED_BUNDLE_PATH" \
              --output jsonpath='{..metadata.name}')
echo "Wait $TIMEOUT seconds for BMH to be in ready state." 1>&2
end=$(($(date +%s) + TIMEOUT))
# WORKER_NODE is a space-separated list; word-splitting here is intentional.
for worker in $WORKER_NODE
do
  while true; do
    # '=' (not '==') is the POSIX test(1) string comparison; '==' is a
    # bashism that fails under dash/ash when the script runs via #!/bin/sh.
    # Quoting the command substitution keeps the test well-formed even if
    # kubectl returns an empty state.
    if [ "$(kubectl --request-timeout 20s \
         --context "$KCTL_CONTEXT" \
         get bmh "$worker" \
         -o jsonpath='{.status.provisioning.state}')" = "ready" ] ; then
      echo "Get BMHs status" 1>&2
      kubectl \
        --context "$KCTL_CONTEXT" \
        get bmh 1>&2
      break
    else
      now=$(date +%s)
      if [ "$now" -gt "$end" ]; then
        echo "BMH is not ready before TIMEOUT=$TIMEOUT" 1>&2
        exit 1
      fi
      sleep 15
    fi
  done
done

View File

@ -0,0 +1,6 @@
# Kustomize generator producing the ConfigMap consumed by the
# kubectl-wait-bmh GenericContainer executor.
configMapGenerator:
- name: kubectl-wait-bmh
options:
# Stable name (no content hash suffix) so the executor's configRef
# can reference the ConfigMap by its literal name.
disableNameSuffixHash: true
files:
# The script is exposed under the key "script".
- script=kubectl_wait_bmh.sh

View File

@ -0,0 +1,53 @@
#!/bin/sh
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Wait until every node named in the rendered bundle reaches the Ready
# condition, then apply NODE_LABEL to it. Polls every 15 seconds for up to
# TIMEOUT seconds overall; exit 1 if the deadline passes first.
#
# Required environment:
#   KCTL_CONTEXT         - kubectl context to operate against
#   RENDERED_BUNDLE_PATH - file containing the rendered documents whose
#                          names correspond to the node names to wait on
# Optional:
#   TIMEOUT              - total wait budget in seconds (default: 3600)
#   NODE_LABEL           - label applied to each Ready node
#                          (default: node-role.kubernetes.io/worker=)
set -xe
export TIMEOUT=${TIMEOUT:-3600}
export NODE_LABEL=${NODE_LABEL:-"node-role.kubernetes.io/worker="}
# Fail fast with a clear diagnostic instead of an opaque kubectl error.
: "${KCTL_CONTEXT:?KCTL_CONTEXT must be set}"
: "${RENDERED_BUNDLE_PATH:?RENDERED_BUNDLE_PATH must be set}"
# Collect the names of all documents in the rendered bundle.
NODE_NAMES=$(kubectl --context "$KCTL_CONTEXT" \
             get -f "$RENDERED_BUNDLE_PATH" \
             --output jsonpath='{..metadata.name}')
echo "Wait $TIMEOUT seconds for worker node to be in Ready state." 1>&2
end=$(($(date +%s) + TIMEOUT))
# NODE_NAMES is a space-separated list; word-splitting here is intentional.
for node in $NODE_NAMES
do
  while true; do
    # The command substitution MUST be quoted: if the jsonpath result is
    # empty (e.g. transient API error within the 20s request timeout) an
    # unquoted expansion makes test(1) fail with a syntax error instead of
    # taking the retry branch. '=' (not '==') is the POSIX comparison.
    if [ "$(kubectl --request-timeout 20s \
         --context "$KCTL_CONTEXT" \
         get node "$node" \
         -o jsonpath="{.status.conditions[?(@.type=='Ready')].type}")" = "Ready" ] ; then
      echo "Set label $NODE_LABEL for node $node" 1>&2
      kubectl \
        --context "$KCTL_CONTEXT" \
        label nodes \
        "$node" "$NODE_LABEL" 1>&2
      echo "Get nodes status" 1>&2
      kubectl \
        --context "$KCTL_CONTEXT" \
        get node 1>&2
      break
    else
      now=$(date +%s)
      if [ "$now" -gt "$end" ]; then
        echo "Node $node is not ready before TIMEOUT=$TIMEOUT" 1>&2
        exit 1
      fi
      sleep 15
    fi
  done
done

View File

@ -0,0 +1,6 @@
# Kustomize generator producing the ConfigMap consumed by the
# kubectl-wait-label-node GenericContainer executor.
configMapGenerator:
- name: kubectl-wait-label-node
options:
# Stable name (no content hash suffix) so the executor's configRef
# can reference the ConfigMap by its literal name.
disableNameSuffixHash: true
files:
# The script is exposed under the key "script".
- script=kubectl_wait_label_node.sh

View File

@ -459,4 +459,37 @@ configRef:
kind: ConfigMap
name: virsh-destroy-vms
apiVersion: v1
---
# Executor that runs the kubectl-wait-bmh ConfigMap script inside the
# airshipit toolbox image; the filters below select the BareMetalHost
# documents rendered into the executor's bundle.
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: kubectl-wait-bmh
spec:
image: quay.io/airshipit/toolbox:latest
hostNetwork: true
envVars:
- RESOURCE_GROUP_FILTER=metal3.io
- RESOURCE_VERSION_FILTER=v1alpha1
- RESOURCE_KIND_FILTER=BareMetalHost
- RESOURCE_LABEL_FILTER=airshipit.org/k8s-role=worker
configRef:
kind: ConfigMap
name: kubectl-wait-bmh
apiVersion: v1
---
# Executor that runs the kubectl-wait-label-node ConfigMap script.
# NOTE(review): the filters still select BareMetalHost documents even though
# this phase waits on nodes — presumably the BMH names match the node names;
# confirm this is intentional and not a copy-paste of the block above.
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: kubectl-wait-label-node
spec:
image: quay.io/airshipit/toolbox:latest
hostNetwork: true
envVars:
- RESOURCE_GROUP_FILTER=metal3.io
- RESOURCE_VERSION_FILTER=v1alpha1
- RESOURCE_KIND_FILTER=BareMetalHost
- RESOURCE_LABEL_FILTER=airshipit.org/k8s-role=worker
configRef:
kind: ConfigMap
name: kubectl-wait-label-node
apiVersion: v1

View File

@ -409,3 +409,27 @@ config:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: virsh-destroy-vms
---
# Phase that waits for worker BareMetalHosts on the target cluster to reach
# the "ready" provisioning state, via the kubectl-wait-bmh executor.
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: kubectl-wait-bmh-target
clusterName: target-cluster
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: kubectl-wait-bmh
documentEntryPoint: target/workers
---
# Phase that waits for worker nodes on the target cluster to become Ready
# and labels them, via the kubectl-wait-label-node executor.
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: kubectl-wait-label-node-target
clusterName: target-cluster
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: kubectl-wait-label-node
documentEntryPoint: target/workers

View File

@ -14,50 +14,23 @@
# NOTE(review): this is a diff hunk (-50/+23 lines) rendered without +/-
# markers; the node_timeout function below appears to be the REMOVED inline
# polling logic, replaced by the airshipctl phase invocations — confirm
# against the actual commit before treating this as a runnable script.
set -e
#Default wait timeout is 3600 seconds
export TIMEOUT=${TIMEOUT:-3600}
export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
WORKER_NODE=${WORKER_NODE:-"node03"}
EPHEMERAL_DOMAIN_NAME="air-ephemeral"
# all vms. This can be removed once sushy tool is fixed
# Scripts for this phase placed in manifests/function/phase-helpers/virsh-destroy-vms/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name virsh-destroy-vms
airshipctl phase run virsh-destroy-vms --debug
# Legacy inline poller: waits for each $WORKER_NODE to match $2 in
# `kubectl get $1` output, labels nodes as workers when $1 is "node",
# and exits 1 once $TIMEOUT elapses. Superseded by the wait phases below.
node_timeout () {
end=$(($(date +%s) + $TIMEOUT))
for worker in $WORKER_NODE
do
while true; do
if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get $1 $worker | grep -qw $2) ; then
if [ "$1" = "node" ]; then
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT label nodes $worker node-role.kubernetes.io/worker=""
fi
echo -e "\nGet $1 status"
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get $1
break
else
now=$(date +%s)
if [ $now -gt $end ]; then
echo -e "\n$1 is not ready before TIMEOUT."
exit 1
fi
echo -n .
sleep 15
fi
done
done
}
echo "Deploy worker node"
airshipctl phase run workers-target --debug
echo "Waiting $TIMEOUT seconds for bmh to be in ready state."
node_timeout bmh ready
# Waiting for bmh to be in ready state
# Scripts for this phase placed in manifests/function/phase-helpers/wait_bmh/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-bmh
airshipctl phase run kubectl-wait-bmh-target --debug
echo "Waiting $TIMEOUT seconds for node to be provisioned."
node_timeout node Ready
# Waiting for node to be provisioned."
# Scripts for this phase placed in manifests/function/phase-helpers/wait_label_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-label-node
airshipctl phase run kubectl-wait-label-node-target --debug