Drop draining the node while removing the k8s cluster.

When executing `kubectl drain node`, the kuryr-controller sometimes gets
removed first, which can cause the removal of the remaining deployments
to hang, since they depend on the controller.

Also, it turns out that we can simply skip the drain altogether:
`kubeadm reset` will remove everything for us, so that's enough.

Change-Id: I396fb8fa5658617d03f5fdeed93cc86aa61e4a2d
Roman Dobosz 2021-05-24 12:20:33 +02:00
parent 11a34d0e5d
commit 25f5acce36
2 changed files with 1 addition and 8 deletions


@@ -176,10 +176,6 @@ function get_k8s_token {
 }
 
 function kubeadm_reset {
-    local nodename
-    nodename=$(kubectl get nodes -o jsonpath="{.items[0].metadata.name}")
-    kubectl drain $nodename --delete-emptydir-data --force --ignore-daemonsets
-    kubectl delete node $nodename
     sudo kubeadm reset -f
     sudo iptables -F
     sudo iptables -t nat -F
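
With the drain and node deletion dropped, the helper boils down to the
reset plus an iptables flush. A sketch of the resulting function,
assembled from the unchanged context lines above (any further cleanup
steps fall outside this hunk):

function kubeadm_reset {
    # `kubeadm reset` already tears down the control plane and all workloads
    # on this node, so no prior `kubectl drain` is needed.
    sudo kubeadm reset -f
    # Flush filter and NAT rules left behind by kube-proxy and the CNI.
    sudo iptables -F
    sudo iptables -t nat -F
    # (any remaining teardown steps are outside the hunk shown above)
}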


@@ -155,13 +155,10 @@ if is_service_enabled kuryr-kubernetes kuryr-daemon \
     if [[ "$1" == "unstack" ]]; then
         # Shut down kuryr and kubernetes services
         if is_service_enabled kuryr-kubernetes; then
-            if [ "${KURYR_CONT}" == "True" ]; then
-                kubectl delete deployment kuryr-controller
-            else
+            if [ "${KURYR_CONT}" == "False" ]; then
                 stop_process kuryr-kubernetes
                 stop_process kuryr-daemon
             fi
-            #sudo systemctl stop kubelet
             kubeadm_reset
         fi
         cleanup_kuryr_devstack_iptables
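
Taken together, the unstack path after this change only stops the
uncontainerized processes and lets kubeadm_reset handle the cluster
teardown. A sketch of the resulting block, reconstructed from the hunk
above (the closing lines beyond the hunk are assumed):

if [[ "$1" == "unstack" ]]; then
    # Shut down kuryr and kubernetes services
    if is_service_enabled kuryr-kubernetes; then
        # When kuryr-controller runs containerized there is nothing to
        # delete explicitly: kubeadm_reset wipes the whole cluster,
        # deployments included.
        if [ "${KURYR_CONT}" == "False" ]; then
            stop_process kuryr-kubernetes
            stop_process kuryr-daemon
        fi
        kubeadm_reset
    fi
    cleanup_kuryr_devstack_iptables
fi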