From 25f5acce36a67bfc6a1013c63083b3dbe0856142 Mon Sep 17 00:00:00 2001 From: Roman Dobosz Date: Mon, 24 May 2021 12:20:33 +0200 Subject: [PATCH] Dropping draining node while removing k8s cluster. When executing kubectl drain node, it sometimes removes kuryr-controller first, which may cause deployments to hang during removal, due to the dependency on the controller. Also, it turns out that we can simply skip the drain, as `kubeadm reset` will remove everything for us, so that's enough. Change-Id: I396fb8fa5658617d03f5fdeed93cc86aa61e4a2d --- devstack/lib/kubernetes | 4 ---- devstack/plugin.sh | 5 +---- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/devstack/lib/kubernetes b/devstack/lib/kubernetes index d89565502..940a8968e 100644 --- a/devstack/lib/kubernetes +++ b/devstack/lib/kubernetes @@ -176,10 +176,6 @@ function get_k8s_token { } function kubeadm_reset { - local nodename nodename=$(kubectl get nodes -o jsonpath="{.items[0].metadata.name}") - kubectl drain $nodename --delete-emptydir-data --force --ignore-daemonsets - kubectl delete node $nodename sudo kubeadm reset -f sudo iptables -F sudo iptables -t nat -F diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 98609015e..a63b9964b 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -155,13 +155,10 @@ if is_service_enabled kuryr-kubernetes kuryr-daemon \ if [[ "$1" == "unstack" ]]; then # Shut down kuryr and kubernetes services if is_service_enabled kuryr-kubernetes; then - if [ "${KURYR_CONT}" == "True" ]; then - kubectl delete deployment kuryr-controller - else + if [ "${KURYR_CONT}" == "False" ]; then stop_process kuryr-kubernetes stop_process kuryr-daemon fi - #sudo systemctl stop kubelet kubeadm_reset fi cleanup_kuryr_devstack_iptables