From c14d8c6514ae3788891645da384a08cdcb5e79a3 Mon Sep 17 00:00:00 2001
From: Chinasubbareddy Mallavarapu
Date: Sun, 29 Mar 2020 00:09:32 -0500
Subject: [PATCH] [CEPH-OSD] Move to 'OnDelete' upgrade strategy for ceph-osd
 daemonsets

This is to move to the OnDelete upgrade strategy for ceph-osd daemonsets so
that the OSD upgrade can be performed by failure domains, as the current
upgrade strategy (RollingUpdate) will randomly pick the OSD pods for upgrade.
This will be more helpful when we have rack-based failure domains on the
Ceph clusters.

This ps will add a new job called post-apply to restart the OSD pods rack by
rack:

- the post-apply job will make sure OSDs get restarted rack by rack, which
  will save upgrade time.
- it is less/not disruptive, since we are upgrading per failure domain.

Also, this job will be enabled only when we have the OnDelete upgrade
strategy in values.

Change-Id: I2e977e75616e08fee780f714bbd267743c42c74d
---
 ceph-osd/templates/bin/_post-apply.sh.tpl | 184 ++++++++++++++++++++++
 ceph-osd/templates/configmap-bin.yaml     |   2 +
 ceph-osd/templates/job-post-apply.yaml    | 138 ++++++++++++++++
 ceph-osd/values.yaml                      |   1 +
 4 files changed, 325 insertions(+)
 create mode 100644 ceph-osd/templates/bin/_post-apply.sh.tpl
 create mode 100644 ceph-osd/templates/job-post-apply.yaml

diff --git a/ceph-osd/templates/bin/_post-apply.sh.tpl b/ceph-osd/templates/bin/_post-apply.sh.tpl
new file mode 100644
index 0000000000..be9fce7b7b
--- /dev/null
+++ b/ceph-osd/templates/bin/_post-apply.sh.tpl
@@ -0,0 +1,184 @@
+#!/bin/bash
+
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/}} + +export LC_ALL=C + +: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}" + +if [[ ! -f /etc/ceph/${CLUSTER}.conf ]]; then + echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon" + exit 1 +fi + +if [[ ! -f ${ADMIN_KEYRING} ]]; then + echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon" + exit 1 +fi + +ceph --cluster ${CLUSTER} -s +function wait_for_pods() { + end=$(date +%s) + timeout=${2:-1800} + end=$((end + timeout)) + while true; do + kubectl get pods --namespace=$1 -l component=osd -o json | jq -r \ + '.items[].status.phase' | grep Pending > /dev/null && \ + PENDING="True" || PENDING="False" + query='.items[]|select(.status.phase=="Running")' + pod_query="$query|.status.containerStatuses[].ready" + init_query="$query|.status.initContainerStatuses[].ready" + kubectl get pods --namespace=$1 -l component=osd -o json | jq -r "$pod_query" | \ + grep false > /dev/null && READY="False" || READY="True" + kubectl get pods --namespace=$1 -o json | jq -r "$init_query" | \ + grep false > /dev/null && INIT_READY="False" || INIT_READY="True" + kubectl get pods --namespace=$1 | grep -E 'Terminating|PodInitializing' \ + > /dev/null && UNKNOWN="True" || UNKNOWN="False" + [ $INIT_READY == "True" -a $UNKNOWN == "False" -a $PENDING == "False" -a $READY == "True" ] && \ + break || true + sleep 5 + now=$(date +%s) + if [ $now -gt $end ] ; then + echo "Containers failed to start after $timeout seconds" + echo + kubectl get pods --namespace $1 -o wide + echo + if [ $PENDING == "True" ] ; then + echo "Some pods are in pending state:" + kubectl get pods --field-selector=status.phase=Pending -n $1 -o wide + fi + [ $READY == "False" ] && echo "Some pods are not ready" + exit -1 + fi + done +} + +function check_ds() { + for ds in `kubectl get ds --namespace=$CEPH_NAMESPACE -l component=osd --no-headers=true|awk '{print $1}'` 
+ do + ds_query=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status` + if echo $ds_query |grep -i "numberAvailable" ;then + currentNumberScheduled=`echo $ds_query|jq -r .currentNumberScheduled` + desiredNumberScheduled=`echo $ds_query|jq -r .desiredNumberScheduled` + numberAvailable=`echo $ds_query|jq -r .numberAvailable` + numberReady=`echo $ds_query|jq -r .numberReady` + updatedNumberScheduled=`echo $ds_query|jq -r .updatedNumberScheduled` + ds_check=`echo "$currentNumberScheduled $desiredNumberScheduled $numberAvailable $numberReady $updatedNumberScheduled"| \ + tr ' ' '\n'|sort -u|wc -l` + if [ $ds_check != 1 ]; then + echo "few pods under daemonset $ds are not yet ready" + exit + else + echo "all pods ubder deamonset $ds are ready" + fi + else + echo "this are no osds under daemonset $ds" + fi + done +} + +function wait_for_inactive_pgs () { + echo "#### Start: Checking for inactive pgs ####" + + # Loop until all pgs are active + if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then + while [[ `ceph --cluster ${CLUSTER} pg ls | tail -n +2 | head -n -2 | grep -v "active+"` ]] + do + sleep 3 + ceph -s + done + else + while [[ `ceph --cluster ${CLUSTER} pg ls | tail -n +2 | grep -v "active+"` ]] + do + sleep 3 + ceph -s + done + fi +} + +function wait_for_degraded_objects () { + echo "#### Start: Checking for degraded objects ####" + + # Loop until no degraded objects + while [[ ! -z "`ceph --cluster ${CLUSTER} -s | grep degraded`" ]] + do + sleep 3 + ceph -s + done +} + +function restart_by_rack() { + + racks=`ceph osd tree | awk '/rack/{print $4}'` + echo "Racks under ceph cluster are: $racks" + for rack in $racks + do + hosts_in_rack=(`ceph osd tree | sed -n "/rack $rack/,/rack/p" | awk '/host/{print $4}' | tr '\n' ' '|sed 's/ *$//g'`) + echo "hosts under rack "$rack" are: ${hosts_in_rack[@]}" + echo "hosts count under $rack are: ${#hosts_in_rack[@]}" + for host in ${hosts_in_rack[@]} + do + echo "host is : $host" + if [[ ! 
-z "$host" ]]; then + pods_on_host=`kubectl get po -n $CEPH_NAMESPACE -l component=osd -o wide |grep $host|awk '{print $1}'` + echo "Restartig the pods under host $host" + kubectl delete po -n $CEPH_NAMESPACE $pods_on_host + fi + done + echo "waiting for the pods under rack $rack from restart" + wait_for_pods $CEPH_NAMESPACE + echo "waiting for inactive pgs after osds restarted from rack $rack" + wait_for_inactive_pgs + wait_for_degraded_objects + ceph -s + done +} + +require_upgrade=0 + + +for ds in `kubectl get ds --namespace=$CEPH_NAMESPACE -l component=osd --no-headers=true|awk '{print $1}'` +do + updatedNumberScheduled=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status.updatedNumberScheduled` + desiredNumberScheduled=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status.desiredNumberScheduled` + if [[ $updatedNumberScheduled != $desiredNumberScheduled ]]; then + if kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status|grep -i "numberAvailable" ;then + require_upgrade=$((require_upgrade+1)) + fi + fi +done + +ds=`kubectl get ds -n $CEPH_NAMESPACE -l release_group=$RELEASE_GROUP_NAME --no-headers|awk '{print $1}'|head -n 1` +TARGET_HELM_RELEASE=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status.observedGeneration` +echo "Latest revision of the helm chart $RELEASE_GROUP_NAME is : $TARGET_HELM_RELEASE" + +if [[ $TARGET_HELM_RELEASE -gt 1 ]]; then + if [[ $require_upgrade -gt 0 ]]; then + echo "waiting for inactive pgs and degraded obejcts before upgrade" + wait_for_inactive_pgs + wait_for_degraded_objects + ceph -s + ceph osd "set" noout + echo "lets restart the osds rack by rack" + restart_by_rack + ceph osd "unset" noout + fi + + #lets check all the ceph-osd daemonsets + echo "checking DS" + check_ds +else + echo "No revisions found for upgrade" +fi diff --git a/ceph-osd/templates/configmap-bin.yaml b/ceph-osd/templates/configmap-bin.yaml index 3d41b3a84c..84fab45572 100644 --- a/ceph-osd/templates/configmap-bin.yaml +++ 
b/ceph-osd/templates/configmap-bin.yaml @@ -28,6 +28,8 @@ data: bootstrap.sh: | {{ tuple "bin/_bootstrap.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} {{- end }} + post-apply.sh: | +{{ tuple "bin/_post-apply.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} osd-start.sh: | {{ tuple "bin/osd/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} log-tail.sh: | diff --git a/ceph-osd/templates/job-post-apply.yaml b/ceph-osd/templates/job-post-apply.yaml new file mode 100644 index 0000000000..ad85d47a59 --- /dev/null +++ b/ceph-osd/templates/job-post-apply.yaml @@ -0,0 +1,138 @@ +{{/* +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if eq .Values.pod.lifecycle.upgrades.daemonsets.pod_replacement_strategy "OnDelete" }} +{{- if and .Values.manifests.job_post_apply }} +{{- $envAll := . 
}} + +{{- $serviceAccountName := printf "%s-%s" .Release.Name "post-apply" }} +{{ tuple $envAll "post-apply" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $serviceAccountName }} +rules: + - apiGroups: + - '' + resources: + - pods + - events + - jobs + - pods/exec + verbs: + - create + - get + - delete + - list + - apiGroups: + - 'apps' + resources: + - daemonsets + verbs: + - get + - list + - apiGroups: + - 'batch' + resources: + - jobs + verbs: + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ $serviceAccountName }} +subjects: + - kind: ServiceAccount + name: {{ $serviceAccountName }} + namespace: {{ $envAll.Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ $serviceAccountName }} + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $serviceAccountName }} + annotations: + {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }} +spec: + template: + metadata: + labels: +{{ tuple $envAll "ceph-upgrade" "post-apply" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }} + spec: +{{ dict "envAll" $envAll "application" "post-apply" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }} + serviceAccountName: {{ $serviceAccountName }} + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }} + initContainers: +{{ tuple $envAll "post-apply" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }} + containers: + - name: ceph-osd-post-apply +{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }} +{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 
}} +{{ dict "envAll" $envAll "application" "post-apply" "container" "ceph_osd_post_apply" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }} + env: + - name: CLUSTER + value: "ceph" + - name: CEPH_NAMESPACE + value: {{ .Release.Namespace }} + - name: RELEASE_GROUP_NAME + value: {{ .Release.Name }} + command: + - /tmp/post-apply.sh + volumeMounts: + - name: pod-tmp + mountPath: /tmp + - name: pod-etc-ceph + mountPath: /etc/ceph + - name: ceph-osd-bin + mountPath: /tmp/post-apply.sh + subPath: post-apply.sh + readOnly: true + - name: ceph-osd-bin + mountPath: /tmp/wait-for-pods.sh + subPath: wait-for-pods.sh + readOnly: true + - name: ceph-osd-etc + mountPath: /etc/ceph/ceph.conf + subPath: ceph.conf + readOnly: true + - name: ceph-osd-admin-keyring + mountPath: /etc/ceph/ceph.client.admin.keyring + subPath: ceph.client.admin.keyring + readOnly: true + volumes: + - name: pod-tmp + emptyDir: {} + - name: pod-etc-ceph + emptyDir: {} + - name: ceph-osd-bin + configMap: + name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }} + defaultMode: 0555 + - name: ceph-osd-etc + configMap: + name: {{ printf "%s-%s" $envAll.Release.Name "etc" | quote }} + defaultMode: 0444 + - name: ceph-osd-admin-keyring + secret: + secretName: {{ .Values.secrets.keyrings.admin }} +{{- end }} +{{- end }} diff --git a/ceph-osd/values.yaml b/ceph-osd/values.yaml index 57fa477861..09e1bcd251 100644 --- a/ceph-osd/values.yaml +++ b/ceph-osd/values.yaml @@ -352,5 +352,6 @@ manifests: configmap_test_bin: true daemonset_osd: true job_bootstrap: false + job_post_apply: true job_image_repo_sync: true helm_tests: true