platform-armada-app/stx-platform-helm/stx-platform-helm/helm-charts/rbd-provisioner/templates/pre-install-check-ceph.yaml

{{/*
#
# Copyright (c) 2018-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
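{{- /*
A minimal sketch of the values this template consumes. The key names below
mirror the references made in this file (global.*, rbac.*, images.tags.*,
classdefaults.*, classes[]); the sample values are illustrative only, not
shipped defaults.

  global:
    name: general
    job_storage_init: true
    nodeSelector: {}
  rbac:
    serviceAccount: rbd-provisioner
  images:
    tags:
      rbd_provisioner_storage_init: docker.io/openstackhelm/ceph-config-helper:latest
  classdefaults:
    monitors:
    - 192.168.204.2:6789
    adminSecretName: ceph-admin
  classes:
  - name: fast-rbd
    pool_name: kube-rbd
    userId: ceph-pool-kube-rbd
    userSecretName: ceph-pool-kube-rbd
    replication: 2
    crush_rule_name: storage_tier_ruleset
    chunk_size: 8
    additionalNamespaces:
    - default
    - kube-public
*/}}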
{{- if .Values.global.job_storage_init }}
{{ $root := . }}
{{ $defaults := .Values.classdefaults}}
{{ $mount := "/tmp/mount" }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: config-{{- $root.Values.global.name }}
  namespace: {{ $root.Release.Namespace }}
  annotations:
    "helm.sh/hook": "pre-upgrade, pre-install"
    "helm.sh/hook-delete-policy": "before-hook-creation"
data:
  ceph.conf: |
    {{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
    [mon.{{- $index }}]
    mon_addr = {{ $element }}
    {{- end }}
  check_ceph.sh: |-
    #!/bin/bash

    # Copy from the read-only mount to the Ceph config folder
    cp {{ $mount -}}/ceph.conf /etc/ceph/

    if [ -n "${CEPH_ADMIN_SECRET}" ]; then
      kubectl get secret -n ${NAMESPACE} | grep ${CEPH_ADMIN_SECRET}
      if [ $? -ne 0 ]; then
        echo "Create ${CEPH_ADMIN_SECRET} secret"
        kubectl create secret generic ${CEPH_ADMIN_SECRET} --type="kubernetes.io/rbd" --from-literal=key= --namespace=${NAMESPACE}
        if [ $? -ne 0 ]; then
          echo "Error creating secret ${CEPH_ADMIN_SECRET}, exit"
          exit 1
        fi
      fi
    fi
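
    # Create an empty admin keyring so the ceph CLI has a keyring file to read;
    # the cluster is expected to run with cephx disabled (see "auth_supported = none"
    # in the ceph-etc ConfigMap below), so no key material is needed here.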
    touch /etc/ceph/ceph.client.admin.keyring

    # Check if ceph is accessible
    echo "===================================="
    ceph -s
    if [ $? -ne 0 ]; then
      echo "Error: Ceph cluster is not accessible, check Pod logs for details."
      exit 1
    fi

    set -ex
    # Make sure the pool exists.
    ceph osd pool stats ${POOL_NAME} || ceph osd pool create ${POOL_NAME} ${POOL_CHUNK_SIZE}
    # Set pool configuration.
    ceph osd pool application enable $POOL_NAME rbd
    ceph osd pool set ${POOL_NAME} size ${POOL_REPLICATION}
    ceph osd pool set ${POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}
    set +ex

    if [[ -z "${USER_ID}" && -z "${CEPH_USER_SECRET}" ]]; then
      echo "No need to create secrets for pool ${POOL_NAME}"
      exit 0
    fi

    set -ex
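    # Create the pool user if needed and extract the "key = ..." value from the
    # keyring that "ceph auth get-or-create" prints.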
    KEYRING=$(ceph auth get-or-create client.${USER_ID} mon "allow r" osd "allow rwx pool=${POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
    # Set up pool key in Ceph format
    CEPH_USER_KEYRING=/etc/ceph/ceph.client.${USER_ID}.keyring
    echo $KEYRING > $CEPH_USER_KEYRING
    set +ex

    if [ -n "${CEPH_USER_SECRET}" ]; then
      kubectl get secret -n ${NAMESPACE} ${CEPH_USER_SECRET} 2>/dev/null
      if [ $? -ne 0 ]; then
        echo "Create ${CEPH_USER_SECRET} secret"
        kubectl create secret generic -n ${NAMESPACE} ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
        if [ $? -ne 0 ]; then
echo"Error creating secret ${CEPH_USER_SECRET} in ${NAMESPACE}, exit"
          exit 1
        fi
      else
        echo "Secret ${CEPH_USER_SECRET} already exists"
      fi

      # Support creating namespaces and Ceph user secrets for additional
      # namespaces other than the one in which the provisioner is installed.
      # This allows the provisioner to set up and provide PVs for multiple
      # applications across many namespaces.
if [ -n "${ADDITIONAL_NAMESPACES}" ]; then
for ns in $(IFS=,; echo ${ADDITIONAL_NAMESPACES}); do
kubectl get namespace $ns 2>/dev/null
if [ $? -ne 0 ]; then
kubectl create namespace $ns
if [ $? -ne 0 ]; then
echo "Error creating namespace $ns, exit"
continue
fi
fi
kubectl get secret -n $ns ${CEPH_USER_SECRET} 2>/dev/null
if [ $? -ne 0 ]; then
echo "Creating secret ${CEPH_USER_SECRET} for namespace $ns"
kubectl create secret generic -n $ns ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
if [ $? -ne 0 ]; then
echo "Error creating secret ${CEPH_USER_SECRET} in $ns, exit"
fi
else
echo "Secret ${CEPH_USER_SECRET} for namespace $ns already exists"
fi
done
fi
fi

    # Check if pool is accessible using provided credentials
    echo "====================================="
    timeout --preserve-status 10 rbd -p ${POOL_NAME} --user ${USER_ID} ls -K $CEPH_USER_KEYRING
    RET=$?
    if [ $RET -ne 143 ]; then
      if [ $RET -ne 0 ]; then
        echo "Error: Ceph pool ${POOL_NAME} is not accessible using credentials for user ${USER_ID}, check Pod logs for details."
        exit 1
      else
        echo "Pool ${POOL_NAME} accessible"
      fi
    else
      echo "rbd command timed out and was sent a SIGTERM. Make sure OSDs have been provisioned."
    fi

    ceph -s
---
apiVersion: batch/v1
kind: Job
metadata:
  name: storage-init-{{- $root.Values.global.name }}
  namespace: {{ $root.Release.Namespace }}
  labels:
    heritage: {{ $root.Release.Service | quote }}
    release: {{ $root.Release.Name | quote }}
    chart: "{{ $root.Chart.Name }}-{{ $root.Chart.Version }}"
  annotations:
    "helm.sh/hook": "post-install, pre-upgrade, pre-rollback"
    "helm.sh/hook-delete-policy": "before-hook-creation"
spec:
  backoffLimit: 5  # Limit the number of Job restarts in case of failure: ~5 minutes.
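  # Cap the total runtime of this Job at 6 minutes (activeDeadlineSeconds below).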
  activeDeadlineSeconds: 360
  template:
    metadata:
      name: "{{ $root.Release.Name }}"
      namespace: {{ $root.Release.Namespace }}
      labels:
        heritage: {{ $root.Release.Service | quote }}
        release: {{ $root.Release.Name | quote }}
        chart: "{{ $root.Chart.Name }}-{{ $root.Chart.Version }}"
    spec:
      serviceAccountName: {{ $root.Values.rbac.serviceAccount }}
      restartPolicy: OnFailure
      volumes:
      - name: config-volume-{{- $root.Values.global.name }}
        configMap:
          name: config-{{- $root.Values.global.name }}
      containers:
      {{- range $classConfig := $root.Values.classes }}
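      # One storage-init container is rendered per storage class in .Values.classes.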
      - name: storage-init-{{- $classConfig.name }}
        image: {{ $root.Values.images.tags.rbd_provisioner_storage_init | quote }}
        command: [ "/bin/bash", "{{ $mount }}/check_ceph.sh" ]
        env:
        - name: NAMESPACE
          value: {{ $root.Release.Namespace }}
        - name: ADDITIONAL_NAMESPACES
          value: {{ include "helm-toolkit.utils.joinListWithComma" $classConfig.additionalNamespaces | quote }}
        - name: CEPH_ADMIN_SECRET
          value: {{ $defaults.adminSecretName }}
        - name: CEPH_USER_SECRET
          value: {{ $classConfig.userSecretName }}
        - name: USER_ID
          value: {{ $classConfig.userId }}
        - name: POOL_NAME
          value: {{ $classConfig.pool_name }}
        - name: POOL_REPLICATION
          value: {{ $classConfig.replication | quote }}
        - name: POOL_CRUSH_RULE_NAME
          value: {{ $classConfig.crush_rule_name | quote }}
        - name: POOL_CHUNK_SIZE
          value: {{ $classConfig.chunk_size | quote }}
        volumeMounts:
        - name: config-volume-{{- $root.Values.global.name }}
          mountPath: {{ $mount }}
      {{- end }}
{{- if .Values.global.nodeSelector }}
      nodeSelector:
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
{{- end }}
---
# This ConfigMap is needed because we're not using ceph's helm chart
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-etc
  namespace: {{ $root.Release.Namespace }}
  annotations:
    "helm.sh/hook": "pre-upgrade, pre-install"
    "helm.sh/hook-delete-policy": "before-hook-creation"
data:
  ceph.conf: |
    [global]
    auth_supported = none
    {{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
    [mon.{{- $index }}]
    mon_addr = {{ $element }}
    {{- end }}
{{- end }}