platform-armada-app/stx-platform-helm/stx-platform-helm/helm-charts/rbd-provisioner/templates/pre-install-check-ceph.yaml

{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
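{{/*
Rendered only when global.job_storage_init is enabled. This template creates:
a ConfigMap carrying a minimal ceph.conf plus the check_ceph.sh pre-install
check script, a Job that runs the script once per configured storage class,
the ceph-etc ConfigMap, and the (initially empty) pvc-ceph-client-key Secret.
*/}}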
{{- if .Values.global.job_storage_init }}
{{ $root := . }}
{{ $defaults := .Values.classdefaults}}
{{ $mount := "/tmp/mount" }}
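{{/*
For reference, a minimal sketch of the values this template consumes. The key
names mirror what is referenced below; the example values themselves are
illustrative assumptions, not the chart defaults:

global:
  name: general
  job_storage_init: true
classdefaults:
  adminSecretName: ceph-admin
  monitors:
    - "192.168.204.2:6789"
classes:
  - name: fast-rbd
    pool_name: kube-rbd
    replication: 2
    crush_rule_name: storage_tier_ruleset
    chunk_size: 8
    userId: ceph-pool-kube-rbd
    userSecretName: ceph-pool-kube-rbd
rbac:
  serviceAccount: rbd-provisioner
images:
  tags:
    rbd_provisioner_storage_init: <ceph-config-helper image>
*/}}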
---
apiVersion: v1
kind: ConfigMap
metadata:
  creationTimestamp: 2016-02-18T19:14:38Z
  name: config-{{- $root.Values.global.name }}
  namespace: {{ $root.Release.Namespace }}
data:
  ceph.conf: |
    {{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
    [mon.{{- $index }}]
    mon_addr = {{ $element }}
    {{- end }}
  check_ceph.sh: |-
    #!/bin/bash

    # Copy from read only mount to Ceph config folder
    cp {{ $mount -}}/ceph.conf /etc/ceph/
    if [ -n "$CEPH_ADMIN_SECRET" ]; then
      kubectl get secret -n $NAMESPACE | grep $CEPH_ADMIN_SECRET
      ret=$?
      if [ $ret -ne 0 ]; then
        msg="Create $CEPH_ADMIN_SECRET secret"
        echo "$msg"
        kubectl create secret generic $CEPH_ADMIN_SECRET --type="kubernetes.io/rbd" --from-literal=key= --namespace=$NAMESPACE
        ret=$?
        if [ $ret -ne 0 ]; then
          msg="Error creating secret $CEPH_ADMIN_SECRET, exit"
          echo "$msg"
          exit $ret
        fi
      fi
    fi
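
    # Ensure an admin keyring file exists (an empty placeholder) before the ceph CLI is used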
    touch /etc/ceph/ceph.client.admin.keyring

    # Check if ceph is accessible
    echo "===================================="
    ceph -s
    ret=$?
    if [ $ret -ne 0 ]; then
      msg="Error: Ceph cluster is not accessible, check Pod logs for details."
      echo "$msg"
      exit $ret
    fi

    set -ex
    # Get the ruleset from the rule name.
    ruleset=$(ceph osd crush rule dump $POOL_CRUSH_RULE_NAME | grep "\"ruleset\":" | grep -Eo '[0-9]*')

    # Make sure the pool exists.
    ceph osd pool stats $POOL_NAME || ceph osd pool create $POOL_NAME $POOL_CHUNK_SIZE

    # Set pool configuration.
    ceph osd pool set $POOL_NAME size $POOL_REPLICATION
    ceph osd pool set $POOL_NAME crush_rule $ruleset

    if [[ -z $USER_ID && -z $CEPH_USER_SECRET ]]; then
      msg="No need to create secrets for pool $POOL_NAME"
      echo "$msg"
      exit 0
    fi
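
    # Create the pool user if needed and extract just its key from the keyring output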
    KEYRING=$(ceph auth get-or-create client.$USER_ID mon "allow r" osd "allow rwx pool=${POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
    # Set up pool key in Ceph format
    CEPH_USER_KEYRING=/etc/ceph/ceph.client.$USER_ID.keyring
    echo $KEYRING > $CEPH_USER_KEYRING
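
    # Store the key in a Kubernetes secret (type kubernetes.io/rbd) so it can be referenced by the matching storage class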
    kubectl create secret generic $CEPH_USER_SECRET --type="kubernetes.io/rbd" --from-literal=key=$KEYRING --namespace=$NAMESPACE
    set +ex

    # Check if pool is accessible using provided credentials
    echo "====================================="
    rbd -p $POOL_NAME --user $USER_ID ls -K $CEPH_USER_KEYRING
    ret=$?
    if [ $ret -ne 0 ]; then
      msg="Error: Ceph pool $POOL_NAME is not accessible using credentials for user $USER_ID, check Pod logs for details."
      echo "$msg"
      exit $ret
    else
      msg="Pool $POOL_NAME accessible"
      echo "$msg"
    fi

    ceph -s
---
apiVersion: batch/v1
kind: Job
metadata:
  name: rbd-provisioner-storage-init
  namespace: {{ $root.Release.Namespace }}
  labels:
    heritage: {{ $root.Release.Service | quote }}
    release: {{ $root.Release.Name | quote }}
    chart: "{{ $root.Chart.Name }}-{{ $root.Chart.Version }}"
spec:
  backoffLimit: 3  # Limit the number of job restarts in case of failure
  activeDeadlineSeconds: 180
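  # Give the whole storage-init job at most 3 minutes before it is marked failed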
  template:
    metadata:
      name: "{{ $root.Release.Name }}"
      namespace: {{ $root.Release.Namespace }}
      labels:
        heritage: {{ $root.Release.Service | quote }}
        release: {{ $root.Release.Name | quote }}
        chart: "{{ $root.Chart.Name }}-{{ $root.Chart.Version }}"
    spec:
      serviceAccountName: {{ $root.Values.rbac.serviceAccount }}
      restartPolicy: OnFailure
      volumes:
        - name: config-volume-{{- $root.Values.global.name }}
          configMap:
            name: config-{{- $root.Values.global.name }}
      containers:
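      # One storage-init container is rendered per entry in .Values.classes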
      {{- range $classConfig := $root.Values.classes }}
        - name: storage-init-{{- $classConfig.name }}
          image: {{ $root.Values.images.tags.rbd_provisioner_storage_init | quote }}
          command: [ "/bin/bash", "{{ $mount }}/check_ceph.sh" ]
          env:
            - name: NAMESPACE
              value: {{ $root.Release.Namespace }}
            - name: CEPH_ADMIN_SECRET
              value: {{ $defaults.adminSecretName }}
            - name: CEPH_USER_SECRET
              value: {{ $classConfig.userSecretName }}
            - name: USER_ID
              value: {{ $classConfig.userId }}
            - name: POOL_NAME
              value: {{ $classConfig.pool_name }}
            - name: POOL_REPLICATION
              value: {{ $classConfig.replication | quote }}
            - name: POOL_CRUSH_RULE_NAME
              value: {{ $classConfig.crush_rule_name | quote }}
            - name: POOL_CHUNK_SIZE
              value: {{ $classConfig.chunk_size | quote }}
          volumeMounts:
            - name: config-volume-{{- $root.Values.global.name }}
              mountPath: {{ $mount }}
      {{- end }}
---
# This ConfigMap is needed because we're not using ceph's helm chart
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-etc
  namespace: {{ $root.Release.Namespace }}
data:
  ceph.conf: |
    [global]
    auth_supported = none
    {{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
    [mon.{{- $index }}]
    mon_addr = {{ $element }}
    {{- end }}
---
# Create the pvc-ceph-client-key. We need this here as we're not launching
# Ceph using the Helm chart.
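# Note: no key data is set here; the actual client key is expected to be
# populated outside of this template.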
apiVersion: v1
kind: Secret
type: kubernetes.io/rbd
metadata:
  name: pvc-ceph-client-key
  namespace: {{ $root.Release.Namespace }}
{{- end }}