Remove the Ceph related charts from the stx-openstack application

This will remove the rbd-provisioner and ceph-pools-audit charts from
the stx-openstack application and enable it to use the default platform
storage provisioner.

Changes include:
 - Update the rbd-provisioner and ceph-pools-audit helm plugins to
   provide overrides for the namespace defined by
   HELM_NS_STORAGE_PROVISIONER (currently: kube-system); see the plugin
   sketch after this list.
 - Update the cinder, glance, gnocchi, and nova helm plugins to use the
   existing ceph-pool-kube-rbd secret for Ceph client access. This
   allows removing the pvc-ceph-client-key generation from the
   rbd-provisioner chart.
 - Add functions to kube_app.py to create/delete the required Ceph user
   secret for all namespaces of a supported application (see the
   secret-copy sketch after this list). This provides support for PVCs
   within the application's namespace(s). In the case of stx-openstack,
   this covers any claims made from the 'openstack' namespace.
 - Add functions to kube_app.py to support creating and deleting
   app-specific resources that are not handled by the application
   charts. Using this enables copying the 'ceph-etc' configmap from the
   provisioner namespace to the openstack namespace for application use
   (see the configmap sketch after this list).
 - Add support through the kubernetes API to copy a secret from one
   namespace to another.
 - Add support through the kubernetes API to get, create, delete, and
   copy configmaps.
 - Remove the rbd-provisioner and ceph-pools-audit stevedore plugins
   from the stx-openstack application. Also, re-number the plugins.
 - Update the RBD provisioner to support creating namespaces and Ceph
   user secrets for additional namespaces other than the one in which
   the provisioner is installed. Also, enable PVCs for the default
   namespaces (default and kube-public) against the 'general'
   storageclass.
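
A hypothetical sketch of the plugin-side pattern referenced above:
override data keyed off the namespace named by
HELM_NS_STORAGE_PROVISIONER instead of a hard-coded value. Class,
method, and constant names here are illustrative and may not match the
real sysinv plugin base class.

HELM_NS_STORAGE_PROVISIONER = 'kube-system'  # illustrative constant


class RbdProvisionerHelm(object):
    """Hypothetical plugin sketch; not the actual sysinv class."""

    CHART = 'rbd-provisioner'
    SUPPORTED_NAMESPACES = [HELM_NS_STORAGE_PROVISIONER]

    def get_overrides(self, namespace=None):
        # Key the overrides off HELM_NS_STORAGE_PROVISIONER so the chart
        # follows the platform storage provisioner namespace.
        overrides = {
            HELM_NS_STORAGE_PROVISIONER: {
                'classes': self._get_storage_class_overrides(),
            }
        }
        if namespace:
            return overrides.get(namespace, {})
        return overrides

    def _get_storage_class_overrides(self):
        # Placeholder for the per-pool data the real plugin derives from
        # the configured storage backends.
        return []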
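
A minimal sketch of the per-namespace Ceph user secret handling,
assuming the upstream 'kubernetes' Python client; the function and
constant names (copy_ceph_user_secret, PROVISIONER_NS, ...) are
illustrative, not the actual kube_app.py symbols.

from kubernetes import client, config
from kubernetes.client.rest import ApiException

PROVISIONER_NS = 'kube-system'           # HELM_NS_STORAGE_PROVISIONER
CEPH_USER_SECRET = 'ceph-pool-kube-rbd'  # secret consumed by the app charts


def copy_ceph_user_secret(api, target_ns):
    """Recreate the Ceph user secret in target_ns if it is not already there."""
    try:
        api.read_namespaced_secret(CEPH_USER_SECRET, target_ns)
        return  # already present, nothing to do
    except ApiException as e:
        if e.status != 404:
            raise
    src = api.read_namespaced_secret(CEPH_USER_SECRET, PROVISIONER_NS)
    body = client.V1Secret(
        metadata=client.V1ObjectMeta(name=CEPH_USER_SECRET,
                                     namespace=target_ns),
        type=src.type,
        data=src.data)
    api.create_namespaced_secret(target_ns, body)


def delete_ceph_user_secret(api, target_ns):
    """Drop the per-application copy when the application is removed."""
    try:
        api.delete_namespaced_secret(CEPH_USER_SECRET, target_ns)
    except ApiException as e:
        if e.status != 404:
            raise


if __name__ == '__main__':
    config.load_kube_config()
    core_v1 = client.CoreV1Api()
    # For stx-openstack this would cover the 'openstack' namespace.
    copy_ceph_user_secret(core_v1, 'openstack')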
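
Along the same lines, a sketch of copying the 'ceph-etc' configmap from
the provisioner namespace into the application namespace with the same
client; copy_configmap is an illustrative helper name.

from kubernetes import client, config
from kubernetes.client.rest import ApiException


def copy_configmap(api, name, src_ns, dst_ns):
    """Read a configmap from src_ns and recreate it in dst_ns."""
    src = api.read_namespaced_config_map(name, src_ns)
    body = client.V1ConfigMap(
        metadata=client.V1ObjectMeta(name=name, namespace=dst_ns),
        data=src.data)
    try:
        api.create_namespaced_config_map(dst_ns, body)
    except ApiException as e:
        if e.status != 409:  # 409: target configmap already exists
            raise


if __name__ == '__main__':
    config.load_kube_config()
    core_v1 = client.CoreV1Api()
    # Make the Ceph config available to charts in the 'openstack' namespace.
    copy_configmap(core_v1, 'ceph-etc', 'kube-system', 'openstack')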

Change-Id: I387e315545d2c99a1b6baa90d30bdb2a4e08f315
Depends-On: I67dba3f1a3a6e7c8169719ee622ddd533c69be31
Story: 2005424
Task: 30679
Signed-off-by: Robert Church <robert.church@windriver.com>
Author: Robert Church <robert.church@windriver.com> 2019-05-01 05:07:30 -04:00
parent a2c50e3130
commit 54e64acc05
7 changed files with 90 additions and 59 deletions

@@ -5,4 +5,4 @@ $PKG_BASE/../../../helm-charts/node-feature-discovery \
$PKG_BASE/../../../helm-charts/rbd-provisioner \
$PKG_BASE/../../../helm-charts/ceph-pools-audit"
TIS_PATCH_VER=3
TIS_PATCH_VER=4

@@ -0,0 +1,9 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
dependencies:
- name: helm-toolkit
repository: http://localhost:8879/charts
version: 0.1.0

@@ -34,4 +34,7 @@ rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "create", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "create", "list", "update"]
{{- end}}

@@ -30,19 +30,15 @@ data:
# Copy from read only mount to Ceph config folder
cp {{ $mount -}}/ceph.conf /etc/ceph/
if [ ! -z $CEPH_ADMIN_SECRET ]; then
kubectl get secret -n $NAMESPACE | grep $CEPH_ADMIN_SECRET
ret=$?
if [ $ret -ne 0 ]; then
msg="Create $CEPH_ADMIN_SECRET secret"
echo "$msg"
kubectl create secret generic $CEPH_ADMIN_SECRET --type="kubernetes.io/rbd" --from-literal=key= --namespace=$NAMESPACE
ret=$?
if [ $ret -ne 0 ]; then
msg="Error creating secret $CEPH_ADMIN_SECRET, exit"
echo "$msg"
exit $ret
fi
if [ -n "${CEPH_ADMIN_SECRET}" ]; then
kubectl get secret -n ${NAMESPACE} | grep ${CEPH_ADMIN_SECRET}
if [ $? -ne 0 ]; then
echo "Create ${CEPH_ADMIN_SECRET} secret"
kubectl create secret generic ${CEPH_ADMIN_SECRET} --type="kubernetes.io/rbd" --from-literal=key= --namespace=${NAMESPACE}
if [ $? -ne 0 ]; then
echo "Error creating secret ${CEPH_ADMIN_SECRET}, exit"
exit 1
fi
fi
fi
@@ -51,49 +47,80 @@ data:
# Check if ceph is accessible
echo "===================================="
ceph -s
ret=$?
if [ $ret -ne 0 ]; then
msg="Error: Ceph cluster is not accessible, check Pod logs for details."
echo "$msg"
exit $ret
if [ $? -ne 0 ]; then
echo "Error: Ceph cluster is not accessible, check Pod logs for details."
exit 1
fi
set -ex
# Make sure the pool exists.
ceph osd pool stats $POOL_NAME || ceph osd pool create $POOL_NAME $POOL_CHUNK_SIZE
ceph osd pool stats ${POOL_NAME} || ceph osd pool create ${POOL_NAME} ${POOL_CHUNK_SIZE}
# Set pool configuration.
ceph osd pool application enable $POOL_NAME rbd
ceph osd pool set $POOL_NAME size $POOL_REPLICATION
ceph osd pool set $POOL_NAME crush_rule $POOL_CRUSH_RULE_NAME
ceph osd pool set ${POOL_NAME} size ${POOL_REPLICATION}
ceph osd pool set ${POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}
set +ex
if [[ -z $USER_ID && -z $CEPH_USER_SECRET ]]; then
msg="No need to create secrets for pool $POOL_NAME"
echo "$msg"
if [[ -z "${USER_ID}" && -z "${CEPH_USER_SECRET}" ]]; then
echo "No need to create secrets for pool ${POOL_NAME}"
exit 0
fi
KEYRING=$(ceph auth get-or-create client.$USER_ID mon "allow r" osd "allow rwx pool=${POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
set -ex
KEYRING=$(ceph auth get-or-create client.${USER_ID} mon "allow r" osd "allow rwx pool=${POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
# Set up pool key in Ceph format
CEPH_USER_KEYRING=/etc/ceph/ceph.client.$USER_ID.keyring
CEPH_USER_KEYRING=/etc/ceph/ceph.client.${USER_ID}.keyring
echo $KEYRING > $CEPH_USER_KEYRING
kubectl create secret generic $CEPH_USER_SECRET --type="kubernetes.io/rbd" --from-literal=key=$KEYRING --namespace=$NAMESPACE
set +ex
if [ -n "${CEPH_USER_SECRET}" ]; then
kubectl get secret -n ${NAMESPACE} ${CEPH_USER_SECRET} 2>/dev/null
if [ $? -ne 0 ]; then
echo "Create ${CEPH_USER_SECRET} secret"
kubectl create secret generic -n ${NAMESPACE} ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
if [ $? -ne 0 ]; then
echo"Error creating secret ${CEPH_USER_SECRET} in ${NAMESPACE}, exit"
exit 1
fi
fi
# Support creating namespaces and Ceph user secrets for namespaces other
# than the one in which the provisioner is installed. This
# allows the provisioner to set up and provide PVs for multiple
# applications across many namespaces.
if [ -n "${ADDITIONAL_NAMESPACES}" ]; then
for ns in $(IFS=,; echo ${ADDITIONAL_NAMESPACES}); do
kubectl get namespace $ns 2>/dev/null
if [ $? -ne 0 ]; then
kubectl create namespace $ns
if [ $? -ne 0 ]; then
echo "Error creating namespace $ns, exit"
continue
fi
fi
kubectl get secret -n $ns ${CEPH_USER_SECRET} 2>/dev/null
if [ $? -ne 0 ]; then
echo "Creating secret ${CEPH_USER_SECRET} for namespace $ns"
kubectl create secret generic -n $ns ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
if [ $? -ne 0 ]; then
echo "Error creating secret ${CEPH_USER_SECRET} in $ns, exit"
fi
else
echo "Secret ${CEPH_USER_SECRET} for namespace $ns already exists"
fi
done
fi
fi
# Check if pool is accessible using provided credentials
echo "====================================="
rbd -p $POOL_NAME --user $USER_ID ls -K $CEPH_USER_KEYRING
ret=$?
if [ $ret -ne 0 ]; then
msg="Error: Ceph pool $POOL_NAME is not accessible using \
credentials for user $USER_ID, check Pod logs for details."
echo "$msg"
exit $ret
rbd -p ${POOL_NAME} --user ${USER_ID} ls -K $CEPH_USER_KEYRING
if [ $? -ne 0 ]; then
echo "Error: Ceph pool ${POOL_NAME} is not accessible using credentials for user ${USER_ID}, check Pod logs for details."
exit 1
else
msg="Pool $POOL_NAME accessible"
echo "$msg"
echo "Pool ${POOL_NAME} accessible"
fi
ceph -s
@@ -102,7 +129,7 @@ data:
apiVersion: batch/v1
kind: Job
metadata:
name: rbd-provisioner-storage-init
name: storage-init-{{- $root.Values.global.name }}
namespace: {{ $root.Release.Namespace }}
labels:
heritage: {{$root.Release.Service | quote }}
@@ -134,6 +161,8 @@ spec:
env:
- name: NAMESPACE
value: {{ $root.Release.Namespace }}
- name: ADDITIONAL_NAMESPACES
value: {{ include "helm-toolkit.utils.joinListWithComma" $classConfig.additionalNamespaces | quote }}
- name: CEPH_ADMIN_SECRET
value: {{ $defaults.adminSecretName }}
- name: CEPH_USER_SECRET
@@ -168,13 +197,4 @@ data:
[mon.{{- $index }}]
mon_addr = {{ $element }}
{{- end }}
---
# Create the pvc-ceph-client-key. We need this here as we're not launching
# Ceph using the Helm chart.
apiVersion: v1
kind: Secret
type: kubernetes.io/rbd
metadata:
name: pvc-ceph-client-key
namespace: {{ $root.Release.Namespace }}
{{- end }}

@@ -16,4 +16,7 @@ rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "create", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "create", "list", "update"]
{{- end}}

@@ -141,6 +141,11 @@ classes:
crush_rule_name: storage_tier_ruleset
# Pool chunk size / PG_NUM
chunk_size: 8
# Additional namespaces to allow storage class access (other than where
# installed)
additionalNamespaces:
- default
- kube-public
# Configuration data for the ephemeral pool(s)
ephemeral_pools:
- chunk_size: 8

@@ -36,15 +36,6 @@ data:
- type: job
labels:
app: rbd-provisioner
values:
global:
# TODO (rchurch): Remove after enabling the stx-openstack application to
# use the default system provisioner.
provisioner_name: "ceph.com/rbd-platform"
rbac:
clusterRole: stx-rbd-provisioner
clusterRoleBinding: stx-rbd-provisioner
serviceAccount: stx-rbd-provisioner
source:
type: tar
location: http://172.17.0.1:8080/helm_charts/stx-platform/rbd-provisioner-0.1.0.tgz