Migration to ceph-csi for RBD/CephFS provisioners
Remove old RBD/CephFS provisioners and replace with a currently supported
and evolving set of provisioners based on
https://github.com/ceph/ceph-csi version 3.6.2.

Test Plan:
PASS: AIO-SX app upload/apply/remove/delete/update
PASS: AIO-DX app upload/apply/remove/delete
PASS: Storage 2+2+2 app upload/apply/remove/delete
PASS: Create pvc using storageclass general (rbd) on SX/DX/Storage
PASS: Create pod using rbd pvc on SX/DX/Storage
PASS: Create pvc using storageclass cephfs on SX/DX/Storage
PASS: Create pod using cephfs pvc on SX/DX/Storage

Story: 2009987
Task: 45050

Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
Change-Id: Iffcd56f689aa70788c4c2abbbf2c9a02b5a797cf
parent baccc223f0 · commit 69c37e9978
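
The PVC and pod checks in the test plan translate to a short smoke test along
these lines (a sketch: the storage class names general and cephfs come from
the test plan, while the PVC/pod names and the 1Gi size are illustrative):

    # Sketch of the test plan's PVC check; "general" is the RBD storage
    # class named in the test plan, other names/sizes are illustrative.
    kubectl apply -f - <<EOF
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: test-rbd-pvc
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 1Gi
      storageClassName: general
    EOF
    kubectl get pvc test-rbd-pvc   # expect STATUS Bound once provisioning works
    # Repeat with storageClassName: cephfs to exercise the CephFS class.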
@@ -1 +0,0 @@
-flock
@@ -1,2 +0,0 @@
-stx-platform-helm
-python-k8sapp-platform
@@ -1,2 +1,3 @@
 python-k8sapp-platform
 stx-platform-helm
+platform-helm
platform-helm/debian/deb_folder/changelog (new file, 5 lines)
@@ -0,0 +1,5 @@
platform-helm (1.0-1) unstable; urgency=medium

  * Initial release.

 -- Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>  Wed, 31 Aug 2022 10:45:00 +0000
platform-helm/debian/deb_folder/control (new file, 15 lines)
@@ -0,0 +1,15 @@
Source: platform-helm
Section: libs
Priority: optional
Maintainer: StarlingX Developers <starlingx-discuss@lists.starlingx.io>
Build-Depends: debhelper-compat (= 13),
               helm
Standards-Version: 4.5.1
Homepage: https://www.starlingx.io

Package: platform-helm
Section: libs
Architecture: any
Depends: ${misc:Depends}
Description: StarlingX Ceph CSI Helm Charts
 This package contains helm charts for the Ceph CSI application.
platform-helm/debian/deb_folder/copyright (new file, 41 lines)
@@ -0,0 +1,41 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: platform-helm
Source: https://opendev.org/starlingx/platform-armada-app/

Files: *
Copyright: (c) 2022 Wind River Systems, Inc
License: Apache-2
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 .
 https://www.apache.org/licenses/LICENSE-2.0
 .
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 .
 On Debian-based systems the full text of the Apache version 2.0 license
 can be found in `/usr/share/common-licenses/Apache-2.0'.

# If you want to use GPL v2 or later for the /debian/* files use
# the following clauses, or change it to suit. Delete these two lines
Files: debian/*
Copyright: 2022 Wind River Systems, Inc
License: Apache-2
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 .
 https://www.apache.org/licenses/LICENSE-2.0
 .
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 .
 On Debian-based systems the full text of the Apache version 2.0 license
 can be found in `/usr/share/common-licenses/Apache-2.0'.
platform-helm/debian/deb_folder/patches/0001-ceph-csi-cephfs-replace-appVersion-version.patch (new file, 30 lines)
@@ -0,0 +1,30 @@
From ae9dc263c28c1820446d3680f3fcc712fc6558b2 Mon Sep 17 00:00:00 2001
From: Hediberto Cavalcante da Silva
 <hediberto.cavalcantedasilva@windriver.com>
Date: Thu, 3 Nov 2022 19:41:04 -0300
Subject: [PATCH] ceph-csi-cephfs: replace appVersion/version

Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
---
 charts/ceph-csi-cephfs/Chart.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/charts/ceph-csi-cephfs/Chart.yaml b/charts/ceph-csi-cephfs/Chart.yaml
index 9238c26..2b3f6a0 100644
--- a/charts/ceph-csi-cephfs/Chart.yaml
+++ b/charts/ceph-csi-cephfs/Chart.yaml
@@ -1,10 +1,10 @@
 ---
 apiVersion: v1
-appVersion: canary
+appVersion: 3.6.2
 description: "Container Storage Interface (CSI) driver,
   provisioner, snapshotter and attacher for Ceph cephfs"
 name: ceph-csi-cephfs
-version: 3-canary
+version: 3.6.2
 keywords:
   - ceph
   - cephfs
--
2.17.1
platform-helm/debian/deb_folder/patches/0002-ceph-csi-cephfs-add-default-fields-to-values.yaml.patch (new file, 79 lines)
@@ -0,0 +1,79 @@
From 068b81a7103994dfa0b7e7d14eead3d191733070 Mon Sep 17 00:00:00 2001
From: Hediberto Cavalcante da Silva
 <hediberto.cavalcantedasilva@windriver.com>
Date: Thu, 3 Nov 2022 20:03:05 -0300
Subject: [PATCH] ceph-csi-cephfs: add default fields to values.yaml

Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
---
 charts/ceph-csi-cephfs/values.yaml | 51 ++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)

diff --git a/charts/ceph-csi-cephfs/values.yaml b/charts/ceph-csi-cephfs/values.yaml
index 7375ea6..9507ffd 100644
--- a/charts/ceph-csi-cephfs/values.yaml
+++ b/charts/ceph-csi-cephfs/values.yaml
@@ -276,6 +276,24 @@ storageClass:
   # mountOptions:
   #   - discard
 
+  # Ceph user name to access this pool
+  userId: kube
+  # K8 secret name with key for accessing the Ceph pool
+  userSecretName: ceph-secret-kube
+  # Pool replication
+  replication: 1
+  # Pool crush rule name
+  crush_rule_name: storage_tier_ruleset
+  # Pool chunk size / PG_NUM
+  chunk_size: 8
+  # Additional namespace to allow storage class access (other than where
+  # installed)
+  additionalNamespaces:
+    - default
+    - kube-public
+  # Ceph pools name
+  metadata_pool: kube-cephfs-metadata
+
 secret:
   # Specifies whether the secret should be created
   create: false
@@ -326,3 +344,36 @@ configMapName: ceph-csi-config
 externallyManagedConfigmap: false
 # Name of the configmap used for ceph.conf
 cephConfConfigMapName: ceph-config
+
+#
+# Defaults for storage classes.
+#
+classDefaults:
+  # Define ip addresses of Ceph Monitors
+  monitors:
+    - 192.168.204.2:6789
+  # K8 secret name for the admin context
+  adminId: admin
+  adminSecretName: ceph-secret-admin
+  cephFSNamespace: kube-system
+
+#
+# Defines:
+# - Provisioner's image name including container registry.
+# - CEPH helper image
+#
+images:
+  tags:
+    csi_provisioner: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
+    csi_snapshotter: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0
+    csi_attacher: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
+    csi_resizer: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
+    csi_cephcsi: quay.io/cephcsi/cephcsi:v3.6.2
+    csi_registrar: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
+    cephfs_provisioner_storage_init: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20220802
+  pull_policy: "IfNotPresent"
+  local_registry:
+    active: false
+    exclude:
+      - dep_check
+      - image_repo_sync
--
2.17.1
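
The defaults patched in above are meant to be overridden per deployment. A
hypothetical Helm override sketch (release name, monitor address, and values
are illustrative; the keys mirror the classDefaults/storageClass fields this
patch adds):

    # Hypothetical values override for the chart defaults added above.
    cat > cephfs-overrides.yaml <<EOF
    classDefaults:
      monitors:
        - 192.168.204.2:6789
    storageClass:
      userId: kube
      replication: 2
    EOF
    helm upgrade --install cephfs-provisioner ceph-csi-cephfs-3.6.2.tgz \
      --namespace kube-system -f cephfs-overrides.yaml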
platform-helm/debian/deb_folder/patches/0003-ceph-csi-cephfs-add-storage-init.yaml.patch (new file, 274 lines)
@@ -0,0 +1,274 @@
From 30a69b72f9367802b4ebeb2667db921420328de0 Mon Sep 17 00:00:00 2001
From: Hediberto Cavalcante da Silva
 <hediberto.cavalcantedasilva@windriver.com>
Date: Thu, 3 Nov 2022 19:56:35 -0300
Subject: [PATCH] ceph-csi-cephfs: add storage-init.yaml

Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
---
 .../templates/storage-init.yaml | 254 ++++++++++++++++++
 1 file changed, 254 insertions(+)
 create mode 100644 charts/ceph-csi-cephfs/templates/storage-init.yaml

diff --git a/charts/ceph-csi-cephfs/templates/storage-init.yaml b/charts/ceph-csi-cephfs/templates/storage-init.yaml
new file mode 100644
index 0000000..5c0f00d
--- /dev/null
+++ b/charts/ceph-csi-cephfs/templates/storage-init.yaml
@@ -0,0 +1,254 @@
+{{/*
+#
+# Copyright (c) 2020-2022 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+*/}}
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-rbac-secrets-namespaces
+  labels:
+    app: {{ include "ceph-csi-cephfs.name" . }}
+    chart: {{ include "ceph-csi-cephfs.chart" . }}
+    component: {{ .Values.provisioner.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+    "meta.helm.sh/release-name": {{ .Release.Name }}
+    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
+    "helm.sh/hook": "pre-upgrade, pre-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "create", "list", "update"]
+
+---
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-rbac-secrets-namespaces
+  labels:
+    app: {{ include "ceph-csi-cephfs.name" . }}
+    chart: {{ include "ceph-csi-cephfs.chart" . }}
+    component: {{ .Values.provisioner.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+    "meta.helm.sh/release-name": {{ .Release.Name }}
+    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
+    "helm.sh/hook": "pre-upgrade, pre-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+subjects:
+  - kind: ServiceAccount
+    name: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
+    namespace: {{ .Values.classDefaults.cephFSNamespace }}
+roleRef:
+  kind: ClusterRole
+  name: cephfs-rbac-secrets-namespaces
+  apiGroup: rbac.authorization.k8s.io
+
+---
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: cephfs-storage-init
+  namespace: {{ .Values.classDefaults.cephFSNamespace }}
+  labels:
+    app: {{ include "ceph-csi-cephfs.name" . }}
+    chart: {{ include "ceph-csi-cephfs.chart" . }}
+    component: {{ .Values.provisioner.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+    "meta.helm.sh/release-name": {{ .Release.Name }}
+    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
+    "helm.sh/hook": "pre-upgrade, pre-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+data:
+  ceph.conf: |
+    #
+    # Copyright (c) 2020-2022 Wind River Systems, Inc.
+    #
+    # SPDX-License-Identifier: Apache-2.0
+    #
+
+    [global]
+    # For version 0.55 and beyond, you must explicitly enable
+    # or disable authentication with "auth" entries in [global].
+    auth_cluster_required = none
+    auth_service_required = none
+    auth_client_required = none
+
+    {{ $monitors := .Values.classDefaults.monitors }}
+    {{ range $index, $monitor := $monitors}}
+    [mon.{{- $index }}]
+    mon_addr = {{ $monitor }}
+    {{- end }}
+
+  storage-init.sh: |
+    #
+    # Copyright (c) 2020-2022 Wind River Systems, Inc.
+    #
+    # SPDX-License-Identifier: Apache-2.0
+    #
+
+    #! /bin/bash
+
+    # Copy from read only mount to Ceph config folder
+    cp /tmp/ceph.conf /etc/ceph/
+
+    set -x
+
+    touch /etc/ceph/ceph.client.admin.keyring
+
+    # Check if ceph is accessible
+    echo "===================================="
+    ceph -s
+    if [ $? -ne 0 ]; then
+      echo "Error: Ceph cluster is not accessible, check Pod logs for details."
+      exit 1
+    fi
+
+    set -ex
+    KEYRING=$(ceph auth get-or-create client.${USER_ID} mon "allow r" osd "allow rwx pool=${POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
+    # Set up pool key in Ceph format
+    CEPH_USER_KEYRING=/etc/ceph/ceph.client.${USER_ID}.keyring
+    echo $KEYRING > $CEPH_USER_KEYRING
+    set +ex
+
+    if [ -n "${CEPH_USER_SECRET}" ]; then
+      kubectl get secret -n ${NAMESPACE} ${CEPH_USER_SECRET} 2>/dev/null
+      if [ $? -ne 0 ]; then
+        echo "Create ${CEPH_USER_SECRET} secret"
+        kubectl create secret generic -n ${NAMESPACE} ${CEPH_USER_SECRET} --type="kubernetes.io/cephfs" --from-literal=adminKey=$KEYRING --from-literal=adminID=${ADMIN_ID}
+        if [ $? -ne 0 ]; then
+          echo "Error creating secret ${CEPH_USER_SECRET} in ${NAMESPACE}, exit"
+          exit 1
+        fi
+      else
+        echo "Secret ${CEPH_USER_SECRET} already exists"
+      fi
+
+      # Support creating namespaces and Ceph user secrets for additional
+      # namespaces other than that which the provisioner is installed. This
+      # allows the provisioner to set up and provide PVs for multiple
+      # applications across many namespaces.
+      if [ -n "${ADDITIONAL_NAMESPACES}" ]; then
+        for ns in $(
+          IFS=,
+          echo ${ADDITIONAL_NAMESPACES}
+        ); do
+          kubectl get namespace $ns 2>/dev/null
+          if [ $? -ne 0 ]; then
+            kubectl create namespace $ns
+            if [ $? -ne 0 ]; then
+              echo "Error creating namespace $ns, exit"
+              continue
+            fi
+          fi
+
+          kubectl get secret -n $ns ${CEPH_USER_SECRET} 2>/dev/null
+          if [ $? -ne 0 ]; then
+            echo "Creating secret ${CEPH_USER_SECRET} for namespace $ns"
+            kubectl create secret generic -n $ns ${CEPH_USER_SECRET} --type="kubernetes.io/cephfs" --from-literal=adminKey=$KEYRING --from-literal=adminID=${ADMIN_ID}
+            if [ $? -ne 0 ]; then
+              echo "Error creating secret ${CEPH_USER_SECRET} in $ns, exit"
+            fi
+          else
+            echo "Secret ${CEPH_USER_SECRET} for namespace $ns already exists"
+          fi
+        done
+      fi
+    fi
+
+    ceph osd pool stats ${POOL_NAME} || ceph osd pool create ${POOL_NAME} ${CHUNK_SIZE}
+    ceph osd pool application enable ${POOL_NAME} cephfs
+    ceph osd pool set ${POOL_NAME} size ${POOL_REPLICATION}
+    ceph osd pool set ${POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}
+
+    ceph osd pool stats ${METADATA_POOL_NAME} || ceph osd pool create ${METADATA_POOL_NAME} ${CHUNK_SIZE}
+    ceph osd pool application enable ${METADATA_POOL_NAME} cephfs
+    ceph osd pool set ${METADATA_POOL_NAME} size ${POOL_REPLICATION}
+    ceph osd pool set ${METADATA_POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}
+
+    ceph fs ls | grep ${FS_NAME} || ceph fs new ${FS_NAME} ${METADATA_POOL_NAME} ${POOL_NAME}
+
+    ceph -s
+
+
+---
+
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: cephfs-storage-init
+  namespace: {{ .Values.classDefaults.cephFSNamespace }}
+  labels:
+    app: {{ include "ceph-csi-cephfs.name" . }}
+    chart: {{ include "ceph-csi-cephfs.chart" . }}
+    component: {{ .Values.provisioner.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+    "meta.helm.sh/release-name": {{ .Release.Name }}
+    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
+    "helm.sh/hook": "post-install, pre-upgrade, pre-rollback"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+  backoffLimit: 5
+  template:
+    spec:
+      serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
+      volumes:
+        - name: cephfs-storage-init-configmap-volume
+          configMap:
+            name: cephfs-storage-init
+            defaultMode: 0555
+      containers:
+        - name: storage-init-{{- .Values.storageClass.name }}
+          image: {{ .Values.images.tags.cephfs_provisioner_storage_init | quote }}
+          command: ["/bin/bash", "/tmp/storage-init.sh"]
+          env:
+            - name: NAMESPACE
+              value: {{ .Values.classDefaults.cephFSNamespace }}
+            - name: ADDITIONAL_NAMESPACES
+              value: {{ join "," .Values.storageClass.additionalNamespaces | quote }}
+            - name: CEPH_USER_SECRET
+              value: {{ .Values.storageClass.userSecretName }}
+            - name: USER_ID
+              value: {{ .Values.storageClass.userId }}
+            - name: ADMIN_ID
+              value: {{ .Values.classDefaults.adminId }}
+            - name: POOL_NAME
+              value: {{ .Values.storageClass.pool }}
+            - name: METADATA_POOL_NAME
+              value: {{ .Values.storageClass.metadata_pool }}
+            - name: FS_NAME
+              value: {{ .Values.storageClass.fsName }}
+            - name: CHUNK_SIZE
+              value: {{ .Values.storageClass.chunk_size | quote }}
+            - name: POOL_REPLICATION
+              value: {{ .Values.storageClass.replication | quote }}
+            - name: POOL_CRUSH_RULE_NAME
+              value: {{ .Values.storageClass.crush_rule_name | quote }}
+          volumeMounts:
+            - name: cephfs-storage-init-configmap-volume
+              mountPath: /tmp
+      restartPolicy: OnFailure
+{{- if .Values.provisioner.nodeSelector }}
+      nodeSelector:
+{{ .Values.provisioner.nodeSelector | toYaml | trim | indent 8 }}
+{{- end }}
+{{- with .Values.provisioner.tolerations }}
+      tolerations:
+{{ toYaml . | indent 8 }}
+{{- end}}
--
2.17.1
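
Since the storage-init manifests run as Helm hooks, a quick post-install
check would look roughly like this (a sketch; the job and secret names follow
the chart defaults above):

    # Sketch: confirm the cephfs init hook ran and created the pool secret.
    kubectl -n kube-system get job cephfs-storage-init
    kubectl -n kube-system logs job/cephfs-storage-init | tail
    kubectl -n kube-system get secret ceph-secret-kube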
platform-helm/debian/deb_folder/patches/0004-ceph-csi-cephfs-add-imagePullSecrets-to-ServiceAccount.patch (new file, 37 lines)
@@ -0,0 +1,37 @@
From 1b00f927ef2f3a279ede03d8971d0cdc306fd43a Mon Sep 17 00:00:00 2001
From: Hediberto Cavalcante da Silva
 <hediberto.cavalcantedasilva@windriver.com>
Date: Sun, 6 Nov 2022 18:28:54 -0300
Subject: [PATCH] ceph-csi-cephfs: add imagePullSecrets to ServiceAccount

Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
---
 charts/ceph-csi-cephfs/templates/nodeplugin-serviceaccount.yaml | 2 ++
 .../ceph-csi-cephfs/templates/provisioner-serviceaccount.yaml   | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/charts/ceph-csi-cephfs/templates/nodeplugin-serviceaccount.yaml b/charts/ceph-csi-cephfs/templates/nodeplugin-serviceaccount.yaml
index 5dedaf4..7c93f52 100644
--- a/charts/ceph-csi-cephfs/templates/nodeplugin-serviceaccount.yaml
+++ b/charts/ceph-csi-cephfs/templates/nodeplugin-serviceaccount.yaml
@@ -10,4 +10,6 @@ metadata:
     component: {{ .Values.nodeplugin.name }}
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
+imagePullSecrets:
+  - name: default-registry-key
 {{- end -}}
diff --git a/charts/ceph-csi-cephfs/templates/provisioner-serviceaccount.yaml b/charts/ceph-csi-cephfs/templates/provisioner-serviceaccount.yaml
index c4ba5c1..3d85b0f 100644
--- a/charts/ceph-csi-cephfs/templates/provisioner-serviceaccount.yaml
+++ b/charts/ceph-csi-cephfs/templates/provisioner-serviceaccount.yaml
@@ -10,4 +10,6 @@ metadata:
     component: {{ .Values.provisioner.name }}
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
+imagePullSecrets:
+  - name: default-registry-key
 {{- end -}}
--
2.17.1
platform-helm/debian/deb_folder/patches/0005-ceph-csi-cephfs-add-annotations-to-provisioner-deployment.patch (new file, 29 lines)
@@ -0,0 +1,29 @@
From 727a0bd641df4e6e750242341a9a5b3223b4347a Mon Sep 17 00:00:00 2001
From: Hediberto Cavalcante da Silva
 <hediberto.cavalcantedasilva@windriver.com>
Date: Wed, 9 Nov 2022 16:21:04 -0300
Subject: [PATCH] ceph-csi-cephfs: add annotations to
 provisioner-deployment.yaml

Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
---
 charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml b/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml
index c455b86..91b7042 100644
--- a/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml
+++ b/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml
@@ -9,6 +9,9 @@ metadata:
     component: {{ .Values.provisioner.name }}
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
+  annotations:
+    "helm.sh/hook": "post-upgrade, post-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
 spec:
   replicas: {{ .Values.provisioner.replicaCount }}
   strategy:
--
2.17.1
platform-helm/debian/deb_folder/patches/0006-ceph-csi-rbd-replace-appVersion-version.patch (new file, 30 lines)
@@ -0,0 +1,30 @@
From 90be61a690e99dd5702551164d8d80faa4d2eb54 Mon Sep 17 00:00:00 2001
From: Hediberto Cavalcante da Silva
 <hediberto.cavalcantedasilva@windriver.com>
Date: Thu, 3 Nov 2022 16:26:38 -0300
Subject: [PATCH] ceph-csi-rbd: replace appVersion/version

Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
---
 charts/ceph-csi-rbd/Chart.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/charts/ceph-csi-rbd/Chart.yaml b/charts/ceph-csi-rbd/Chart.yaml
index 107647b..c141529 100644
--- a/charts/ceph-csi-rbd/Chart.yaml
+++ b/charts/ceph-csi-rbd/Chart.yaml
@@ -1,10 +1,10 @@
 ---
 apiVersion: v1
-appVersion: canary
+appVersion: 3.6.2
 description: "Container Storage Interface (CSI) driver,
   provisioner, snapshotter, and attacher for Ceph RBD"
 name: ceph-csi-rbd
-version: 3-canary
+version: 3.6.2
 keywords:
   - ceph
   - rbd
--
2.17.1
platform-helm/debian/deb_folder/patches/0007-ceph-csi-rbd-add-default-fields-to-values.yaml.patch (new file, 78 lines)
@@ -0,0 +1,78 @@
From 6c0d74c0347ec9cff833f9bdf3ea14677e61ecc0 Mon Sep 17 00:00:00 2001
From: Hediberto Cavalcante da Silva
 <hediberto.cavalcantedasilva@windriver.com>
Date: Thu, 3 Nov 2022 20:01:13 -0300
Subject: [PATCH] ceph-csi-rbd: add default fields to values.yaml

Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
---
 charts/ceph-csi-rbd/values.yaml | 50 +++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/charts/ceph-csi-rbd/values.yaml b/charts/ceph-csi-rbd/values.yaml
index 42a06c4..2d9072b 100644
--- a/charts/ceph-csi-rbd/values.yaml
+++ b/charts/ceph-csi-rbd/values.yaml
@@ -406,6 +406,22 @@ storageClass:
   # mountOptions:
   #   - discard
 
+  # Ceph user name to access this pool
+  userId: kube
+  # K8 secret name with key for accessing the Ceph pool
+  userSecretName: ceph-secret-kube
+  # Pool replication
+  replication: 1
+  # Pool crush rule name
+  crush_rule_name: storage_tier_ruleset
+  # Pool chunk size / PG_NUM
+  chunk_size: 8
+  # Additional namespace to allow storage class access (other than where
+  # installed)
+  additionalNamespaces:
+    - default
+    - kube-public
+
 # Mount the host /etc/selinux inside pods to support
 # selinux-enabled filesystems
 selinuxMount: true
@@ -458,3 +474,37 @@ externallyManagedConfigmap: false
 cephConfConfigMapName: ceph-config
 # Name of the configmap used for encryption kms configuration
 kmsConfigMapName: ceph-csi-encryption-kms-config
+
+#
+# Defaults for storage classes.
+#
+classDefaults:
+  # Define ip addresses of Ceph Monitors
+  monitors:
+    - 192.168.204.3:6789
+    - 192.168.204.150:6789
+    - 192.168.204.4:6789
+  # K8 secret name for the admin context
+  adminId: admin
+  adminSecretName: ceph-secret
+
+#
+# Defines:
+# - Provisioner's image name including container registry.
+# - CEPH helper image
+#
+images:
+  tags:
+    csi_provisioner: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
+    csi_snapshotter: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0
+    csi_attacher: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
+    csi_resizer: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
+    csi_cephcsi: quay.io/cephcsi/cephcsi:v3.6.2
+    csi_registrar: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
+    rbd_provisioner_storage_init: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20220802
+  pull_policy: "IfNotPresent"
+  local_registry:
+    active: false
+    exclude:
+      - dep_check
+      - image_repo_sync
--
2.17.1
platform-helm/debian/deb_folder/patches/0008-ceph-csi-rbd-add-storage-init.yaml.patch (new file, 299 lines)
@@ -0,0 +1,299 @@
From d58e048aea5ec70f830f1703245b811d1ee54a7b Mon Sep 17 00:00:00 2001
From: Hediberto Cavalcante da Silva
 <hediberto.cavalcantedasilva@windriver.com>
Date: Thu, 3 Nov 2022 19:54:49 -0300
Subject: [PATCH] ceph-csi-rbd: add storage-init.yaml

Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
---
 .../ceph-csi-rbd/templates/storage-init.yaml | 279 ++++++++++++++++++
 1 file changed, 279 insertions(+)
 create mode 100644 charts/ceph-csi-rbd/templates/storage-init.yaml

diff --git a/charts/ceph-csi-rbd/templates/storage-init.yaml b/charts/ceph-csi-rbd/templates/storage-init.yaml
new file mode 100644
index 0000000..8e8c4de
--- /dev/null
+++ b/charts/ceph-csi-rbd/templates/storage-init.yaml
@@ -0,0 +1,279 @@
+{{/*
+#
+# Copyright (c) 2020-2022 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+*/}}
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-rbac-secrets-namespaces
+  labels:
+    app: {{ include "ceph-csi-rbd.name" . }}
+    chart: {{ include "ceph-csi-rbd.chart" . }}
+    component: {{ .Values.provisioner.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+    "meta.helm.sh/release-name": {{ .Release.Name }}
+    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
+    "helm.sh/hook": "pre-upgrade, pre-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "create", "list", "update"]
+
+---
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-rbac-secrets-namespaces
+  labels:
+    app: {{ include "ceph-csi-rbd.name" . }}
+    chart: {{ include "ceph-csi-rbd.chart" . }}
+    component: {{ .Values.provisioner.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+    "meta.helm.sh/release-name": {{ .Release.Name }}
+    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
+    "helm.sh/hook": "pre-upgrade, pre-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+subjects:
+  - kind: ServiceAccount
+    name: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
+    namespace: {{ .Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: rbd-rbac-secrets-namespaces
+  apiGroup: rbac.authorization.k8s.io
+
+---
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: rbd-storage-init
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ include "ceph-csi-rbd.name" . }}
+    chart: {{ include "ceph-csi-rbd.chart" . }}
+    component: {{ .Values.provisioner.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+    "meta.helm.sh/release-name": {{ .Release.Name }}
+    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
+    "helm.sh/hook": "pre-upgrade, pre-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+data:
+  ceph.conf: |
+    #
+    # Copyright (c) 2020-2022 Wind River Systems, Inc.
+    #
+    # SPDX-License-Identifier: Apache-2.0
+    #
+
+    [global]
+    # For version 0.55 and beyond, you must explicitly enable
+    # or disable authentication with "auth" entries in [global].
+    auth_cluster_required = none
+    auth_service_required = none
+    auth_client_required = none
+
+    {{ $monitors := .Values.classDefaults.monitors }}
+    {{ range $index, $monitor := $monitors}}
+    [mon.{{- $index }}]
+    mon_addr = {{ $monitor }}
+    {{- end }}
+
+  storage-init.sh: |
+    #
+    # Copyright (c) 2020-2022 Wind River Systems, Inc.
+    #
+    # SPDX-License-Identifier: Apache-2.0
+    #
+
+    #! /bin/bash
+
+    # Copy from read only mount to Ceph config folder
+    cp /tmp/ceph.conf /etc/ceph/
+
+    if [ -n "${CEPH_ADMIN_SECRET}" ]; then
+      kubectl get secret -n ${NAMESPACE} | grep ${CEPH_ADMIN_SECRET}
+      if [ $? -ne 0 ]; then
+        echo "Create ${CEPH_ADMIN_SECRET} secret"
+        kubectl create secret generic ${CEPH_ADMIN_SECRET} --type="kubernetes.io/rbd" --from-literal=key= --namespace=${NAMESPACE}
+        if [ $? -ne 0 ]; then
+          echo "Error creating secret ${CEPH_ADMIN_SECRET}, exit"
+          exit 1
+        fi
+      fi
+    fi
+
+    touch /etc/ceph/ceph.client.admin.keyring
+
+    # Check if ceph is accessible
+    echo "===================================="
+    ceph -s
+    if [ $? -ne 0 ]; then
+      echo "Error: Ceph cluster is not accessible, check Pod logs for details."
+      exit 1
+    fi
+
+    set -ex
+    # Make sure the pool exists.
+    ceph osd pool stats ${POOL_NAME} || ceph osd pool create ${POOL_NAME} ${POOL_CHUNK_SIZE}
+    # Set pool configuration.
+    ceph osd pool application enable ${POOL_NAME} rbd
+    ceph osd pool set ${POOL_NAME} size ${POOL_REPLICATION}
+    ceph osd pool set ${POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}
+    set +ex
+
+    if [[ -z "${USER_ID}" && -z "${CEPH_USER_SECRET}" ]]; then
+      echo "No need to create secrets for pool ${POOL_NAME}"
+      exit 0
+    fi
+
+    set -ex
+    KEYRING=$(ceph auth get-or-create client.${USER_ID} mon "allow r" osd "allow rwx pool=${POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
+    # Set up pool key in Ceph format
+    CEPH_USER_KEYRING=/etc/ceph/ceph.client.${USER_ID}.keyring
+    echo $KEYRING > $CEPH_USER_KEYRING
+    set +ex
+
+    if [ -n "${CEPH_USER_SECRET}" ]; then
+      kubectl get secret -n ${NAMESPACE} ${CEPH_USER_SECRET} 2>/dev/null
+      if [ $? -ne 0 ]; then
+        echo "Create ${CEPH_USER_SECRET} secret"
+        kubectl create secret generic -n ${NAMESPACE} ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
+        if [ $? -ne 0 ]; then
+          echo "Error creating secret ${CEPH_USER_SECRET} in ${NAMESPACE}, exit"
+          exit 1
+        fi
+      else
+        echo "Secret ${CEPH_USER_SECRET} already exists"
+      fi
+
+      # Support creating namespaces and Ceph user secrets for additional
+      # namespaces other than that which the provisioner is installed. This
+      # allows the provisioner to set up and provide PVs for multiple
+      # applications across many namespaces.
+      if [ -n "${ADDITIONAL_NAMESPACES}" ]; then
+        for ns in $(IFS=,; echo ${ADDITIONAL_NAMESPACES}); do
+          kubectl get namespace $ns 2>/dev/null
+          if [ $? -ne 0 ]; then
+            kubectl create namespace $ns
+            if [ $? -ne 0 ]; then
+              echo "Error creating namespace $ns, exit"
+              continue
+            fi
+          fi
+
+          kubectl get secret -n $ns ${CEPH_USER_SECRET} 2>/dev/null
+          if [ $? -ne 0 ]; then
+            echo "Creating secret ${CEPH_USER_SECRET} for namespace $ns"
+            kubectl create secret generic -n $ns ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
+            if [ $? -ne 0 ]; then
+              echo "Error creating secret ${CEPH_USER_SECRET} in $ns, exit"
+            fi
+          else
+            echo "Secret ${CEPH_USER_SECRET} for namespace $ns already exists"
+          fi
+        done
+      fi
+    fi
+
+    # Check if pool is accessible using provided credentials
+    echo "====================================="
+    timeout --preserve-status 10 rbd -p ${POOL_NAME} --user ${USER_ID} ls -K $CEPH_USER_KEYRING
+    if [ $? -ne 143 ]; then
+      if [ $? -ne 0 ]; then
+        echo "Error: Ceph pool ${POOL_NAME} is not accessible using credentials for user ${USER_ID}, check Pod logs for details."
+        exit 1
+      else
+        echo "Pool ${POOL_NAME} accessible"
+      fi
+    else
+      echo "rbd command timed out and was sent a SIGTERM. Make sure OSDs have been provisioned."
+    fi
+
+    ceph -s
+
+---
+
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: rbd-storage-init
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ include "ceph-csi-rbd.name" . }}
+    chart: {{ include "ceph-csi-rbd.chart" . }}
+    component: {{ .Values.provisioner.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+    "meta.helm.sh/release-name": {{ .Release.Name }}
+    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
+    "helm.sh/hook": "post-install, pre-upgrade, pre-rollback"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+  backoffLimit: 5
+  activeDeadlineSeconds: 360
+  template:
+    metadata:
+      name: "{{ .Release.Name }}"
+      namespace: {{ .Release.Namespace }}
+      labels:
+        heritage: {{ .Release.Service | quote }}
+        release: {{ .Release.Name | quote }}
+        chart: "{{ .Chart.Name }}-{{- .Chart.Version }}"
+    spec:
+      serviceAccountName: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
+      restartPolicy: OnFailure
+      volumes:
+        - name: rbd-storage-init-configmap-volume
+          configMap:
+            name: rbd-storage-init
+      containers:
+        - name: storage-init-{{- .Values.storageClass.name }}
+          image: {{ .Values.images.tags.rbd_provisioner_storage_init | quote }}
+          command: [ "/bin/bash", "/tmp/storage-init.sh" ]
+          env:
+            - name: NAMESPACE
+              value: {{ .Release.Namespace }}
+            - name: ADDITIONAL_NAMESPACES
+              value: {{ join "," .Values.storageClass.additionalNamespaces | quote }}
+            - name: CEPH_ADMIN_SECRET
+              value: {{ .Values.classDefaults.adminSecretName }}
+            - name: CEPH_USER_SECRET
+              value: {{ .Values.storageClass.userSecretName }}
+            - name: USER_ID
+              value: {{ .Values.storageClass.userId }}
+            - name: POOL_NAME
+              value: {{ .Values.storageClass.pool }}
+            - name: POOL_REPLICATION
+              value: {{ .Values.storageClass.replication | quote }}
+            - name: POOL_CRUSH_RULE_NAME
+              value: {{ .Values.storageClass.crush_rule_name | quote }}
+            - name: POOL_CHUNK_SIZE
+              value: {{ .Values.storageClass.chunk_size | quote }}
+          volumeMounts:
+            - name: rbd-storage-init-configmap-volume
+              mountPath: /tmp
+{{- if .Values.provisioner.nodeSelector }}
+      nodeSelector:
+{{ .Values.provisioner.nodeSelector | toYaml | trim | indent 8 }}
+{{- end }}
+{{- with .Values.provisioner.tolerations }}
+      tolerations:
+{{ toYaml . | indent 8 }}
+{{- end }}
--
2.17.1
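
The access check in this script leans on timeout exit semantics: with
--preserve-status, an rbd command killed by SIGTERM returns 143, which the
script treats as "OSDs not provisioned yet" rather than a credential failure.
A standalone sketch of the same pattern (pool, user, and keyring path are
illustrative):

    # Sketch of the timeout/143 pattern used in storage-init.sh above.
    timeout --preserve-status 10 rbd -p kube-rbd --user kube ls \
        -K /etc/ceph/ceph.client.kube.keyring
    rc=$?
    # rc == 0   : pool accessible with these credentials
    # rc == 143 : rbd hung and was SIGTERMed (likely no OSDs yet)
    # otherwise : credential or configuration error
    echo "rbd exit status: $rc"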
platform-helm/debian/deb_folder/patches/0009-ceph-csi-rbd-add-imagePullSecrets-to-ServiceAccount.patch (new file, 37 lines)
@@ -0,0 +1,37 @@
From 72e79f8c37dd5509a2cfdd6157ea505f0b15b8d4 Mon Sep 17 00:00:00 2001
From: Hediberto Cavalcante da Silva
 <hediberto.cavalcantedasilva@windriver.com>
Date: Sun, 6 Nov 2022 18:25:44 -0300
Subject: [PATCH] ceph-csi-rbd: add imagePullSecrets to ServiceAccount

Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
---
 charts/ceph-csi-rbd/templates/nodeplugin-serviceaccount.yaml  | 2 ++
 charts/ceph-csi-rbd/templates/provisioner-serviceaccount.yaml | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/charts/ceph-csi-rbd/templates/nodeplugin-serviceaccount.yaml b/charts/ceph-csi-rbd/templates/nodeplugin-serviceaccount.yaml
index 36e1ee7..30080ad 100644
--- a/charts/ceph-csi-rbd/templates/nodeplugin-serviceaccount.yaml
+++ b/charts/ceph-csi-rbd/templates/nodeplugin-serviceaccount.yaml
@@ -10,4 +10,6 @@ metadata:
     component: {{ .Values.nodeplugin.name }}
     release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
+imagePullSecrets:
+  - name: default-registry-key
 {{- end -}}
diff --git a/charts/ceph-csi-rbd/templates/provisioner-serviceaccount.yaml b/charts/ceph-csi-rbd/templates/provisioner-serviceaccount.yaml
index 893b43a..cebb2e7 100644
--- a/charts/ceph-csi-rbd/templates/provisioner-serviceaccount.yaml
+++ b/charts/ceph-csi-rbd/templates/provisioner-serviceaccount.yaml
@@ -10,4 +10,6 @@ metadata:
     component: {{ .Values.provisioner.name }}
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
+imagePullSecrets:
+  - name: default-registry-key
 {{- end -}}
--
2.17.1
platform-helm/debian/deb_folder/patches/0010-ceph-csi-rbd-add-annotations-to-provisioner-deployment.patch (new file, 28 lines)
@@ -0,0 +1,28 @@
From c5d76ee99c1728e341a8631d1c06708a63dc6304 Mon Sep 17 00:00:00 2001
From: Hediberto Cavalcante da Silva
 <hediberto.cavalcantedasilva@windriver.com>
Date: Wed, 9 Nov 2022 09:20:34 -0300
Subject: [PATCH] ceph-csi-rbd: add annotations to provisioner-deployment.yaml

Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
---
 charts/ceph-csi-rbd/templates/provisioner-deployment.yaml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml b/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml
index b3b0916..0aab501 100644
--- a/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml
+++ b/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml
@@ -9,6 +9,9 @@ metadata:
     component: {{ .Values.provisioner.name }}
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
+  annotations:
+    "helm.sh/hook": "post-upgrade, post-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
 spec:
   replicas: {{ .Values.provisioner.replicaCount }}
   strategy:
--
2.17.1
platform-helm/debian/deb_folder/patches/series (new file, 10 lines)
@@ -0,0 +1,10 @@
0001-ceph-csi-cephfs-replace-appVersion-version.patch
0002-ceph-csi-cephfs-add-default-fields-to-values.yaml.patch
0003-ceph-csi-cephfs-add-storage-init.yaml.patch
0004-ceph-csi-cephfs-add-imagePullSecrets-to-ServiceAccount.patch
0005-ceph-csi-cephfs-add-annotations-to-provisioner-deployment.patch
0006-ceph-csi-rbd-replace-appVersion-version.patch
0007-ceph-csi-rbd-add-default-fields-to-values.yaml.patch
0008-ceph-csi-rbd-add-storage-init.yaml.patch
0009-ceph-csi-rbd-add-imagePullSecrets-to-ServiceAccount.patch
0010-ceph-csi-rbd-add-annotations-to-provisioner-deployment.patch
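
Because the package uses the 3.0 (quilt) source format (see
debian/source/format below), these ten patches are applied in the order
listed at build time. Inspecting them by hand would look like this (a sketch,
assuming quilt is installed and run from the unpacked ceph-csi source tree):

    # Sketch: apply the patch series manually in an unpacked source tree.
    export QUILT_PATCHES=debian/patches
    quilt series    # list the ten patches in order
    quilt push -a   # apply them all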
platform-helm/debian/deb_folder/platform-helm.install (new file, 1 line)
@@ -0,0 +1 @@
usr/lib/helm/*
platform-helm/debian/deb_folder/rules (new file, 28 lines)
@@ -0,0 +1,28 @@
#!/usr/bin/make -f
export DH_VERBOSE = 1

export ROOT = debian/tmp
export APP_FOLDER = $(ROOT)/usr/lib/helm

%:
	dh $@

override_dh_auto_build:
	mkdir -p ceph-csi

	# Copy ceph-csi charts
	cp -r charts/* ceph-csi

	cp Makefile ceph-csi

	cd ceph-csi && make ceph-csi-rbd
	cd ceph-csi && make ceph-csi-cephfs

override_dh_auto_install:
	# Install the app tar file.
	install -d -m 755 $(APP_FOLDER)
	install -p -D -m 755 ceph-csi/ceph-csi-rbd*.tgz $(APP_FOLDER)
	install -p -D -m 755 ceph-csi/ceph-csi-cephfs*.tgz $(APP_FOLDER)

override_dh_auto_test:
platform-helm/debian/deb_folder/source/format (new file, 1 line)
@@ -0,0 +1 @@
3.0 (quilt)
platform-helm/debian/meta_data.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
debname: platform-helm
debver: 1.0-1
dl_path:
  name: ceph-csi-3.6.2.tar.gz
  url: https://github.com/ceph/ceph-csi/archive/v3.6.2.tar.gz
  md5sum: a5fd6785c521faf0cb7df008a1012381
src_files:
  - platform-helm/files/Makefile
revision:
  dist: $STX_DIST
  PKG_GITREVCOUNT: true
platform-helm/platform-helm/README (new file, 5 lines)
@@ -0,0 +1,5 @@
This directory contains all StarlingX charts that need to be built for this
application. Some charts are common across applications. These common charts
reside in the stx-config/kubernetes/helm-charts directory. To include these in
this application update the build_srpm.data file and use the COPY_LIST_TO_TAR
mechanism to populate these common charts.
platform-helm/platform-helm/files/Makefile (new file, 45 lines)
@@ -0,0 +1,45 @@
#
# Copyright 2017 The Openstack-Helm Authors.
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# It's necessary to set this because some environments don't link sh -> bash.
SHELL := /bin/bash
TASK := build

EXCLUDES := helm-toolkit doc tests tools logs tmp
CHARTS := helm-toolkit $(filter-out $(EXCLUDES), $(patsubst %/.,%,$(wildcard */.)))

.PHONY: $(EXCLUDES) $(CHARTS)

all: $(CHARTS)

$(CHARTS):
	@if [ -d $@ ]; then \
		echo; \
		echo "===== Processing [$@] chart ====="; \
		make $(TASK)-$@; \
	fi

init-%:
	if [ -f $*/Makefile ]; then make -C $*; fi
	if [ -f $*/requirements.yaml ]; then helm dep up $*; fi

lint-%: init-%
	if [ -d $* ]; then helm lint $*; fi
	@echo "Clobber dependencies from packaging"
	rm -v -f $*/requirements.lock $*/requirements.yaml

build-%: lint-%
	if [ -d $* ]; then helm package $*; fi

clean:
	@echo "Clean all build artifacts"
	rm -f */templates/_partials.tpl */templates/_globals.tpl
	rm -f *tgz */charts/*tgz */requirements.lock
	rm -rf */charts */tmpcharts

%:
	@:
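
The Debian rules file above drives this Makefile once per chart; invoked by
hand the flow is roughly as follows (a sketch, assuming helm is on PATH and
the charts sit next to the Makefile as override_dh_auto_build arranges):

    # Sketch: package one chart with the Makefile above.
    cd ceph-csi             # tree prepared by override_dh_auto_build
    make ceph-csi-rbd       # per-chart target: init -> lint -> helm package
    ls ceph-csi-rbd-*.tgz   # packaged chart picked up by dh_auto_install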
@@ -1,10 +0,0 @@
-SRC_DIR="k8sapp_platform"
-OPT_DEP_LIST="$STX_BASE/platform-armada-app/stx-platform-helm"
-
-# Bump The version to be one less that what the version was prior to decoupling
-# as this will align the GITREVCOUNT value to increment the version by one.
-# Remove this (i.e. reset to 0) on then next major version changes when
-# TIS_BASE_SRCREV changes. This version should align with the version of the
-# helm charts in stx-platform-helm
-TIS_BASE_SRCREV=c608f2aaa92064b712e7076e4141a162b78fe995
-TIS_PATCH_VER=GITREVCOUNT+7
@@ -1,57 +0,0 @@
-%global app_name platform-integ-apps
-%global pypi_name k8sapp-platform
-%global sname k8sapp_platform
-
-Name: python-%{pypi_name}
-Version: 1.0
-Release: %{tis_patch_ver}%{?_tis_dist}
-Summary: StarlingX sysinv extensions: Platform Integration K8S app
-
-License: Apache-2.0
-Source0: %{name}-%{version}.tar.gz
-
-BuildArch: noarch
-
-BuildRequires: python-setuptools
-BuildRequires: python-pbr
-BuildRequires: python2-pip
-BuildRequires: python2-wheel
-
-%description
-StarlingX sysinv extensions: Platform Integration K8S app
-
-%prep
-%setup
-# Remove bundled egg-info
-rm -rf %{pypi_name}.egg-info
-
-%build
-export PBR_VERSION=%{version}
-%{__python2} setup.py build
-
-%py2_build_wheel
-
-%install
-export PBR_VERSION=%{version}.%{tis_patch_ver}
-export SKIP_PIP_INSTALL=1
-%{__python2} setup.py install --skip-build --root %{buildroot}
-mkdir -p ${RPM_BUILD_ROOT}/plugins/%{app_name}
-install -m 644 dist/*.whl ${RPM_BUILD_ROOT}/plugins/%{app_name}/
-
-%files
-%{python2_sitelib}/%{sname}
-%{python2_sitelib}/%{sname}-*.egg-info
-
-%package wheels
-Summary: %{name} wheels
-
-%description wheels
-Contains python wheels for %{name}
-
-%files wheels
-/plugins/*
-
-
-%changelog
-* Mon May 11 2020 Robert Church <robert.church@windriver.com>
-- Initial version
@@ -16,13 +16,13 @@ Section: libs
 Architecture: any
 Depends: ${misc:Depends}, ${python3:Depends}
 Description: StarlingX Sysinv Platform Extensions
- This package contains sysinv plugins for the platform armada
- K8S app.
+ This package contains sysinv plugins for the platform K8S
+ apps.
 
 Package: python3-k8sapp-platform-wheels
 Section: libs
 Architecture: any
 Depends: ${misc:Depends}, ${python3:Depends}, python3-wheel
 Description: StarlingX Sysinv Platform Extension Wheels
- This package contains python wheels for the platform armada
- K8S app plugins.
+ This package contains python wheels for the platform K8S
+ app plugins.
@@ -1,2 +1,2 @@
-usr/lib/python3/dist-packages/k8sapp_platform-1.0.0.egg-info/*
+usr/lib/python3/dist-packages/k8sapp_platform-1.0.*.egg-info/*
 usr/lib/python3/dist-packages/k8sapp_platform/*
@@ -2,7 +2,12 @@
 # export DH_VERBOSE = 1
 
 export APP_NAME=platform-integ-apps
-export PBR_VERSION=1.0.0
+
+export DEB_VERSION = $(shell dpkg-parsechangelog | egrep '^Version:' | cut -f 2 -d ' ')
+export MAJOR = $(shell echo $(DEB_VERSION) | cut -f 1 -d '-')
+export MINOR_PATCH = $(shell echo $(DEB_VERSION) | cut -f 4 -d '.')
+export PBR_VERSION=$(MAJOR).$(MINOR_PATCH)
+
 export PYBUILD_NAME=k8sapp-platform
 export SKIP_PIP_INSTALL=1
 export ROOT=debian/tmp
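
Worked through by hand, the new version derivation behaves like this (a
sketch; the 1.0-1 base comes from the changelog, while the .stx.7 suffix is a
hypothetical $STX_DIST/revision expansion):

    # Sketch of the PBR_VERSION derivation in the rules change above.
    DEB_VERSION="1.0-1.stx.7"                            # hypothetical full version
    MAJOR=$(echo $DEB_VERSION | cut -f 1 -d '-')         # -> 1.0
    MINOR_PATCH=$(echo $DEB_VERSION | cut -f 4 -d '.')   # -> 7
    echo "PBR_VERSION=$MAJOR.$MINOR_PATCH"               # -> PBR_VERSION=1.0.7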
@@ -4,4 +4,6 @@ debver: 1.0-1
 src_path: k8sapp_platform
 revision:
   dist: $STX_DIST
-  PKG_GITREVCOUNT: true
+  GITREVCOUNT:
+    BASE_SRCREV: c608f2aaa92064b712e7076e4141a162b78fe995
+    SRC_DIR: ${MY_REPO}/stx/platform-armada-app
@@ -1,19 +0,0 @@
-#
-# Copyright (c) 2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-import yaml
-
-
-class quoted_str(str):
-    pass
-
-
-# force strings to be single-quoted to avoid interpretation as numeric values
-def quoted_presenter(dumper, data):
-    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")
-
-
-yaml.add_representer(quoted_str, quoted_presenter)
@@ -1,43 +0,0 @@
-#
-# Copyright (c) 2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-# All Rights Reserved.
-#
-
-""" System inventory Armada manifest operator."""
-
-from k8sapp_platform.helm.ceph_pools_audit import CephPoolsAuditHelm
-from k8sapp_platform.helm.rbd_provisioner import RbdProvisionerHelm
-from k8sapp_platform.helm.ceph_fs_provisioner import CephFSProvisionerHelm
-
-from sysinv.common import constants
-from sysinv.helm import manifest_base as base
-
-
-class PlatformArmadaManifestOperator(base.ArmadaManifestOperator):
-
-    APP = constants.HELM_APP_PLATFORM
-    ARMADA_MANIFEST = 'platform-integration-manifest'
-
-    CHART_GROUP_CEPH = 'starlingx-ceph-charts'
-    CHART_GROUPS_LUT = {
-        CephPoolsAuditHelm.CHART: CHART_GROUP_CEPH,
-        RbdProvisionerHelm.CHART: CHART_GROUP_CEPH,
-        CephFSProvisionerHelm.CHART: CHART_GROUP_CEPH
-    }
-
-    CHARTS_LUT = {
-        CephPoolsAuditHelm.CHART: 'kube-system-ceph-pools-audit',
-        RbdProvisionerHelm.CHART: 'kube-system-rbd-provisioner',
-        CephFSProvisionerHelm.CHART: 'kube-system-cephfs-provisioner'
-    }
-
-    def platform_mode_manifest_updates(self, dbapi, mode):
-        """ Update the application manifest based on the platform
-
-        :param dbapi: DB api object
-        :param mode: mode to control how to apply the application manifest
-        """
-        pass
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2020 Wind River Systems, Inc.
+# Copyright (c) 2020-2022 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -10,14 +10,13 @@ from sysinv.helm import common

 HELM_CHART_RBD_PROVISIONER = 'rbd-provisioner'
 HELM_CHART_CEPH_POOLS_AUDIT = 'ceph-pools-audit'
-HELM_CHART_HELM_TOOLKIT = 'helm-toolkit'
 HELM_CHART_CEPH_FS_PROVISIONER = 'cephfs-provisioner'
 HELM_NS_CEPH_FS_PROVISIONER = common.HELM_NS_KUBE_SYSTEM
 FLUXCD_HELMRELEASE_RBD_PROVISIONER = 'rbd-provisioner'
 FLUXCD_HELMRELEASE_CEPH_POOLS_AUDIT = 'ceph-pools-audit'
 FLUXCD_HELMRELEASE_CEPH_FS_PROVISIONER = 'cephfs-provisioner'

-HELM_CEPH_FS_PROVISIONER_CLAIM_ROOT = '/pvc-volumes'
+HELM_CEPH_FS_PROVISIONER_VOLUME_NAME_PREFIX = 'pvc-volumes-'
 HELM_CHART_CEPH_FS_PROVISIONER_NAME = 'ceph.com/cephfs'
 K8S_CEPHFS_PROVISIONER_ADMIN_SECRET_NAME = 'ceph-secret-admin'
 K8S_CEPHFS_PROVISIONER_ADMIN_SECRET_NAMESPACE = 'kube-system'
@@ -6,6 +6,8 @@

 from k8sapp_platform.common import constants as app_constants

+import subprocess
+
 from sysinv.common import constants
 from sysinv.common import exception

@@ -139,19 +141,18 @@ class CephFSProvisionerHelm(base.FluxCDBaseHelm):
         def _skip_ceph_mon_2(name):
             return name != constants.CEPH_MON_2

-        classdefaults = {
-            "monitors": self._get_formatted_ceph_monitor_ips(
-                name_filter=_skip_ceph_mon_2),
-            "adminId": app_constants.K8S_CEPHFS_PROVISIONER_USER_NAME,
-            "adminSecretName": app_constants.K8S_CEPHFS_PROVISIONER_ADMIN_SECRET_NAME
-        }
+        def _get_ceph_fsid():
+            process = subprocess.Popen(['timeout', '30', 'ceph', 'fsid'],
+                                       stdout=subprocess.PIPE)
+            stdout, stderr = process.communicate()
+            return stdout.strip()
+
+        bk = ceph_bks[0]

         # Get tier info.
         tiers = self.dbapi.storage_tier_get_list()

-        classes = []
-        for bk in ceph_bks:
-            # Get the ruleset for the new kube-cephfs pools.
+        # Get the ruleset for the new kube-rbd pool.
         tier = next((t for t in tiers if t.forbackendid == bk.id), None)
         if not tier:
             raise Exception("No tier present for backend %s" % bk.name)
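Note: a minimal standalone sketch of the fsid lookup introduced above, assuming the `ceph` CLI is on PATH. subprocess.run's own timeout replaces the external `timeout 30` wrapper, and text=True decodes stdout so the cluster ID is a str rather than bytes:

    import subprocess

    def get_ceph_fsid(timeout=30):
        # Equivalent to: timeout 30 ceph fsid
        result = subprocess.run(['ceph', 'fsid'], stdout=subprocess.PIPE,
                                timeout=timeout, text=True)
        return result.stdout.strip()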
@@ -161,31 +162,52 @@ class CephFSProvisionerHelm(base.FluxCDBaseHelm):
             constants.CEPH_CRUSH_TIER_SUFFIX,
             "-ruleset").replace('-', '_')

-            cls = {
-                "name": K8CephFSProvisioner.get_storage_class_name(bk),
-                "data_pool_name": K8CephFSProvisioner.get_data_pool(bk),
-                "metadata_pool_name": K8CephFSProvisioner.get_metadata_pool(bk),
-                "fs_name": K8CephFSProvisioner.get_fs(bk),
-                "replication": int(bk.capabilities.get("replication")),
-                "crush_rule_name": rule_name,
-                "chunk_size": 64,
-                "userId": K8CephFSProvisioner.get_user_id(bk),
-                "userSecretName": K8CephFSProvisioner.get_user_secret_name(bk),
-                "claim_root": app_constants.HELM_CEPH_FS_PROVISIONER_CLAIM_ROOT,
-                "additionalNamespaces": ['default', 'kube-public']
-            }
-            classes.append(cls)
-
-        global_settings = {
-            "replicas": self._num_replicas_for_platform_app(),
-        }
+        cluster_id = _get_ceph_fsid()
+        user_secret_name = K8CephFSProvisioner.get_user_secret_name(bk)
+
+        class_defaults = {
+            "monitors": self._get_formatted_ceph_monitor_ips(
+                name_filter=_skip_ceph_mon_2),
+            "adminId": app_constants.K8S_CEPHFS_PROVISIONER_USER_NAME,
+            "adminSecretName": constants.K8S_RBD_PROV_ADMIN_SECRET_NAME
+        }
+
+        storage_class = {
+            "clusterID": cluster_id,
+            "name": K8CephFSProvisioner.get_storage_class_name(bk),
+            "fsName": K8CephFSProvisioner.get_fs(bk),
+            "pool": K8CephFSProvisioner.get_data_pool(bk),
+            "metadata_pool": K8CephFSProvisioner.get_metadata_pool(bk),
+            "volumeNamePrefix": app_constants.HELM_CEPH_FS_PROVISIONER_VOLUME_NAME_PREFIX,
+            "provisionerSecret": user_secret_name,
+            "controllerExpandSecret": user_secret_name,
+            "nodeStageSecret": user_secret_name,
+            "userId": K8CephFSProvisioner.get_user_id(bk),
+            "userSecretName": user_secret_name or class_defaults["adminSecretName"],
+            "chunk_size": 64,
+            "replication": int(bk.capabilities.get("replication")),
+            "crush_rule_name": rule_name,
+            "additionalNamespaces": ['default', 'kube-public']
+        }
+
+        provisioner = {
+            "replicaCount": self._num_replicas_for_platform_app()
+        }
+
+        monitors = self._get_formatted_ceph_monitor_ips(
+            name_filter=_skip_ceph_mon_2)
+
+        csi_config = [{
+            "clusterID": cluster_id,
+            "monitors": [monitor for monitor in monitors]
+        }]

         overrides = {
             app_constants.HELM_NS_CEPH_FS_PROVISIONER: {
-                "classdefaults": classdefaults,
-                "classes": classes,
-                "global": global_settings
+                "storageClass": storage_class,
+                "provisioner": provisioner,
+                "csiConfig": csi_config,
+                "classDefaults": class_defaults
             }
         }
@@ -1,36 +0,0 @@
-#
-# Copyright (c) 2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-from k8sapp_platform.common import constants as app_constants
-
-from sysinv.common import exception
-from sysinv.helm import common
-from sysinv.helm import base
-
-
-class HelmToolkitHelm(base.BaseHelm):
-    """Class to encapsulate helm operations for the helm toolkit"""
-
-    CHART = app_constants.HELM_CHART_HELM_TOOLKIT
-    SUPPORTED_NAMESPACES = [
-        common.HELM_NS_HELM_TOOLKIT,
-    ]
-
-    def get_namespaces(self):
-        return self.SUPPORTED_NAMESPACES
-
-    def get_overrides(self, namespace=None):
-        overrides = {
-            common.HELM_NS_HELM_TOOLKIT: {}
-        }
-
-        if namespace in self.SUPPORTED_NAMESPACES:
-            return overrides[namespace]
-        elif namespace:
-            raise exception.InvalidHelmNamespace(chart=self.CHART,
-                                                 namespace=namespace)
-        else:
-            return overrides
@@ -6,6 +6,8 @@

 from k8sapp_platform.common import constants as app_constants

+import subprocess
+
 from sysinv.common import constants
 from sysinv.common import exception
 from sysinv.common.storage_backend_conf import K8RbdProvisioner
@@ -56,18 +58,17 @@ class RbdProvisionerHelm(base.FluxCDBaseHelm):
         def _skip_ceph_mon_2(name):
             return name != constants.CEPH_MON_2

-        classdefaults = {
-            "monitors": self._get_formatted_ceph_monitor_ips(
-                name_filter=_skip_ceph_mon_2),
-            "adminId": constants.K8S_RBD_PROV_USER_NAME,
-            "adminSecretName": constants.K8S_RBD_PROV_ADMIN_SECRET_NAME
-        }
+        def _get_ceph_fsid():
+            process = subprocess.Popen(['timeout', '30', 'ceph', 'fsid'],
+                                       stdout=subprocess.PIPE)
+            stdout, stderr = process.communicate()
+            return stdout.strip()
+
+        bk = ceph_bks[0]

         # Get tier info.
         tiers = self.dbapi.storage_tier_get_list()

-        classes = []
-        for bk in ceph_bks:
         # Get the ruleset for the new kube-rbd pool.
         tier = next((t for t in tiers if t.forbackendid == bk.id), None)
         if not tier:
@@ -78,28 +79,46 @@ class RbdProvisionerHelm(base.FluxCDBaseHelm):
             constants.CEPH_CRUSH_TIER_SUFFIX,
             "-ruleset").replace('-', '_')

-            cls = {
-                "name": K8RbdProvisioner.get_storage_class_name(bk),
-                "pool_name": K8RbdProvisioner.get_pool(bk),
-                "replication": int(bk.capabilities.get("replication")),
-                "crush_rule_name": rule_name,
-                "chunk_size": 64,
-                "userId": K8RbdProvisioner.get_user_id(bk),
-                "userSecretName": K8RbdProvisioner.get_user_secret_name(bk),
-                "additionalNamespaces": ['default', 'kube-public'],
-            }
-            classes.append(cls)
-
-        global_settings = {
-            "replicas": self._num_replicas_for_platform_app(),
-            "defaultStorageClass": constants.K8S_RBD_PROV_STOR_CLASS_NAME
-        }
+        cluster_id = _get_ceph_fsid()
+        user_secret_name = K8RbdProvisioner.get_user_secret_name(bk)
+
+        class_defaults = {
+            "monitors": self._get_formatted_ceph_monitor_ips(
+                name_filter=_skip_ceph_mon_2),
+            "adminId": constants.K8S_RBD_PROV_USER_NAME,
+            "adminSecretName": constants.K8S_RBD_PROV_ADMIN_SECRET_NAME
+        }
+
+        storage_class = {
+            "clusterID": cluster_id,
+            "name": K8RbdProvisioner.get_storage_class_name(bk),
+            "pool": K8RbdProvisioner.get_pool(bk),
+            "provisionerSecret": user_secret_name or class_defaults["adminSecretName"],
+            "controllerExpandSecret": user_secret_name or class_defaults["adminSecretName"],
+            "nodeStageSecret": user_secret_name or class_defaults["adminSecretName"],
+            "userId": K8RbdProvisioner.get_user_id(bk),
+            "userSecretName": user_secret_name,
+            "chunk_size": 64,
+            "replication": int(bk.capabilities.get("replication")),
+            "crush_rule_name": rule_name,
+            "additionalNamespaces": ['default', 'kube-public']
+        }
+
+        provisioner = {
+            "replicaCount": self._num_replicas_for_platform_app()
+        }
+
+        csi_config = [{
+            "clusterID": cluster_id,
+            "monitors": [monitor for monitor in class_defaults["monitors"]]
+        }]

         overrides = {
             common.HELM_NS_RBD_PROVISIONER: {
-                "classdefaults": classdefaults,
-                "classes": classes,
-                "global": global_settings
+                "storageClass": storage_class,
+                "provisioner": provisioner,
+                "csiConfig": csi_config,
+                "classDefaults": class_defaults
             }
         }
@@ -18,7 +18,6 @@ from sysinv.common import constants
 from sysinv.common import exception
 from sysinv.helm import lifecycle_base as base
 from sysinv.helm import lifecycle_utils as lifecycle_utils
-from sysinv.helm.lifecycle_constants import LifecycleConstants

 LOG = logging.getLogger(__name__)

@@ -59,12 +58,6 @@ class PlatformAppLifecycleOperator(base.AppLifecycleOperator):
                hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_POST:
                 return lifecycle_utils.delete_local_registry_secrets(app_op, app, hook_info)

-        # Armada apply retry
-        elif hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_ARMADA_REQUEST:
-            if hook_info.operation == constants.APP_APPLY_OP and \
-                    hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_POST:
-                return self.armada_apply_retry(app_op, app, hook_info)
-
         # Use the default behaviour for other hooks
         super(PlatformAppLifecycleOperator, self).app_lifecycle_actions(context, conductor_obj, app_op, app, hook_info)

@@ -101,23 +94,3 @@ class PlatformAppLifecycleOperator(base.AppLifecycleOperator):
                 vim_progress_status=constants.VIM_SERVICES_ENABLED) < 1:
             raise exception.LifecycleSemanticCheckException(
                 "Not enough hosts in desired state")
-
-    def armada_apply_retry(self, app_op, app, hook_info):
-        """Retry armada apply
-
-        :param app_op: AppOperator object
-        :param app: AppOperator.Application object
-        :param hook_info: LifecycleHookInfo object
-        """
-        if LifecycleConstants.EXTRA not in hook_info:
-            raise exception.LifecycleMissingInfo("Missing {}".format(LifecycleConstants.EXTRA))
-        if LifecycleConstants.RETURN_CODE not in hook_info[LifecycleConstants.EXTRA]:
-            raise exception.LifecycleMissingInfo(
-                "Missing {} {}".format(LifecycleConstants.EXTRA, LifecycleConstants.RETURN_CODE))
-
-        # Raise a specific exception to be caught by the
-        # retry decorator and attempt a re-apply
-        if not hook_info[LifecycleConstants.EXTRA][LifecycleConstants.RETURN_CODE] and \
-                not app_op.is_app_aborted(app.name):
-            LOG.info("%s app failed applying. Retrying." % str(app.name))
-            raise exception.ApplicationApplyFailure(name=app.name)
@@ -33,13 +33,9 @@ systemconfig.helm_applications =
     platform-integ-apps = systemconfig.helm_plugins.platform_integ_apps

 systemconfig.helm_plugins.platform_integ_apps =
-    001_helm-toolkit = k8sapp_platform.helm.helm_toolkit:HelmToolkitHelm
+    001_cephfs-provisioner = k8sapp_platform.helm.ceph_fs_provisioner:CephFSProvisionerHelm
     002_rbd-provisioner = k8sapp_platform.helm.rbd_provisioner:RbdProvisionerHelm
     003_ceph-pools-audit = k8sapp_platform.helm.ceph_pools_audit:CephPoolsAuditHelm
-    004_cephfs-provisioner = k8sapp_platform.helm.ceph_fs_provisioner:CephFSProvisionerHelm
-
-systemconfig.armada.manifest_ops =
-    platform-integ-apps = k8sapp_platform.armada.manifest_platform:PlatformArmadaManifestOperator

 systemconfig.fluxcd.kustomize_ops =
     platform-integ-apps = k8sapp_platform.kustomize.kustomize_platform:PlatformFluxCDKustomizeOperator
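Note: the names above are ordinary setuptools entry points, so sysinv's plugin discovery can be reproduced outside the conductor. A hedged sketch (the group name is taken from the setup.cfg section above; the group= keyword needs Python 3.10+):

    from importlib.metadata import entry_points

    # Lists e.g. 001_cephfs-provisioner -> k8sapp_platform.helm.ceph_fs_provisioner:CephFSProvisionerHelm
    for ep in entry_points(group='systemconfig.helm_plugins.platform_integ_apps'):
        print(ep.name, '->', ep.value)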
@@ -1,18 +0,0 @@
-SRC_DIR="stx-platform-helm"
-
-COPY_LIST="$PKG_BASE/$SRC_DIR/files/*"
-
-
-COPY_LIST_TO_TAR="\
-  $STX_BASE/helm-charts/node-feature-discovery/node-feature-discovery/helm-charts \
-"
-
-OPT_DEP_LIST="$STX_BASE/platform-armada-app/python-k8sapp-platform"
-
-# Bump The version to be one less that what the version was prior to decoupling
-# as this will align the GITREVCOUNT value to increment the version by one.
-# Remove this (i.e. reset to 0) on then next major version changes when
-# TIS_BASE_SRCREV changes. This version should align with the version of the
-# plugins in python-k8sapp-platform
-TIS_BASE_SRCREV=c608f2aaa92064b712e7076e4141a162b78fe995
-TIS_PATCH_VER=GITREVCOUNT+7
@@ -1,102 +0,0 @@
-# Application tunables (maps to metadata)
-%global app_name platform-integ-apps
-%global helm_repo stx-platform
-
-# Install location
-%global app_folder /usr/local/share/applications/helm
-
-# Build variables
-%global helm_folder /usr/lib/helm
-%global toolkit_version 0.2.19
-
-Summary: StarlingX K8S FluxCD application: Platform Integration
-Name: stx-platform-helm
-Version: 1.0
-Release: %{tis_patch_ver}%{?_tis_dist}
-License: Apache-2.0
-Group: base
-Packager: Wind River <info@windriver.com>
-URL: unknown
-
-Source0: %{name}-%{version}.tar.gz
-
-BuildArch: noarch
-
-BuildRequires: helm
-BuildRequires: openstack-helm-infra
-BuildRequires: chartmuseum
-BuildRequires: python-k8sapp-platform
-BuildRequires: python-k8sapp-platform-wheels
-
-%description
-The StarlingX K8S FluxCD application for platform integration
-
-%prep
-%setup
-
-%build
-# Stage helm-toolkit in the local repo
-cp %{helm_folder}/helm-toolkit-%{toolkit_version}.tgz helm-charts/
-
-# Host a server for the charts.
-chartmuseum --debug --port=8879 --context-path='/charts' --storage="local" --storage-local-rootdir="./helm-charts" &
-sleep 2
-helm repo add local http://localhost:8879/charts
-
-# Make the charts. These produce a tgz file
-cd helm-charts
-make rbd-provisioner
-make ceph-pools-audit
-make cephfs-provisioner
-# TODO (rchurch): remove
-make node-feature-discovery
-cd -
-
-# Terminate helm server (the last backgrounded task)
-kill %1
-
-# Create a chart tarball compliant with sysinv kube-app.py
-%define app_staging %{_builddir}/staging
-%define app_tarball_fluxcd %{app_name}-%{version}-%{tis_patch_ver}.tgz
-%define fluxcd_app_path %{_builddir}/%{app_tarball_fluxcd}
-
-# Setup staging
-mkdir -p %{app_staging}
-cp files/metadata.yaml %{app_staging}
-mkdir -p %{app_staging}/charts
-cp helm-charts/*.tgz %{app_staging}/charts
-
-# Populate metadata
-sed -i 's/@APP_NAME@/%{app_name}/g' %{app_staging}/metadata.yaml
-sed -i 's/@APP_VERSION@/%{version}-%{tis_patch_ver}/g' %{app_staging}/metadata.yaml
-sed -i 's/@HELM_REPO@/%{helm_repo}/g' %{app_staging}/metadata.yaml
-
-# Copy the plugins: installed in the buildroot
-mkdir -p %{app_staging}/plugins
-cp /plugins/%{app_name}/*.whl %{app_staging}/plugins
-
-cp -R fluxcd-manifests %{app_staging}/
-
-# calculate checksum of all files in app_staging
-cd %{app_staging}
-find . -type f ! -name '*.md5' -print0 | xargs -0 md5sum > checksum.md5
-# package fluxcd app
-tar -zcf %fluxcd_app_path -C %{app_staging}/ .
-
-# switch back to source root
-cd -
-
-# Cleanup staging
-rm -fr %{app_staging}
-
-%install
-install -d -m 755 %{buildroot}/%{app_folder}
-install -p -D -m 755 %fluxcd_app_path %{buildroot}/%{app_folder}
-install -d -m 755 ${RPM_BUILD_ROOT}/opt/extracharts
-# TODO (rchurch): remove
-install -p -D -m 755 helm-charts/node-feature-discovery-*.tgz ${RPM_BUILD_ROOT}/opt/extracharts
-
-%files
-%defattr(-,root,root,-)
-/opt/extracharts/*
-%{app_folder}/%{app_tarball_fluxcd}
@@ -3,12 +3,10 @@ Section: libs
 Priority: optional
 Maintainer: StarlingX Developers <starlingx-discuss@lists.starlingx.io>
 Build-Depends: debhelper-compat (= 13),
-               chartmuseum,
                helm,
-               openstack-helm-infra,
-               procps,
                python3-k8sapp-platform,
-               python3-k8sapp-platform-wheels
+               python3-k8sapp-platform-wheels,
+               platform-helm
 Standards-Version: 4.5.1
 Homepage: https://www.starlingx.io

@@ -17,5 +15,4 @@ Section: libs
 Architecture: any
 Depends: ${misc:Depends}
 Description: StarlingX Platform FluxCD Helm Charts
- This package contains FluxCD helm charts for the platform armada
- application.
+ This package contains FluxCD helm charts for the platform applications.
@@ -6,52 +6,31 @@ export APP_FOLDER = $(ROOT)/usr/local/share/applications/helm
 export EXTRA_CHARTS = $(ROOT)/opt/extracharts

 export DEB_VERSION = $(shell dpkg-parsechangelog | egrep '^Version:' | cut -f 2 -d ' ')
-export MAJOR = $(shell echo $(DEB_VERSION) | cut -f 1 -d '.')
-export MINOR_PATCH = $(shell echo $(DEB_VERSION) | cut -f 2 -d '.')
+export MAJOR = $(shell echo $(DEB_VERSION) | cut -f 1 -d '-')
+export MINOR_PATCH = $(shell echo $(DEB_VERSION) | cut -f 4 -d '.')

 export APP_NAME = platform-integ-apps
-export APP_VERSION = $(MAJOR).$(MINOR_PATCH)
+export APP_VERSION = $(MAJOR)-$(MINOR_PATCH)
 export APP_TARBALL_FLUXCD = $(APP_NAME)-$(APP_VERSION).tgz
 export HELM_FOLDER = /usr/lib/helm
 export HELM_REPO = stx-platform
 export STAGING_FLUXCD = staging-fluxcd
-export TOOLKIT_VERSION = 0.2.19

 %:
 	dh $@

 override_dh_auto_build:

-	############
-	# COMMON #
-	############
-	# Stage helm-toolkit in the local repo.
-	cp $(HELM_FOLDER)/helm-toolkit-$(TOOLKIT_VERSION).tgz helm-charts/
-
-	# Host a server for the helm charts.
-	chartmuseum --debug --port=8879 --context-path='/charts' --storage="local" \
-		--storage-local-rootdir="./helm-charts" &
-	sleep 2
-	helm repo add local http://localhost:8879/charts
-
 	# Create the TGZ file.
-	cd helm-charts && make rbd-provisioner
 	cd helm-charts && make ceph-pools-audit
-	cd helm-charts && make cephfs-provisioner
 	cd helm-charts && make node-feature-discovery

-	# Terminate the helm chart server.
-	pkill chartmuseum
-
-	############
-	# FLUXCD #
-	############
 	# Setup the staging directory.
 	mkdir -p $(STAGING_FLUXCD)
 	cp files/metadata.yaml $(STAGING_FLUXCD)
 	cp -Rv fluxcd-manifests $(STAGING_FLUXCD)
 	mkdir -p $(STAGING_FLUXCD)/charts
 	cp helm-charts/*.tgz $(STAGING_FLUXCD)/charts
+	cp /usr/lib/helm/ceph-csi-*.tgz $(STAGING_FLUXCD)/charts

 	# Populate metadata.
 	sed -i 's/@APP_NAME@/$(APP_NAME)/g' $(STAGING_FLUXCD)/metadata.yaml
@@ -6,4 +6,6 @@ src_files:
   - ${MY_REPO}/stx/helm-charts/node-feature-discovery/node-feature-discovery/helm-charts
 revision:
   dist: $STX_DIST
-  PKG_GITREVCOUNT: true
+  GITREVCOUNT:
+    BASE_SRCREV: c608f2aaa92064b712e7076e4141a162b78fe995
+    SRC_DIR: ${MY_REPO}/stx/platform-armada-app
@@ -1,3 +0,0 @@
-apiVersion: v1
-entries: {}
-generated: 2019-01-07T12:33:46.098166523-06:00
@@ -1,12 +0,0 @@
-apiVersion: v1
-generated: 2019-01-02T15:19:36.215111369-06:00
-repositories:
-- caFile: ""
-  cache: /builddir/.helm/repository/cache/local-index.yaml
-  certFile: ""
-  keyFile: ""
-  name: local
-  password: ""
-  url: http://127.0.0.1:8879/charts
-  username: ""
-
@@ -8,3 +8,6 @@ tolerations:
   - key: "node-role.kubernetes.io/master"
     operator: "Exists"
     effect: "NoSchedule"
+  - key: "node-role.kubernetes.io/control-plane"
+    operator: "Exists"
+    effect: "NoSchedule"
@@ -4,8 +4,104 @@
 # SPDX-License-Identifier: Apache-2.0
 #

-global:
+nameOverride: cephfs-provisioner
+fullnameOverride: cephfs-provisioner
+
+rbac:
+  create: true
+
+provisioner:
+  name: cephfs-provisioner
+  fullnameOverride: cephfs-provisioner
+  strategy:
+    type: Recreate
+  priorityClassName:
+  nodeSelector: { "node-role.kubernetes.io/control-plane": "" }
   tolerations:
+    - key: "node-role.kubernetes.io/control-plane"
+      operator: "Exists"
+      effect: "NoSchedule"
     - key: "node-role.kubernetes.io/master"
       operator: "Exists"
       effect: "NoSchedule"
+  httpMetrics:
+    enabled: false
+    service:
+      enabled: false
+
+nodeplugin:
+  name: cephfs-nodeplugin
+  nameOverride: cephfs-nodeplugin
+  fullnameOverride: cephfs-nodeplugin
+  nodeSelector: { "kubernetes.io/os": "linux" }
+  tolerations:
+    - operator: "Exists"
+  httpMetrics:
+    enabled: false
+    service:
+      enabled: false
+
+serviceAccounts:
+  nodeplugin:
+    create: true
+    name: cephfs-nodeplugin
+  provisioner:
+    create: true
+    name: cephfs-provisioner
+
+classDefaults:
+  cephFSNamespace: kube-system
+
+storageClass:
+  create: true
+  provisionerSecretNamespace: kube-system
+  controllerExpandSecretNamespace: kube-system
+  nodeStageSecretNamespace: kube-system
+  annotations: {
+    "storageclass.kubernetes.io/is-default-class": "false",
+    "helm.sh/hook": "post-upgrade, post-install",
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+  }
+  mountOptions:
+    - debug
+
+topology:
+  enabled: false
+
+configMapName: cephfs-csiplugin-config
+cephConfConfigMapName: cephfs-ceph-config
+
+cephconf: |-
+  [global]
+    auth_cluster_required = none
+    auth_service_required = none
+    auth_client_required = none
+
+    # Workaround for http://tracker.ceph.com/issues/23446
+    fuse_set_user_groups = false
+
+    # ceph-fuse which uses libfuse2 by default has write buffer size of 2KiB
+    # adding 'fuse_big_writes = true' option by default to override this limit
+    # see https://github.com/ceph/ceph-csi/issues/1928
+    fuse_big_writes = true
+
+#
+# Defines:
+# - Provisioner's image name including container registry.
+# - CEPH helper image
+#
+images:
+  tags:
+    csi_provisioner: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
+    csi_snapshotter: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0
+    csi_attacher: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
+    csi_resizer: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
+    csi_cephcsi: quay.io/cephcsi/cephcsi:v3.6.2
+    csi_registrar: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
+    cephfs_provisioner_storage_init: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20220802
+  pull_policy: "IfNotPresent"
+  local_registry:
+    active: false
+    exclude:
+      - dep_check
+      - image_repo_sync
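Note: a hedged usage sketch for the CephFS class configured above. The claim name is hypothetical and the storage class name is an assumption (the real name is generated per backend by the k8sapp_platform plugin):

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: test-cephfs-pvc        # hypothetical name
    spec:
      accessModes:
        - ReadWriteMany            # CephFS supports RWX claims
      resources:
        requests:
          storage: 1Gi
      storageClassName: cephfs     # assumed class name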
@@ -14,8 +14,8 @@ spec:
   releaseName: stx-cephfs-provisioner
   chart:
     spec:
-      chart: cephfs-provisioner
-      version: 0.1.0
+      chart: ceph-csi-cephfs
+      version: 3.6.2
       sourceRef:
         kind: HelmRepository
         name: stx-platform
|
|||||||
releaseName: stx-rbd-provisioner
|
releaseName: stx-rbd-provisioner
|
||||||
chart:
|
chart:
|
||||||
spec:
|
spec:
|
||||||
chart: rbd-provisioner
|
chart: ceph-csi-rbd
|
||||||
version: 0.1.0
|
version: 3.6.2
|
||||||
sourceRef:
|
sourceRef:
|
||||||
kind: HelmRepository
|
kind: HelmRepository
|
||||||
name: stx-platform
|
name: stx-platform
|
||||||
|
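Note: a hedged way to confirm the chart swap once the app is applied (the HelmRelease object names are not shown in these hunks, so list the namespace instead of naming them):

    kubectl -n kube-system get helmreleases
    # The cephfs/rbd releases should report charts ceph-csi-cephfs and
    # ceph-csi-rbd at version 3.6.2 once FluxCD reconciles them.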
@@ -4,8 +4,104 @@
 # SPDX-License-Identifier: Apache-2.0
 #

-global:
+nameOverride: rbd-provisioner
+fullnameOverride: rbd-provisioner
+
+rbac:
+  create: true
+
+provisioner:
+  name: rbd-provisioner
+  fullnameOverride: rbd-provisioner
+  strategy:
+    type: Recreate
+  priorityClassName:
+  nodeSelector: { "node-role.kubernetes.io/control-plane": "" }
   tolerations:
+    - key: "node-role.kubernetes.io/control-plane"
+      operator: "Exists"
+      effect: "NoSchedule"
     - key: "node-role.kubernetes.io/master"
       operator: "Exists"
       effect: "NoSchedule"
+  httpMetrics:
+    enabled: false
+    service:
+      enabled: false
+  provisioner:
+    image:
+      repository: k8s.gcr.io/sig-storage/csi-provisioner
+      tag: v3.1.0
+      pullPolicy: IfNotPresent
+    resources: {}
+
+nodeplugin:
+  name: rbd-nodeplugin
+  nameOverride: rbd-nodeplugin
+  fullnameOverride: rbd-nodeplugin
+  nodeSelector: { "kubernetes.io/os": "linux" }
+  tolerations:
+    - operator: "Exists"
+  httpMetrics:
+    enabled: false
+    service:
+      enabled: false
+
+serviceAccounts:
+  nodeplugin:
+    create: true
+    name: rbd-nodeplugin
+  provisioner:
+    create: true
+    name: rbd-provisioner
+
+storageClass:
+  create: true
+  annotations: {
+    "storageclass.kubernetes.io/is-default-class": "true",
+    "helm.sh/hook": "post-upgrade, post-install",
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+  }
+  mountOptions:
+    - discard
+
+topology:
+  enabled: false
+
+configMapName: rbd-csiplugin-config
+cephConfConfigMapName: rbd-ceph-config
+
+cephconf: |-
+  [global]
+    auth_cluster_required = none
+    auth_service_required = none
+    auth_client_required = none
+
+    # Workaround for http://tracker.ceph.com/issues/23446
+    fuse_set_user_groups = false
+
+    # ceph-fuse which uses libfuse2 by default has write buffer size of 2KiB
+    # adding 'fuse_big_writes = true' option by default to override this limit
+    # see https://github.com/ceph/ceph-csi/issues/1928
+    fuse_big_writes = true
+
+#
+# Defines:
+# - Provisioner's image name including container registry.
+# - CEPH helper image
+#
+images:
+  tags:
+    csi_provisioner: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
+    csi_snapshotter: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0
+    csi_attacher: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
+    csi_resizer: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
+    csi_cephcsi: quay.io/cephcsi/cephcsi:v3.6.2
+    csi_registrar: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
+    rbd_provisioner_storage_init: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20220802
+  pull_policy: "IfNotPresent"
+  local_registry:
+    active: false
+    exclude:
+      - dep_check
+      - image_repo_sync
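Note: because the RBD class above is annotated "storageclass.kubernetes.io/is-default-class": "true", a hedged usage sketch is a PVC with no storageClassName at all (the claim name is hypothetical):

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: test-rbd-pvc           # hypothetical name
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
      # storageClassName omitted: the default (RBD) class is used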
@@ -1,9 +0,0 @@
-#
-# Copyright (c) 2019 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-dependencies:
-  - name: helm-toolkit
-    repository: http://localhost:8879/charts
-    version: ">= 0.1.0"
@@ -1,55 +0,0 @@
-#!/bin/bash
-
-{{/*
-#
-# Copyright (c) 2019 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-*/}}
-
-ceph -s
-if [ $? -ne 0 ]; then
-    echo "Error: Ceph cluster is not accessible, check Pod logs for details."
-    exit 1
-fi
-
-touch /etc/ceph/ceph.client.admin.keyring
-
-echo "RBD_POOL_CRUSH_RULE_NAME: ${RBD_POOL_CRUSH_RULE_NAME}"
-if [ -z "${RBD_POOL_CRUSH_RULE_NAME}" ]; then
-    echo "Error: No Ceph crush rule name specified"
-    exit 1
-fi
-
-ceph osd crush rule ls | grep -q "${RBD_POOL_CRUSH_RULE_NAME}"
-if [ $? -ne 0 ]; then
-    echo "Error: Ceph crush rule ${RBD_POOL_CRUSH_RULE_NAME} not found, exit"
-    exit 1
-fi
-
-POOLS=( $(ceph osd pool ls) )
-
-for pool in "${POOLS[@]}"; do
-    echo "Check for pool name: $pool"
-
-    pool_rule=$(ceph osd pool get $pool crush_rule | awk '{print $2}')
-    echo "Pool crush rule name: ${pool_rule}"
-    if [ "${pool_rule}" != "${RBD_POOL_CRUSH_RULE_NAME}" ]; then
-        continue
-    fi
-
-    pool_size=$(ceph osd pool get $pool size | awk '{print $2}')
-    pool_min_size=$(ceph osd pool get $pool min_size | awk '{print $2}')
-
-    echo "===> pool_size: ${pool_size} pool_min_size: ${pool_min_size}"
-    if [ "${pool_size}" != "${RBD_POOL_REPLICATION}" ]; then
-        echo "Set size for $pool to ${RBD_POOL_REPLICATION}"
-        ceph osd pool set $pool size "${RBD_POOL_REPLICATION}"
-    fi
-
-    if [ "${pool_min_size}" != "${RBD_POOL_MIN_REPLICATION}" ]; then
-        echo "Set min_size for $pool to ${RBD_POOL_MIN_REPLICATION}"
-        ceph osd pool set $pool min_size "${RBD_POOL_MIN_REPLICATION}"
-    fi
-done
@@ -1,6 +1,6 @@
 {{/*
 #
-# Copyright (c) 2019 Wind River Systems, Inc.
+# Copyright (c) 2019-2022 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -15,5 +15,60 @@ metadata:
   name: ceph-pools-bin
 data:
   ceph-pools-audit.sh: |
-{{ tuple "bin/_ceph-pools-audit.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+    #!/bin/bash
+
+    {{/*
+    #
+    # Copyright (c) 2019 Wind River Systems, Inc.
+    #
+    # SPDX-License-Identifier: Apache-2.0
+    #
+    */}}
+
+    ceph -s
+    if [ $? -ne 0 ]; then
+        echo "Error: Ceph cluster is not accessible, check Pod logs for details."
+        exit 1
+    fi
+
+    touch /etc/ceph/ceph.client.admin.keyring
+
+    echo "RBD_POOL_CRUSH_RULE_NAME: ${RBD_POOL_CRUSH_RULE_NAME}"
+    if [ -z "${RBD_POOL_CRUSH_RULE_NAME}" ]; then
+        echo "Error: No Ceph crush rule name specified"
+        exit 1
+    fi
+
+    ceph osd crush rule ls | grep -q "${RBD_POOL_CRUSH_RULE_NAME}"
+    if [ $? -ne 0 ]; then
+        echo "Error: Ceph crush rule ${RBD_POOL_CRUSH_RULE_NAME} not found, exit"
+        exit 1
+    fi
+
+    POOLS=( $(ceph osd pool ls) )
+
+    for pool in "${POOLS[@]}"; do
+        echo "Check for pool name: $pool"
+
+        pool_rule=$(ceph osd pool get $pool crush_rule | awk '{print $2}')
+        echo "Pool crush rule name: ${pool_rule}"
+        if [ "${pool_rule}" != "${RBD_POOL_CRUSH_RULE_NAME}" ]; then
+            continue
+        fi
+
+        pool_size=$(ceph osd pool get $pool size | awk '{print $2}')
+        pool_min_size=$(ceph osd pool get $pool min_size | awk '{print $2}')
+
+        echo "===> pool_size: ${pool_size} pool_min_size: ${pool_min_size}"
+        if [ "${pool_size}" != "${RBD_POOL_REPLICATION}" ]; then
+            echo "Set size for $pool to ${RBD_POOL_REPLICATION}"
+            ceph osd pool set $pool size "${RBD_POOL_REPLICATION}"
+        fi
+
+        if [ "${pool_min_size}" != "${RBD_POOL_MIN_REPLICATION}" ]; then
+            echo "Set min_size for $pool to ${RBD_POOL_MIN_REPLICATION}"
+            ceph osd pool set $pool min_size "${RBD_POOL_MIN_REPLICATION}"
+        fi
+    done
+
 {{- end }}
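Note on the awk parsing the inlined audit script relies on: `ceph osd pool get <pool> <var>` prints a single "key: value" pair, so the second whitespace-separated field is the value. A short example, assuming a pool named kube-rbd:

    $ ceph osd pool get kube-rbd size
    size: 2
    $ ceph osd pool get kube-rbd size | awk '{print $2}'
    2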
@@ -1,6 +1,6 @@
 {{/*
 #
-# Copyright (c) 2020 Wind River Systems, Inc.
+# Copyright (c) 2020-2022 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -10,7 +10,17 @@
 {{- $envAll := . }}

 {{- $serviceAccountName := "ceph-pools-audit" }}
-{{ tuple $envAll "job_ceph_pools_audit" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ $serviceAccountName }}
+  namespace: {{ $envAll.Release.namespace }}
+imagePullSecrets:
+  - name: default-registry-key
+
 ---
 #
 # The CronJob makes sure all the Ceph pools have the right replication,
@@ -8,7 +8,7 @@ replicaCount: 1

 labels:
   job:
-    node_selector_key: node-role.kubernetes.io/master
+    node_selector_key: node-role.kubernetes.io/control-plane
     node_selector_value: ""

 name: ceph-pools-audit
@@ -26,7 +26,7 @@ conf:

 images:
   tags:
-    ceph_config_helper: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20201223
+    ceph_config_helper: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20220802
   pullPolicy: "IfNotPresent"
   local_registry:
     active: false
@@ -59,7 +59,7 @@ jobs:

 resources: {}

-nodeSelector: { node-role.kubernetes.io/master: "" }
+nodeSelector: { node-role.kubernetes.io/control-plane: "" }

 tolerations: []

@@ -1,11 +0,0 @@
-#
-# Copyright (c) 2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-apiVersion: v1
-appVersion: "1.0"
-description: CephFS provisioner for Kubernetes
-name: cephfs-provisioner
-version: 0.1.0
@@ -1,9 +0,0 @@
-#
-# Copyright (c) 2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-dependencies:
-  - name: helm-toolkit
-    repository: http://localhost:8879/charts
-    version: ">= 0.1.0"
@@ -1,86 +0,0 @@
-#
-# Copyright (c) 2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#! /bin/bash
-set -x
-
-{{ $classes := .Values.classes}}
-
-touch /etc/ceph/ceph.client.admin.keyring
-
-# Check if ceph is accessible
-echo "===================================="
-ceph -s
-if [ $? -ne 0 ]; then
-    echo "Error: Ceph cluster is not accessible, check Pod logs for details."
-    exit 1
-fi
-
-set -ex
-KEYRING=$(ceph auth get-or-create client.${USER_ID} mon "allow r" osd "allow rwx pool=${DATA_POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
-# Set up pool key in Ceph format
-CEPH_USER_KEYRING=/etc/ceph/ceph.client.${USER_ID}.keyring
-echo $KEYRING >$CEPH_USER_KEYRING
-set +ex
-
-if [ -n "${CEPH_USER_SECRET}" ]; then
-    kubectl get secret -n ${NAMESPACE} ${CEPH_USER_SECRET} 2>/dev/null
-    if [ $? -ne 0 ]; then
-        echo "Create ${CEPH_USER_SECRET} secret"
-        kubectl create secret generic -n ${NAMESPACE} ${CEPH_USER_SECRET} --type="kubernetes.io/cephfs" --from-literal=key=$KEYRING
-        if [ $? -ne 0 ]; then
-            echo "Error creating secret ${CEPH_USER_SECRET} in ${NAMESPACE}, exit"
-            exit 1
-        fi
-    else
-        echo "Secret ${CEPH_USER_SECRET} already exists"
-    fi
-
-    # Support creating namespaces and Ceph user secrets for additional
-    # namespaces other than that which the provisioner is installed. This
-    # allows the provisioner to set up and provide PVs for multiple
-    # applications across many namespaces.
-    if [ -n "${ADDITIONAL_NAMESPACES}" ]; then
-        for ns in $(
-            IFS=,
-            echo ${ADDITIONAL_NAMESPACES}
-        ); do
-            kubectl get namespace $ns 2>/dev/null
-            if [ $? -ne 0 ]; then
-                kubectl create namespace $ns
-                if [ $? -ne 0 ]; then
-                    echo "Error creating namespace $ns, exit"
-                    continue
-                fi
-            fi
-
-            kubectl get secret -n $ns ${CEPH_USER_SECRET} 2>/dev/null
-            if [ $? -ne 0 ]; then
-                echo "Creating secret ${CEPH_USER_SECRET} for namespace $ns"
-                kubectl create secret generic -n $ns ${CEPH_USER_SECRET} --type="kubernetes.io/cephfs" --from-literal=key=$KEYRING
-                if [ $? -ne 0 ]; then
-                    echo "Error creating secret ${CEPH_USER_SECRET} in $ns, exit"
-                fi
-            else
-                echo "Secret ${CEPH_USER_SECRET} for namespace $ns already exists"
-            fi
-        done
-    fi
-fi
-
-ceph osd pool stats ${DATA_POOL_NAME} || ceph osd pool create ${DATA_POOL_NAME} ${CHUNK_SIZE}
-ceph osd pool application enable ${DATA_POOL_NAME} cephfs
-ceph osd pool set ${DATA_POOL_NAME} size ${POOL_REPLICATION}
-ceph osd pool set ${DATA_POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}
-
-ceph osd pool stats ${METADATA_POOL_NAME} || ceph osd pool create ${METADATA_POOL_NAME} ${CHUNK_SIZE}
-ceph osd pool application enable ${METADATA_POOL_NAME} cephfs
-ceph osd pool set ${METADATA_POOL_NAME} size ${POOL_REPLICATION}
-ceph osd pool set ${METADATA_POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}
-
-ceph fs ls | grep ${FS_NAME} || ceph fs new ${FS_NAME} ${METADATA_POOL_NAME} ${DATA_POOL_NAME}
-
-ceph -s
@@ -1,19 +0,0 @@
-#
-# Copyright (c) 2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-[global]
-# For version 0.55 and beyond, you must explicitly enable
-# or disable authentication with "auth" entries in [global].
-auth_cluster_required = none
-auth_service_required = none
-auth_client_required = none
-
-{{ $defaults := .Values.classdefaults}}
-
-{{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
-[mon.{{- $index }}]
-mon_addr = {{ $element }}
-{{- end }}
@@ -1,102 +0,0 @@
-{{/*
-#
-# Copyright (c) 2020-2021 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-*/}}
-
-{{- $defaults := .Values.classdefaults }}
-{{- $cephfs_provisioner_storage_init := .Values.images.tags.cephfs_provisioner_storage_init }}
-
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: ceph-config-file
-  namespace: {{ $defaults.cephFSNamespace }}
-  annotations:
-    "helm.sh/hook": "pre-upgrade, pre-install"
-    "helm.sh/hook-delete-policy": "before-hook-creation"
-data:
-  ceph.conf: |
-{{ tuple "conf/_ceph-conf.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: cephfs-storage-init
-  namespace: {{ $defaults.cephFSNamespace }}
-data:
-  storage-init.sh: |
-{{ tuple "bin/_storage_init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-
----
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: cephfs-storage-init
-  namespace: {{ $defaults.cephFSNamespace }}
-  annotations:
-    "helm.sh/hook": "post-install, pre-upgrade, pre-rollback"
-    "helm.sh/hook-delete-policy": "before-hook-creation"
-spec:
-  backoffLimit: 5
-  template:
-    spec:
-      serviceAccountName: {{ $defaults.provisionerConfigName }}
-      volumes:
-        - name: cephfs-storage-init
-          configMap:
-            name: cephfs-storage-init
-            defaultMode: 0555
-        - name: ceph-config
-          configMap:
-            name: ceph-config-file
-            defaultMode: 0555
-      containers:
-        {{- range $classConfig := .Values.classes }}
-        - name: storage-init-{{- $classConfig.name }}
-          image: {{ $cephfs_provisioner_storage_init | quote }}
-          command: ["/bin/bash", "/tmp/storage-init.sh"]
-          env:
-            - name: NAMESPACE
-              value: {{ $defaults.cephFSNamespace }}
-            - name: ADDITIONAL_NAMESPACES
-              value: {{ include "helm-toolkit.utils.joinListWithComma" $classConfig.additionalNamespaces | quote }}
-            - name: CEPH_USER_SECRET
-              value: {{ $defaults.adminSecretName }}
-            - name: USER_ID
-              value: {{ $classConfig.userId }}
-            - name: DATA_POOL_NAME
-              value: {{ $classConfig.data_pool_name }}
-            - name: METADATA_POOL_NAME
-              value: {{ $classConfig.metadata_pool_name }}
-            - name: FS_NAME
-              value: {{ $classConfig.fs_name }}
-            - name: CHUNK_SIZE
-              value: {{ $classConfig.chunk_size | quote }}
-            - name: POOL_REPLICATION
-              value: {{ $classConfig.replication | quote }}
-            - name: POOL_CRUSH_RULE_NAME
-              value: {{ $classConfig.crush_rule_name | quote }}
-          volumeMounts:
-            - name: cephfs-storage-init
-              mountPath: /tmp/storage-init.sh
-              subPath: storage-init.sh
-              readOnly: true
-            - name: ceph-config
-              mountPath: /etc/ceph/ceph.conf
-              subPath: ceph.conf
-              readOnly: true
-        {{- end }}
-      restartPolicy: OnFailure
-      {{- if .Values.global.nodeSelector }}
-      nodeSelector:
-{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
-      {{- end }}
-      {{- with .Values.global.tolerations }}
-      tolerations:
-{{ toYaml . | indent 8 }}
-      {{- end}}
@@ -1,64 +0,0 @@
-{{/*
-#
-# Copyright (c) 2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-*/}}
-
-{{- $defaults := .Values.classdefaults }}
-{{- $cephfs_provisioner_image := .Values.images.tags.cephfs_provisioner }}
-
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: {{ $defaults.provisionerConfigName }}
-  namespace: {{ $defaults.cephFSNamespace }}
-spec:
-  replicas: {{ .Values.global.replicas }}
-  strategy:
-    type: Recreate
-  selector:
-    matchLabels:
-      app: {{ $defaults.provisionerConfigName }}
-  template:
-    metadata:
-      labels:
-        app: {{ $defaults.provisionerConfigName }}
-    spec:
-      affinity:
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            - labelSelector:
-                matchExpressions:
-                  - key: app
-                    operator: In
-                    values:
-                      - {{ .Values.global.name }}
-              topologyKey: kubernetes.io/hostname
-      containers:
-        - name: {{ $defaults.provisionerConfigName }}
-          image: {{ $cephfs_provisioner_image | quote }}
-          env:
-            - name: PROVISIONER_NAME
-              value: {{ $defaults.provisionerName }}
-            - name: PROVISIONER_SECRET_NAMESPACE
-              value: {{ $defaults.cephFSNamespace }}
-          command:
-            - "/usr/local/bin/{{ $defaults.provisionerConfigName }}"
-          args:
-            - "-id={{ $defaults.provisionerConfigName }}-1"
-      serviceAccount: {{ $defaults.provisionerConfigName }}
-      {{- if .Values.global.nodeSelector }}
-      nodeSelector:
-{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
-      {{- end }}
-      {{- if .Values.global.tolerations }}
-      tolerations:
-{{ .Values.global.tolerations | toYaml | trim | indent 8 }}
-      {{- end}}
-      {{- if .Values.global.resources }}
-      resources:
-{{ .Values.global.resources | toYaml | trim | indent 8 }}
-      {{- end }}
@ -1,93 +0,0 @@
{{/*
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- $defaults := .Values.classdefaults }}

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ $defaults.provisionerConfigName }}
  namespace: {{ $defaults.cephFSNamespace }}
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns","coredns"]
    verbs: ["list", "get"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "create", "list", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ $defaults.provisionerConfigName }}
subjects:
  - kind: ServiceAccount
    name: {{ $defaults.provisionerConfigName }}
    namespace: {{ $defaults.cephFSNamespace }}
roleRef:
  kind: ClusterRole
  name: {{ $defaults.provisionerConfigName }}
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ $defaults.provisionerConfigName }}
  namespace: {{ $defaults.cephFSNamespace }}
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "create", "list", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ $defaults.provisionerConfigName }}
  namespace: {{ $defaults.cephFSNamespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ $defaults.provisionerConfigName }}
subjects:
  - kind: ServiceAccount
    name: {{ $defaults.provisionerConfigName }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ $defaults.provisionerConfigName }}
  namespace: {{ $defaults.cephFSNamespace }}
imagePullSecrets:
  - name: default-registry-key
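For reference, RBAC wiring like the above can be sanity-checked after rendering with kubectl's built-in access review. A sketch, assuming the default provisionerConfigName and cephFSNamespace values (cephfs-provisioner in kube-system):

    kubectl auth can-i create persistentvolumes \
        --as=system:serviceaccount:kube-system:cephfs-provisioner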
@ -1,30 +0,0 @@
{{/*
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{ $defaults := .Values.classdefaults }}
{{ $provisioner := .Values.global.provisioner_name }}
{{ $defaultSC := .Values.global.defaultStorageClass }}
{{- range $classConfig := .Values.classes }}
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
{{- if eq $defaultSC $classConfig.name}}
  annotations:
    "storageclass.kubernetes.io/is-default-class": "true"
{{- end }}
  name: {{ $classConfig.name }}
provisioner: {{ $provisioner }}
parameters:
  monitors: "{{ $monitors := or $classConfig.monitors $defaults.monitors }}{{ join "," $monitors}}"
  adminId: {{ or $classConfig.adminId $defaults.adminId }}
  adminSecretName: {{ or $classConfig.adminSecretName $defaults.adminSecretName }}
  adminSecretNamespace: {{ or $classConfig.adminSecretNamespace $defaults.adminSecretNamespace }}
  claimRoot: {{ $classConfig.claim_root }}
---
{{- end }}
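For reference, a sketch of what this template renders to with the chart's default values (see the values.yaml below). Because defaultStorageClass defaults to fast-cephfs-disabled, the is-default-class annotation is not emitted:

    kind: StorageClass
    apiVersion: storage.k8s.io/v1
    metadata:
      name: fast-cephfs
    provisioner: ceph.com/cephfs
    parameters:
      monitors: "192.168.204.2:6789"
      adminId: admin
      adminSecretName: ceph-secret-admin
      adminSecretNamespace: kube-system
      claimRoot: /pvc-volumes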
@ -1,128 +0,0 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

#
# Global options.
# Defaults should be fine in most cases.
global:
  #
  # Defines the application name of the provisioner.
  #
  name: "cephfs-provisioner"
  #
  # Defines the name of the provisioner associated with a set of storage classes
  #
  provisioner_name: "ceph.com/cephfs"
  #
  # Enable this storage class as the system default storage class
  #
  defaultStorageClass: fast-cephfs-disabled
  #
  # If configured, tolerations will add a toleration field to the Pod.
  #
  # Node tolerations for cephfs-provisioner scheduling to nodes with taints.
  # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  # Examples:
  #  tolerations:
  #  [
  #    {
  #      key: "node-role.kubernetes.io/master",
  #      operator: "Exists",
  #      effect: "NoSchedule"
  #    }
  #  ]
  #
  #  tolerations:
  #  - key: "node-role.kubernetes.io/master"
  #    operator: "Exists"
  #    effect: "NoSchedule"
  #
  tolerations: []
  # If configured, resources will set the requests/limits field to the Pod.
  # Ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
  # Example:
  #  {
  #    "limits": {
  #      "memory": "200Mi"
  #    },
  #    "requests": {
  #      "cpu": "100m",
  #      "memory": "200Mi"
  #    }
  #  }
  resources: {}
  #
  # Number of replicas to start when configured as deployment
  #
  replicas: 1
  #
  # Node Selector
  #
  nodeSelector: { node-role.kubernetes.io/master: "" }

#
# Configure storage classes.
# Defaults for storage classes. Update this if you have a single Ceph storage cluster.
# No need to add them to each class.
#
classdefaults:
  # Ceph admin account
  adminId: admin
  # K8 secret name for the admin context
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: kube-system
  cephFSNamespace: kube-system
  # Define ip addresses of Ceph Monitors
  monitors:
    - 192.168.204.2:6789
  provisionerConfigName: cephfs-provisioner
  provisionerName: ceph.com/cephfs

# Configure storage classes.
# This section should be tailored to your setup. It allows you to define multiple storage
# classes for the same cluster (e.g. if you have tiers of drives with different speeds).
# If you have multiple Ceph clusters take attributes from classdefaults and add them here.
classes:
  - name: fast-cephfs  # Name of storage class.
    # Ceph pools name
    data_pool_name: kube-cephfs-data
    metadata_pool_name: kube-cephfs-metadata
    # CephFS name
    fs_name: kube-cephfs
    # Ceph user name to access this pool
    userId: ceph-pool-kube-cephfs-data
    # K8 secret name with key for accessing the Ceph pool
    userSecretName: ceph-pool-kube-cephfs-data
    # Pool replication
    replication: 1
    # Pool crush rule name
    crush_rule_name: storage_tier_ruleset
    # Pool chunk size / PG_NUM
    chunk_size: 64
    claim_root: "/pvc-volumes"
    # Additional namespaces to allow storage class access (other than where
    # installed)
    additionalNamespaces:
      - default
      - kube-public

# Defines:
# - Provisioner's image name including container registry.
# - CEPH helper image
#
images:
  tags:
    cephfs_provisioner: quay.io/external_storage/cephfs-provisioner:v2.1.0-k8s1.11
    cephfs_provisioner_storage_init: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20201223
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
manifests:
  configmap_bin: true
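A PersistentVolumeClaim consuming the fast-cephfs class above might look like the following sketch (claim name, namespace and size are illustrative):

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: cephfs-pvc-example
      namespace: default
    spec:
      accessModes:
        - ReadWriteMany
      storageClassName: fast-cephfs
      resources:
        requests:
          storage: 1Gi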
@ -1,10 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

apiVersion: v1
description: rbd provisioner chart
name: rbd-provisioner
version: 0.1.0
@ -1,5 +0,0 @@
RBD Provisioner Chart
-------------------------------------------------------------------------------
This chart was last validated with:
* Repo: https://github.com/kubernetes-incubator/external-storage.git
* Commit: (6776bba1) Merge pull request #1048 from AdamDang/patch-3
@ -1,22 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

classdefaults:
  adminId: admin
  adminSecretName: ceph-admin
  monitors:
    - 192.168.204.4:6789
    - 192.168.204.3:6789
    - 192.168.204.22:6789
classes:
  - name: rbd
    pool: kube-rbd
    userId: ceph-pool-kube-rbd
    userSecretName: ceph-pool-kube-rbd
  - name: gold-rbd
    pool: kube-rbd-gold
    userId: ceph-pool-gold-kube-rbd-gold
    userSecretName: ceph-pool-gold-kube-rbd-gold
@ -1,17 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

classes:
  - name: slow-rbd
    monitors:
      - 192.168.204.3:6789
      - 192.168.204.150:6789
      - 192.168.204.4:6789
    adminId: admin
    adminSecretName: ceph-secret
    pool: kube
    userId: kube
    userSecretName: ceph-secret-kube
@ -1,9 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
dependencies:
  - name: helm-toolkit
    repository: http://localhost:8879/charts
    version: ">= 0.1.0"
@ -1,40 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- if .Values.global.rbac }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ .Values.rbac.clusterRole }}
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns"]
    verbs: ["list", "get"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "create", "list", "update"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "create", "list", "update"]
{{- end}}
@ -1,22 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- if .Values.global.rbac }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ .Values.rbac.clusterRoleBinding }}
subjects:
  - kind: ServiceAccount
    name: {{ .Values.rbac.serviceAccount }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: {{ .Values.rbac.clusterRole }}
  apiGroup: rbac.authorization.k8s.io
{{- end}}
@ -1,47 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- if eq .Values.global.deployAs "DaemonSet" }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: {{ .Values.global.name }}
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ .Values.global.name }}
spec:
  selector:
    matchLabels:
      app: {{ .Values.global.name }}
  template:
    metadata:
      labels:
        app: {{ .Values.global.name }}
    spec:
{{- if or .Values.global.rbac .Values.global.reuseRbac }}
      serviceAccountName: {{ .Values.rbac.serviceAccount }}
{{- end }}
{{- with .Values.global.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
{{- if .Values.global.nodeSelector }}
      nodeSelector:
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
{{- end }}
      containers:
        - image: {{ .Values.global.image | quote }}
          name: {{ .Values.global.name }}
{{- if .Values.global.resources }}
          resources:
{{ .Values.global.resources | toYaml | trim | indent 12 }}
{{- end }}
          env:
            - name: PROVISIONER_NAME
              value: {{ .Values.global.provisioner_name }}
{{- end }}
@ -1,58 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- if eq .Values.global.deployAs "Deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Values.global.name }}
  namespace: {{ .Release.Namespace }}
spec:
  replicas: {{ .Values.global.replicas }}
  selector:
    matchLabels:
      app: {{ .Values.global.name }}
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: {{ .Values.global.name }}
    spec:
{{- if or .Values.global.rbac .Values.global.reuseRbac }}
      serviceAccount: {{ .Values.rbac.serviceAccount }}
{{- end }}
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - {{ .Values.global.name }}
              topologyKey: kubernetes.io/hostname
      containers:
        - name: {{ .Values.global.name }}
          image: {{ .Values.images.tags.rbd_provisioner | quote }}
          env:
            - name: PROVISIONER_NAME
              value: {{ .Values.global.provisioner_name }}
{{- if .Values.global.nodeSelector }}
      nodeSelector:
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
{{- end }}
{{- with .Values.global.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
{{- if .Values.global.resources }}
      resources:
{{ .Values.global.resources | toYaml | trim | indent 8 }}
{{- end }}
{{- end }}
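The Deployment above and the DaemonSet template are mutually exclusive, gated on global.deployAs; a minimal override sketch that switches to one pod per eligible node instead of a replicated deployment:

    global:
      deployAs: DaemonSet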
@ -1,222 +0,0 @@
{{/*
#
# Copyright (c) 2018-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- if .Values.global.job_storage_init }}
{{ $root := . }}
{{ $defaults := .Values.classdefaults}}
{{ $mount := "/tmp/mount" }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: config-{{- $root.Values.global.name }}
  namespace: {{ $root.Release.Namespace }}
  annotations:
    "helm.sh/hook": "pre-upgrade, pre-install"
    "helm.sh/hook-delete-policy": "before-hook-creation"
data:
  ceph.conf: |
{{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
    [mon.{{- $index }}]
    mon_addr = {{ $element }}
{{- end }}

  check_ceph.sh: |-
    #!/bin/bash

    # Copy from read only mount to Ceph config folder
    cp {{ $mount -}}/ceph.conf /etc/ceph/

    if [ -n "${CEPH_ADMIN_SECRET}" ]; then
      kubectl get secret -n ${NAMESPACE} | grep ${CEPH_ADMIN_SECRET}
      if [ $? -ne 0 ]; then
        echo "Create ${CEPH_ADMIN_SECRET} secret"
        kubectl create secret generic ${CEPH_ADMIN_SECRET} --type="kubernetes.io/rbd" --from-literal=key= --namespace=${NAMESPACE}
        if [ $? -ne 0 ]; then
          echo "Error creating secret ${CEPH_ADMIN_SECRET}, exit"
          exit 1
        fi
      fi
    fi

    touch /etc/ceph/ceph.client.admin.keyring

    # Check if ceph is accessible
    echo "===================================="
    ceph -s
    if [ $? -ne 0 ]; then
      echo "Error: Ceph cluster is not accessible, check Pod logs for details."
      exit 1
    fi

    set -ex
    # Make sure the pool exists.
    ceph osd pool stats ${POOL_NAME} || ceph osd pool create ${POOL_NAME} ${POOL_CHUNK_SIZE}
    # Set pool configuration.
    ceph osd pool application enable $POOL_NAME rbd
    ceph osd pool set ${POOL_NAME} size ${POOL_REPLICATION}
    ceph osd pool set ${POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}
    set +ex

    if [[ -z "${USER_ID}" && -z "${CEPH_USER_SECRET}" ]]; then
      echo "No need to create secrets for pool ${POOL_NAME}"
      exit 0
    fi

    set -ex
    KEYRING=$(ceph auth get-or-create client.${USER_ID} mon "allow r" osd "allow rwx pool=${POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
    # Set up pool key in Ceph format
    CEPH_USER_KEYRING=/etc/ceph/ceph.client.${USER_ID}.keyring
    echo $KEYRING > $CEPH_USER_KEYRING
    set +ex

    if [ -n "${CEPH_USER_SECRET}" ]; then
      kubectl get secret -n ${NAMESPACE} ${CEPH_USER_SECRET} 2>/dev/null
      if [ $? -ne 0 ]; then
        echo "Create ${CEPH_USER_SECRET} secret"
        kubectl create secret generic -n ${NAMESPACE} ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
        if [ $? -ne 0 ]; then
          echo "Error creating secret ${CEPH_USER_SECRET} in ${NAMESPACE}, exit"
          exit 1
        fi
      else
        echo "Secret ${CEPH_USER_SECRET} already exists"
      fi

      # Support creating namespaces and Ceph user secrets for additional
      # namespaces other than the one in which the provisioner is installed.
      # This allows the provisioner to set up and provide PVs for multiple
      # applications across many namespaces.
      if [ -n "${ADDITIONAL_NAMESPACES}" ]; then
        for ns in $(IFS=,; echo ${ADDITIONAL_NAMESPACES}); do
          kubectl get namespace $ns 2>/dev/null
          if [ $? -ne 0 ]; then
            kubectl create namespace $ns
            if [ $? -ne 0 ]; then
              echo "Error creating namespace $ns, exit"
              continue
            fi
          fi

          kubectl get secret -n $ns ${CEPH_USER_SECRET} 2>/dev/null
          if [ $? -ne 0 ]; then
            echo "Creating secret ${CEPH_USER_SECRET} for namespace $ns"
            kubectl create secret generic -n $ns ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
            if [ $? -ne 0 ]; then
              echo "Error creating secret ${CEPH_USER_SECRET} in $ns, exit"
            fi
          else
            echo "Secret ${CEPH_USER_SECRET} for namespace $ns already exists"
          fi
        done
      fi
    fi

    # Check if pool is accessible using provided credentials
    echo "====================================="
    timeout --preserve-status 10 rbd -p ${POOL_NAME} --user ${USER_ID} ls -K $CEPH_USER_KEYRING
    # Capture the rbd exit status before the tests below overwrite $?.
    RET=$?
    if [ $RET -ne 143 ]; then
      if [ $RET -ne 0 ]; then
        echo "Error: Ceph pool ${POOL_NAME} is not accessible using credentials for user ${USER_ID}, check Pod logs for details."
        exit 1
      else
        echo "Pool ${POOL_NAME} accessible"
      fi
    else
      echo "rbd command timed out and was sent a SIGTERM. Make sure OSDs have been provisioned."
    fi

    ceph -s
---
apiVersion: batch/v1
kind: Job
metadata:
  name: storage-init-{{- $root.Values.global.name }}
  namespace: {{ $root.Release.Namespace }}
  labels:
    heritage: {{ $root.Release.Service | quote }}
    release: {{ $root.Release.Name | quote }}
    chart: "{{ $root.Chart.Name }}-{{ $root.Chart.Version }}"
  annotations:
    "helm.sh/hook": "post-install, pre-upgrade, pre-rollback"
    "helm.sh/hook-delete-policy": "before-hook-creation"
spec:
  backoffLimit: 5  # Limit the number of job restarts in case of failure: ~5 minutes.
  activeDeadlineSeconds: 360
  template:
    metadata:
      name: "{{ $root.Release.Name }}"
      namespace: {{ $root.Release.Namespace }}
      labels:
        heritage: {{ $root.Release.Service | quote }}
        release: {{ $root.Release.Name | quote }}
        chart: "{{ $root.Chart.Name }}-{{ $root.Chart.Version }}"
    spec:
      serviceAccountName: {{ $root.Values.rbac.serviceAccount }}
      restartPolicy: OnFailure
      volumes:
        - name: config-volume-{{- $root.Values.global.name }}
          configMap:
            name: config-{{- $root.Values.global.name }}
      containers:
{{- range $classConfig := $root.Values.classes }}
        - name: storage-init-{{- $classConfig.name }}
          image: {{ $root.Values.images.tags.rbd_provisioner_storage_init | quote }}
          command: [ "/bin/bash", "{{ $mount }}/check_ceph.sh" ]
          env:
            - name: NAMESPACE
              value: {{ $root.Release.Namespace }}
            - name: ADDITIONAL_NAMESPACES
              value: {{ include "helm-toolkit.utils.joinListWithComma" $classConfig.additionalNamespaces | quote }}
            - name: CEPH_ADMIN_SECRET
              value: {{ $defaults.adminSecretName }}
            - name: CEPH_USER_SECRET
              value: {{ $classConfig.userSecretName }}
            - name: USER_ID
              value: {{ $classConfig.userId }}
            - name: POOL_NAME
              value: {{ $classConfig.pool_name }}
            - name: POOL_REPLICATION
              value: {{ $classConfig.replication | quote }}
            - name: POOL_CRUSH_RULE_NAME
              value: {{ $classConfig.crush_rule_name | quote }}
            - name: POOL_CHUNK_SIZE
              value: {{ $classConfig.chunk_size | quote }}
          volumeMounts:
            - name: config-volume-{{- $root.Values.global.name }}
              mountPath: {{ $mount }}
{{- end }}
{{- if $root.Values.global.nodeSelector }}
      nodeSelector:
{{ $root.Values.global.nodeSelector | toYaml | trim | indent 8 }}
{{- end }}
{{- with $root.Values.global.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
{{- end}}

---
# This ConfigMap is needed because we're not using ceph's helm chart
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-etc
  namespace: {{ $root.Release.Namespace }}
  annotations:
    "helm.sh/hook": "pre-upgrade, pre-install"
    "helm.sh/hook-delete-policy": "before-hook-creation"
data:
  ceph.conf: |
    [global]
    auth_supported = none
{{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
    [mon.{{- $index }}]
    mon_addr = {{ $element }}
{{- end }}
{{- end }}
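For reference, with the default classdefaults.monitors list from the values.yaml below, the ceph.conf loop above would render roughly:

    [mon.0]
    mon_addr = 192.168.204.3:6789
    [mon.1]
    mon_addr = 192.168.204.150:6789
    [mon.2]
    mon_addr = 192.168.204.4:6789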
@ -1,22 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- if .Values.global.rbac }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ .Values.rbac.role }}
  namespace: {{ .Release.Namespace }}
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "create", "list", "update"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "create", "list", "update"]
{{- end}}
@ -1,23 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- if .Values.global.rbac }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ .Values.rbac.roleBinding }}
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ .Values.rbac.role }}
subjects:
  - kind: ServiceAccount
    name: {{ .Values.rbac.serviceAccount }}
    namespace: {{ .Release.Namespace }}
{{- end}}
@ -1,17 +0,0 @@
{{/*
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- if .Values.global.rbac }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ .Values.rbac.serviceAccount }}
  namespace: {{ .Release.Namespace }}
imagePullSecrets:
  - name: default-registry-key
{{- end }}
@ -1,40 +0,0 @@
{{/*
#
# Copyright (c) 2018-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- if .Values.global.provisionStorageClass }}
{{ $namespace := .Release.Namespace }}
{{ $defaults := .Values.classdefaults}}
{{ $provisioner := .Values.global.provisioner_name }}
{{ $defaultSC := .Values.global.defaultStorageClass }}
{{- range $classConfig := .Values.classes }}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
{{- if eq $defaultSC $classConfig.name}}
  annotations:
    "storageclass.kubernetes.io/is-default-class": "true"
    "helm.sh/hook": "pre-upgrade, pre-install"
    "helm.sh/hook-delete-policy": "before-hook-creation"
{{- end }}
  name: {{ $classConfig.name }}
provisioner: {{ $provisioner }}
parameters:
  monitors: "{{ $monitors := or $classConfig.monitors $defaults.monitors }}{{ join "," $monitors}}"
  adminId: {{ or $classConfig.adminId $defaults.adminId}}
  adminSecretName: {{ or $classConfig.adminSecretName $defaults.adminSecretName }}
  adminSecretNamespace: {{ $namespace }}
  pool: {{ or $classConfig.pool_name $defaults.pool_name }}
  userId: {{ or $classConfig.userId $defaults.userId }}
  userSecretName: {{ $classConfig.userSecretName }}
  imageFormat: {{ or $classConfig.imageFormat $defaults.imageFormat | quote }}
  imageFeatures: {{ or $classConfig.imageFeatures $defaults.imageFeatures}}
mountOptions:
  - discard
---
{{- end }}
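For reference, a sketch of the StorageClass this template renders with the chart's default values, assuming installation into kube-system. Since defaultStorageClass defaults to fast-rbd, the default-class and helm hook annotations are emitted for this class:

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      annotations:
        "storageclass.kubernetes.io/is-default-class": "true"
        "helm.sh/hook": "pre-upgrade, pre-install"
        "helm.sh/hook-delete-policy": "before-hook-creation"
      name: fast-rbd
    provisioner: ceph.com/rbd
    parameters:
      monitors: "192.168.204.3:6789,192.168.204.150:6789,192.168.204.4:6789"
      adminId: admin
      adminSecretName: ceph-secret
      adminSecretNamespace: kube-system
      pool: kube
      userId: kube
      userSecretName: ceph-secret-kube
      imageFormat: "2"
      imageFeatures: layering
    mountOptions:
      - discard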
@ -1,182 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

#
# Global options.
# Defaults should be fine in most cases.
global:
  #
  # Defines the application name of the provisioner.
  #
  name: "rbd-provisioner"
  #
  # Defines the name of the provisioner associated with a set of storage classes
  #
  provisioner_name: "ceph.com/rbd"
  #
  # Execute initialization job to verify external Ceph cluster access
  # and setup additional dependencies assumed by dependent helm charts
  # (i.e. configmap and secrets).
  # Skipping is not recommended.
  #
  job_storage_init: true
  #
  # Defines whether to reuse an already defined RBAC policy.
  # Make sure that the serviceAccount defined in the RBAC section matches the one
  # in the policy you reuse.
  #
  reuseRbac: false
  #
  # Defines whether to generate service account and role bindings.
  #
  rbac: true
  #
  # Provision storage class. If false you have to provision storage classes by hand.
  #
  provisionStorageClass: true
  #
  # Enable this storage class as the system default storage class
  #
  defaultStorageClass: fast-rbd
  #
  # Choose whether the rbd-provisioner pod should be deployed as a Deployment or a DaemonSet.
  # Values: none, Deployment, DaemonSet
  #
  deployAs: Deployment
  #
  # If configured, tolerations will add a toleration field to the Pod.
  #
  # Node tolerations for rbd-volume-provisioner scheduling to nodes with taints.
  # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  # Examples:
  #  tolerations:
  #  [
  #    {
  #      key: "node-role.kubernetes.io/master",
  #      operator: "Exists",
  #      effect: "NoSchedule"
  #    }
  #  ]
  #
  #  tolerations:
  #  - key: "node-role.kubernetes.io/master"
  #    operator: "Exists"
  #    effect: "NoSchedule"
  #
  tolerations: []
  #
  # If configured, resources will set the requests/limits field to the Pod.
  # Ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
  # Example:
  #  {
  #    "limits": {
  #      "memory": "200Mi"
  #    },
  #    "requests": {
  #      "cpu": "100m",
  #      "memory": "200Mi"
  #    }
  #  }
  resources: {}
  #
  # Number of replicas to start when configured as deployment
  #
  replicas: 1
  #
  # Node Selector
  #
  nodeSelector: { node-role.kubernetes.io/master: "" }
#
# RBAC options.
# Defaults should be fine in most cases.
rbac:
  #
  # Cluster Role name
  #
  clusterRole: rbd-provisioner
  #
  # Cluster Role Binding name
  #
  clusterRoleBinding: rbd-provisioner
  #
  # Role name
  #
  role: rbd-provisioner
  #
  # Role Binding name
  #
  roleBinding: rbd-provisioner
  #
  # Defines a name of the service account which Provisioner will use to communicate with API server.
  #
  serviceAccount: rbd-provisioner

#
# Configure storage classes.
# Defaults for storage classes. Update this if you have a single Ceph storage cluster.
# No need to add them to each class.
#
classdefaults:
  # Define ip addresses of Ceph Monitors
  monitors:
    - 192.168.204.3:6789
    - 192.168.204.150:6789
    - 192.168.204.4:6789
  # Ceph admin account
  adminId: admin
  # K8 secret name for the admin context
  adminSecretName: ceph-secret
  # Ceph RBD image format version
  imageFormat: 2
  # Ceph RBD image features.
  imageFeatures: layering

#
# Configure storage classes.
# This section should be tailored to your setup. It allows you to define multiple storage
# classes for the same cluster (e.g. if you have tiers of drives with different speeds).
# If you have multiple Ceph clusters take attributes from classdefaults and add them here.
classes:
  - name: fast-rbd  # Name of storage class.
    # Ceph pool name
    pool_name: kube
    # Ceph user name to access this pool
    userId: kube
    # K8 secret name with key for accessing the Ceph pool
    userSecretName: ceph-secret-kube
    # Pool replication
    replication: 1
    # Pool crush rule name
    crush_rule_name: storage_tier_ruleset
    # Pool chunk size / PG_NUM
    chunk_size: 8
    # Additional namespaces to allow storage class access (other than where
    # installed)
    additionalNamespaces:
      - default
      - kube-public
# Configuration data for the ephemeral pool(s)
ephemeral_pools:
  - chunk_size: 8
    crush_rule_name: storage_tier_ruleset
    pool_name: ephemeral
    replication: 1

#
# Defines:
# - Provisioner's image name including container registry.
# - CEPH helper image
#
images:
  tags:
    rbd_provisioner: quay.io/external_storage/rbd-provisioner:v2.1.1-k8s1.11
    rbd_provisioner_storage_init: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20201223
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
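As a usage sketch, either removed chart could be installed standalone with one of the sample override files shown earlier (the release name, chart path, and override file name here are illustrative):

    helm upgrade --install rbd-provisioner ./rbd-provisioner \
        --namespace kube-system \
        --values overrides.yaml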