CephFS provisioner for k8sapp_platform

This commit implements the CephFS Provisioner as a Helm chart
in platform-integ-apps.
The CephFS Provisioner follows the same design as the RBD
Provisioner, but with CephFS the provisioned volumes can be
mounted by several different components, such as pods, at the
same time.

It is activated automatically, the same as the RBD Provisioner.

Signed-off-by: Daniel Safta <daniel.safta@windriver.com>
Story: 2008162
Task: 40908
Change-Id: Ic4270e401b2c3e51c3aecfab23af1e874e733831
Daniel Safta 2020-10-19 10:01:42 +00:00
parent a9afaabf55
commit 3466dd67da
15 changed files with 852 additions and 2 deletions
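As an illustration of the shared-mount use case described in the commit message above (a sketch only, not part of this change): once the chart is applied, a PersistentVolumeClaim against the default 'cephfs' storage class can be mounted ReadWriteMany by several pods at once. The resource names and container image below are hypothetical.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: shared-data                  # hypothetical claim name
spec:
  accessModes:
    - ReadWriteMany                  # CephFS allows the volume to be shared
  resources:
    requests:
      storage: 1Gi
  storageClassName: cephfs           # default class created for the default backend
---
apiVersion: v1
kind: Pod
metadata:
  name: app-1                        # a second pod can mount the same claim
spec:
  containers:
    - name: app
      image: busybox:1.32            # placeholder image
      command: ["sleep", "3600"]
      volumeMounts:
        - name: shared
          mountPath: /data
  volumes:
    - name: shared
      persistentVolumeClaim:
        claimName: shared-data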

View File

@ -10,6 +10,7 @@
from k8sapp_platform.helm.ceph_pools_audit import CephPoolsAuditHelm
from k8sapp_platform.helm.rbd_provisioner import RbdProvisionerHelm
from k8sapp_platform.helm.ceph_fs_provisioner import CephFSProvisionerHelm
from sysinv.common import constants
from sysinv.helm import manifest_base as base
@ -23,12 +24,14 @@ class PlatformArmadaManifestOperator(base.ArmadaManifestOperator):
CHART_GROUP_CEPH = 'starlingx-ceph-charts'
CHART_GROUPS_LUT = {
CephPoolsAuditHelm.CHART: CHART_GROUP_CEPH,
RbdProvisionerHelm.CHART: CHART_GROUP_CEPH
RbdProvisionerHelm.CHART: CHART_GROUP_CEPH,
CephFSProvisionerHelm.CHART: CHART_GROUP_CEPH
}
CHARTS_LUT = {
CephPoolsAuditHelm.CHART: 'kube-system-ceph-pools-audit',
RbdProvisionerHelm.CHART: 'kube-system-rbd-provisioner'
RbdProvisionerHelm.CHART: 'kube-system-rbd-provisioner',
CephFSProvisionerHelm.CHART: 'kube-system-cephfs-provisioner'
}
def platform_mode_manifest_updates(self, dbapi, mode):

View File

@ -6,6 +6,29 @@
# Helm: Supported charts:
# These values match the names in the chart package's Chart.yaml
from sysinv.helm import common
HELM_CHART_RBD_PROVISIONER = 'rbd-provisioner'
HELM_CHART_CEPH_POOLS_AUDIT = 'ceph-pools-audit'
HELM_CHART_HELM_TOOLKIT = 'helm-toolkit'
HELM_CHART_CEPH_FS_PROVISIONER = 'cephfs-provisioner'
HELM_NS_CEPH_FS_PROVISIONER = common.HELM_NS_KUBE_SYSTEM
HELM_CEPH_FS_PROVISIONER_CLAIM_ROOT = '/pvc-volumes'
HELM_CHART_CEPH_FS_PROVISIONER_NAME = 'ceph.com/cephfs'
K8S_CEPHFS_PROVISIONER_STOR_CLASS_NAME = 'cephfs'
K8S_CEPHFS_PROVISIONER_ADMIN_SECRET_NAME = 'ceph-secret-admin'
K8S_CEPHFS_PROVISIONER_ADMIN_SECRET_NAMESPACE = 'kube-system'
K8S_CEPHFS_PROVISIONER_USER_NAME = 'admin'
K8S_CEPHFS_PROVISIONER_DEFAULT_NAMESPACE = 'kube-system'
K8S_CEPHFS_PROVISIONER_RBAC_CONFIG_NAME = 'cephfs-provisioner-keyring'
# CephFS Provisioner backend
K8S_CEPHFS_PROV_STORAGECLASS_NAME = 'cephfs_storageclass_name' # Customer
K8S_CEPHFS_PROV_STOR_CLASS_NAME = 'cephfs'
# Ceph FS constants for pools and fs
CEPHFS_DATA_POOL_KUBE_NAME = 'kube-cephfs-data'
CEPHFS_METADATA_POOL_KUBE_NAME = 'kube-cephfs-metadata'
CEPHFS_FS_KUBE_NAME = 'kube-cephfs'

View File

@ -0,0 +1,191 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from k8sapp_platform.common import constants as app_constants
from sysinv.common import constants
from sysinv.common import exception
from sysinv.helm import base
class K8CephFSProvisioner(object):
""" Utility methods for getting the k8 overrides for internal ceph
from a corresponding storage backend.
"""
@staticmethod
def get_storage_class_name(bk):
""" Get the name of the storage class for an rbd provisioner
:param bk: Ceph storage backend object
:returns: name of the rbd provisioner
"""
if bk['capabilities'].get(app_constants.K8S_CEPHFS_PROV_STORAGECLASS_NAME):
name = bk['capabilities'][app_constants.K8S_CEPHFS_PROV_STORAGECLASS_NAME]
elif bk.name == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
name = app_constants.K8S_CEPHFS_PROV_STOR_CLASS_NAME
else:
name = bk.name + '-' + app_constants.K8S_CEPHFS_PROV_STOR_CLASS_NAME
return str(name)
@staticmethod
def get_data_pool(bk):
""" Get the name of the ceph pool for an rbd provisioner
This naming convention is valid only for internal backends
:param bk: Ceph storage backend object
:returns: name of the rbd provisioner
"""
if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
return app_constants.CEPHFS_DATA_POOL_KUBE_NAME
else:
return str(app_constants.CEPHFS_DATA_POOL_KUBE_NAME + '-' + bk['name'])
@staticmethod
def get_metadata_pool(bk):
""" Get the name of the ceph pool for an rbd provisioner
This naming convention is valid only for internal backends
:param bk: Ceph storage backend object
:returns: name of the rbd provisioner
"""
if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
return app_constants.CEPHFS_METADATA_POOL_KUBE_NAME
else:
return str(app_constants.CEPHFS_METADATA_POOL_KUBE_NAME + '-' + bk['name'])
@staticmethod
def get_fs(bk):
""" Get the name of the ceph pool for an rbd provisioner
This naming convention is valid only for internal backends
:param bk: Ceph storage backend object
:returns: name of the rbd provisioner
"""
if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
return app_constants.CEPHFS_FS_KUBE_NAME
else:
return str(app_constants.CEPHFS_FS_KUBE_NAME + '-' + bk['name'])
@staticmethod
def get_user_id(bk):
""" Get the non admin user name for an cephfs provisioner secret
:param bk: Ceph storage backend object
:returns: name of the cephfs provisioner
"""
if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
name = K8CephFSProvisioner.get_data_pool(bk)
else:
name = K8CephFSProvisioner.get_data_pool(bk)
prefix = 'ceph-pool'
return str(prefix + '-' + name)
@staticmethod
def get_user_secret_name(bk):
""" Get the name for the non admin secret key of a pool
:param bk: Ceph storage backend object
:returns: name of k8 secret
"""
if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
name = K8CephFSProvisioner.get_data_pool(bk)
else:
name = K8CephFSProvisioner.get_data_pool(bk)
base_name = 'ceph-pool'
return str(base_name + '-' + name)
class CephFSProvisionerHelm(base.BaseHelm):
"""Class to encapsulate helm operations for the cephfs-provisioner chart"""
CHART = app_constants.HELM_CHART_CEPH_FS_PROVISIONER
SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \
[app_constants.HELM_NS_CEPH_FS_PROVISIONER]
SUPPORTED_APP_NAMESPACES = {
constants.HELM_APP_PLATFORM:
base.BaseHelm.SUPPORTED_NAMESPACES + [app_constants.HELM_NS_CEPH_FS_PROVISIONER],
}
SERVICE_NAME = app_constants.HELM_CHART_CEPH_FS_PROVISIONER
SERVICE_PORT_MON = 6789
def execute_manifest_updates(self, operator):
# On application load this chart is enabled. Only disable if specified
# by the user
if not self._is_enabled(operator.APP, self.CHART,
app_constants.HELM_NS_CEPH_FS_PROVISIONER):
operator.chart_group_chart_delete(
operator.CHART_GROUPS_LUT[self.CHART],
operator.CHARTS_LUT[self.CHART])
def get_overrides(self, namespace=None):
backends = self.dbapi.storage_backend_get_list()
ceph_bks = [bk for bk in backends if bk.backend == constants.SB_TYPE_CEPH]
if not ceph_bks:
return {} # ceph is not configured
def _skip_ceph_mon_2(name):
return name != constants.CEPH_MON_2
classdefaults = {
"monitors": self._get_formatted_ceph_monitor_ips(
name_filter=_skip_ceph_mon_2),
"adminId": app_constants.K8S_CEPHFS_PROVISIONER_USER_NAME,
"adminSecretName": app_constants.K8S_CEPHFS_PROVISIONER_ADMIN_SECRET_NAME
}
# Get tier info.
tiers = self.dbapi.storage_tier_get_list()
classes = []
for bk in ceph_bks:
# Get the ruleset for the new kube-cephfs pools.
tier = next((t for t in tiers if t.forbackendid == bk.id), None)
if not tier:
raise Exception("No tier present for backend %s" % bk.name)
rule_name = "{0}{1}{2}".format(
tier.name,
constants.CEPH_CRUSH_TIER_SUFFIX,
"-ruleset").replace('-', '_')
cls = {
"name": K8CephFSProvisioner.get_storage_class_name(bk),
"data_pool_name": K8CephFSProvisioner.get_data_pool(bk),
"metadata_pool_name": K8CephFSProvisioner.get_metadata_pool(bk),
"fs_name": K8CephFSProvisioner.get_fs(bk),
"replication": int(bk.capabilities.get("replication")),
"crush_rule_name": rule_name,
"chunk_size": 64,
"userId": K8CephFSProvisioner.get_user_id(bk),
"userSecretName": K8CephFSProvisioner.get_user_secret_name(bk),
"claim_root": app_constants.HELM_CEPH_FS_PROVISIONER_CLAIM_ROOT,
"additionalNamespaces": ['default', 'kube-public']
}
classes.append(cls)
global_settings = {
"replicas": self._num_provisioned_controllers(),
"defaultStorageClass": app_constants.K8S_CEPHFS_PROVISIONER_STOR_CLASS_NAME
}
overrides = {
app_constants.HELM_NS_CEPH_FS_PROVISIONER: {
"classdefaults": classdefaults,
"classes": classes,
"global": global_settings
}
}
if namespace in self.SUPPORTED_NAMESPACES:
return overrides[namespace]
elif namespace:
raise exception.InvalidHelmNamespace(chart=self.CHART,
namespace=namespace)
else:
return overrides
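For reference, a sketch of the per-namespace overrides this plugin generates for a single default backend (monitor addresses and replication are deployment-specific and shown here only as assumed examples); a non-default backend named, say, 'ceph-ext' would instead get the class name 'ceph-ext-cephfs' and pool/filesystem names suffixed with '-ceph-ext':

kube-system:
  classdefaults:
    monitors:                          # assumed monitor addresses
      - 192.168.204.3:6789
      - 192.168.204.4:6789
    adminId: admin
    adminSecretName: ceph-secret-admin
  classes:
    - name: cephfs                     # default backend uses the default class name
      data_pool_name: kube-cephfs-data
      metadata_pool_name: kube-cephfs-metadata
      fs_name: kube-cephfs
      replication: 2                   # assumed backend replication factor
      crush_rule_name: storage_tier_ruleset
      chunk_size: 64
      userId: ceph-pool-kube-cephfs-data
      userSecretName: ceph-pool-kube-cephfs-data
      claim_root: /pvc-volumes
      additionalNamespaces: [default, kube-public]
  global:
    replicas: 2                        # number of provisioned controllers (assumed)
    defaultStorageClass: cephfs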

View File

@ -36,6 +36,7 @@ systemconfig.helm_plugins.platform_integ_apps =
001_helm-toolkit = k8sapp_platform.helm.helm_toolkit:HelmToolkitHelm
002_rbd-provisioner = k8sapp_platform.helm.rbd_provisioner:RbdProvisionerHelm
003_ceph-pools-audit = k8sapp_platform.helm.ceph_pools_audit:CephPoolsAuditHelm
004_cephfs-provisioner = k8sapp_platform.helm.ceph_fs_provisioner:CephFSProvisionerHelm
systemconfig.armada.manifest_ops =
platform-integ-apps = k8sapp_platform.armada.manifest_platform:PlatformArmadaManifestOperator

View File

@ -47,6 +47,7 @@ helm repo add local http://localhost:8879/charts
cd helm-charts
make rbd-provisioner
make ceph-pools-audit
make cephfs-provisioner
# TODO (rchurch): remove
make node-feature-discovery
cd -

View File

@ -0,0 +1,11 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
appVersion: "1.0"
description: CephFS provisioner for Kubernetes
name: cephfs-provisioner
version: 0.1.0

View File

@ -0,0 +1,9 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
dependencies:
- name: helm-toolkit
repository: http://localhost:8879/charts
version: 0.1.0

View File

@ -0,0 +1,86 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
#! /bin/bash
set -x
{{ $classes := .Values.classes}}
touch /etc/ceph/ceph.client.admin.keyring
# Check if ceph is accessible
echo "===================================="
ceph -s
if [ $? -ne 0 ]; then
echo "Error: Ceph cluster is not accessible, check Pod logs for details."
exit 1
fi
set -ex
KEYRING=$(ceph auth get-or-create client.${USER_ID} mon "allow r" osd "allow rwx pool=${DATA_POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
# Set up pool key in Ceph format
CEPH_USER_KEYRING=/etc/ceph/ceph.client.${USER_ID}.keyring
echo $KEYRING >$CEPH_USER_KEYRING
set +ex
if [ -n "${CEPH_USER_SECRET}" ]; then
kubectl get secret -n ${NAMESPACE} ${CEPH_USER_SECRET} 2>/dev/null
if [ $? -ne 0 ]; then
echo "Create ${CEPH_USER_SECRET} secret"
kubectl create secret generic -n ${NAMESPACE} ${CEPH_USER_SECRET} --type="kubernetes.io/cephfs" --from-literal=key=$KEYRING
if [ $? -ne 0 ]; then
echo"Error creating secret ${CEPH_USER_SECRET} in ${NAMESPACE}, exit"
exit 1
fi
else
echo "Secret ${CEPH_USER_SECRET} already exists"
fi
# Support creating namespaces and Ceph user secrets for additional
# namespaces other than the one in which the provisioner is installed.
# This allows the provisioner to set up and provide PVs for multiple
# applications across many namespaces.
if [ -n "${ADDITIONAL_NAMESPACES}" ]; then
for ns in $(
IFS=,
echo ${ADDITIONAL_NAMESPACES}
); do
kubectl get namespace $ns 2>/dev/null
if [ $? -ne 0 ]; then
kubectl create namespace $ns
if [ $? -ne 0 ]; then
echo "Error creating namespace $ns, exit"
continue
fi
fi
kubectl get secret -n $ns ${CEPH_USER_SECRET} 2>/dev/null
if [ $? -ne 0 ]; then
echo "Creating secret ${CEPH_USER_SECRET} for namespace $ns"
kubectl create secret generic -n $ns ${CEPH_USER_SECRET} --type="kubernetes.io/cephfs" --from-literal=key=$KEYRING
if [ $? -ne 0 ]; then
echo "Error creating secret ${CEPH_USER_SECRET} in $ns, exit"
fi
else
echo "Secret ${CEPH_USER_SECRET} for namespace $ns already exists"
fi
done
fi
fi
ceph osd pool stats ${DATA_POOL_NAME} || ceph osd pool create ${DATA_POOL_NAME} ${CHUNK_SIZE}
ceph osd pool application enable ${DATA_POOL_NAME} cephfs
ceph osd pool set ${DATA_POOL_NAME} size ${POOL_REPLICATION}
ceph osd pool set ${DATA_POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}
ceph osd pool stats ${METADATA_POOL_NAME} || ceph osd pool create ${METADATA_POOL_NAME} ${CHUNK_SIZE}
ceph osd pool application enable ${METADATA_POOL_NAME} cephfs
ceph osd pool set ${METADATA_POOL_NAME} size ${POOL_REPLICATION}
ceph osd pool set ${METADATA_POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}
ceph fs ls | grep ${FS_NAME} || ceph fs new ${FS_NAME} ${METADATA_POOL_NAME} ${DATA_POOL_NAME}
ceph -s
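The kubectl invocations above amount to creating a secret of roughly the following shape in the provisioner namespace and in each additional namespace (the name comes from CEPH_USER_SECRET, which the Job below sets to the chart's adminSecretName; the key is the base64-encoded cephx key returned by ceph auth get-or-create):

apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin              # CEPH_USER_SECRET / adminSecretName
  namespace: kube-system               # repeated for each additional namespace
type: kubernetes.io/cephfs
data:
  key: "<base64-encoded cephx key>"    # placeholder, not a real key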

View File

@ -0,0 +1,19 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
[global]
# For version 0.55 and beyond, you must explicitly enable
# or disable authentication with "auth" entries in [global].
auth_cluster_required = none
auth_service_required = none
auth_client_required = none
{{ $defaults := .Values.classdefaults}}
{{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
[mon.{{- $index }}]
mon_addr = {{ $element }}
{{- end }}

View File

@ -0,0 +1,88 @@
{{/*
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- $defaults := .Values.classdefaults }}
{{- $cephfs_provisioner_storage_init := .Values.images.tags.cephfs_provisioner_storage_init }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-config-file
namespace: {{ $defaults.cephFSNamespace }}
data:
ceph.conf: |
{{ tuple "conf/_ceph-conf.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cephfs-storage-init
namespace: {{ $defaults.cephFSNamespace }}
data:
storage-init.sh: |
{{ tuple "bin/_storage_init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: cephfs-storage-init
namespace: {{ $defaults.cephFSNamespace }}
spec:
template:
spec:
serviceAccountName: {{ $defaults.rbacConfigName }}
volumes:
- name: cephfs-storage-init
configMap:
name: cephfs-storage-init
defaultMode: 0555
- name: ceph-config
configMap:
name: ceph-config-file
defaultMode: 0555
containers:
{{- range $classConfig := .Values.classes }}
- name: storage-init-{{- $classConfig.name }}
image: {{ $cephfs_provisioner_storage_init | quote }}
command: ["/bin/bash", "/tmp/storage-init.sh"]
env:
- name: NAMESPACE
value: {{ $defaults.cephFSNamespace }}
- name: ADDITIONAL_NAMESPACES
value: {{ include "helm-toolkit.utils.joinListWithComma" $classConfig.additionalNamespaces | quote }}
- name: CEPH_USER_SECRET
value: {{ $defaults.adminSecretName }}
- name: USER_ID
value: {{ $classConfig.userId }}
- name: DATA_POOL_NAME
value: {{ $classConfig.data_pool_name }}
- name: METADATA_POOL_NAME
value: {{ $classConfig.metadata_pool_name }}
- name: FS_NAME
value: {{ $classConfig.fs_name }}
- name: CHUNK_SIZE
value: {{ $classConfig.chunk_size | quote }}
- name: POOL_REPLICATION
value: {{ $classConfig.replication | quote }}
- name: POOL_CRUSH_RULE_NAME
value: {{ $classConfig.crush_rule_name | quote }}
volumeMounts:
- name: cephfs-storage-init
mountPath: /tmp/storage-init.sh
subPath: storage-init.sh
readOnly: true
- name: ceph-config
mountPath: /etc/ceph/ceph.conf
subPath: ceph.conf
readOnly: true
{{- end }}
restartPolicy: Never
backoffLimit: 4

View File

@ -0,0 +1,67 @@
{{/*
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- $defaults := .Values.classdefaults }}
{{- $cephfs_provisioner_image := .Values.images.tags.cephfs_provisioner }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ $defaults.provisionerConfigName }}
namespace: {{ $defaults.cephFSNamespace }}
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: {{ $defaults.provisionerConfigName }}
template:
metadata:
labels:
app: {{ $defaults.provisionerConfigName }}
spec:
{{- if or .Values.global.rbac .Values.global.reuseRbac }}
serviceAccount: {{ .Values.rbac.serviceAccount }}
{{- end }}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- {{ .Values.global.name }}
topologyKey: kubernetes.io/hostname
containers:
- name: {{ $defaults.provisionerConfigName }}
image: {{ $cephfs_provisioner_image | quote }}
env:
- name: PROVISIONER_NAME
value: {{ $defaults.provisionerName }}
- name: PROVISIONER_SECRET_NAMESPACE
value: {{ $defaults.cephFSNamespace }}
command:
- "/usr/local/bin/{{ $defaults.provisionerConfigName }}"
args:
- "-id={{ $defaults.provisionerConfigName }}-1"
serviceAccount: {{ $defaults.provisionerConfigName }}
{{- if .Values.global.nodeSelector }}
nodeSelector:
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
{{- end }}
{{- if .Values.global.tolerations }}
tolerations:
{{ .Values.global.tolerations | toYaml | trim | indent 8 }}
{{- end}}
{{- if .Values.global.resources }}
resources:
{{ .Values.global.resources | toYaml | trim | indent 8 }}
{{- end }}

View File

@ -0,0 +1,149 @@
{{/*
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- $defaults := .Values.classdefaults }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ $defaults.rbacConfigName }}
namespace: {{ $defaults.cephFSNamespace }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "create", "list", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ $defaults.rbacConfigName }}
subjects:
- kind: ServiceAccount
name: {{ $defaults.rbacConfigName }}
namespace: {{ $defaults.cephFSNamespace }}
roleRef:
kind: ClusterRole
name: {{ $defaults.rbacConfigName }}
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ $defaults.rbacConfigName }}
namespace: {{ $defaults.cephFSNamespace }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create", "get", "delete"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "create", "list", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ $defaults.rbacConfigName }}
namespace: {{ $defaults.cephFSNamespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ $defaults.rbacConfigName }}
subjects:
- kind: ServiceAccount
name: {{ $defaults.rbacConfigName }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ $defaults.rbacConfigName }}
namespace: {{ $defaults.cephFSNamespace }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ $defaults.provisionerConfigName }}
namespace: {{ $defaults.cephFSNamespace }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
- apiGroups: [""]
resources: ["services"]
resourceNames: ["kube-dns","coredns"]
verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ $defaults.provisionerConfigName }}
subjects:
- kind: ServiceAccount
name: {{ $defaults.provisionerConfigName }}
namespace: {{ $defaults.cephFSNamespace }}
roleRef:
kind: ClusterRole
name: {{ $defaults.provisionerConfigName }}
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ $defaults.provisionerConfigName }}
namespace: {{ $defaults.cephFSNamespace }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create", "get", "delete"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ $defaults.provisionerConfigName }}
namespace: {{ $defaults.cephFSNamespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ $defaults.provisionerConfigName }}
subjects:
- kind: ServiceAccount
name: {{ $defaults.provisionerConfigName }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ $defaults.provisionerConfigName }}
namespace: {{ $defaults.cephFSNamespace }}
imagePullSecrets:
- name: default-registry-key

View File

@ -0,0 +1,30 @@
{{/*
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{ $defaults := .Values.classdefaults }}
{{ $provisioner := .Values.global.provisioner_name }}
{{ $defaultSC := .Values.global.defaultStorageClass }}
{{- range $classConfig := .Values.classes }}
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
{{- if eq $defaultSC $classConfig.name}}
annotations:
"storageclass.kubernetes.io/is-default-class": "true"
{{- end }}
name: {{ $classConfig.name }}
provisioner: {{ $provisioner }}
parameters:
monitors: "{{ $monitors := or $classConfig.monitors $defaults.monitors }}{{ join "," $monitors}}"
adminId: {{ or $classConfig.adminId $defaults.adminId }}
adminSecretName: {{ or $classConfig.adminSecretName $defaults.adminSecretName }}
adminSecretNamespace: {{ or $classConfig.adminSecretNamespace $defaults.adminSecretNamespace }}
claimRoot: {{ $classConfig.claim_root }}
---
{{- end }}
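Rendered against the class generated for the default backend (names as produced by the sysinv plugin; the monitor address comes from classdefaults and is deployment-specific), each entry yields a StorageClass along these lines:

kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  annotations:
    "storageclass.kubernetes.io/is-default-class": "true"   # only when the name matches defaultStorageClass
  name: cephfs
provisioner: ceph.com/cephfs
parameters:
  monitors: "192.168.204.2:6789"
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: kube-system
  claimRoot: /pvc-volumes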

View File

@ -0,0 +1,142 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
#
# Global options.
# Defaults should be fine in most cases.
global:
#
# Defines the application name of the provisioner.
#
name: "cephfs-provisioner"
#
# Defines the name of the provisioner associated with a set of storage classes
#
provisioner_name: "ceph.com/cephfs"
#
# If configured, tolerations will add a toleration field to the Pod.
#
# Node tolerations for cephfs-provisioner scheduling to nodes with taints.
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
# Example:
# [
# {
# "key": "node-role.kubernetes.io/master",
# "operator": "Exists"
# }
# ]
#
tolerations: []
# If configured, resources will set the requests/limits field to the Pod.
# Ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
# Example:
# {
# "limits": {
# "memory": "200Mi"
# },
# "requests": {
# "cpu": "100m",
# "memory": "200Mi"
# }
# }
resources: {}
#
# Number of replicas to start when configured as deployment
#
replicas: 1
#
# Node Selector
#
nodeSelector: { node-role.kubernetes.io/master: "" }
#
# RBAC options.
# Defaults should be fine in most cases.
rbac:
#
# Cluster Role name
#
clusterRole: cephfs-provisioner
#
# Cluster Role Binding name
#
clusterRoleBinding: cephfs-provisioner
#
# Role name
#
role: cephfs-provisioner
#
# Role Binding name
#
roleBinding: cephfs-provisioner
#
# Defines the name of the service account that the provisioner uses to communicate with the API server.
#
serviceAccount: cephfs-provisioner
#
# Configure storage classes.
# Defaults for storage classes. Update this if you have a single Ceph storage cluster.
# No need to add them to each class.
#
classdefaults:
# Ceph admin account
adminId: admin
# K8s secret name for the admin context
adminSecretName: ceph-secret-admin
adminSecretNamespace: kube-system
cephFSNamespace: kube-system
# Define IP addresses of Ceph Monitors
monitors:
- 192.168.204.2:6789
provisionerConfigName: cephfs-provisioner
provisionerName: ceph.com/cephfs
rbacConfigName: cephfs-provisioner-keyring
# Configure storage classes.
# This section should be tailored to your setup. It allows you to define multiple storage
# classes for the same cluster (e.g. if you have tiers of drives with different speeds).
# If you have multiple Ceph clusters, take attributes from classdefaults and add them here.
classes:
- name: fast-cephfs # Name of storage class.
# Ceph pools name
data_pool_name: kube-cephfs-data
metadata_pool_name: kube-cephfs-metadata
# CephFS name
fs_name: kube-cephfs
# Ceph user name to access this pool
userId: ceph-pool-kube-cephfs-data
# K8s secret name with key for accessing the Ceph pool
userSecretName: ceph-pool-kube-cephfs-data
# Pool replication
replication: 1
# Pool crush rule name
crush_rule_name: storage_tier_ruleset
# Pool chunk size / PG_NUM
chunk_size: 64
claim_root: "/pvc-volumes"
# Additional namespaces to allow storage class access (other than the one
# where the provisioner is installed)
additionalNamespaces:
- default
- kube-public
# Defines:
# - Provisioner's image name including container registry.
# - CEPH helper image
#
images:
tags:
cephfs_provisioner: quay.io/external_storage/cephfs-provisioner:v2.1.0-k8s1.11
cephfs_provisioner_storage_init: docker.io/starlingx/ceph-config-helper:v1.15.0
pull_policy: "IfNotPresent"
local_registry:
active: false
exclude:
- dep_check
- image_repo_sync
manifests:
configmap_bin: true
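The classes list above can carry additional entries for further tiers or backends, as its comments describe; a purely hypothetical second entry could look like this (all names below are illustrative):

  - name: silver-cephfs
    data_pool_name: kube-cephfs-data-silver
    metadata_pool_name: kube-cephfs-metadata-silver
    fs_name: kube-cephfs-silver
    userId: ceph-pool-kube-cephfs-data-silver
    userSecretName: ceph-pool-kube-cephfs-data-silver
    replication: 2
    crush_rule_name: silver_tier_ruleset
    chunk_size: 64
    claim_root: "/pvc-volumes"
    additionalNamespaces:
      - default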

View File

@ -45,6 +45,35 @@ data:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kube-system-cephfs-provisioner
data:
chart_name: cephfs-provisioner
release: cephfs-provisioner
namespace: kube-system
wait:
timeout: 1800
labels:
app: cephfs-provisioner
install:
no_hooks: false
upgrade:
no_hooks: false
pre:
delete:
- type: job
labels:
app: cephfs-provisioner
source:
type: tar
location: http://172.17.0.1:8080/helm_charts/stx-platform/cephfs-provisioner-0.1.0.tgz
subpath: cephfs-provisioner
reference: master
dependencies:
- helm-toolkit
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: kube-system-ceph-pools-audit
@ -83,6 +112,7 @@ data:
chart_group:
- kube-system-rbd-provisioner
- kube-system-ceph-pools-audit
- kube-system-cephfs-provisioner
---
schema: armada/Manifest/v1
metadata: