enable cephfs

Add crd ceph-mds and create storage class for cephfs

Story: 2005527
Task: 41793

Change-Id: I8cd8cc545f4171fa2d730665064ca904eda20541
Signed-off-by: Chen, Haochuan Z <haochuan.z.chen@intel.com>
This commit is contained in:
Chen, Haochuan Z 2021-02-09 11:14:01 +08:00 committed by Austin Sun
parent 5945b9b82a
commit 8fcb259d37
6 changed files with 71 additions and 1 deletion

View File

@ -43,6 +43,7 @@ class RookCephHelm(base.BaseHelm):
overrides = {
common.HELM_NS_STORAGE_PROVISIONER: {
'cluster': self._get_cluster_override(),
'mds': self._get_mds_override(),
'hook': self._get_hook_override(),
}
}
@ -73,6 +74,18 @@ class RookCephHelm(base.BaseHelm):
else:
return 3
def _get_mds_override(self):
    """Build the helm override values for the CephFS metadata server (MDS).

    AIO-simplex systems have a single host, so only one replica is
    possible there; every other system type gets two replicas.

    :returns: dict with the 'replica' count for the mds chart section.
    """
    replica_count = 1 if cutils.is_aio_simplex_system(self.dbapi) else 2
    return {'replica': replica_count}
def _get_hook_override(self):
hook = {
'cleanup': {

View File

@ -8,5 +8,5 @@ import setuptools
# pbr-based setup: all real package metadata lives in setup.cfg.
# NOTE: the duplicated setup_requires keyword (a SyntaxError) was removed,
# keeping the relaxed 'pbr>=0.5' pin introduced by this change.
setuptools.setup(
    setup_requires=['pbr>=0.5'],
    pbr=True)

View File

@ -0,0 +1,27 @@
# Helm template: StorageClass for CephFS-backed PVCs.
# Rendered only when .Values.global.cephfs_storage is enabled.
{{- if .Values.global.cephfs_storage }}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rook-cephfs
# CSI driver name; must match the CephFS provisioner deployed by the chart
provisioner: {{ .Values.cephfsStorage.provisioner_name }}
parameters:
# clusterID is the namespace the rook cluster is deployed into
clusterID: {{ .Release.Namespace }}
# CephFS filesystem name into which the volume shall be created
fsName: {{ .Values.cephfsStorage.fs_name }}
#
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: {{ .Values.cephfsStorage.pool_name }}
#
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: {{ .Release.Namespace }}
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: {{ .Release.Namespace }}
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: {{ .Release.Namespace }}
# delete the backing CephFS volume when the PVC is removed
reclaimPolicy: Delete
{{- end }}

View File

@ -12,6 +12,7 @@ global:
configmap_key_init: ceph-key-init-bin
#
provision_storage: true
cephfs_storage: true
job_ceph_mgr_provision: true
job_ceph_mon_audit: false
job_ceph_osd_audit: true
@ -75,6 +76,12 @@ provisionStorage:
chunk_size: 8
# Values consumed by the rook-cephfs StorageClass template
# (provisioner name, CephFS filesystem name, and backing data pool).
cephfsStorage:
provisioner_name: kube-system.cephfs.csi.ceph.com
fs_name: stxfs
pool_name: stxfs-data0
host_provision:
controller_hosts:
- controller-0

View File

@ -104,3 +104,20 @@ spec:
directories:
- path: "{{ .Values.cluster.storage.dataPath }}"
{{ end }}
---
# CephFilesystem custom resource: asks the rook operator to create the
# CephFS filesystem (metadata + data pools) and run its metadata servers.
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
name: {{ .Values.mds.name }}
namespace: {{ .Release.Namespace }}
spec:
# replication factor for the filesystem metadata pool
metadataPool:
replicated:
size: {{ .Values.mds.replica }}
# replication factor for the data pool (same value as metadata)
dataPools:
- replicated:
size: {{ .Values.mds.replica }}
# keep the CephFS data when this custom resource is deleted
preserveFilesystemOnDelete: true
metadataServer:
# one active MDS, with a standby running alongside it
activeCount: 1
activeStandby: true

View File

@ -25,6 +25,12 @@ cluster:
# Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
# CephFilesystem (MDS) settings; 'replica' is the pool replication size
# and may be overridden per system type by the application plugin.
mds:
name: stxfs
replica: 3
toolbox:
image:
prefix: rook