Merge "[ceph] Update Rook release and comment out CephFS values"

This commit is contained in:
Zuul
2025-06-18 22:46:13 +00:00
committed by Gerrit Code Review
3 changed files with 80 additions and 76 deletions

View File

@@ -15,7 +15,7 @@
set -xe
# Specify the Rook release tag to use for the Rook operator here
ROOK_RELEASE=v1.16.6
ROOK_RELEASE=v1.17.3
: ${CEPH_OSD_DATA_DEVICE:="/dev/loop100"}
@@ -293,39 +293,41 @@ cephBlockPools:
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/fstype: ext4
cephFileSystems:
- name: cephfs
namespace: ceph
spec:
metadataPool:
replicated:
size: 1
dataPools:
- failureDomain: host
replicated:
size: 1
name: data
metadataServer:
activeCount: 1
activeStandby: false
priorityClassName: system-cluster-critical
storageClass:
enabled: true
isDefault: false
name: ceph-filesystem
pool: data0
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: "Immediate"
mountOptions: []
parameters:
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/fstype: ext4
cephFileSystems: []
# Not needed in general for openstack-helm. Uncomment if needed.
# cephFileSystems:
# - name: cephfs
# namespace: ceph
# spec:
# metadataPool:
# replicated:
# size: 1
# dataPools:
# - failureDomain: host
# replicated:
# size: 1
# name: data
# metadataServer:
# activeCount: 1
# activeStandby: false
# priorityClassName: system-cluster-critical
# storageClass:
# enabled: true
# isDefault: false
# name: ceph-filesystem
# pool: data0
# reclaimPolicy: Delete
# allowVolumeExpansion: true
# volumeBindingMode: "Immediate"
# mountOptions: []
# parameters:
# csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
# csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
# csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
# csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
# csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
# csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
# csi.storage.k8s.io/fstype: ext4
cephBlockPoolsVolumeSnapshotClass:
enabled: false
name: general

View File

@@ -21,7 +21,7 @@ set -x
# The default values deploy the Rook operator in the rook-ceph namespace and
# the Ceph cluster in the ceph namespace using rook-operator.yaml and
# rook-ceph.yaml in the current directory.
ROOK_RELEASE=${ROOK_RELEASE:-1.16.6}
ROOK_RELEASE=${ROOK_RELEASE:-1.17.3}
CEPH_RELEASE=${CEPH_RELEASE:-19.2.2}
ROOK_CEPH_NAMESPACE=${ROOK_CEPH_NAMESPACE:-rook-ceph}
CEPH_NAMESPACE=${CEPH_NAMESPACE:-ceph}

View File

@@ -14,7 +14,7 @@
set -xe
ROOK_RELEASE=v1.16.6
ROOK_RELEASE=v1.17.3
: ${CEPH_OSD_DATA_DEVICE:="/dev/loop100"}
@@ -537,46 +537,48 @@ cephBlockPools:
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/fstype: ext4
cephFileSystems:
- name: cephfs
namespace: ceph
spec:
metadataPool:
replicated:
size: 1
dataPools:
- failureDomain: host
replicated:
size: 1
name: data
metadataServer:
activeCount: 1
activeStandby: false
resources:
limits:
cpu: "250m"
memory: "50Mi"
requests:
cpu: "250m"
memory: "10Mi"
priorityClassName: system-cluster-critical
storageClass:
enabled: true
isDefault: false
name: ceph-filesystem
pool: data0
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: "Immediate"
mountOptions: []
parameters:
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/fstype: ext4
cephFileSystems: []
# Not needed in general for openstack-helm. Uncomment if needed.
# cephFileSystems:
# - name: cephfs
# namespace: ceph
# spec:
# metadataPool:
# replicated:
# size: 1
# dataPools:
# - failureDomain: host
# replicated:
# size: 1
# name: data
# metadataServer:
# activeCount: 1
# activeStandby: false
# resources:
# limits:
# cpu: "250m"
# memory: "50Mi"
# requests:
# cpu: "250m"
# memory: "10Mi"
# priorityClassName: system-cluster-critical
# storageClass:
# enabled: true
# isDefault: false
# name: ceph-filesystem
# pool: data0
# reclaimPolicy: Delete
# allowVolumeExpansion: true
# volumeBindingMode: "Immediate"
# mountOptions: []
# parameters:
# csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
# csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
# csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
# csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
# csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
# csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
# csi.storage.k8s.io/fstype: ext4
cephBlockPoolsVolumeSnapshotClass:
enabled: false
name: general