Merge "Use upstream rook yamls as base"

Authored by Zuul on 2021-06-11 16:52:38 +00:00; committed by Gerrit Code Review
commit b20777dd7d
27 changed files with 535 additions and 177 deletions

View File

@@ -0,0 +1,19 @@
In-place edits to the local copies of upstream Rook Custom Resource
examples are not recommended. The upstream examples can be considered
an immutable starting point.
Changes to the upstream examples should be made via Kustomize.
Rook Custom Resource Examples:
Upstream: https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/cluster.yaml
Local: cluster.yaml
Tag: v1.6.3
Upstream: https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/toolbox.yaml
Local: toolbox.yaml
Tag: v1.6.3
Kustomize Doc:
https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization
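
A minimal sketch of that workflow (the patch below is illustrative, not
part of this change): a kustomization.yaml placed next to the local
copies can override fields without editing cluster.yaml itself.

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: rook-ceph
resources:
- cluster.yaml
- toolbox.yaml
patchesJSON6902:
- target:
    kind: CephCluster
    name: rook-ceph
  # illustrative value only; any field in cluster.yaml can be targeted
  patch: |-
    - op: replace
      path: /spec/mon/count
      value: 5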

View File

@@ -1,9 +1,9 @@
# Use the config section below to create your custom
# ceph.conf file
# Refer to the documentation at:
# https://rook.io/docs/rook/v1.5/ceph-advanced-configuration.html#custom-cephconf-settings
# Be aware - the rook operator doesn't perform any validations and syntax check against
# the configuration below. Even a typo in this file can cause the entire cluster failure.
# https://rook.io/docs/rook/v1.6/ceph-advanced-configuration.html#custom-cephconf-settings
# Be aware - the rook operator doesn't perform any validations or syntax check against
# the configuration below. Even a typo in this file can cause an entire cluster failure.
kind: ConfigMap
apiVersion: v1
metadata:

View File

@@ -1,77 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
name: ceph
namespace: rook-ceph
spec:
dataDirHostPath: /var/lib/rook
cephVersion:
image: ceph/ceph:v15.2.11
#allowUnsupported: true
mon:
count: 3
allowMultiplePerNode: false
mgr:
count: 1
modules:
- name: pg_autoscaler
enabled: true
dashboard:
enabled: true
# If you are going to use the dashboard together with an ingress controller,
# make sure it is deployed.
ssl: true
crashCollector:
disable: false
network:
# Instead of 'host' you can enable the 'multus' network provider.
# However, the Multus network is in the EXPERIMENTAL stage.
provider: host
storage:
# Using the settings below is not recommended for a production environment
useAllNodes: true # Recommended setting is `false`
useAllDevices: true # Recommended setting is `false`
# # To gain more control over your deployment, you should uncomment the settings
# # listed below and set up your storage layout per node.
# # Please refer to the official rook documentation
# nodes:
# - name: changemes02
# devices:
# # You can use a list of devices (by path)
# # - name: /dev/disk/by-path/pci-0000:18:00.0-scsi-0:2:3:0
# # - name: /dev/disk/by-path/pci-0000:18:00.0-scsi-0:2:4:0
# # - name: /dev/disk/by-path/pci-0000:18:00.0-scsi-0:2:5:0
# # - name: /dev/disk/by-path/pci-0000:18:00.0-scsi-0:2:6:0
# # - name: /dev/disk/by-path/pci-0000:18:00.0-scsi-0:2:7:0
# # - name: /dev/disk/by-path/pci-0000:18:00.0-scsi-0:2:8:0
# - name: changemes04
# # Instead of listing available devices you can use a regex
# devicePathFilter: "^/dev/disk/by-path/pci-0000:18:00.0-scsi-0:2:[3-8]:0"
# - name: changemes05
# devices:
# # Or you can use drive names (directly)
# # - name: /dev/sdc
# # - name: /dev/sdd
# # - name: /dev/sde
# # - name: /dev/sdf
# # - name: /dev/sdg
# # - name: /dev/sdh
# - name: changemes06
# # Or via regex
# deviceFilter: "^/dev/sd[c-h]"
# You can also configure each device and/or each node. Please refer to the official rook
# documentation for the 1.5.x branch
# The section for configuring management of daemon disruptions during upgrade or fencing.
disruptionManagement:
# If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
# via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
# block eviction of OSDs by default and unblock them safely when drains are detected.
managePodBudgets: true
# A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
# default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
osdMaintenanceTimeout: 30
# A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
# The operator will continue with the next drain if the timeout is exceeded. This only applies if `managePodBudgets` is `true`.
# No value or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
pgHealthCheckTimeout: 0
---

View File

@@ -0,0 +1,267 @@
#################################################################################################################
# Define the settings for the rook-ceph cluster with common settings for a production cluster.
# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required
# in this example. See the documentation for more details on storage settings available.
# For example, to create the cluster:
# kubectl create -f crds.yaml -f common.yaml -f operator.yaml
# kubectl create -f cluster.yaml
#################################################################################################################
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
name: rook-ceph
namespace: rook-ceph # namespace:cluster
spec:
cephVersion:
# The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
# v13 is mimic, v14 is nautilus, and v15 is octopus.
# RECOMMENDATION: In production, use a specific version tag instead of the general v15 flag, which pulls the latest release and could result in different
# versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
# If you want to be more precise, you can always use a timestamp tag such as ceph/ceph:v15.2.11-20200419
# This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
image: ceph/ceph:v15.2.11
# Whether to allow unsupported versions of Ceph. Currently `nautilus` and `octopus` are supported.
# Future versions such as `pacific` would require this to be set to `true`.
# Do not set to true in production.
allowUnsupported: false
# The path on the host where configuration files will be persisted. Must be specified.
# Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
# In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
dataDirHostPath: /var/lib/rook
# Whether or not the upgrade should continue even if a check fails
# This means Ceph's status could be degraded and we don't recommend upgrading, but you might decide otherwise
# Use at your OWN risk
# To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades
skipUpgradeChecks: false
# Whether or not to continue if PGs are not clean during an upgrade
continueUpgradeAfterChecksEvenIfNotHealthy: false
# WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
# If the timeout is exceeded and the OSD is not ok to stop, the operator will skip the upgrade for the current OSD and proceed with the next one
# if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, the operator will
# continue with the upgrade of an OSD even if it is not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
# The default wait timeout is 10 minutes.
waitTimeoutForHealthyOSDInMinutes: 10
mon:
# Set the number of mons to be started. Must be an odd number, and is generally recommended to be 3.
count: 3
# The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
# Mons should only be allowed on the same node for test environments where data loss is acceptable.
allowMultiplePerNode: false
mgr:
# When higher availability of the mgr is needed, increase the count to 2.
# In that case, one mgr will be active and one in standby. When Ceph updates which
# mgr is active, Rook will update the mgr services to match the active mgr.
count: 1
modules:
# Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
# are already enabled by other settings in the cluster CR.
- name: pg_autoscaler
enabled: true
# enable the ceph dashboard for viewing cluster status
dashboard:
enabled: true
# serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
# urlPrefix: /ceph-dashboard
# serve the dashboard at the given port.
# port: 8443
# serve the dashboard using SSL
ssl: true
# enable prometheus alerting for cluster
monitoring:
# requires Prometheus to be pre-installed
enabled: false
# namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used.
# Recommended:
# If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty.
# If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
# deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
rulesNamespace: rook-ceph
network:
# enable host networking
#provider: host
# EXPERIMENTAL: enable the Multus network provider
#provider: multus
#selectors:
# The selector keys are required to be `public` and `cluster`.
# Based on the configuration, the operator will do the following:
# 1. if only the `public` selector key is specified, both the public_network and cluster_network Ceph settings will listen on that interface
# 2. if both `public` and `cluster` selector keys are specified, the first one will point to the 'public_network' flag and the second one to 'cluster_network'
#
# In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
#
#public: public-conf --> NetworkAttachmentDefinition object name in Multus
#cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
# Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
#ipFamily: "IPv6"
# Ceph daemons to listen on both IPv4 and IPv6 networks
#dualStack: false
# enable the crash collector for ceph daemon crash collection
crashCollector:
disable: false
# Uncomment daysToRetain to prune ceph crash entries older than the
# specified number of days.
#daysToRetain: 30
# enable the log collector; daemons will log to files and rotate
# logCollector:
# enabled: true
# periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days.
# automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
cleanupPolicy:
# Since cluster cleanup is destructive to data, confirmation is required.
# To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
# This value should only be set when the cluster is about to be deleted. After the confirmation is set,
# Rook will immediately stop configuring the cluster and only wait for the delete command.
# If the empty string is set, Rook will not destroy any data on hosts during uninstall.
confirmation: ""
# sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
sanitizeDisks:
# method indicates whether the entire disk should be sanitized or only ceph's metadata
# in both cases, re-installation is possible
# possible choices are 'complete' or 'quick' (default)
method: quick
# dataSource indicates where to get random bytes from to write on the disk
# possible choices are 'zero' (default) or 'random'
# using random sources will consume entropy from the system and will take much more time than the zero source
dataSource: zero
# iteration: overwrite N times instead of the default (1)
# takes an integer value
iteration: 1
# allowUninstallWithVolumes defines how the uninstall should be performed
# If set to true, cephCluster deletion does not wait for the PVs to be deleted.
allowUninstallWithVolumes: false
# To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
# The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
# tolerate taints with a key of 'storage-node'.
# placement:
# all:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: role
# operator: In
# values:
# - storage-node
# podAffinity:
# podAntiAffinity:
# topologySpreadConstraints:
# tolerations:
# - key: storage-node
# operator: Exists
# The above placement information can also be specified for mon, osd, and mgr components
# mon:
# Monitor deployments may contain an anti-affinity rule for avoiding monitor
# collocation on the same node. This is a required rule when host network is used
# or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
# preferred rule with weight: 50.
# osd:
# mgr:
# cleanup:
annotations:
# all:
# mon:
# osd:
# cleanup:
# prepareosd:
# If no mgr annotations are set, prometheus scrape annotations will be set by default.
# mgr:
labels:
# all:
# mon:
# osd:
# cleanup:
# mgr:
# prepareosd:
# monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by the operator.
# These labels can be passed as LabelSelector to Prometheus
# monitoring:
resources:
# The requests and limits set here allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
# mgr:
# limits:
# cpu: "500m"
# memory: "1024Mi"
# requests:
# cpu: "500m"
# memory: "1024Mi"
# The above example requests/limits can also be added to the other components
# mon:
# osd:
# prepareosd:
# mgr-sidecar:
# crashcollector:
# logcollector:
# cleanup:
# The option to automatically remove OSDs that are out and are safe to destroy.
removeOSDsIfOutAndSafeToRemove: false
# priorityClassNames:
# all: rook-ceph-default-priority-class
# mon: rook-ceph-mon-priority-class
# osd: rook-ceph-osd-priority-class
# mgr: rook-ceph-mgr-priority-class
storage: # cluster level storage configuration and selection
useAllNodes: true
useAllDevices: true
#deviceFilter:
config:
# crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
# metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
# databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
# journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller
# osdsPerDevice: "1" # this value can be overridden at the node or device level
# encryptedDevice: "true" # the default value for this option is "false"
# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
# nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
# nodes:
# - name: "172.17.4.201"
# devices: # specific devices to use for storage can be specified for each node
# - name: "sdb"
# - name: "nvme01" # multiple osds can be created on high performance devices
# config:
# osdsPerDevice: "5"
# - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
# config: # configuration can be specified at the node level which overrides the cluster level config
# - name: "172.17.4.301"
# deviceFilter: "^sd."
# The section for configuring management of daemon disruptions during upgrade or fencing.
disruptionManagement:
# If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
# via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
# block eviction of OSDs by default and unblock them safely when drains are detected.
managePodBudgets: true
# A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
# default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
osdMaintenanceTimeout: 30
# A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
# The operator will continue with the next drain if the timeout is exceeded. This only applies if `managePodBudgets` is `true`.
# No value or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
pgHealthCheckTimeout: 0
# If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
# Only available on OpenShift.
manageMachineDisruptionBudgets: false
# Namespace in which to watch for the MachineDisruptionBudgets.
machineDisruptionBudgetNamespace: openshift-machine-api
# healthChecks
# Valid values for daemons are 'mon', 'osd', 'status'
healthCheck:
daemonHealth:
mon:
disabled: false
interval: 45s
osd:
disabled: false
interval: 60s
status:
disabled: false
interval: 60s
# Change the pod liveness probe; it works for all mon, mgr, and osd daemons
livenessProbe:
mon:
disabled: false
mgr:
disabled: false
osd:
disabled: false
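
The storage defaults above (useAllNodes/useAllDevices set to true) are
discouraged for production; the per-node layout from the removed local
copy can be reintroduced without editing this file, for example via a
patchesStrategicMerge entry in the kustomization. A sketch, with
placeholder node and device names:

apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  storage:
    useAllNodes: false
    useAllDevices: false
    nodes:
    - name: changemes02           # placeholder hostname
      deviceFilter: "^sd[c-h]"    # placeholder device regex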

View File

@@ -3,8 +3,8 @@ kind: Kustomization
namespace: rook-ceph
resources:
- ceph-conf.yaml
- cephcluster.yaml
- rook-ceph-tools.yaml
- cluster.yaml
- toolbox.yaml
# Below is the functions section. You are free to comment or uncomment entries,
# enabling or disabling any function in the list.
# All functions are independent and can be deployed simultaneously at any moment.
@@ -15,12 +15,14 @@ resources:
patchesJSON6902:
- target:
kind: CephCluster
name: ceph
name: rook-ceph
patch: |-
- op: replace
path: /metadata/name
value: ceph
- op: replace
path: /spec/dashboard/enabled
value: true
- op: replace
path: /spec/dashboard/ssl
value: true
@@ -31,10 +33,19 @@ patchesJSON6902:
name: rook-ceph-mgr-dashboard
patch: |-
- op: replace
path: "/spec/ports"
path: /spec/ports
value:
- name: dashboard
port: 8443
protocol: TCP
targetPort: 8443
patchesStrategicMerge:
- |-
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
name: rook-ceph
namespace: rook-ceph
spec:
network:
provider: host
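
To check what the patches above produce before applying them, the tree
can be rendered with kubectl's built-in kustomize support (assuming the
current directory holds this kustomization.yaml):

kubectl kustomize . | less    # inspect the rendered manifests
kubectl apply -k .            # apply the kustomization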

View File

@@ -1,4 +1,3 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:

View File

@@ -0,0 +1,31 @@
In-place edits to the local copies of upstream Rook Custom Resource
examples are not recommended. The upstream examples can be considered
an immutable starting point.
Changes to the upstream examples should be made via Kustomize.
Rook Custom Resource Examples:
Upstream: https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/filesystem.yaml
Local: cephfs/base/filesystem.yaml
Tag: v1.6.3
Upstream: https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/dashboard-external-http.yaml
Local: dashboard/base/dashboard-external-http.yaml
Tag: v1.6.3
Upstream: https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/pool.yaml
Local: pools/base/pool.yaml
Tag: v1.6.3
Upstream: https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml
Local: storageclasses/block/storageclass.yaml
Tag: v1.6.3
Upstream: https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml
Local: storageclasses/file/storageclass.yaml
Tag: v1.6.3
Kustomize Doc:
https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization
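
A hypothetical refresh helper for bumping one local copy to the pinned
tag; it assumes the raw file path at the tag matches the upstream layout
listed above, so verify the URL before use:

# hypothetical: re-fetch filesystem.yaml at tag v1.6.3
curl -fsSL -o cephfs/base/filesystem.yaml \
  https://raw.githubusercontent.com/rook/rook/v1.6.3/cluster/examples/kubernetes/ceph/filesystem.yaml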

View File

@@ -7,7 +7,7 @@
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
name: cephfs
name: myfs
namespace: rook-ceph # namespace:cluster
spec:
# The metadata pool spec. Must use replication.
@@ -18,15 +18,36 @@ spec:
parameters:
# Inline compression mode for the data pool
# Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression
compression_mode: none
# gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
compression_mode:
none
# gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
# for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
#target_size_ratio: ".5"
# The list of data pool specs. Can use replication or erasure coding.
dataPools:
- failureDomain: host
replicated:
size: 3
# Disallow setting pool with replica 1; this could lead to data loss without recovery.
# Make sure you're *ABSOLUTELY CERTAIN* that is what you want
requireSafeReplicaSize: true
parameters:
# Inline compression mode for the data pool
# Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression
compression_mode:
none
# gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
# for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
#target_size_ratio: ".5"
# Whether to preserve filesystem after CephFilesystem CRD deletion
preserveFilesystemOnDelete: true
# The metadata service (mds) configuration
metadataServer:
# The number of active MDS instances
activeCount: 1
# Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover.
# If false, standbys will be available, but will not have a warm cache.
activeStandby: true
# The affinity rules to apply to the mds deployment
placement:
# nodeAffinity:
@@ -71,3 +92,14 @@ spec:
# A key/value list of labels
labels:
# key: value
resources:
# The requests and limits set here allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory
# limits:
# cpu: "500m"
# memory: "1024Mi"
# requests:
# cpu: "500m"
# memory: "1024Mi"
# priorityClassName: my-priority-class
mirroring:
enabled: false

View File

@@ -1,2 +1,10 @@
resources:
- filesystem.yaml
patchesJSON6902:
- target:
kind: CephFilesystem
name: myfs
patch: |-
- op: replace
path: /metadata/name
value: cephfs

View File

@@ -10,12 +10,3 @@ spec:
# Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover.
# If false, standbys will be available, but will not have a warm cache.
activeStandby: true
resources:
# The requests and limits set here allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory
# limits:
# cpu: "500m"
# memory: "1024Mi"
# requests:
# cpu: "500m"
# memory: "1024Mi"
# priorityClassName: my-priority-class

View File

@@ -1,5 +1,5 @@
resources:
- ./base
patchesStrategicMerge:
- cephfs-mds.yaml
- cephfs-pool.yaml
- cephfs-mds.yaml

View File

@@ -1,7 +1,7 @@
apiVersion: v1
kind: Service
metadata:
name: rook-ceph-mgr-dashboard
name: rook-ceph-mgr-dashboard-external-http
namespace: rook-ceph # namespace:cluster
labels:
app: rook-ceph-mgr
@@ -16,4 +16,4 @@ spec:
app: rook-ceph-mgr
rook_cluster: rook-ceph
sessionAffinity: None
type: NodePort
type: NodePort

View File

@@ -1,2 +1,10 @@
resources:
- external-dashboard.yaml
- dashboard-external-http.yaml
patchesJSON6902:
- target:
kind: Service
name: rook-ceph-mgr-dashboard-external-http
patch: |-
- op: replace
path: /metadata/name
value: rook-ceph-mgr-dashboard

View File

@@ -1,2 +1,2 @@
resources:
- ../base
- ../base

View File

@@ -1,2 +1,12 @@
resources:
- pool.yaml
patchesJSON6902:
- target:
kind: CephBlockPool
name: replicapool
patch: |-
- op: replace
path: /metadata/name
value: pool
- op: remove
path: /spec/annotations

View File

@@ -7,11 +7,10 @@
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
name: "pool"
name: replicapool
namespace: rook-ceph # namespace:cluster
spec:
# The failure domain will spread the replicas of the data across different failure zones
# Default value is host. Could be osd or rack, depending on your crushmap
failureDomain: host
# For a pool based on raw copies, specify the number of copies. A size of 1 indicates no redundancy.
replicated:

View File

@@ -1,7 +1,7 @@
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
name: "pool"
name: pool
namespace: rook-ceph # namespace:cluster
spec:
failureDomain: host

View File

@@ -1,55 +0,0 @@
##################################################
# Create storageclass for pv/pvc creation
# make sure the pool is created prior to this
# kubectl create -f storageclass.yaml
##################################################
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: storage-sc
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
clusterID: rook-ceph
# Ceph pool into which the RBD image shall be created
pool: pool1
# (optional) mapOptions is a comma-separated list of map options.
# For krbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
# mapOptions: lock_on_read,queue_depth=1024
# (optional) unmapOptions is a comma-separated list of unmap options.
# For krbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
# unmapOptions: force
# RBD image format. Defaults to "2".
imageFormat: "2"
# RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
imageFeatures: layering
# The secrets contain Ceph admin credentials.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
# Specify the filesystem type of the volume. If not specified, csi-provisioner
# will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
# in hyperconverged settings where the volume is mounted on the same node as the osds.
csi.storage.k8s.io/fstype: ext4
# allow pvc resize
allowVolumeExpansion: true
# Delete the rbd volume when a PVC is deleted
reclaimPolicy: Delete

View File

@@ -1,5 +1,13 @@
resources:
- block-storageclass.yaml
namePrefix: block-
- storageclass.yaml
patchesJSON6902:
- target:
kind: StorageClass
name: rook-ceph-block
patch: |-
- op: replace
path: /metadata/name
value: block-storage-sc
patchesStrategicMerge:
- patches/delete-pool.yaml
- patches/rook-ceph-block.yaml

View File

@@ -0,0 +1,6 @@
$patch: delete
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
name: replicapool
namespace: rook-ceph
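
The $patch: delete directive above removes the matching upstream
CephBlockPool from the rendered output instead of merging into it, so
pool creation stays in the pools/ overlay alone. A quick render check
(the directory path is an assumption):

# expect no CephBlockPool in the rendered block storageclass output
kubectl kustomize storageclasses/block | grep -c 'kind: CephBlockPool'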

View File

@@ -1,7 +1,7 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: storage-sc
name: rook-ceph-block
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph

View File

@@ -0,0 +1,74 @@
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
name: replicapool
namespace: rook-ceph
spec:
failureDomain: host
replicated:
size: 3
# Disallow setting pool with replica 1; this could lead to data loss without recovery.
# Make sure you're *ABSOLUTELY CERTAIN* that is what you want
requireSafeReplicaSize: true
# gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
# for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
#targetSizeRatio: .5
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rook-ceph-block
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph # namespace:cluster
# If you want to use an erasure coded pool with RBD, you need to create
# two pools: one erasure coded and one replicated.
# You need to specify the replicated pool here in the `pool` parameter; it is
# used for the metadata of the images.
# The erasure coded pool must be set as the `dataPool` parameter below.
#dataPool: ec-data-pool
pool: replicapool
# (optional) mapOptions is a comma-separated list of map options.
# For krbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
# mapOptions: lock_on_read,queue_depth=1024
# (optional) unmapOptions is a comma-separated list of unmap options.
# For krbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
# unmapOptions: force
# RBD image format. Defaults to "2".
imageFormat: "2"
# RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
imageFeatures: layering
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster
# Specify the filesystem type of the volume. If not specified, csi-provisioner
# will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
# in hyperconverged settings where the volume is mounted on the same node as the osds.
csi.storage.k8s.io/fstype: ext4
# uncomment the following to use rbd-nbd as mounter on supported nodes
# **IMPORTANT**: If you are using rbd-nbd as the mounter, during upgrade you will hit a ceph-csi
# issue that causes the mount to be disconnected. You will need to follow special upgrade steps
# to restart your application pods. Therefore, this option is not recommended.
#mounter: rbd-nbd
allowVolumeExpansion: true
reclaimPolicy: Delete

View File

@@ -1,5 +1,18 @@
resources:
- file-storageclass.yaml
namePrefix: cephfs-
- storageclass.yaml
patchesJSON6902:
- target:
kind: StorageClass
name: rook-cephfs
patch: |-
- op: replace
path: /metadata/name
value: cephfs-storage-sc
- op: replace
path: /parameters/fsName
value: cephfs
- op: replace
path: /parameters/pool
value: cephfs-data0
patchesStrategicMerge:
- patches/rook-ceph-cephfs.yaml

View File

@@ -1,7 +1,7 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: storage-sc
name: rook-cephfs
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
clusterID: rook-ceph

View File

@@ -1,24 +1,18 @@
##################################################
# Create storageclass for pv/pvc creation
# make sure the pool is created prior to this
# kubectl create -f storageclass.yaml
##################################################
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: storage-sc
name: rook-cephfs
provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator
parameters:
# clusterID is the namespace where the operator is deployed.
clusterID: rook-ceph # namespace:cluster
# CephFS filesystem name into which the volume shall be created
fsName: cephfs
fsName: myfs
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: cephfs-data0
pool: myfs-data0
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.

View File

@@ -0,0 +1,11 @@
Upstream: https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/common.yaml
Local: common.yaml
Tag: v1.6.3
Upstream: https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/crds.yaml
Local: crds.yaml
Tag: v1.6.3
Upstream: https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/operator.yaml
Local: operator.yaml
Tag: v1.6.3

View File

@@ -3,3 +3,12 @@ resources:
- crds.yaml
- common.yaml
- operator.yaml
patchesStrategicMerge:
- |-
kind: ConfigMap
apiVersion: v1
metadata:
name: rook-ceph-operator-config
namespace: rook-ceph
data:
CSI_ENABLE_HOST_NETWORK: "true"
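
To confirm the operator picks up the patched setting, both the rendered
ConfigMap and the live object can be checked (the directory path is an
assumption; the namespace and key come from the patch above):

kubectl kustomize . | grep CSI_ENABLE_HOST_NETWORK
kubectl -n rook-ceph get configmap rook-ceph-operator-config \
  -o jsonpath='{.data.CSI_ENABLE_HOST_NETWORK}'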