b867289ad2
The gitea service needs an HA shared filesystem, which is provided by cephfs and managed by rook.io. It also needs a database service, which is provided by percona-xtradb-cluster.

Change-Id: Ie019c2e24c3780cec2468a00987dba4ac34ed570
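The manifest below covers only the shared-filesystem half of that change; the database half just needs gitea pointed at the MySQL-compatible service fronting the percona-xtradb-cluster pods. A minimal, hypothetical sketch of that wiring follows; the service name percona-xtradb-cluster, the gitea-db Secret, the gitea namespace, and the GITEA__section__KEY environment convention of the official gitea container image are assumptions, not part of this change.

# Hypothetical wiring of gitea to the Percona XtraDB Cluster database service.
# All names and the env-var convention here are assumptions for illustration.
apiVersion: v1
kind: Pod
metadata:
  name: gitea-db-example
  namespace: gitea
spec:
  containers:
    - name: gitea
      image: gitea/gitea:1.14        # illustrative tag only
      env:
        - name: GITEA__database__DB_TYPE
          value: mysql
        - name: GITEA__database__HOST
          # Kubernetes Service assumed to front the percona-xtradb-cluster pods
          value: percona-xtradb-cluster:3306
        - name: GITEA__database__NAME
          value: gitea
        - name: GITEA__database__USER
          value: gitea
        - name: GITEA__database__PASSWD
          valueFrom:
            secretKeyRef:
              name: gitea-db         # assumed Secret holding the DB password
              key: password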
47 lines | 1.3 KiB | YAML
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: myfs
  namespace: rook-ceph
spec:
  # The metadata pool spec
  metadataPool:
    replicated:
      # Increase the replication size if you have more than one osd
      size: 3
  # The list of data pool specs
  dataPools:
    - failureDomain: osd
      replicated:
        size: 3
  # The metadata service (mds) configuration
  metadataServer:
    # The number of active MDS instances
    activeCount: 1
    # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover.
    # If false, standbys will be available, but will not have a warm cache.
    activeStandby: true
    # The affinity rules to apply to the mds deployment
    placement:
    #  nodeAffinity:
    #    requiredDuringSchedulingIgnoredDuringExecution:
    #      nodeSelectorTerms:
    #      - matchExpressions:
    #        - key: role
    #          operator: In
    #          values:
    #          - mds-node
    #  tolerations:
    #  - key: mds-node
    #    operator: Exists
    #  podAffinity:
    #  podAntiAffinity:
    resources:
    # The requests and limits set here, allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory
    #  limits:
    #    cpu: "500m"
    #    memory: "1024Mi"
    #  requests:
    #    cpu: "500m"
    #    memory: "1024Mi"
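The CephFilesystem above only creates the filesystem inside the Rook cluster; a consumer such as gitea still needs a StorageClass and claim to mount it. A minimal sketch of that consumption, assuming the Rook CephFS CSI driver is deployed in the rook-ceph namespace; the StorageClass name rook-cephfs, the default data pool name myfs-data0, the claim name, and the gitea namespace are assumptions, not part of this change.

# Hypothetical StorageClass backed by the "myfs" filesystem defined above,
# assuming the Rook CephFS CSI driver (rook-ceph.cephfs.csi.ceph.com) is deployed.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-cephfs
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  clusterID: rook-ceph
  fsName: myfs
  # Assumed name Rook gives the first (unnamed) data pool of "myfs";
  # verify against the actual cluster before use.
  pool: myfs-data0
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Retain
---
# Hypothetical shared claim for gitea; ReadWriteMany is what lets multiple
# gitea replicas mount the same repository data, which is the HA requirement.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitea-shared-data
  namespace: gitea
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
  storageClassName: rook-cephfs

Each gitea replica would then mount gitea-shared-data as a volume, so repository data stays consistent across pod restarts and rescheduling.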