b867289ad2
The gitea service needs an HA shared filesystem, which is provided by cephfs and managed by rook.io. It also needs a database service, which is provided by percona-xtradb-cluster.

Change-Id: Ie019c2e24c3780cec2468a00987dba4ac34ed570
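For context, once this operator (defined in the manifest below) and a CephCluster are running, the shared filesystem that gitea mounts would be declared through the CephFilesystem CRD installed here. A minimal sketch follows; the filesystem name giteafs, the rook-ceph cluster namespace, and the replication sizes are illustrative assumptions, not part of this change:

# Illustrative sketch only -- names and sizes are assumptions, not part of this change.
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: giteafs          # hypothetical filesystem name
  namespace: rook-ceph   # hypothetical cluster namespace
spec:
  metadataPool:
    replicated:
      size: 3
  dataPools:
    - replicated:
        size: 3
  metadataServer:
    activeCount: 1
    activeStandby: true
---
# A consuming pod (e.g. gitea) would then mount it via the rook flexvolume driver
# (the rook v0.9 consumption model), roughly:
# volumes:
#   - name: gitea-data
#     flexVolume:
#       driver: ceph.rook.io/rook
#       fsType: ceph
#       options:
#         fsName: giteafs
#         clusterNamespace: rook-ceph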
475 lines
12 KiB
YAML
apiVersion: v1
kind: Namespace
metadata:
  name: rook-ceph-system
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: cephclusters.ceph.rook.io
spec:
  group: ceph.rook.io
  names:
    kind: CephCluster
    listKind: CephClusterList
    plural: cephclusters
    singular: cephcluster
  scope: Namespaced
  version: v1
  validation:
    openAPIV3Schema:
      properties:
        spec:
          properties:
            cephVersion:
              properties:
                allowUnsupported:
                  type: boolean
                image:
                  type: string
                name:
                  pattern: ^(luminous|mimic|nautilus)$
                  type: string
            dashboard:
              properties:
                enabled:
                  type: boolean
                urlPrefix:
                  type: string
            dataDirHostPath:
              pattern: ^/(\S+)
              type: string
            mon:
              properties:
                allowMultiplePerNode:
                  type: boolean
                count:
                  maximum: 9
                  minimum: 1
                  type: integer
              required:
                - count
            network:
              properties:
                hostNetwork:
                  type: boolean
            storage:
              properties:
                nodes:
                  items: {}
                  type: array
                useAllDevices: {}
                useAllNodes:
                  type: boolean
          required:
            - mon
  additionalPrinterColumns:
    - name: DataDirHostPath
      type: string
      description: Directory used on the K8s nodes
      JSONPath: .spec.dataDirHostPath
    - name: MonCount
      type: string
      description: Number of MONs
      JSONPath: .spec.mon.count
    - name: Age
      type: date
      JSONPath: .metadata.creationTimestamp
    - name: State
      type: string
      description: Current State
      JSONPath: .status.state
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: cephfilesystems.ceph.rook.io
spec:
  group: ceph.rook.io
  names:
    kind: CephFilesystem
    listKind: CephFilesystemList
    plural: cephfilesystems
    singular: cephfilesystem
  scope: Namespaced
  version: v1
  additionalPrinterColumns:
    - name: MdsCount
      type: string
      description: Number of MDSs
      JSONPath: .spec.metadataServer.activeCount
    - name: Age
      type: date
      JSONPath: .metadata.creationTimestamp
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: cephobjectstores.ceph.rook.io
spec:
  group: ceph.rook.io
  names:
    kind: CephObjectStore
    listKind: CephObjectStoreList
    plural: cephobjectstores
    singular: cephobjectstore
  scope: Namespaced
  version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: cephobjectstoreusers.ceph.rook.io
spec:
  group: ceph.rook.io
  names:
    kind: CephObjectStoreUser
    listKind: CephObjectStoreUserList
    plural: cephobjectstoreusers
    singular: cephobjectstoreuser
  scope: Namespaced
  version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: cephblockpools.ceph.rook.io
spec:
  group: ceph.rook.io
  names:
    kind: CephBlockPool
    listKind: CephBlockPoolList
    plural: cephblockpools
    singular: cephblockpool
  scope: Namespaced
  version: v1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: volumes.rook.io
spec:
  group: rook.io
  names:
    kind: Volume
    listKind: VolumeList
    plural: volumes
    singular: volume
    shortNames:
      - rv
  scope: Namespaced
  version: v1alpha2
---
# The cluster role for managing all the cluster-specific resources in a namespace
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: rook-ceph-cluster-mgmt
  labels:
    operator: rook
    storage-backend: ceph
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
      - pods
      - pods/log
      - services
      - configmaps
    verbs:
      - get
      - list
      - watch
      - patch
      - create
      - update
      - delete
  - apiGroups:
      - extensions
    resources:
      - deployments
      - daemonsets
      - replicasets
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
---
# The role for the operator to manage resources in the system namespace
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: rook-ceph-system
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - configmaps
    verbs:
      - get
      - list
      - watch
      - patch
      - create
      - update
      - delete
  - apiGroups:
      - extensions
    resources:
      - daemonsets
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
---
# The cluster role for managing the Rook CRDs
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: rook-ceph-global
  labels:
    operator: rook
    storage-backend: ceph
rules:
  - apiGroups:
      - ""
    resources:
      # Pod access is needed for fencing
      - pods
      # Node access is needed for determining nodes where mons should run
      - nodes
      - nodes/proxy
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
      # PVs and PVCs are managed by the Rook provisioner
      - persistentvolumes
      - persistentvolumeclaims
    verbs:
      - get
      - list
      - watch
      - patch
      - create
      - update
      - delete
  - apiGroups:
      - storage.k8s.io
    resources:
      - storageclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - batch
    resources:
      - jobs
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
  - apiGroups:
      - ceph.rook.io
    resources:
      - "*"
    verbs:
      - "*"
  - apiGroups:
      - rook.io
    resources:
      - "*"
    verbs:
      - "*"
---
# Aspects of ceph-mgr that require cluster-wide access
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-mgr-cluster
  labels:
    operator: rook
    storage-backend: ceph
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - nodes
      - nodes/proxy
    verbs:
      - get
      - list
      - watch
---
# The rook system service account used by the operator, agent, and discovery pods
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rook-ceph-system
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
---
# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-system
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-system
subjects:
  - kind: ServiceAccount
    name: rook-ceph-system
    namespace: rook-ceph-system
---
# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-global
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rook-ceph-global
subjects:
  - kind: ServiceAccount
    name: rook-ceph-system
    namespace: rook-ceph-system
---
# The deployment for the rook operator
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: rook-ceph-operator
  namespace: rook-ceph-system
  labels:
    operator: rook
    storage-backend: ceph
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: rook-ceph-operator
    spec:
      serviceAccountName: rook-ceph-system
      containers:
        - name: rook-ceph-operator
          image: rook/ceph:v0.9.0
          args: ["ceph", "operator"]
          volumeMounts:
            - mountPath: /var/lib/rook
              name: rook-config
            - mountPath: /etc/ceph
              name: default-config-dir
          env:
            # To disable RBAC, uncomment the following:
            # - name: RBAC_ENABLED
            #   value: "false"
            # Rook Agent toleration. Will tolerate all taints with all keys.
            # Choose between NoSchedule, PreferNoSchedule and NoExecute:
            # - name: AGENT_TOLERATION
            #   value: "NoSchedule"
            # (Optional) Rook Agent toleration key. Set this to the key of the taint you want to tolerate
            # - name: AGENT_TOLERATION_KEY
            #   value: "<KeyOfTheTaintToTolerate>"
            # (Optional) Rook Agent mount security mode. Can be `Any` or `Restricted`.
            # `Any` uses Ceph admin credentials by default/fallback.
            # To use `Restricted`, create a Ceph secret in each namespace that storage will be consumed from,
            # and set `mountUser` to the Ceph user and `mountSecret` to the name of that Kubernetes secret.
            # - name: AGENT_MOUNT_SECURITY_MODE
            #   value: "Any"
            # Set the path where the Rook agent can find the flex volumes
            # - name: FLEXVOLUME_DIR_PATH
            #   value: /var/lib/kubelet/volumeplugins
            # Set the path where kernel modules can be found
            # - name: LIB_MODULES_DIR_PATH
            #   value: "<PathToLibModules>"
            # Mount any extra directories into the agent container
            # - name: AGENT_MOUNTS
            #   value: "rootfs=/:/rootfs,varlibkubelet=/var/lib/kubelet:/var/lib/kubelet"
            # Rook Discover toleration. Will tolerate all taints with all keys.
            # Choose between NoSchedule, PreferNoSchedule and NoExecute:
            # - name: DISCOVER_TOLERATION
            #   value: "NoSchedule"
            # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate
            # - name: DISCOVER_TOLERATION_KEY
            #   value: "<KeyOfTheTaintToTolerate>"
            # Allow rook to create multiple file systems. Note: This is considered
            # an experimental feature in Ceph as described at
            # http://docs.ceph.com/docs/master/cephfs/experimental-features/#multiple-filesystems-within-a-ceph-cluster
            # which might cause mons to crash as seen in https://github.com/rook/rook/issues/1027
            - name: ROOK_ALLOW_MULTIPLE_FILESYSTEMS
              value: "false"
            # The logging level for the operator: INFO | DEBUG
            - name: ROOK_LOG_LEVEL
              value: "INFO"
            # The interval to check if every mon is in the quorum.
            - name: ROOK_MON_HEALTHCHECK_INTERVAL
              value: "45s"
            # The duration to wait before trying to fail over or remove/replace the
            # current mon with a new mon (useful for compensating for a flapping network).
            - name: ROOK_MON_OUT_TIMEOUT
              value: "300s"
            # The duration between discovering devices in the rook-discover daemonset.
            - name: ROOK_DISCOVER_DEVICES_INTERVAL
              value: "60m"
            # Whether pods that mount a host path should be started as privileged; this includes the Ceph mon and osd pods.
            # This is necessary to work around the anyuid issues when running on OpenShift.
            # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
            - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
              value: "false"
            # The name of the node to pass with the downward API
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # The pod name to pass with the downward API
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            # The pod namespace to pass with the downward API
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      volumes:
        - name: rook-config
          emptyDir: {}
        - name: default-config-dir
          emptyDir: {}