Add RadosGateway support to the rook-ceph cluster

This is to support the RadosGateway functionality provided by upstream Rook. The patchset contains:

* RGW deployment module
* Storage class needed for RGW-related operations
* Object Bucket Claim
* Supporting files for daily operations on the Rook-based RGW service

Change-Id: I16c9324eefa74dc6fbe3294334083e5a7f387b54
parent 433a38cf0b
commit a97dcad92f
@@ -19,4 +19,4 @@ data:
 [mon]
 auth_allow_insecure_global_id_reclaim = false
 # [osd]
-
+# [rgw]
@@ -12,6 +12,7 @@ resources:
 - ../../function/rook-cluster/cephfs
 - ../../function/rook-cluster/dashboard/http
 - ../../function/rook-cluster/storageclasses
+- ../../function/rook-cluster/objectstore
 # Resource customizations
 patchesJSON6902:
 - target:
@@ -0,0 +1,11 @@
+apiVersion: kpt.dev/v1alpha1
+kind: Kptfile
+metadata:
+  name: upstream
+upstream:
+  type: git
+  git:
+    commit: 69591248f69e23964734f0192944ef2442bc7885
+    repo: https://github.com/rook/rook
+    directory: cluster/examples/kubernetes/ceph/csi/cephfs
+    ref: v1.6.3
@@ -0,0 +1,135 @@
+#################################################################################################################
+# Create an object store with settings for replication in a production environment. A minimum of 3 hosts with
+# OSDs are required in this example.
+#  kubectl create -f object.yaml
+#################################################################################################################
+
+apiVersion: ceph.rook.io/v1
+kind: CephObjectStore
+metadata:
+  name: my-store
+  namespace: rook-ceph # namespace:cluster
+spec:
+  # The pool spec used to create the metadata pools. Must use replication.
+  metadataPool:
+    failureDomain: host
+    replicated:
+      size: 3
+      # Disallow setting pool with replica 1, this could lead to data loss without recovery.
+      # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
+      requireSafeReplicaSize: true
+    parameters:
+      # Inline compression mode for the data pool
+      # Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression
+      compression_mode: none
+      # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
+      # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
+      #target_size_ratio: ".5"
+  # The pool spec used to create the data pool. Can use replication or erasure coding.
+  dataPool:
+    failureDomain: host
+    replicated:
+      size: 3
+      # Disallow setting pool with replica 1, this could lead to data loss without recovery.
+      # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
+      requireSafeReplicaSize: true
+    parameters:
+      # Inline compression mode for the data pool
+      # Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression
+      compression_mode: none
+      # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
+      # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
+      #target_size_ratio: ".5"
+  # Whether to preserve metadata and data pools on object store deletion
+  preservePoolsOnDelete: false
+  # The gateway service configuration
+  gateway:
+    # A reference to the secret in the rook namespace where the ssl certificate is stored
+    # sslCertificateRef:
+    # The port that RGW pods will listen on (http)
+    port: 80
+    # The port that RGW pods will listen on (https). An ssl certificate is required.
+    # securePort: 443
+    # The number of pods in the rgw deployment
+    instances: 1
+    # The affinity rules to apply to the rgw deployment.
+    placement:
+      podAntiAffinity:
+        preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                  - key: app
+                    operator: In
+                    values:
+                      - rook-ceph-rgw
+              # topologyKey: */zone can be used to spread RGW across different AZ
+              # Use <topologyKey: failure-domain.beta.kubernetes.io/zone> if your cluster is v1.16 or lower
+              # Use <topologyKey: topology.kubernetes.io/zone> if your cluster is v1.17 or higher
+              topologyKey: kubernetes.io/hostname
+      # A key/value list of annotations
+      # nodeAffinity:
+      #   requiredDuringSchedulingIgnoredDuringExecution:
+      #     nodeSelectorTerms:
+      #     - matchExpressions:
+      #       - key: role
+      #         operator: In
+      #         values:
+      #         - rgw-node
+      # topologySpreadConstraints:
+      # tolerations:
+      # - key: rgw-node
+      #   operator: Exists
+      # podAffinity:
+      # podAntiAffinity:
+    # A key/value list of annotations
+    annotations:
+    #  key: value
+    # A key/value list of labels
+    labels:
+    #  key: value
+    resources:
+    # The requests and limits set here allow the object store gateway Pod(s) to use half of one CPU core and 1 gigabyte of memory
+    #  limits:
+    #    cpu: "500m"
+    #    memory: "1024Mi"
+    #  requests:
+    #    cpu: "500m"
+    #    memory: "1024Mi"
+    # priorityClassName: my-priority-class
+  #zone:
+    #name: zone-a
+  # service endpoint healthcheck
+  healthCheck:
+    bucket:
+      disabled: false
+      interval: 60s
+    # Configure the pod liveness probe for the rgw daemon
+    livenessProbe:
+      disabled: false
+  # security oriented settings
+  # security:
+  #   # To enable the KMS configuration properly don't forget to uncomment the Secret at the end of the file
+  #   kms:
+  #     # name of the config map containing all the kms connection details
+  #     connectionDetails:
+  #       KMS_PROVIDER: "vault"
+  #       VAULT_ADDR: VAULT_ADDR_CHANGE_ME # e.g.: http://vault.my-domain.com:8200
+  #       VAULT_BACKEND_PATH: "rook"
+  #       VAULT_SECRET_ENGINE: "kv"
+  #       VAULT_BACKEND: v2
+  #     # name of the secret containing the kms authentication token
+  #     tokenSecretName: rook-vault-token
+# # UNCOMMENT THIS TO ENABLE A KMS CONNECTION
+# # Also, do not forget to replace both:
+# #  * ROOK_TOKEN_CHANGE_ME: with a base64 encoded value of the token to use
+# #  * VAULT_ADDR_CHANGE_ME: with the Vault address
+# ---
+# apiVersion: v1
+# kind: Secret
+# metadata:
+#   name: rook-vault-token
+#   namespace: rook-ceph # namespace:cluster
+# data:
+#   token: ROOK_TOKEN_CHANGE_ME
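Not part of the commit, but a quick way to sanity-check the result once the overlay below (which renames the store from my-store to rgw) has been applied. A minimal verification sketch, assuming kubectl access to the cluster; the app=rook-ceph-rgw label is the same one the anti-affinity rule above selects on:

# Confirm the CephObjectStore CR exists and watch its status:
#   kubectl -n rook-ceph get cephobjectstore rgw
# Confirm the RGW gateway pod(s) are running:
#   kubectl -n rook-ceph get pods -l app=rook-ceph-rgw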
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+  - base/upstream/object.yaml
+patches:
+  - path: patches/labels-and-resources.yaml
+patchesJSON6902:
+  - target:
+      kind: CephObjectStore
+      name: my-store
+    patch: |-
+      - op: replace
+        path: /metadata/name
+        value: rgw
@@ -0,0 +1,22 @@
+apiVersion: ceph.rook.io/v1
+kind: CephObjectStore
+metadata:
+  name: my-store
+  namespace: rook-ceph # namespace:cluster
+spec:
+  gateway:
+    # A key/value list of annotations
+    annotations:
+      backend: beast
+    labels:
+      application: ceph
+      component: rgw
+    resources:
+      # Resource requests and limits are set here. RGW pod(s) require half of one CPU core and 1 GiB of memory
+      # to be scheduled on a node. Once scheduled the pod(s) are limited to two CPU cores and 4 GiB of memory.
+      limits:
+        cpu: "2000m"
+        memory: "4096Mi"
+      requests:
+        cpu: "500m"
+        memory: "1024Mi"
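A quick way to check that this strategic-merge patch (beast backend annotation, labels, resources) and the JSON6902 rename to rgw compose correctly is to render the overlay without applying it. A minimal sketch; the path placeholder is illustrative, not from the commit:

# Render the objectstore overlay and inspect the output:
#   kustomize build <path-to>/rook-cluster/objectstore
# or, with kubectl's built-in kustomize support:
#   kubectl kustomize <path-to>/rook-cluster/objectstore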
@@ -1,2 +1,3 @@
 resources:
 - rbd
+- rgw-buckets
@@ -0,0 +1,10 @@
+resources:
+  - upstream/object-bucket-claim-delete.yaml
+patchesJSON6902:
+  - target:
+      kind: ObjectBucketClaim
+      name: ceph-delete-bucket
+    patch: |-
+      - op: replace
+        path: /metadata/name
+        value: object-bucket-claim
@@ -0,0 +1,11 @@
+apiVersion: kpt.dev/v1alpha1
+kind: Kptfile
+metadata:
+  name: upstream
+upstream:
+  type: git
+  git:
+    commit: 69591248f69e23964734f0192944ef2442bc7885
+    repo: https://github.com/rook/rook
+    directory: cluster/examples/kubernetes/ceph/csi/cephfs
+    ref: v1.6.3
@@ -0,0 +1,17 @@
+apiVersion: objectbucket.io/v1alpha1
+kind: ObjectBucketClaim
+metadata:
+  name: ceph-delete-bucket
+spec:
+  # To create a new bucket specify either `bucketName` or
+  # `generateBucketName` here. Both cannot be used. To access
+  # an existing bucket the bucket name needs to be defined in
+  # the StorageClass referenced here, and both `bucketName` and
+  # `generateBucketName` must be omitted in the OBC.
+  #bucketName:
+  generateBucketName: ceph-bkt
+  storageClassName: rook-ceph-delete-bucket
+  additionalConfig:
+    # To set a quota for the OBC
+    #maxObjects: "1000"
+    #maxSize: "2G"
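Once an OBC like this one is bound, the bucket provisioner creates a ConfigMap (BUCKET_HOST, BUCKET_NAME, BUCKET_PORT) and a Secret (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) with the same name as the claim, in the claim's namespace. A minimal consumer sketch, not part of the commit; the pod name and image are placeholders for illustration:

apiVersion: v1
kind: Pod
metadata:
  name: obc-consumer          # hypothetical name, illustration only
spec:
  containers:
    - name: app
      image: busybox          # placeholder image
      command: ["sh", "-c", "env | grep -e BUCKET_ -e AWS_ && sleep 3600"]
      envFrom:
        # ConfigMap created by the provisioner, named after the OBC
        - configMapRef:
            name: ceph-delete-bucket
        # Secret created by the provisioner, named after the OBC
        - secretRef:
            name: ceph-delete-bucket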
@@ -0,0 +1,10 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ../base
+# This should be changed to match current name and prefix
+# after first stage of testing - vs422h
+namePrefix: bucket-
+
+patchesStrategicMerge:
+  - obc.yaml
@@ -0,0 +1,18 @@
+apiVersion: objectbucket.io/v1alpha1
+kind: ObjectBucketClaim
+metadata:
+  name: object-bucket-claim
+spec:
+  # To create a new bucket specify either `bucketName` or
+  # `generateBucketName` here. Both cannot be used. To access
+  # an existing bucket the bucket name needs to be defined in
+  # the StorageClass referenced here, and both `bucketName` and
+  # `generateBucketName` must be omitted in the OBC.
+
+  # This should be changed to match current name and prefix
+  # after first stage of testing - vs422h
+  bucketName: bucket
+  storageClassName: object-storage-sc
+  additionalConfig:
+    maxObjects: "1000"
+    maxSize: "2G"
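Worth noting: the namePrefix: bucket- in the kustomization above is applied to this claim when the overlay is rendered, so the object actually created in the cluster is named bucket-object-bucket-claim (and the provisioner's generated ConfigMap/Secret follow that name). A sketch of the rendered result, per standard kustomize namePrefix semantics:

apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: bucket-object-bucket-claim   # namePrefix "bucket-" + patched name
spec:
  bucketName: bucket
  storageClassName: object-storage-sc
  additionalConfig:
    maxObjects: "1000"
    maxSize: "2G"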
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+  - base
+  - bucket
@@ -1,3 +1,4 @@
 resources:
 - block
 - file
+- object
@@ -0,0 +1,12 @@
+resources:
+  - upstream/storageclass-bucket-delete.yaml
+patchesJSON6902:
+  - target:
+      kind: StorageClass
+      name: rook-ceph-delete-bucket
+    patch: |-
+      - op: replace
+        path: /metadata/name
+        value: object-storage-sc
+patchesStrategicMerge:
+  - patches/rook-ceph-object-sc.yaml
@@ -0,0 +1,13 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: rook-ceph-delete-bucket
+parameters:
+  objectStoreName: rgw
+  objectStoreNamespace: rook-ceph # namespace:cluster
+  region: us-east-1
+  # To accommodate brownfield cases reference the existing bucket name here instead
+  # of in the ObjectBucketClaim (OBC). In this case the provisioner will grant
+  # access to the bucket by creating a new user, attaching it to the bucket, and
+  # providing the credentials via a Secret in the namespace of the requesting OBC.
+  #bucketName:
@@ -0,0 +1,11 @@
+apiVersion: kpt.dev/v1alpha1
+kind: Kptfile
+metadata:
+  name: upstream
+upstream:
+  type: git
+  git:
+    commit: 69591248f69e23964734f0192944ef2442bc7885
+    repo: https://github.com/rook/rook
+    directory: cluster/examples/kubernetes/ceph/csi/cephfs
+    ref: v1.6.3
@@ -0,0 +1,17 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: rook-ceph-delete-bucket
+provisioner: rook-ceph.ceph.rook.io/bucket # driver:namespace:cluster
+# set the reclaim policy to delete the bucket and all objects
+# when its OBC is deleted.
+reclaimPolicy: Delete
+parameters:
+  objectStoreName: my-store
+  objectStoreNamespace: rook-ceph # namespace:cluster
+  region: us-east-1
+  # To accommodate brownfield cases reference the existing bucket name here instead
+  # of in the ObjectBucketClaim (OBC). In this case the provisioner will grant
+  # access to the bucket by creating a new user, attaching it to the bucket, and
+  # providing the credentials via a Secret in the namespace of the requesting OBC.
+  #bucketName:
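To tie the storage-class pieces together: the kustomization earlier renames this upstream StorageClass from rook-ceph-delete-bucket to object-storage-sc, and the strategic-merge patch points objectStoreName at rgw, matching the renamed CephObjectStore. A sketch of the effective rendered StorageClass, composed from the hunks above:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: object-storage-sc        # renamed by the JSON6902 patch
provisioner: rook-ceph.ceph.rook.io/bucket # driver:namespace:cluster
reclaimPolicy: Delete
parameters:
  objectStoreName: rgw           # points at the renamed CephObjectStore
  objectStoreNamespace: rook-ceph # namespace:cluster
  region: us-east-1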