diff --git a/manifests/composite/storage-cluster/ceph-conf.yaml b/manifests/composite/storage-cluster/ceph-conf.yaml
index ca668ca53..ba1e375e4 100644
--- a/manifests/composite/storage-cluster/ceph-conf.yaml
+++ b/manifests/composite/storage-cluster/ceph-conf.yaml
@@ -19,4 +19,4 @@ data:
     [mon]
     auth_allow_insecure_global_id_reclaim = false
     # [osd]
-    # [rgw]
+
diff --git a/manifests/composite/storage-cluster/kustomization.yaml b/manifests/composite/storage-cluster/kustomization.yaml
index 0b08066d8..e76aa4374 100644
--- a/manifests/composite/storage-cluster/kustomization.yaml
+++ b/manifests/composite/storage-cluster/kustomization.yaml
@@ -12,6 +12,7 @@ resources:
 - ../../function/rook-cluster/cephfs
 - ../../function/rook-cluster/dashboard/http
 - ../../function/rook-cluster/storageclasses
+- ../../function/rook-cluster/objectstore
 # Resource customizations
 patchesJSON6902:
 - target:
diff --git a/manifests/function/rook-cluster/objectstore/base/upstream/Kptfile b/manifests/function/rook-cluster/objectstore/base/upstream/Kptfile
new file mode 100644
index 000000000..23c54ce9a
--- /dev/null
+++ b/manifests/function/rook-cluster/objectstore/base/upstream/Kptfile
@@ -0,0 +1,11 @@
+apiVersion: kpt.dev/v1alpha1
+kind: Kptfile
+metadata:
+  name: upstream
+upstream:
+  type: git
+  git:
+    commit: 69591248f69e23964734f0192944ef2442bc7885
+    repo: https://github.com/rook/rook
+    directory: cluster/examples/kubernetes/ceph
+    ref: v1.6.3
diff --git a/manifests/function/rook-cluster/objectstore/base/upstream/object.yaml b/manifests/function/rook-cluster/objectstore/base/upstream/object.yaml
new file mode 100644
index 000000000..430c9c60d
--- /dev/null
+++ b/manifests/function/rook-cluster/objectstore/base/upstream/object.yaml
@@ -0,0 +1,140 @@
+#################################################################################################################
+# Create an object store with settings for replication in a production environment. A minimum of 3 hosts with
+# OSDs are required in this example.
+#  kubectl create -f object.yaml
+#################################################################################################################
+
+apiVersion: ceph.rook.io/v1
+kind: CephObjectStore
+metadata:
+  name: my-store
+  namespace: rook-ceph # namespace:cluster
+spec:
+  # The pool spec used to create the metadata pools. Must use replication.
+  metadataPool:
+    failureDomain: host
+    replicated:
+      size: 3
+      # Disallow setting pool with replica 1, this could lead to data loss without recovery.
+      # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
+      requireSafeReplicaSize: true
+    parameters:
+      # Inline compression mode for the data pool
+      # Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression
+      compression_mode: none
+      # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
+      # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
+      #target_size_ratio: ".5"
+  # The pool spec used to create the data pool. Can use replication or erasure coding.
+  dataPool:
+    failureDomain: host
+    replicated:
+      size: 3
+      # Disallow setting pool with replica 1, this could lead to data loss without recovery.
+      # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
+      requireSafeReplicaSize: true
+    parameters:
+      # Inline compression mode for the data pool
+      # Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression
+      compression_mode: none
+      # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
+      # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
+      #target_size_ratio: ".5"
+  # Whether to preserve metadata and data pools on object store deletion
+  preservePoolsOnDelete: false
+  # The gateway service configuration
+  gateway:
+    # A reference to the secret in the rook namespace where the ssl certificate is stored
+    # sslCertificateRef:
+    # The port that RGW pods will listen on (http)
+    port: 80
+    # The port that RGW pods will listen on (https). An ssl certificate is required.
+    # securePort: 443
+    # The number of pods in the rgw deployment
+    instances: 1
+    # The affinity rules to apply to the rgw deployment.
+    placement:
+      podAntiAffinity:
+        preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 100
+          podAffinityTerm:
+            labelSelector:
+              matchExpressions:
+              - key: app
+                operator: In
+                values:
+                - rook-ceph-rgw
+            # topologyKey: */zone can be used to spread RGW across different AZ
+            # Use <topologyKey: failure-domain.beta.kubernetes.io/zone> if your k8s cluster is v1.16 or lower
+            # Use <topologyKey: topology.kubernetes.io/zone> if your k8s cluster is v1.17 or higher
+            topologyKey: kubernetes.io/hostname
+      # nodeAffinity:
+      #   requiredDuringSchedulingIgnoredDuringExecution:
+      #     nodeSelectorTerms:
+      #     - matchExpressions:
+      #       - key: role
+      #         operator: In
+      #         values:
+      #         - rgw-node
+      # topologySpreadConstraints:
+      # tolerations:
+      # - key: rgw-node
+      #   operator: Exists
+      # podAffinity:
+      # podAntiAffinity:
+    # A key/value list of annotations
+    annotations:
+    #  key: value
+    # A key/value list of labels
+    labels:
+    #  key: value
+    resources:
+    # The requests and limits set here allow the object store gateway Pod(s) to use half of one CPU core and 1 gigabyte of memory
+    #  limits:
+    #    cpu: "500m"
+    #    memory: "1024Mi"
+    #  requests:
+    #    cpu: "500m"
+    #    memory: "1024Mi"
+    # priorityClassName: my-priority-class
+  #zone:
+  #  name: zone-a
+  # service endpoint healthcheck
+  healthCheck:
+    bucket:
+      disabled: false
+      interval: 60s
+    # Configure the pod liveness probe for the rgw daemon
+    livenessProbe:
+      disabled: false
+  # security oriented settings
+  # security:
+  #   To enable the KMS configuration properly don't forget to uncomment the Secret at the end of the file
+  #   kms:
+  #     # name of the config map containing all the kms connection details
+  #     connectionDetails:
+  #       KMS_PROVIDER: "vault"
+  #       VAULT_ADDR: VAULT_ADDR_CHANGE_ME # e.g.: http://vault.my-domain.com:8200
+  #       VAULT_BACKEND_PATH: "rook"
+  #       VAULT_SECRET_ENGINE: "kv"
+  #       VAULT_BACKEND: v2
+  #     # name of the secret containing the kms authentication token
+  #     tokenSecretName: rook-vault-token
+# # UNCOMMENT THIS TO ENABLE A KMS CONNECTION
+# # Also, do not forget to replace both:
+# #   * ROOK_TOKEN_CHANGE_ME: with a base64 encoded value of the token to use
+# #   * VAULT_ADDR_CHANGE_ME: with the Vault address
+# ---
+# apiVersion: v1
+# kind: Secret
+# metadata:
+#   name: rook-vault-token
+#   namespace: rook-ceph # namespace:cluster
+# data:
+#   token: ROOK_TOKEN_CHANGE_ME
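+#
+# Verification (a sketch, not part of the upstream example): once the store is
+# reconciled, Rook exposes the gateway through a Service labelled
+# app=rook-ceph-rgw in the cluster namespace, e.g.:
+#   kubectl -n rook-ceph get cephobjectstore
+#   kubectl -n rook-ceph get svc -l app=rook-ceph-rgw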
diff --git a/manifests/function/rook-cluster/objectstore/kustomization.yaml b/manifests/function/rook-cluster/objectstore/kustomization.yaml
new file mode 100644
index 000000000..ba298b0ee
--- /dev/null
+++ b/manifests/function/rook-cluster/objectstore/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+  - base/upstream/object.yaml
+patches:
+  - path: patches/labels-and-resources.yaml
+patchesJSON6902:
+- target:
+    kind: CephObjectStore
+    name: my-store
+  patch: |-
+    - op: replace
+      path: /metadata/name
+      value: rgw
diff --git a/manifests/function/rook-cluster/objectstore/patches/labels-and-resources.yaml b/manifests/function/rook-cluster/objectstore/patches/labels-and-resources.yaml
new file mode 100644
index 000000000..d522784ec
--- /dev/null
+++ b/manifests/function/rook-cluster/objectstore/patches/labels-and-resources.yaml
@@ -0,0 +1,26 @@
+apiVersion: ceph.rook.io/v1
+kind: CephObjectStore
+metadata:
+  name: my-store
+  namespace: rook-ceph # namespace:cluster
+spec:
+  gateway:
+    # A key/value list of annotations
+    annotations:
+      backend: beast
+    labels:
+      application: ceph
+      component: rgw
+    resources:
+      # The requests set here let the RGW pod(s) be scheduled with half a CPU core and 1 GiB of
+      # memory reserved; once scheduled, the pod(s) are limited to two CPU cores and 4 GiB of memory.
+      limits:
+        cpu: "2000m"
+        memory: "4096Mi"
+      requests:
+        cpu: "500m"
+        memory: "1024Mi"
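+# Note: the JSON6902 patch in kustomization.yaml renames the store from
+# my-store to rgw, so the rendered CephObjectStore carries that name together
+# with the gateway annotations, labels and resources above, merged over
+# base/upstream/object.yaml.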
diff --git a/manifests/function/rook-cluster/pools/kustomization.yaml b/manifests/function/rook-cluster/pools/kustomization.yaml
index 813dabdcc..349da0e50 100644
--- a/manifests/function/rook-cluster/pools/kustomization.yaml
+++ b/manifests/function/rook-cluster/pools/kustomization.yaml
@@ -1,2 +1,3 @@
 resources:
 - rbd
+- rgw-buckets
diff --git a/manifests/function/rook-cluster/pools/rgw-buckets/base/kustomization.yaml b/manifests/function/rook-cluster/pools/rgw-buckets/base/kustomization.yaml
new file mode 100644
index 000000000..3aad737f2
--- /dev/null
+++ b/manifests/function/rook-cluster/pools/rgw-buckets/base/kustomization.yaml
@@ -0,0 +1,10 @@
+resources:
+- upstream/object-bucket-claim-delete.yaml
+patchesJSON6902:
+- target:
+    kind: ObjectBucketClaim
+    name: ceph-delete-bucket
+  patch: |-
+    - op: replace
+      path: /metadata/name
+      value: object-bucket-claim
diff --git a/manifests/function/rook-cluster/pools/rgw-buckets/base/upstream/Kptfile b/manifests/function/rook-cluster/pools/rgw-buckets/base/upstream/Kptfile
new file mode 100644
index 000000000..23c54ce9a
--- /dev/null
+++ b/manifests/function/rook-cluster/pools/rgw-buckets/base/upstream/Kptfile
@@ -0,0 +1,11 @@
+apiVersion: kpt.dev/v1alpha1
+kind: Kptfile
+metadata:
+  name: upstream
+upstream:
+  type: git
+  git:
+    commit: 69591248f69e23964734f0192944ef2442bc7885
+    repo: https://github.com/rook/rook
+    directory: cluster/examples/kubernetes/ceph
+    ref: v1.6.3
diff --git a/manifests/function/rook-cluster/pools/rgw-buckets/base/upstream/object-bucket-claim-delete.yaml b/manifests/function/rook-cluster/pools/rgw-buckets/base/upstream/object-bucket-claim-delete.yaml
new file mode 100644
index 000000000..1b58b8f0f
--- /dev/null
+++ b/manifests/function/rook-cluster/pools/rgw-buckets/base/upstream/object-bucket-claim-delete.yaml
@@ -0,0 +1,21 @@
+apiVersion: objectbucket.io/v1alpha1
+kind: ObjectBucketClaim
+metadata:
+  name: ceph-delete-bucket
+spec:
+  # To create a new bucket specify either `bucketName` or
+  # `generateBucketName` here. Both cannot be used. To access
+  # an existing bucket the bucket name needs to be defined in
+  # the StorageClass referenced here, and both `bucketName` and
+  # `generateBucketName` must be omitted in the OBC.
+  #bucketName:
+  generateBucketName: ceph-bkt
+  storageClassName: rook-ceph-delete-bucket
+  additionalConfig:
+    # To set a quota for the OBC
+    #maxObjects: "1000"
+    #maxSize: "2G"
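+# Per Rook's OBC documentation, a successful claim also yields a ConfigMap and
+# a Secret named after the OBC in its namespace: the ConfigMap carries
+# BUCKET_HOST, BUCKET_NAME and BUCKET_PORT, and the Secret carries
+# AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.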
diff --git a/manifests/function/rook-cluster/pools/rgw-buckets/bucket/kustomization.yaml b/manifests/function/rook-cluster/pools/rgw-buckets/bucket/kustomization.yaml
new file mode 100644
index 000000000..cceb4aef4
--- /dev/null
+++ b/manifests/function/rook-cluster/pools/rgw-buckets/bucket/kustomization.yaml
@@ -0,0 +1,10 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- ../base
+# This should be changed to match the current name and prefix
+# after the first stage of testing - vs422h
+namePrefix: bucket-
+
+patchesStrategicMerge:
+  - obc.yaml
diff --git a/manifests/function/rook-cluster/pools/rgw-buckets/bucket/obc.yaml b/manifests/function/rook-cluster/pools/rgw-buckets/bucket/obc.yaml
new file mode 100644
index 000000000..741302c27
--- /dev/null
+++ b/manifests/function/rook-cluster/pools/rgw-buckets/bucket/obc.yaml
@@ -0,0 +1,26 @@
+apiVersion: objectbucket.io/v1alpha1
+kind: ObjectBucketClaim
+metadata:
+  name: object-bucket-claim
+spec:
+  # To create a new bucket specify either `bucketName` or
+  # `generateBucketName` here. Both cannot be used. To access
+  # an existing bucket the bucket name needs to be defined in
+  # the StorageClass referenced here, and both `bucketName` and
+  # `generateBucketName` must be omitted in the OBC.
+
+  # This should be changed to match the current name and prefix
+  # after the first stage of testing - vs422h
+  bucketName: bucket
+  storageClassName: object-storage-sc
+  additionalConfig:
+    maxObjects: "1000"
+    maxSize: "2G"
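+# A consumption sketch (assumes the ConfigMap/Secret naming described in
+# base/upstream/object-bucket-claim-delete.yaml; with the bucket- namePrefix
+# applied, both objects should be named bucket-object-bucket-claim):
+#   envFrom:
+#   - configMapRef:
+#       name: bucket-object-bucket-claim
+#   - secretRef:
+#       name: bucket-object-bucket-claim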
diff --git a/manifests/function/rook-cluster/pools/rgw-buckets/kustomization.yaml b/manifests/function/rook-cluster/pools/rgw-buckets/kustomization.yaml
new file mode 100644
index 000000000..5ffbd84e7
--- /dev/null
+++ b/manifests/function/rook-cluster/pools/rgw-buckets/kustomization.yaml
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+  - base
+  - bucket
diff --git a/manifests/function/rook-cluster/storageclasses/kustomization.yaml b/manifests/function/rook-cluster/storageclasses/kustomization.yaml
index 72f71918b..6e37cd421 100644
--- a/manifests/function/rook-cluster/storageclasses/kustomization.yaml
+++ b/manifests/function/rook-cluster/storageclasses/kustomization.yaml
@@ -1,3 +1,4 @@
 resources:
 - block
 - file
+- object
diff --git a/manifests/function/rook-cluster/storageclasses/object/kustomization.yaml b/manifests/function/rook-cluster/storageclasses/object/kustomization.yaml
new file mode 100644
index 000000000..3dcb1fe67
--- /dev/null
+++ b/manifests/function/rook-cluster/storageclasses/object/kustomization.yaml
@@ -0,0 +1,12 @@
+resources:
+- upstream/storageclass-bucket-delete.yaml
+patchesJSON6902:
+- target:
+    kind: StorageClass
+    name: rook-ceph-delete-bucket
+  patch: |-
+    - op: replace
+      path: /metadata/name
+      value: object-storage-sc
+patchesStrategicMerge:
+- patches/rook-ceph-object-sc.yaml
diff --git a/manifests/function/rook-cluster/storageclasses/object/patches/rook-ceph-object-sc.yaml b/manifests/function/rook-cluster/storageclasses/object/patches/rook-ceph-object-sc.yaml
new file mode 100644
index 000000000..86cad692b
--- /dev/null
+++ b/manifests/function/rook-cluster/storageclasses/object/patches/rook-ceph-object-sc.yaml
@@ -0,0 +1,13 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: rook-ceph-delete-bucket
+parameters:
+  objectStoreName: rgw
+  objectStoreNamespace: rook-ceph # namespace:cluster
+  region: us-east-1
+  # To accommodate brownfield cases reference the existing bucket name here instead
+  # of in the ObjectBucketClaim (OBC). In this case the provisioner will grant
+  # access to the bucket by creating a new user, attaching it to the bucket, and
+  # providing the credentials via a Secret in the namespace of the requesting OBC.
+  #bucketName:
\ No newline at end of file
diff --git a/manifests/function/rook-cluster/storageclasses/object/upstream/Kptfile b/manifests/function/rook-cluster/storageclasses/object/upstream/Kptfile
new file mode 100644
index 000000000..23c54ce9a
--- /dev/null
+++ b/manifests/function/rook-cluster/storageclasses/object/upstream/Kptfile
@@ -0,0 +1,11 @@
+apiVersion: kpt.dev/v1alpha1
+kind: Kptfile
+metadata:
+  name: upstream
+upstream:
+  type: git
+  git:
+    commit: 69591248f69e23964734f0192944ef2442bc7885
+    repo: https://github.com/rook/rook
+    directory: cluster/examples/kubernetes/ceph
+    ref: v1.6.3
diff --git a/manifests/function/rook-cluster/storageclasses/object/upstream/storageclass-bucket-delete.yaml b/manifests/function/rook-cluster/storageclasses/object/upstream/storageclass-bucket-delete.yaml
new file mode 100644
index 000000000..44dec1788
--- /dev/null
+++ b/manifests/function/rook-cluster/storageclasses/object/upstream/storageclass-bucket-delete.yaml
@@ -0,0 +1,17 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: rook-ceph-delete-bucket
+provisioner: rook-ceph.ceph.rook.io/bucket # driver:namespace:cluster
+# set the reclaim policy to delete the bucket and all objects
+# when its OBC is deleted.
+reclaimPolicy: Delete
+parameters:
+  objectStoreName: my-store
+  objectStoreNamespace: rook-ceph # namespace:cluster
+  region: us-east-1
+  # To accommodate brownfield cases reference the existing bucket name here instead
+  # of in the ObjectBucketClaim (OBC). In this case the provisioner will grant
+  # access to the bucket by creating a new user, attaching it to the bucket, and
+  # providing the credentials via a Secret in the namespace of the requesting OBC.
+  #bucketName:
\ No newline at end of file