From d74b579f1d5f8d9c7406aed3b6b43d30f637422d Mon Sep 17 00:00:00 2001 From: "Taylor, Stephen (st053q)" Date: Tue, 28 May 2019 16:47:08 -0600 Subject: [PATCH] [Ceph] Implement pool quotas on pools as they are created/managed This patch set implements pool quotas on each pool in the Ceph cluster by obtaining the total capacity of the cluster in bytes, multiplying that by the defined percentage of total data expected to reside in each pool and by the cluster quota, and setting a byte quota on each pool that is equal to its expected percentage of the total cluster quota. Change-Id: I1686822a74c984e99e9347f55b98219c47decec1 --- ceph-client/templates/bin/pool/_init.sh.tpl | 13 ++++++++++--- ceph-client/values.yaml | 4 ++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/ceph-client/templates/bin/pool/_init.sh.tpl b/ceph-client/templates/bin/pool/_init.sh.tpl index 031ef1376..746b2746f 100644 --- a/ceph-client/templates/bin/pool/_init.sh.tpl +++ b/ceph-client/templates/bin/pool/_init.sh.tpl @@ -120,23 +120,30 @@ function manage_pool () { TOTAL_DATA_PERCENT=$4 TARGET_PG_PER_OSD=$5 POOL_CRUSH_RULE=$6 - POOL_PROTECTION=$7 + TARGET_QUOTA=$7 + POOL_PROTECTION=$8 + CLUSTER_CAPACITY=$9 TOTAL_OSDS={{.Values.conf.pool.target.osd}} POOL_PLACEMENT_GROUPS=$(/tmp/pool-calc.py ${POOL_REPLICATION} ${TOTAL_OSDS} ${TOTAL_DATA_PERCENT} ${TARGET_PG_PER_OSD}) create_pool "${POOL_APPLICATION}" "${POOL_NAME}" "${POOL_REPLICATION}" "${POOL_PLACEMENT_GROUPS}" "${POOL_CRUSH_RULE}" "${POOL_PROTECTION}" + POOL_REPLICAS=$(ceph --cluster "${CLUSTER}" osd pool get "${POOL_NAME}" size | awk '{print $2}') + POOL_QUOTA=$(python -c "print int($CLUSTER_CAPACITY * $TOTAL_DATA_PERCENT * $TARGET_QUOTA / $POOL_REPLICAS / 100 / 100)") + ceph --cluster "${CLUSTER}" osd pool set-quota "${POOL_NAME}" max_bytes $POOL_QUOTA } reweight_osds {{ $targetPGperOSD := .Values.conf.pool.target.pg_per_osd }} {{ $crushRuleDefault := .Values.conf.pool.default.crush_rule }} +{{ $targetQuota := 
.Values.conf.pool.target.quota | default 100 }} {{ $targetProtection := .Values.conf.pool.target.protected | default "false" | quote | lower }} +cluster_capacity=$(ceph --cluster "${CLUSTER}" df | head -n3 | tail -n1 | awk '{print $1 substr($2, 1, 1)}' | numfmt --from=iec) {{- range $pool := .Values.conf.pool.spec -}} {{- with $pool }} {{- if .crush_rule }} -manage_pool {{ .application }} {{ .name }} {{ .replication }} {{ .percent_total_data }} {{ $targetPGperOSD }} {{ .crush_rule }} {{ $targetProtection }} +manage_pool {{ .application }} {{ .name }} {{ .replication }} {{ .percent_total_data }} {{ $targetPGperOSD }} {{ .crush_rule }} {{ $targetQuota }} {{ $targetProtection }} ${cluster_capacity} {{ else }} -manage_pool {{ .application }} {{ .name }} {{ .replication }} {{ .percent_total_data }} {{ $targetPGperOSD }} {{ $crushRuleDefault }} {{ $targetProtection }} +manage_pool {{ .application }} {{ .name }} {{ .replication }} {{ .percent_total_data }} {{ $targetPGperOSD }} {{ $crushRuleDefault }} {{ $targetQuota }} {{ $targetProtection }} ${cluster_capacity} {{- end }} {{- end }} {{- end }} diff --git a/ceph-client/values.yaml b/ceph-client/values.yaml index ca28d014a..9771c76fb 100644 --- a/ceph-client/values.yaml +++ b/ceph-client/values.yaml @@ -217,6 +217,10 @@ conf: osd: 5 pg_per_osd: 100 protected: true + # NOTE(st053q): target quota should be set to the overall cluster full percentage + # to be tolerated as a quota (percent full to allow in order to tolerate some + # level of failure) + quota: 100 default: # NOTE(supamatt): Accepted values are taken from `crush_rules` list. crush_rule: replicated_rule