[CEPH] Uplift from Nautilus to Octopus release

This uplifts the Ceph charts from the Nautilus (14.x) release to Octopus (15.x).

Change-Id: I4f7913967185dd52d4301c218450cfad9d0e2b2b
Chinasubbareddy Mallavarapu 2021-01-06 17:40:09 +00:00 committed by chinasubbareddy mallavarapu
parent 72f42ba091
commit da289c78cb
23 changed files with 86 additions and 75 deletions
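
Note: the recurring pattern in this change replaces release checks that grep for a literal release name with a numeric major-version comparison, so Octopus (15.x) and later also take the intended branch. A minimal sketch of the two styles, with the `ceph mon versions` output assumed for illustration:

    # Old style: true only on Nautilus, so an Octopus cluster falls into the wrong branch
    ceph tell mon.* version | egrep -q "nautilus"

    # New style: "ceph mon versions" prints lines such as
    #   "ceph version 15.2.8 (...) octopus (stable)": 3
    # so field 3 is the version string and its first dotted component is the major version.
    major=$(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1)
    if [[ ${major} -ge 14 ]]; then
      echo "Nautilus (14.x) or newer"
    fi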


@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph Client
 name: ceph-client
-version: 0.1.5
+version: 0.1.6
 home: https://github.com/ceph/ceph-client
 ...


@@ -43,10 +43,10 @@ function check_recovery_flags() {
 function check_osd_count() {
 echo "#### Start: Checking OSD count ####"
 noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')
-osd_stat=$(ceph osd stat -f json)
-num_osd=$(jq '.osdmap.num_osds' <<< "$osd_stat")
-num_in_osds=$(jq '.osdmap.num_in_osds' <<< "$osd_stat")
-num_up_osds=$(jq '.osdmap.num_up_osds' <<< "$osd_stat")
+osd_stat=$(ceph osd stat -f json-pretty)
+num_osd=$(awk '/"num_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+num_in_osds=$(awk '/"num_in_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+num_up_osds=$(awk '/"num_up_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
 MIN_OSDS=$((${num_osd}*$REQUIRED_PERCENT_OF_OSDS/100))
 if [ ${MIN_OSDS} -lt 1 ]; then
@@ -188,7 +188,7 @@ function pool_validation() {
 exit 1
 fi
 fi
-if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 if [ "x${size}" != "x${RBD}" ] || [ "x${min_size}" != "x${EXPECTED_POOLMINSIZE}" ] \
 || [ "x${crush_rule}" != "x${expectedCrushRuleId}" ]; then
 echo "Pool ${name} has incorrect parameters!!! Size=${size}, Min_Size=${min_size}, Rule=${crush_rule}, PG_Autoscale_Mode=${pg_autoscale_mode}"


@@ -44,7 +44,7 @@ ceph --cluster "${CLUSTER}" -v
 # Env. variables matching the pattern "<module>_" will be
 # found and parsed for config-key settings by
 # ceph config set mgr mgr/<module>/<key> <value>
-MODULES_TO_DISABLE=`ceph mgr dump | python -c "import json, sys; print(' '.join(json.load(sys.stdin)['modules']))"`
+MODULES_TO_DISABLE=`ceph mgr dump | python3 -c "import json, sys; print(' '.join(json.load(sys.stdin)['modules']))"`
 for module in ${ENABLED_MODULES}; do
 # This module may have been enabled in the past
@@ -57,7 +57,7 @@ for module in ${ENABLED_MODULES}; do
 option=${option/${module}_/}
 key=`echo $option | cut -d= -f1`
 value=`echo $option | cut -d= -f2`
-if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 ceph --cluster "${CLUSTER}" config set mgr mgr/$module/$key $value --force
 else
 ceph --cluster "${CLUSTER}" config set mgr mgr/$module/$key $value


@@ -35,7 +35,7 @@ function wait_for_pgs () {
 pgs_ready=0
 query='map({state: .state}) | group_by(.state) | map({state: .[0].state, count: length}) | .[] | select(.state | contains("active") | not)'
-if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 query=".pg_stats | ${query}"
 fi
@@ -70,10 +70,11 @@ function check_recovery_flags () {
 function check_osd_count() {
 echo "#### Start: Checking OSD count ####"
 noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')
-osd_stat=$(ceph osd stat -f json)
-num_osd=$(jq '.osdmap.num_osds' <<< "$osd_stat")
-num_in_osds=$(jq '.osdmap.num_in_osds' <<< "$osd_stat")
-num_up_osds=$(jq '.osdmap.num_up_osds' <<< "$osd_stat")
+osd_stat=$(ceph osd stat -f json-pretty)
+num_osd=$(awk '/"num_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+num_in_osds=$(awk '/"num_in_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+num_up_osds=$(awk '/"num_up_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
 EXPECTED_OSDS={{.Values.conf.pool.target.osd}}
 REQUIRED_PERCENT_OF_OSDS={{.Values.conf.pool.target.required_percent_of_osds}}
@@ -123,7 +124,7 @@ function create_crushrule () {
 }
 # Set mons to use the msgr2 protocol on nautilus
-if [[ -z "$(ceph mon versions | grep ceph\ version | grep -v nautilus)" ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 ceph --cluster "${CLUSTER}" mon enable-msgr2
 fi
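
Note: once every mon reports major version >= 14, msgr v2 can be enabled; mons then advertise the v2 port (3300) alongside the legacy v1 port (6789). A sketch of how this could be verified afterwards:

    # Each mon should now list both protocols, e.g.
    #   0: [v2:10.0.0.11:3300/0,v1:10.0.0.11:6789/0] mon.a
    ceph mon dump | grep 'v2:'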
@@ -183,7 +184,7 @@ function create_pool () {
 ceph --cluster "${CLUSTER}" osd pool application enable "${POOL_NAME}" "${POOL_APPLICATION}"
 fi
-if [[ -z "$(ceph osd versions | grep ceph\ version | grep -v nautilus)" ]] && [[ "${ENABLE_AUTOSCALER}" == "true" ]] ; then
+if [[ $(ceph osd versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]] && [[ "${ENABLE_AUTOSCALER}" == "true" ]] ; then
 ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" pg_autoscale_mode on
 else
 ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" pg_autoscale_mode off
@@ -199,7 +200,7 @@ function create_pool () {
 ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" size ${POOL_REPLICATION}
 ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" crush_rule "${POOL_CRUSH_RULE}"
 # set pg_num to pool
-if [[ -z "$(ceph osd versions | grep ceph\ version | grep -v nautilus)" ]]; then
+if [[ $(ceph osd versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" "pg_num" "${POOL_PLACEMENT_GROUPS}"
 else
 for PG_PARAM in pg_num pgp_num; do
@@ -246,10 +247,10 @@ function manage_pool () {
 POOL_PROTECTION=$8
 CLUSTER_CAPACITY=$9
 TOTAL_OSDS={{.Values.conf.pool.target.osd}}
-POOL_PLACEMENT_GROUPS=$(/tmp/pool-calc.py ${POOL_REPLICATION} ${TOTAL_OSDS} ${TOTAL_DATA_PERCENT} ${TARGET_PG_PER_OSD})
+POOL_PLACEMENT_GROUPS=$(python3 /tmp/pool-calc.py ${POOL_REPLICATION} ${TOTAL_OSDS} ${TOTAL_DATA_PERCENT} ${TARGET_PG_PER_OSD})
 create_pool "${POOL_APPLICATION}" "${POOL_NAME}" "${POOL_REPLICATION}" "${POOL_PLACEMENT_GROUPS}" "${POOL_CRUSH_RULE}" "${POOL_PROTECTION}"
 POOL_REPLICAS=$(ceph --cluster "${CLUSTER}" osd pool get "${POOL_NAME}" size | awk '{print $2}')
-POOL_QUOTA=$(python -c "print(int($CLUSTER_CAPACITY * $TOTAL_DATA_PERCENT * $TARGET_QUOTA / $POOL_REPLICAS / 100 / 100))")
+POOL_QUOTA=$(python3 -c "print(int($CLUSTER_CAPACITY * $TOTAL_DATA_PERCENT * $TARGET_QUOTA / $POOL_REPLICAS / 100 / 100))")
 ceph --cluster "${CLUSTER}" osd pool set-quota "${POOL_NAME}" max_bytes $POOL_QUOTA
 }
@@ -262,12 +263,16 @@ reweight_osds
 {{ $targetQuota := .Values.conf.pool.target.quota | default 100 }}
 {{ $targetProtection := .Values.conf.pool.target.protected | default "false" | quote | lower }}
 cluster_capacity=0
-if [[ -z "$(ceph osd versions | grep ceph\ version | grep -v nautilus)" ]]; then
+if [[ $(ceph -v | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 cluster_capacity=$(ceph --cluster "${CLUSTER}" df | grep "TOTAL" | awk '{print $2 substr($3, 1, 1)}' | numfmt --from=iec)
-enable_or_disable_autoscaling
 else
 cluster_capacity=$(ceph --cluster "${CLUSTER}" df | head -n3 | tail -n1 | awk '{print $1 substr($2, 1, 1)}' | numfmt --from=iec)
 fi
+if [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1) -eq 14 ]]; then
+enable_or_disable_autoscaling
+fi
 {{- range $pool := .Values.conf.pool.spec -}}
 {{- with $pool }}
 {{- if .crush_rule }}
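
Note: manage_pool feeds the capacity parsed above into the quota formula, now via python3. A worked example with assumed round numbers (300 GB capacity, a 29% data share, 100% target quota, 3 replicas):

    CLUSTER_CAPACITY=300000000000   # bytes, assumed for illustration
    TOTAL_DATA_PERCENT=29
    TARGET_QUOTA=100
    POOL_REPLICAS=3
    python3 -c "print(int($CLUSTER_CAPACITY * $TOTAL_DATA_PERCENT * $TARGET_QUOTA / $POOL_REPLICAS / 100 / 100))"
    # -> 29000000000, i.e. a 29 GB max_bytes quota for the pool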


@@ -106,9 +106,9 @@ class cephCRUSH():
 """Replica of the pool. Initialize to 0."""
 self.poolSize = 0
-def isNautilus(self):
-grepResult = int(subprocess.check_output('ceph mon versions | egrep -q "nautilus" | echo $?', shell=True)) # nosec
-return grepResult == 0
+def isSupportedRelease(self):
+cephMajorVer = int(subprocess.check_output("ceph mon versions | awk '/version/{print $3}' | cut -d. -f1", shell=True)) # nosec
+return cephMajorVer >= 14
 def getPoolSize(self, poolName):
 """
@@ -129,7 +129,7 @@ class cephCRUSH():
 return
 def checkPGs(self, poolName):
-poolPGs = self.poolPGs['pg_stats'] if self.isNautilus() else self.poolPGs
+poolPGs = self.poolPGs['pg_stats'] if self.isSupportedRelease() else self.poolPGs
 if not poolPGs:
 return
 print('Checking PGs in pool {} ...'.format(poolName)),


@@ -18,4 +18,4 @@ set -ex
 mgrPod=$(kubectl get pods --namespace=${DEPLOYMENT_NAMESPACE} --selector=application=ceph --selector=component=mgr --output=jsonpath={.items[0].metadata.name} 2>/dev/null)
-kubectl exec -t ${mgrPod} --namespace=${DEPLOYMENT_NAMESPACE} -- /tmp/utils-checkPGs.py All 2>/dev/null
+kubectl exec -t ${mgrPod} --namespace=${DEPLOYMENT_NAMESPACE} -- python3 /tmp/utils-checkPGs.py All 2>/dev/null


@@ -24,11 +24,11 @@ release_group: null
 images:
 pull_policy: IfNotPresent
 tags:
-ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
-ceph_mds: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_mgr: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_rbd_pool: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
+ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+ceph_mds: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_mgr: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_rbd_pool: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
 dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
 image_repo_sync: 'docker.io/docker:17.07.0'
 local_registry:
@@ -326,6 +326,11 @@ conf:
 # the ceph pool management job, as it tunes the pgs and crush rule, based on
 # the above.
 spec:
+# Health metrics pool
+- name: device_health_metrics
+application: mgr_devicehealth
+replication: 1
+percent_total_data: 5
 # RBD pool
 - name: rbd
 application: rbd
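
Note: Octopus's mgr device-health module stores SMART metrics in a device_health_metrics pool that it would otherwise create on its own; declaring it in the spec lets the pool job manage its replication and data share instead. A hedged post-deployment check (output format assumed):

    ceph osd pool ls detail | grep device_health_metrics
    # The application tag should match the spec above, printing something like:
    #   { "mgr_devicehealth": {} }
    ceph osd pool application get device_health_metrics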
@@ -404,7 +409,7 @@ conf:
 - name: default.rgw.buckets.data
 application: rgw
 replication: 3
-percent_total_data: 34.8
+percent_total_data: 29
 ceph:
 global:
@@ -497,8 +502,7 @@ bootstrap:
 ceph -s
 function ensure_pool () {
 ceph osd pool stats $1 || ceph osd pool create $1 $2
-local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous" | xargs echo)
-if [[ ${test_version} -gt 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then
 ceph osd pool application enable $1 $3
 fi
 }
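
Note: the bootstrap helper now gates `osd pool application enable` on major version >= 12, since pool applications first appeared in Luminous. A usage sketch with assumed arguments (a pool named mypool with 32 PGs, tagged for rbd):

    ensure_pool mypool 32 rbd
    # which, on Luminous or newer, amounts to:
    #   ceph osd pool create mypool 32
    #   ceph osd pool application enable mypool rbd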


@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph Mon
 name: ceph-mon
-version: 0.1.3
+version: 0.1.4
 home: https://github.com/ceph/ceph
 ...


@@ -20,7 +20,7 @@ set -ex
 {{- $envAll := . }}
 function ceph_gen_key () {
-python ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
+python3 ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
 }
 function kube_ceph_keyring_gen () {

@@ -19,7 +19,7 @@ set -ex
 {{- $envAll := . }}
 function ceph_gen_key () {
-python ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
+python3 ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
 }
 function kube_ceph_keyring_gen () {


@@ -16,7 +16,7 @@ else
 fi
 function check_mon_msgr2 {
-if [[ -z "$(ceph mon versions | grep ceph\ version | grep -v nautilus)" ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 if ceph health detail|grep -i "MON_MSGR2_NOT_ENABLED"; then
 echo "ceph-mon msgr v2 not enabled on all ceph mons so enabling"
 ceph mon enable-msgr2


@@ -23,10 +23,10 @@ deployment:
 images:
 pull_policy: IfNotPresent
 tags:
-ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
-ceph_mon: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_mon_check: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
+ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+ceph_mon: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_mon_check: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
 dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
 image_repo_sync: 'docker.io/docker:17.07.0'
 local_registry:
@@ -292,8 +292,7 @@ bootstrap:
 ceph -s
 function ensure_pool () {
 ceph osd pool stats $1 || ceph osd pool create $1 $2
-local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous" | xargs echo)
-if [[ ${test_version} -gt 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then
 ceph osd pool application enable $1 $3
 fi
 }


@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph OSD
 name: ceph-osd
-version: 0.1.17
+version: 0.1.18
 home: https://github.com/ceph/ceph
 ...


@@ -19,10 +19,10 @@ set -ex
 function check_osd_count() {
 echo "#### Start: Checking OSD count ####"
 noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')
-osd_stat=$(ceph osd stat -f json)
-num_osd=$(jq '.osdmap.num_osds' <<< "$osd_stat")
-num_in_osds=$(jq '.osdmap.num_in_osds' <<< "$osd_stat")
-num_up_osds=$(jq '.osdmap.num_up_osds' <<< "$osd_stat")
+osd_stat=$(ceph osd stat -f json-pretty)
+num_osd=$(awk '/"num_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+num_in_osds=$(awk '/"num_in_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
+num_up_osds=$(awk '/"num_up_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
 MIN_OSDS=$((${num_osd}*$REQUIRED_PERCENT_OF_OSDS/100))
 if [ ${MIN_OSDS} -lt 1 ]; then


@@ -89,7 +89,7 @@ function wait_for_pgs () {
 pgs_inactive=0
 query='map({state: .state}) | group_by(.state) | map({state: .[0].state, count: length}) | .[] | select(.state | contains("active") | not)'
-if [[ $(ceph tell mon.* version | egrep -q "nautilus"; echo $?) -eq 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
 query=".pg_stats | ${query}"
 fi


@@ -31,8 +31,8 @@ eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c '
 eval CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP=$(cat /etc/ceph/storage.json | jq '.failure_domain_by_hostname_map."'$HOSTNAME'"')
 eval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')
-if [[ $(ceph -v | egrep -q "nautilus|mimic|luminous"; echo $?) -ne 0 ]]; then
-echo "ERROR- need Luminous/Mimic/Nautilus release"
+if [[ $(ceph -v | egrep "octopus|nautilus|mimic|luminous" > /dev/null 2>&1; echo $?) -ne 0 ]]; then
+echo "ERROR- need Luminous/Mimic/Nautilus/Octopus release"
 exit 1
 fi


@@ -115,15 +115,15 @@ alias wipefs='locked wipefs'
 alias sgdisk='locked sgdisk'
 alias dd='locked dd'
-eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
-eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
-eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
-eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
+eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
+eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
+eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
+eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
 eval CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP=$(cat /etc/ceph/storage.json | jq '.failure_domain_by_hostname_map."'$HOSTNAME'"')
-eval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')
+eval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')
-if [[ $(ceph -v | egrep -q "nautilus|mimic|luminous"; echo $?) -ne 0 ]]; then
-echo "ERROR- need Luminous/Mimic/Nautilus release"
+if [[ $(ceph -v | awk '/version/{print $3}' | cut -d. -f1) -lt 12 ]]; then
+echo "ERROR - The minimum Ceph version supported is Luminous 12.x.x"
 exit 1
 fi
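
Note: the eval lines above pull scalar settings out of storage.json; json.dumps re-quotes the value so the shell assignment stays intact, and only the interpreter changes to python3. A self-contained sketch with an assumed minimal file:

    cat > /tmp/storage.json <<'EOF'
    {"failure_domain": "host", "failure_domain_name": "false", "device_class": "hdd"}
    EOF
    eval DEVICE_CLASS=$(cat /tmp/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')
    echo "${DEVICE_CLASS}"   # -> hdd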


@@ -19,9 +19,9 @@
 images:
 pull_policy: IfNotPresent
 tags:
-ceph_osd: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
+ceph_osd: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
 dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
 image_repo_sync: 'docker.io/docker:17.07.0'
 local_registry:


@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph Provisioner
 name: ceph-provisioners
-version: 0.1.2
+version: 0.1.3
 home: https://github.com/ceph/ceph
 ...


@@ -27,10 +27,10 @@ release_group: null
 images:
 pull_policy: IfNotPresent
 tags:
-ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
+ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
 ceph_cephfs_provisioner: 'docker.io/openstackhelm/ceph-cephfs-provisioner:ubuntu_bionic-20200521'
-ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
-ceph_rbd_provisioner: 'docker.io/openstackhelm/ceph-rbd-provisioner:ubuntu_bionic-20200521'
+ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+ceph_rbd_provisioner: 'docker.io/openstackhelm/ceph-rbd-provisioner:change_770201_ubuntu_bionic-20210113'
 dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
 image_repo_sync: 'docker.io/docker:17.07.0'
 local_registry:
@@ -246,8 +246,7 @@ bootstrap:
 ceph -s
 function ensure_pool () {
 ceph osd pool stats $1 || ceph osd pool create $1 $2
-local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous" | xargs echo)
-if [[ ${test_version} -gt 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then
 ceph osd pool application enable $1 $3
 fi
 }


@@ -15,6 +15,6 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Ceph RadosGW
 name: ceph-rgw
-version: 0.1.1
+version: 0.1.2
 home: https://github.com/ceph/ceph
 ...


@@ -24,12 +24,12 @@ release_group: null
 images:
 pull_policy: IfNotPresent
 tags:
-ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
-ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
-ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:ubuntu_bionic-20200521'
+ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
+ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
+ceph_rgw: 'docker.io/openstackhelm/ceph-daemon:change_770201_ubuntu_bionic-20210113'
 dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
 image_repo_sync: 'docker.io/docker:17.07.0'
-rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20200521'
+rgw_s3_admin: 'docker.io/openstackhelm/ceph-config-helper:change_770201_ubuntu_bionic-20210113'
 ks_endpoints: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
 ks_service: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
 ks_user: 'docker.io/openstackhelm/heat:newton-ubuntu_xenial'
@@ -489,8 +489,7 @@ bootstrap:
 ceph -s
 function ensure_pool () {
 ceph osd pool stats $1 || ceph osd pool create $1 $2
-local test_version=$(ceph tell osd.* version | egrep -c "nautilus|mimic|luminous")
-if [[ ${test_version} -gt 0 ]]; then
+if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then
 ceph osd pool application enable $1 $3
 fi
 }


@@ -86,6 +86,11 @@ conf:
 default:
 crush_rule: same_host
 spec:
+# Health metrics pool
+- name: device_health_metrics
+application: mgr_devicehealth
+replication: 1
+percent_total_data: 5
 # RBD pool
 - name: rbd
 application: rbd
@@ -160,7 +165,7 @@ conf:
 - name: default.rgw.buckets.data
 application: rgw
 replication: 1
-percent_total_data: 34.8
+percent_total_data: 29
 storage:
 osd:
 - data: