Change default pg_num values for Ceph pools:

- cinder-volumes
- cinder.backups
- images
- ephemeral

The pg_num values were increased to avoid the Ceph health warning
that occurs on larger systems when the default pg_num settings are
not large enough.

Change-Id: I23feffe613c37b12dff51c73e7ced9a9c7663089
Closes-bug: 1899128
Signed-off-by: Elena Taivan <elena.taivan@windriver.com>
Author: Elena Taivan <elena.taivan@windriver.com>
Date:   2020-10-09 07:35:03 +00:00
Parent: 3b01f4e8e9
Commit: a643665af4
6 changed files with 78 additions and 11 deletions
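Note: to confirm the new defaults on a running system, the pg_num of each affected pool can be read back from Ceph. A minimal Python sketch, assuming the pools have already been created by the application and that the ceph CLI is available where this runs; pool names follow the commit message, and the actual backup pool name depends on chart configuration:

import subprocess

# New default chunk_size values, used as pg_num when the pools are created.
EXPECTED_PG_NUM = {
    'cinder-volumes': 512,
    'cinder.backups': 256,   # name as given in the commit message; may differ per system
    'images': 256,
    'ephemeral': 256,
}

for pool, expected in EXPECTED_PG_NUM.items():
    # 'ceph osd pool get <pool> pg_num' prints a line such as "pg_num: 512".
    out = subprocess.check_output(
        ['ceph', 'osd', 'pool', 'get', pool, 'pg_num']).decode()
    actual = int(out.split(':', 1)[1])
    print('%-16s pg_num=%-4d (expected >= %d)' % (pool, actual, expected))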


@@ -27,6 +27,7 @@ Patch05: 0005-Nova-Add-support-for-disabling-Readiness-Liveness-pr.patch
 Patch06: 0006-Support-ingress-creation-for-keystone-admin-endpoint.patch
 Patch07: 0007-Allow-more-generic-overrides-for-placeme.patch
 Patch08: 0008-Allow-set-public-endpoint-url-for-keystone-endpoints.patch
+Patch09: 0009-Wrong-usage-of-rbd_store_chunk_size.patch

 BuildRequires: helm
 BuildRequires: openstack-helm-infra
@@ -46,6 +47,7 @@ Openstack Helm charts
 %patch06 -p1
 %patch07 -p1
 %patch08 -p1
+%patch09 -p1

 %build
 # Stage helm-toolkit in the local repo


@@ -0,0 +1,45 @@
From 2892d1bedf30e7260aa67ad93d94677fad55a760 Mon Sep 17 00:00:00 2001
From: Elena Taivan <elena.taivan@windriver.com>
Date: Wed, 30 Sep 2020 14:14:32 +0000
Subject: [PATCH] Wrong usage of 'rbd_store_chunk_size'

The 'rbd_store_chunk_size' option represents the size of the objects
into which an image is chunked when stored in RBD.
It does not represent the 'pg_num' value of the 'images' ceph pool.
Solution: replace 'rbd_store_chunk_size' with a custom 'chunk_size'
option.
---
glance/templates/job-storage-init.yaml | 2 +-
glance/values.yaml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/glance/templates/job-storage-init.yaml b/glance/templates/job-storage-init.yaml
index 82524086..77ab1c60 100644
--- a/glance/templates/job-storage-init.yaml
+++ b/glance/templates/job-storage-init.yaml
@@ -114,7 +114,7 @@ spec:
             - name: RBD_POOL_CRUSH_RULE
               value: {{ .Values.conf.glance.glance_store.rbd_store_crush_rule | quote }}
             - name: RBD_POOL_CHUNK_SIZE
-              value: {{ .Values.conf.glance.glance_store.rbd_store_chunk_size | quote }}
+              value: {{ .Values.conf.glance.glance_store.chunk_size | quote }}
             - name: RBD_POOL_SECRET
               value: {{ .Values.secrets.rbd | quote }}
 {{ end }}
diff --git a/glance/values.yaml b/glance/values.yaml
index 1428c299..a4f74379 100644
--- a/glance/values.yaml
+++ b/glance/values.yaml
@@ -256,7 +256,7 @@ conf:
       auth_version: v3
       memcache_security_strategy: ENCRYPT
     glance_store:
-      rbd_store_chunk_size: 8
+      chunk_size: 8
       rbd_store_replication: 3
       rbd_store_crush_rule: replicated_rule
      rbd_store_pool: glance.images
--
2.17.1
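For context, the RBD_POOL_CHUNK_SIZE value exported by the template above is what the chart's storage-init job hands to Ceph as the images pool's pg_num, which is why reusing glance's rbd_store_chunk_size (an object size, in MB) for it was wrong. A rough Python sketch of that behaviour, using the chart defaults shown above plus the chunk_size this commit injects; the real logic is a shell script in the glance chart, so the function below is illustrative only:

import subprocess

def ensure_images_pool(name, chunk_size, crush_rule, replication):
    # Create the pool with chunk_size as its pg_num, then apply the crush
    # rule and replica count. This only mirrors the Ceph commands involved;
    # the real storage-init script also checks whether the pool exists first.
    subprocess.check_call(['ceph', 'osd', 'pool', 'create', name, str(chunk_size)])
    subprocess.check_call(['ceph', 'osd', 'pool', 'set', name, 'crush_rule', crush_rule])
    subprocess.check_call(['ceph', 'osd', 'pool', 'set', name, 'size', str(replication)])

ensure_images_pool('glance.images', chunk_size=256,
                   crush_rule='replicated_rule', replication=3)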


@@ -53,3 +53,16 @@ NOVA_PCI_ALIAS_GPU_NVIDIA_TESLA_P40_DEVICE = "1b38"
 NOVA_PCI_ALIAS_GPU_NVIDIA_TESLA_P40_NAME = "nvidia-tesla-p40"
 NOVA_PCI_ALIAS_GPU_NVIDIA_TESLA_T4_PF_DEVICE = "1eb8"
 NOVA_PCI_ALIAS_GPU_NVIDIA_TESLA_T4_PF_NAME = "nvidia-tesla-t4-pf"
+
+CEPH_POOL_IMAGES_NAME = 'images'
+CEPH_POOL_IMAGES_CHUNK_SIZE = 256
+
+CEPH_POOL_EPHEMERAL_NAME = 'ephemeral'
+CEPH_POOL_EPHEMERAL_CHUNK_SIZE = 256
+
+CEPH_POOL_VOLUMES_NAME = 'cinder-volumes'
+CEPH_POOL_VOLUMES_APP_NAME = 'cinder-volumes'
+CEPH_POOL_VOLUMES_CHUNK_SIZE = 512
+
+CEPH_POOL_BACKUP_APP_NAME = 'cinder-backup'
+CEPH_POOL_BACKUP_CHUNK_SIZE = 256
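The values above are in line with the usual Ceph rule of thumb of roughly 100 placement groups per OSD, split across pools, divided by the replica count and rounded up to a power of two; that guideline is general Ceph guidance rather than anything stated in this commit. A small helper showing the arithmetic:

def suggested_pg_num(num_osds, replication, pool_share, pgs_per_osd=100):
    # pool_share: fraction of the cluster's data expected in this pool (0..1).
    raw = num_osds * pgs_per_osd * pool_share / replication
    pg_num = 1
    while pg_num < raw:
        pg_num *= 2          # round up to the next power of two
    return max(pg_num, 32)   # keep a sane floor for small clusters

# e.g. 24 OSDs, 2x replication, cinder-volumes holding ~40% of the data
print(suggested_pg_num(24, 2, 0.4))   # -> 512, matching CEPH_POOL_VOLUMES_CHUNK_SIZE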


@@ -87,9 +87,9 @@ class CinderHelm(openstack.OpenstackBaseHelm):
         pools = {}
         for backend in self.dbapi.storage_ceph_get_list():
             if backend.tier_name == primary_tier_name:
-                pool_name = constants.CEPH_POOL_VOLUMES_NAME
+                pool_name = app_constants.CEPH_POOL_VOLUMES_NAME
             else:
-                pool_name = "%s-%s" % (constants.CEPH_POOL_VOLUMES_NAME,
+                pool_name = "%s-%s" % (app_constants.CEPH_POOL_VOLUMES_NAME,
                                        backend.tier_name)
             rule_name = "{0}{1}{2}".format(
                 backend.tier_name, constants.CEPH_CRUSH_TIER_SUFFIX,
@@ -97,14 +97,20 @@ class CinderHelm(openstack.OpenstackBaseHelm):
             pool = {
                 'replication': replication,
                 'crush_rule': rule_name.encode('utf8', 'strict'),
-                'chunk_size': constants.CEPH_POOL_VOLUMES_CHUNK_SIZE,
-                'app_name': constants.CEPH_POOL_VOLUMES_APP_NAME
+                'chunk_size': app_constants.CEPH_POOL_VOLUMES_CHUNK_SIZE,
+                'app_name': app_constants.CEPH_POOL_VOLUMES_APP_NAME
             }
             pools[pool_name.encode('utf8', 'strict')] = pool
             if backend.name == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
                 # Backup uses the same replication and crush rule as
                 # the default storage backend
-                pools['backup'] = dict(pool)
+                pool_backup = {
+                    'replication': replication,
+                    'crush_rule': rule_name.encode('utf8', 'strict'),
+                    'chunk_size': app_constants.CEPH_POOL_BACKUP_CHUNK_SIZE,
+                    'app_name': app_constants.CEPH_POOL_BACKUP_APP_NAME
+                }
+                pools['backup'] = dict(pool_backup)

         return {
             'monitors': self._get_formatted_ceph_monitor_ips(),
@@ -177,9 +183,9 @@ class CinderHelm(openstack.OpenstackBaseHelm):
                 raise Exception("No tier present for backend %s" % bk_name)

             if tier.name == primary_tier_name:
-                rbd_pool = constants.CEPH_POOL_VOLUMES_NAME
+                rbd_pool = app_constants.CEPH_POOL_VOLUMES_NAME
             else:
-                rbd_pool = "%s-%s" % (constants.CEPH_POOL_VOLUMES_NAME,
+                rbd_pool = "%s-%s" % (app_constants.CEPH_POOL_VOLUMES_NAME,
                                       tier.name)

             conf_backends[bk_name] = {
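With the changes above, the pools section of the generated cinder Ceph overrides comes out shaped roughly as below for a single primary-tier backend. The replication factor and crush rule are taken from the configured storage backend, so the values shown for them are placeholders; the chunk_size and app_name values are the new app_constants defaults:

# Illustrative shape of the generated overrides, not literal output.
pools = {
    'cinder-volumes': {
        'replication': 2,                      # placeholder, system dependent
        'crush_rule': 'storage-tier-ruleset',  # placeholder, system dependent
        'chunk_size': 512,                     # CEPH_POOL_VOLUMES_CHUNK_SIZE
        'app_name': 'cinder-volumes',          # CEPH_POOL_VOLUMES_APP_NAME
    },
    'backup': {
        'replication': 2,                      # same backend as the default pool
        'crush_rule': 'storage-tier-ruleset',  # placeholder, system dependent
        'chunk_size': 256,                     # CEPH_POOL_BACKUP_CHUNK_SIZE
        'app_name': 'cinder-backup',           # CEPH_POOL_BACKUP_APP_NAME
    },
}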


@@ -114,7 +114,7 @@ class GlanceHelm(openstack.OpenstackBaseHelm):
             rbd_store_user = ""
             replication = 1
         else:
-            rbd_store_pool = constants.CEPH_POOL_IMAGES_NAME
+            rbd_store_pool = app_constants.CEPH_POOL_IMAGES_NAME
             rbd_store_user = RBD_STORE_USER
             replication, min_replication = \
                 StorageBackendConfig.get_ceph_pool_replication(self.dbapi)
@@ -133,6 +133,7 @@ class GlanceHelm(openstack.OpenstackBaseHelm):
                 'show_image_direct_url': True,
             },
             'glance_store': {
+                'chunk_size': app_constants.CEPH_POOL_IMAGES_CHUNK_SIZE,
                 'filesystem_store_datadir': constants.GLANCE_IMAGE_PATH,
                 'rbd_store_pool': rbd_store_pool,
                 'rbd_store_user': rbd_store_user,


@@ -636,7 +636,7 @@ class NovaHelm(openstack.OpenstackBaseHelm):
         return cpus

     def _get_storage_ceph_config(self):
-        rbd_pool = constants.CEPH_POOL_EPHEMERAL_NAME
+        rbd_pool = app_constants.CEPH_POOL_EPHEMERAL_NAME
         rbd_ceph_conf = os.path.join(constants.CEPH_CONF_PATH,
                                      constants.SB_TYPE_CEPH_CONF_FILENAME)
@@ -753,11 +753,11 @@ class NovaHelm(openstack.OpenstackBaseHelm):
         # Form the dictionary with the info for the ephemeral pool.
         # If needed, multiple pools can be specified.
         ephemeral_pool = {
-            'rbd_pool_name': constants.CEPH_POOL_EPHEMERAL_NAME,
+            'rbd_pool_name': app_constants.CEPH_POOL_EPHEMERAL_NAME,
             'rbd_user': RBD_POOL_USER,
             'rbd_crush_rule': rule_name,
             'rbd_replication': replication,
-            'rbd_chunk_size': constants.CEPH_POOL_EPHEMERAL_PG_NUM
+            'rbd_chunk_size': app_constants.CEPH_POOL_EPHEMERAL_CHUNK_SIZE
         }
         ephemeral_pools.append(ephemeral_pool)