From 6c2832696cb32b353139178d0d9b21ba954c2134 Mon Sep 17 00:00:00 2001
From: Takashi Kajinami
Date: Thu, 25 May 2023 22:15:54 +0900
Subject: [PATCH] Reduce number of PGs

to avoid warning caused by too many pgs assigned to a single OSD.
The warning is seen in scenario 004 integration job because
Rados Gateway creates additional pools.

Change-Id: Ice01e0ea8313ae9732c0e48003f886fb85ecc9fd
---
 manifests/ceph.pp | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/manifests/ceph.pp b/manifests/ceph.pp
index 6f7379ba9..efc5e5d09 100644
--- a/manifests/ceph.pp
+++ b/manifests/ceph.pp
@@ -10,9 +10,14 @@
 #   (optional) Flag if Ceph RGW will provide swift
 #   services for openstack
 #
+# [*pg_num*]
+#   (optional) Number of PGs per pool.
+#   Defaults to 16.
+#
 class openstack_integration::ceph (
   $deploy_rgw   = false,
   $swift_dropin = false,
+  $pg_num       = 16,
 ) {

   include openstack_integration::config
@@ -64,6 +69,7 @@ test -b /dev/ceph_vg/lv_data
     mgr_key                      => 'AQD7kyJQQGoOBhAAqrPAqSopSwPrrfMMomzVdw==',
     osd_max_object_name_len      => 256,
     osd_max_object_namespace_len => 64,
+    osd_pool_default_pg_num      => $pg_num,
     client_keys                  => {
       'client.admin' => {
         'secret' => 'AQD7kyJQQGoOBhAAqrPAqSopSwPrrfMMomzVdw==',
@@ -96,7 +102,9 @@ test -b /dev/ceph_vg/lv_data
   }

   $ceph_pools = ['glance', 'nova', 'cinder', 'gnocchi']
-  ceph::pool { $ceph_pools: }
+  ceph::pool { $ceph_pools:
+    pg_num => $pg_num,
+  }

   class { 'ceph::profile::mgr': }
   class { 'ceph::profile::mon': }
@@ -104,8 +112,7 @@ test -b /dev/ceph_vg/lv_data

   # Extra Ceph configuration to increase performances
   $ceph_extra_config = {
-    'global/osd_journal_size' => { value => '100' },
-
+    'global/osd_journal_size' => { value => '100' },
   }

   class { 'ceph::conf':