Reduce number of PGs
to avoid a warning caused by too many PGs being assigned to a single OSD. The warning is seen in the scenario 004 integration job because the Rados Gateway creates additional pools. Change-Id: Ice01e0ea8313ae9732c0e48003f886fb85ecc9fd
This commit is contained in:
parent
49aec62c40
commit
6c2832696c
|
@ -10,9 +10,14 @@
|
|||
# (optional) Flag if Ceph RGW will provide swift
|
||||
# services for openstack
|
||||
#
|
||||
# [*pg_num*]
|
||||
# (optional) Number of PGs per pool.
|
||||
# Defaults to 16.
|
||||
#
|
||||
class openstack_integration::ceph (
|
||||
$deploy_rgw = false,
|
||||
$swift_dropin = false,
|
||||
$pg_num = 16,
|
||||
) {
|
||||
|
||||
include openstack_integration::config
|
||||
|
@ -64,6 +69,7 @@ test -b /dev/ceph_vg/lv_data
|
|||
mgr_key => 'AQD7kyJQQGoOBhAAqrPAqSopSwPrrfMMomzVdw==',
|
||||
osd_max_object_name_len => 256,
|
||||
osd_max_object_namespace_len => 64,
|
||||
osd_pool_default_pg_num => $pg_num,
|
||||
client_keys => {
|
||||
'client.admin' => {
|
||||
'secret' => 'AQD7kyJQQGoOBhAAqrPAqSopSwPrrfMMomzVdw==',
|
||||
|
@ -96,7 +102,9 @@ test -b /dev/ceph_vg/lv_data
|
|||
}
|
||||
|
||||
$ceph_pools = ['glance', 'nova', 'cinder', 'gnocchi']
|
||||
ceph::pool { $ceph_pools: }
|
||||
ceph::pool { $ceph_pools:
|
||||
pg_num => $pg_num,
|
||||
}
|
||||
|
||||
class { 'ceph::profile::mgr': }
|
||||
class { 'ceph::profile::mon': }
|
||||
|
@ -104,8 +112,7 @@ test -b /dev/ceph_vg/lv_data
|
|||
|
||||
# Extra Ceph configuration to increase performances
|
||||
$ceph_extra_config = {
|
||||
'global/osd_journal_size' => { value => '100' },
|
||||
|
||||
'global/osd_journal_size' => { value => '100' },
|
||||
}
|
||||
|
||||
class { 'ceph::conf':
|
||||
|
|
Loading…
Reference in New Issue