Fix ceph pool creation during stx-openstack apply

Previously, the helm overrides for nova, cinder and glance defined 512,
256 and 256, respectively, as the PG_NUM for their Ceph pools. Up to
Mimic, Ceph would only issue a warning when this number exceeded the
number of OSDs * 100; Nautilus now returns an error and requires either
the expected_num_objects parameter or --yes-i-really-mean-it. Neither
option is supported by openstack-helm.

The pool creation is adjusted to choose PG_NUM based on the number of
OSDs available. The steps are (see the sketch below):
- multiply the number of OSDs by 100 and subtract 1;
- take the nearest power of two below that result;
- cap PG_NUM at the previous defaults, since those are already high
  numbers.
This logic yields roughly the same values described here:
https://docs.ceph.com/en/nautilus/rados/operations/placement-groups/#a-preselection-of-pg-num
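
A minimal sketch of this estimation, assuming a plain OSD count as
input; the real _estimate_ceph_pool_pg_num helper (added by the
Depends-On change) receives the istor list from sysinv and may differ
in detail:

    import math

    def estimate_ceph_pool_pg_num(num_osds, default_chunk_size):
        # Keep the previous default when no OSDs are reported yet.
        if num_osds <= 0:
            return default_chunk_size
        # Number of OSDs times 100, minus 1.
        target = num_osds * 100 - 1
        # Nearest power of two below that result.
        pg_num = 2 ** int(math.log2(target))
        # Never exceed the previous default, which is already high.
        return min(pg_num, default_chunk_size)

    # e.g. 2 OSDs -> 128 for the cinder/glance pools; 8 OSDs -> capped at 256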

This resolves the error in which Ceph demands expected_num_objects when
it considers PG_NUM too high.

Test plan:
- StarlingX builds successfully
- stx-openstack builds successfully
- stx-openstack applies successfully and the Ceph pools are created
  accordingly
- PG_NUM can still be overridden on the Ceph pools by changing the
  chunk_size value through helm overrides (see the example below)
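
A hypothetical example of such an override for the cinder volumes pool;
the conf.ceph.pools value path and pool name are assumptions and should
be checked against the overrides the chart actually generates:

    # Inspect the generated overrides first (value path below is assumed).
    system helm-override-show stx-openstack cinder openstack
    # Lower the PG_NUM cap for the cinder-volumes pool, then re-apply.
    system helm-override-update stx-openstack cinder openstack \
        --set conf.ceph.pools.cinder-volumes.chunk_size=128
    system application-apply stx-openstack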

Closes-Bug: #1949360
Depends-On: I222bee29bcaa09a95a3706c72dd21b8ed3efbe60
Signed-off-by: Delfino Curado <delfinogomes.curadofilho@windriver.com>
Change-Id: Ia1416e64afcdf91b86afdf750bf5b3a1727db985
Author: Delfino Curado <delfinogomes.curadofilho@windriver.com>
Date:   2021-11-10 17:51:27 -05:00
Parent: 7bbdc856ba
Commit: c28605b14f

3 changed files with 11 additions and 4 deletions

@@ -108,10 +108,13 @@ class CinderHelm(openstack.OpenstackBaseHelm):
     rule_name = "{0}{1}{2}".format(
         backend.tier_name, constants.CEPH_CRUSH_TIER_SUFFIX,
         "-ruleset").replace('-', '_')
+    chunk_size = self._estimate_ceph_pool_pg_num(self.dbapi.istor_get_all())
     pool = {
         'replication': replication,
         'crush_rule': rule_name.encode('utf8', 'strict'),
-        'chunk_size': app_constants.CEPH_POOL_VOLUMES_CHUNK_SIZE,
+        'chunk_size': min(chunk_size, app_constants.CEPH_POOL_VOLUMES_CHUNK_SIZE),
         'app_name': app_constants.CEPH_POOL_VOLUMES_APP_NAME
     }
     pools[pool_name.encode('utf8', 'strict')] = pool
@@ -121,7 +124,7 @@ class CinderHelm(openstack.OpenstackBaseHelm):
     pool_backup = {
         'replication': replication,
         'crush_rule': rule_name.encode('utf8', 'strict'),
-        'chunk_size': app_constants.CEPH_POOL_BACKUP_CHUNK_SIZE,
+        'chunk_size': min(chunk_size, app_constants.CEPH_POOL_BACKUP_CHUNK_SIZE),
         'app_name': app_constants.CEPH_POOL_BACKUP_APP_NAME
     }
     pools['backup'] = dict(pool_backup)

@@ -141,6 +141,8 @@ class GlanceHelm(openstack.OpenstackBaseHelm):
     else:
         rule_name = "storage_tier_ruleset"
+    chunk_size = self._estimate_ceph_pool_pg_num(self.dbapi.istor_get_all())
     conf = {
         'glance': {
             'DEFAULT': {
@@ -148,7 +150,7 @@ class GlanceHelm(openstack.OpenstackBaseHelm):
                 'show_image_direct_url': True,
             },
             'glance_store': {
-                'chunk_size': app_constants.CEPH_POOL_IMAGES_CHUNK_SIZE,
+                'chunk_size': min(chunk_size, app_constants.CEPH_POOL_IMAGES_CHUNK_SIZE),
                 'filesystem_store_datadir': constants.GLANCE_IMAGE_PATH,
                 'rbd_store_pool': rbd_store_pool,
                 'rbd_store_user': rbd_store_user,

@@ -786,6 +786,8 @@ class NovaHelm(openstack.OpenstackBaseHelm):
         constants.CEPH_CRUSH_TIER_SUFFIX,
         "-ruleset").replace('-', '_')
+    chunk_size = self._estimate_ceph_pool_pg_num(self.dbapi.istor_get_all())
     # Form the dictionary with the info for the ephemeral pool.
     # If needed, multiple pools can be specified.
     ephemeral_pool = {
@@ -793,7 +795,7 @@ class NovaHelm(openstack.OpenstackBaseHelm):
         'rbd_user': RBD_POOL_USER,
         'rbd_crush_rule': rule_name,
         'rbd_replication': replication,
-        'rbd_chunk_size': app_constants.CEPH_POOL_EPHEMERAL_CHUNK_SIZE
+        'rbd_chunk_size': min(chunk_size, app_constants.CEPH_POOL_EPHEMERAL_CHUNK_SIZE)
     }
     ephemeral_pools.append(ephemeral_pool)