[rook-ceph] storage backend activation

This change activates stx-openstack app support for the rook ceph
storage backend. For this, the cinder, glance, libvirt and nova charts
are now configured according to the type of ceph backend currently
available (baremetal or rook ceph). The rook ceph support also
required changing lifecycle_openstack to copy the ceph configmap from
either the rook-ceph or the kube-system namespace. Furthermore, a new
patch was added to the openstack-helm package to ensure that the nova
storage-init job can create the related ceph pool and set its size for
AIO-SX systems (size=1).
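
For reference, a simplified sketch of the new configmap-copy decision
in lifecycle_openstack is shown below. The helper name and the literal
configmap/namespace strings are illustrative only; the actual change
uses the HELM_NS_ROOK_CEPH and HELM_NS_RBD_PROVISIONER constants and
the KubeOperator API (see the lifecycle hunk further down).

    # Hedged sketch: choose the source of the ceph monitor configmap
    # that is copied into the openstack namespace, depending on which
    # ceph backend is deployed.
    def ceph_configmap_source(rook_ceph_available: bool) -> tuple:
        """Return (configmap_name, namespace) to copy from."""
        if rook_ceph_available:
            # Rook Ceph publishes ceph-etc in its own namespace.
            return ("ceph-etc", "rook-ceph")
        # Baremetal ceph keeps rbd-storage-init in kube-system.
        return ("rbd-storage-init", "kube-system")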

This change also improves the dynamic image override feature
introduced by [1], moving the images:tags overrides into a reusable
_update_image_tag_overrides method of the OpenstackBaseHelm base
class. Unit tests for this and the other methods in openstack.py are
planned for the near future.

[1]https://review.opendev.org/c/starlingx/openstack-armada-app/+/947531
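
For illustration, the behavior of the new helper can be approximated
by a standalone function; the image reference below is a placeholder,
and the real method merges the result through the existing
_update_overrides() of OpenstackBaseHelm (see the openstack.py hunk
below).

    # Hedged, standalone sketch of what _update_image_tag_overrides
    # computes: every listed images.tags key points at the same image.
    def update_image_tag_overrides(overrides: dict, images: list,
                                   tag: str) -> dict:
        tags = dict(overrides.get('images', {}).get('tags', {}))
        tags.update({image: tag for image in images})
        return {**overrides,
                'images': {**overrides.get('images', {}), 'tags': tags}}

    # Example: the cinder chart points both of its storage-init images
    # at the rook ceph config helper image (placeholder reference).
    new_overrides = update_image_tag_overrides(
        {},
        ['cinder_backup_storage_init', 'cinder_storage_init'],
        'docker.io/openstackhelm/ceph-config-helper:placeholder')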

Test Plan:
[PASS] build stx-openstack packages and tarball

Baremetal ceph deployment:
[PASS] upload and apply the stx-openstack tarball to a virtual AIO-SX
[PASS] check that ceph-config-helper image overrides are NOT applied
[PASS] create a cirros image
[PASS] create a volume from cirros image
[PASS] create a volume backup
[PASS] delete backup, volume and image

Rook ceph deployment:
[PASS] upload and apply the stx-openstack tarball to an AIO-SX
[PASS] check that ceph-config-helper image overrides are applied
[PASS] create a cirros image
[PASS] create a volume from cirros image
[PASS] create a volume backup
[PASS] delete backup, volume and image

Story: 2011388
Task: 52007

Depends-On: https://review.opendev.org/c/starlingx/root/+/947876

Change-Id: I461ea05f33ece776b2cd92f780882943b62f2168
Signed-off-by: Alex Figueiredo <alex.fernandesfigueiredo@windriver.com>

@@ -0,0 +1,29 @@
From c9888799b8afae1500142d534c305a6cc38ebbe3 Mon Sep 17 00:00:00 2001
From: Alex Figueiredo <alex.fernandesfigueiredo@windriver.com>
Date: Wed, 2 Apr 2025 11:20:52 -0300
Subject: [PATCH] Enable ceph pool creation for AIO systems
The ceph admin tool/CLI requires the option "--yes-i-really-mean-it" to enable
the storage init job to set the pool size to 1 for AIO-SX deployments.
Signed-off-by: Alex Figueiredo <alex.fernandesfigueiredo@windriver.com>
---
nova/templates/bin/_storage-init.sh.tpl | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/nova/templates/bin/_storage-init.sh.tpl b/nova/templates/bin/_storage-init.sh.tpl
index cb3505d4..70c71a9f 100644
--- a/nova/templates/bin/_storage-init.sh.tpl
+++ b/nova/templates/bin/_storage-init.sh.tpl
@@ -34,7 +34,7 @@ if [ "x$STORAGE_BACKEND" == "xrbd" ]; then
fi
size_protection=$(ceph osd pool get $1 nosizechange | cut -f2 -d: | tr -d '[:space:]')
ceph osd pool set $1 nosizechange 0
- ceph osd pool set $1 size ${RBD_POOL_REPLICATION}
+ ceph osd pool set $1 size ${RBD_POOL_REPLICATION} --yes-i-really-mean-it
ceph osd pool set $1 nosizechange ${size_protection}
ceph osd pool set $1 crush_rule "${RBD_POOL_CRUSH_RULE}"
}
--
2.34.1


@@ -21,3 +21,4 @@
0021-horizon-Allows-setting-Django-s-CSRF_TRUSTED_ORIGINS.patch
0022-horizon-fix-templating-of-list-of-strings-for-CSRF_T.patch
0023-Change-uWSGI-socket-to-allow-IPv6-binding.patch
0024-Enable-ceph-pool-creation-for-AIO-systems.patch


@@ -107,17 +107,20 @@ CEPH_ROOK_MANAGER_APP = 'rook-ceph-mgr'
CEPH_ROOK_MANAGER_SVC = 'rook-ceph-mgr-restful'
CEPH_ROOK_POLL_CRUSH_RULE = 'kube-rbd'
CEPH_POOL_IMAGES_NAME = 'images'
CEPH_RBD_POOL_USER_CINDER = "cinder"
CEPH_RBD_POOL_USER_GLANCE = 'images'
CEPH_POOL_IMAGES_NAME = constants.CEPH_POOL_IMAGES_NAME
CEPH_POOL_IMAGES_CHUNK_SIZE = 256
CEPH_POOL_EPHEMERAL_NAME = 'ephemeral'
CEPH_POOL_EPHEMERAL_NAME = constants.CEPH_POOL_EPHEMERAL_NAME
CEPH_POOL_EPHEMERAL_CHUNK_SIZE = 256
CEPH_POOL_VOLUMES_NAME = 'cinder-volumes'
CEPH_POOL_VOLUMES_NAME = constants.CEPH_POOL_VOLUMES_NAME
CEPH_POOL_VOLUMES_APP_NAME = 'cinder-volumes'
CEPH_POOL_VOLUMES_CHUNK_SIZE = 512
CEPH_POOL_BACKUP_NAME = 'cinder.backups'
CEPH_POOL_BACKUP_NAME = 'backups'
CEPH_POOL_BACKUP_APP_NAME = 'cinder-backup'
CEPH_POOL_BACKUP_CHUNK_SIZE = 256


@@ -4,10 +4,10 @@
# SPDX-License-Identifier: Apache-2.0
#
from oslo_log import log as logging
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import storage_backend_conf
from sysinv.common import utils
from sysinv.helm import common
from tsconfig import tsconfig as tsc
@@ -19,8 +19,8 @@ from k8sapp_openstack.utils import get_image_rook_ceph
from k8sapp_openstack.utils import is_netapp_available
from k8sapp_openstack.utils import is_rook_ceph_backend_available
LOG = logging.getLogger(__name__)
ROOK_CEPH_BACKEND_NAME = app_constants.CEPH_ROOK_BACKEND_NAME
NETAPP_NFS_BACKEND_NAME = 'netapp-nfs'
NETAPP_ISCSI_BACKEND_NAME = 'netapp-iscsi'
@@ -57,7 +57,7 @@ class CinderHelm(openstack.OpenstackBaseHelm):
backend_overrides = self._get_common_backend_overrides()
# Ceph and Rook Ceph are mutually exclusive, so it's either one or the other
if self._is_rook_ceph():
if is_rook_ceph_backend_available():
cinder_overrides = self._get_conf_rook_ceph_cinder_overrides(cinder_overrides)
backend_overrides = self._get_conf_rook_ceph_backends_overrides(backend_overrides)
ceph_overrides = self._get_conf_rook_ceph_overrides()
@@ -121,18 +121,11 @@ class CinderHelm(openstack.OpenstackBaseHelm):
# are not necessarily the same. Therefore, the ceph client image must be
# dynamically configured based on the ceph backend currently deployed.
if is_rook_ceph_backend_available():
rook_ceph_config_helper = get_image_rook_ceph()
overrides[common.HELM_NS_OPENSTACK] = self._update_overrides(
overrides[common.HELM_NS_OPENSTACK],
{
'images': {
'tags': {
'cinder_backup_storage_init': rook_ceph_config_helper,
'cinder_storage_init': rook_ceph_config_helper
}
}
}
)
overrides[common.HELM_NS_OPENSTACK] =\
self._update_image_tag_overrides(
overrides[common.HELM_NS_OPENSTACK],
['cinder_backup_storage_init', 'cinder_storage_init'],
get_image_rook_ceph())
if namespace in self.SUPPORTED_NAMESPACES:
return overrides[namespace]
@@ -284,7 +277,7 @@ class CinderHelm(openstack.OpenstackBaseHelm):
'volume_backend_name': bk_name,
'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
'rbd_pool': rbd_pool.encode('utf8', 'strict'),
'rbd_user': 'cinder',
'rbd_user': app_constants.CEPH_RBD_POOL_USER_CINDER,
'rbd_ceph_conf':
(constants.CEPH_CONF_PATH +
constants.SB_TYPE_CEPH_CONF_FILENAME),
@@ -415,10 +408,10 @@ class CinderHelm(openstack.OpenstackBaseHelm):
# Retrieve the existing enabled_backends value, or set it to an empty string if not present
existing_backends = cinder_overrides['DEFAULT'].get('enabled_backends', '')
# Append 'ceph-store' if it's not already in the enabled_backends list
# Append 'ceph-rook-store' if it's not already in the enabled_backends list
backends_list = existing_backends.split(',') if existing_backends else []
if ROOK_CEPH_BACKEND_NAME not in backends_list:
backends_list.append(ROOK_CEPH_BACKEND_NAME)
if app_constants.CEPH_ROOK_BACKEND_NAME not in backends_list:
backends_list.append(app_constants.CEPH_ROOK_BACKEND_NAME)
# Update Cinder overrides
cinder_overrides['DEFAULT'].update({
@@ -426,26 +419,35 @@ class CinderHelm(openstack.OpenstackBaseHelm):
# If the user doesn't want Ceph Rook to be the default backend,
# he can pass a Helm override changing this value, which will
# override this value
'default_volume_type': ROOK_CEPH_BACKEND_NAME,
'default_volume_type': app_constants.CEPH_ROOK_BACKEND_NAME,
})
return cinder_overrides
def _get_conf_rook_ceph_overrides(self):
replication = 2
if utils.is_aio_simplex_system(self.dbapi):
replication = 1
rook_backend = storage_backend_conf.StorageBackendConfig\
.get_configured_backend(self.dbapi, constants.SB_TYPE_CEPH_ROOK)
if not rook_backend:
LOG.error("No rook ceph backend configured")
return {}
replication, _ = storage_backend_conf\
.StorageBackendConfig\
.get_ceph_pool_replication(self.dbapi, ceph_backend=rook_backend)
chunk_size = self._estimate_ceph_pool_pg_num(self.dbapi.istor_get_all())
pools = {
'cinder-volumes': {
'app_name': 'cinder-volumes',
'chunk_size': 8,
'crush_rule': 'kube-rbd',
f'{app_constants.CEPH_POOL_VOLUMES_NAME}': {
'app_name': app_constants.CEPH_POOL_VOLUMES_APP_NAME,
'chunk_size': min(chunk_size,
app_constants.CEPH_POOL_VOLUMES_CHUNK_SIZE),
'crush_rule': app_constants.CEPH_ROOK_POLL_CRUSH_RULE,
'replication': replication,
},
'backup': {
'app_name': 'cinder-volumes',
'chunk_size': 8,
'crush_rule': 'kube-rbd',
f'{app_constants.CEPH_POOL_BACKUP_NAME}': {
'app_name': app_constants.CEPH_POOL_BACKUP_APP_NAME,
'chunk_size': min(chunk_size,
app_constants.CEPH_POOL_BACKUP_CHUNK_SIZE),
'crush_rule': app_constants.CEPH_ROOK_POLL_CRUSH_RULE,
'replication': replication,
},
}
@@ -458,12 +460,12 @@ class CinderHelm(openstack.OpenstackBaseHelm):
return ceph_override
def _get_conf_rook_ceph_backends_overrides(self, backend_overrides):
backend_overrides[ROOK_CEPH_BACKEND_NAME] = {
backend_overrides[app_constants.CEPH_ROOK_BACKEND_NAME] = {
'image_volume_cache_enabled': 'True',
'volume_backend_name': ROOK_CEPH_BACKEND_NAME,
'volume_backend_name': app_constants.CEPH_ROOK_BACKEND_NAME,
'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
'rbd_pool': 'cinder-volumes',
'rbd_user': 'cinder',
'rbd_pool': app_constants.CEPH_POOL_VOLUMES_NAME,
'rbd_user': app_constants.CEPH_RBD_POOL_USER_CINDER,
'rbd_ceph_conf':
(constants.CEPH_CONF_PATH +
constants.SB_TYPE_CEPH_CONF_FILENAME),
@ -472,14 +474,14 @@ class CinderHelm(openstack.OpenstackBaseHelm):
ceph_uuid = get_ceph_uuid()
if ceph_uuid:
backend_overrides['rbd1']['rbd_secret_uuid'] = ceph_uuid
backend_overrides[ROOK_CEPH_BACKEND_NAME]['rbd_secret_uuid'] = ceph_uuid
backend_overrides[app_constants.CEPH_ROOK_BACKEND_NAME]['rbd_secret_uuid'] = ceph_uuid
return backend_overrides
def _get_ceph_client_rook_overrides(self):
return {
'user_secret_name': constants.K8S_RBD_PROV_ADMIN_SECRET_NAME,
'internal_ceph_backend': ROOK_CEPH_BACKEND_NAME,
'internal_ceph_backend': app_constants.CEPH_ROOK_BACKEND_NAME,
}
def _get_ceph_client_overrides(self):


@@ -7,7 +7,6 @@
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils
from sysinv.common.storage_backend_conf import StorageBackendConfig
from sysinv.helm import common
@@ -17,10 +16,6 @@ from k8sapp_openstack.utils import get_image_rook_ceph
from k8sapp_openstack.utils import is_rook_ceph_backend_available
# Info used in the Glance Helm chart.
RBD_STORE_USER = 'images'
class GlanceHelm(openstack.OpenstackBaseHelm):
"""Class to encapsulate helm operations for the glance chart"""
@@ -32,7 +27,7 @@ class GlanceHelm(openstack.OpenstackBaseHelm):
AUTH_USERS = ['glance']
def get_overrides(self, namespace=None):
self._rook_ceph = self._is_rook_ceph()
self._rook_ceph = is_rook_ceph_backend_available()
overrides = {
common.HELM_NS_OPENSTACK: {
@@ -53,17 +48,11 @@ class GlanceHelm(openstack.OpenstackBaseHelm):
# are not necessarily the same. Therefore, the ceph client image must be
# dynamically configured based on the ceph backend currently deployed.
if is_rook_ceph_backend_available():
rook_ceph_config_helper = get_image_rook_ceph()
overrides[common.HELM_NS_OPENSTACK] = self._update_overrides(
overrides[common.HELM_NS_OPENSTACK],
{
'images': {
'tags': {
'glance_storage_init': rook_ceph_config_helper,
}
}
}
)
overrides[common.HELM_NS_OPENSTACK] =\
self._update_image_tag_overrides(
overrides[common.HELM_NS_OPENSTACK],
['glance_storage_init'],
get_image_rook_ceph())
if namespace in self.SUPPORTED_NAMESPACES:
return overrides[namespace]
@@ -141,18 +130,16 @@ class GlanceHelm(openstack.OpenstackBaseHelm):
rbd_store_pool = ""
rbd_store_user = ""
replication = 1
elif self._rook_ceph:
rbd_store_pool = constants.CEPH_POOL_IMAGES_NAME
rbd_store_user = RBD_STORE_USER
replication = 2
if utils.is_aio_simplex_system(self.dbapi):
replication = 1
else:
rbd_store_pool = app_constants.CEPH_POOL_IMAGES_NAME
rbd_store_user = RBD_STORE_USER
replication, min_replication = \
StorageBackendConfig.get_ceph_pool_replication(self.dbapi)
rbd_store_user = app_constants.CEPH_RBD_POOL_USER_GLANCE
target = constants.SB_TYPE_CEPH_ROOK if self._rook_ceph\
else constants.SB_TYPE_CEPH
backend = StorageBackendConfig.get_configured_backend(self.dbapi,
target)
replication, _ = StorageBackendConfig.get_ceph_pool_replication(
api=self.dbapi,
ceph_backend=backend)
if not self._rook_ceph:
# Only the primary Ceph tier is used for the glance images pool
@@ -162,7 +149,7 @@ class GlanceHelm(openstack.OpenstackBaseHelm):
constants.CEPH_CRUSH_TIER_SUFFIX,
"-ruleset").replace('-', '_')
else:
rule_name = "storage_tier_ruleset"
rule_name = app_constants.CEPH_ROOK_POLL_CRUSH_RULE
chunk_size = self._estimate_ceph_pool_pg_num(self.dbapi.istor_get_all())


@@ -44,17 +44,11 @@ class GnocchiHelm(openstack.OpenstackBaseHelm):
# are not necessarily the same. Therefore, the ceph client image must be
# dynamically configured based on the ceph backend currently deployed.
if is_rook_ceph_backend_available():
rook_ceph_config_helper = get_image_rook_ceph()
overrides[common.HELM_NS_OPENSTACK] = self._update_overrides(
overrides[common.HELM_NS_OPENSTACK],
{
'images': {
'tags': {
'gnocchi_storage_init': rook_ceph_config_helper,
}
}
}
)
overrides[common.HELM_NS_OPENSTACK] =\
self._update_image_tag_overrides(
overrides[common.HELM_NS_OPENSTACK],
['gnocchi_storage_init'],
get_image_rook_ceph())
if namespace in self.SUPPORTED_NAMESPACES:
return overrides[namespace]


@@ -53,17 +53,11 @@ class LibvirtHelm(openstack.OpenstackBaseHelm):
# are not necessarily the same. Therefore, the ceph client image must be
# dynamically configured based on the ceph backend currently deployed.
if is_rook_ceph_backend_available():
rook_ceph_config_helper = get_image_rook_ceph()
overrides[common.HELM_NS_OPENSTACK] = self._update_overrides(
overrides[common.HELM_NS_OPENSTACK],
{
'images': {
'tags': {
'ceph_config_helper': rook_ceph_config_helper,
}
}
}
)
overrides[common.HELM_NS_OPENSTACK] =\
self._update_image_tag_overrides(
overrides[common.HELM_NS_OPENSTACK],
['ceph_config_helper'],
get_image_rook_ceph())
if namespace in self.SUPPORTED_NAMESPACES:
return overrides[namespace]
@@ -75,7 +69,7 @@ class LibvirtHelm(openstack.OpenstackBaseHelm):
def _get_conf_overrides(self):
admin_keyring = 'null'
if self._is_rook_ceph():
if is_rook_ceph_backend_available():
admin_keyring = self._get_rook_ceph_admin_keyring()
overrides = {


@@ -25,11 +25,6 @@ from k8sapp_openstack.utils import is_rook_ceph_backend_available
LOG = logging.getLogger(__name__)
# Align ephemeral rbd_user with the cinder rbd_user so that the same libvirt
# secret can be used for accessing both pools. This also aligns with the
# behavior defined in nova/virt/libvirt/volume/net.py:_set_auth_config_rbd()
RBD_POOL_USER = "cinder"
DEFAULT_NOVA_PCI_ALIAS = [
{"vendor_id": constants.NOVA_PCI_ALIAS_QAT_PF_VENDOR,
"product_id": constants.NOVA_PCI_ALIAS_QAT_DH895XCC_PF_DEVICE,
@@ -88,7 +83,7 @@ class NovaHelm(openstack.OpenstackBaseHelm):
self.rbd_config = {}
def get_overrides(self, namespace=None):
self._rook_ceph = self._is_rook_ceph()
self._rook_ceph = is_rook_ceph_backend_available()
self.labels_by_hostid = self._get_host_labels()
self.cpus_by_hostid = self._get_host_cpus()
@@ -169,18 +164,11 @@ class NovaHelm(openstack.OpenstackBaseHelm):
# are not necessarily the same. Therefore, the ceph client image must be
# dynamically configured based on the ceph backend currently deployed.
if is_rook_ceph_backend_available():
rook_ceph_config_helper = get_image_rook_ceph()
overrides[common.HELM_NS_OPENSTACK] = self._update_overrides(
overrides[common.HELM_NS_OPENSTACK],
{
'images': {
'tags': {
'nova_service_cleaner': rook_ceph_config_helper,
'nova_storage_init': rook_ceph_config_helper
}
}
}
)
overrides[common.HELM_NS_OPENSTACK] =\
self._update_image_tag_overrides(
overrides[common.HELM_NS_OPENSTACK],
['nova_service_cleaner', 'nova_storage_init'],
get_image_rook_ceph())
if namespace in self.SUPPORTED_NAMESPACES:
return overrides[namespace]
@@ -724,52 +712,29 @@ class NovaHelm(openstack.OpenstackBaseHelm):
def get_region_name(self):
return self._get_service_region_name(self.SERVICE_NAME)
def _get_rook_ceph_rbd_ephemeral_storage(self):
ephemeral_storage_conf = {}
ephemeral_pools = []
# Get the values for replication and min replication from the storage
# backend attributes.
replication = 2
if utils.is_aio_simplex_system(self.dbapi):
replication = 1
# Form the dictionary with the info for the ephemeral pool.
# If needed, multiple pools can be specified.
ephemeral_pool = {
'rbd_pool_name': constants.CEPH_POOL_EPHEMERAL_NAME,
'rbd_user': RBD_POOL_USER,
'rbd_crush_rule': "storage_tier_ruleset",
'rbd_replication': replication,
'rbd_chunk_size': constants.CEPH_POOL_EPHEMERAL_PG_NUM
}
ephemeral_pools.append(ephemeral_pool)
ephemeral_storage_conf = {
'type': 'rbd',
'rbd_pools': ephemeral_pools
}
return ephemeral_storage_conf
def _get_rbd_ephemeral_storage(self):
if self._rook_ceph:
return self._get_rook_ceph_rbd_ephemeral_storage()
ephemeral_storage_conf = {}
ephemeral_pools = []
# Get the values for replication and min replication from the storage
# backend attributes.
replication, min_replication = \
StorageBackendConfig.get_ceph_pool_replication(self.dbapi)
target = constants.SB_TYPE_CEPH_ROOK if self._rook_ceph\
else constants.SB_TYPE_CEPH
backend = StorageBackendConfig.get_configured_backend(self.dbapi,
target)
if not backend:
LOG.error("No storage backend configured")
return {}
replication, _ = StorageBackendConfig.get_ceph_pool_replication(
api=self.dbapi,
ceph_backend=backend)
# For now, the ephemeral pool will only be on the primary Ceph tier
rule_name = "{0}{1}{2}".format(
ceph_rule_name = "{0}{1}{2}".format(
constants.SB_TIER_DEFAULT_NAMES[
constants.SB_TIER_TYPE_CEPH],
constants.CEPH_CRUSH_TIER_SUFFIX,
"-ruleset").replace('-', '_')
rook_ceph_rule_name = app_constants.CEPH_ROOK_POLL_CRUSH_RULE
rule_name = rook_ceph_rule_name if self._rook_ceph else ceph_rule_name
chunk_size = self._estimate_ceph_pool_pg_num(self.dbapi.istor_get_all())
@@ -777,10 +742,15 @@ class NovaHelm(openstack.OpenstackBaseHelm):
# If needed, multiple pools can be specified.
ephemeral_pool = {
'rbd_pool_name': app_constants.CEPH_POOL_EPHEMERAL_NAME,
'rbd_user': RBD_POOL_USER,
# Align ephemeral rbd_user with the cinder rbd_user so that the same
# libvirt secret can be used for accessing both pools. This also
# aligns with the behavior defined in
# nova/virt/libvirt/volume/net.py:_set_auth_config_rbd()
'rbd_user': app_constants.CEPH_RBD_POOL_USER_CINDER,
'rbd_crush_rule': rule_name,
'rbd_replication': replication,
'rbd_chunk_size': min(chunk_size, app_constants.CEPH_POOL_EPHEMERAL_CHUNK_SIZE)
'rbd_chunk_size': min(chunk_size,
app_constants.CEPH_POOL_EPHEMERAL_CHUNK_SIZE)
}
ephemeral_pools.append(ephemeral_pool)


@@ -261,6 +261,28 @@ class OpenstackBaseHelm(FluxCDBaseHelm):
dictionary[key] = value
return dictionary
def _update_image_tag_overrides(self,
overrides: dict,
images: list,
tag: str):
"""Overrides the images.tags for a given list of images
Args:
overrides (dict): base overrides to be updated
images (list): list of images to be updated, as defined in the
images.tags keys of the chart values
tag (str): new image to override the values of the given
images.tags. Must be in the standard <repo:tag> format
"""
tags_overrides = dict(zip(images, [tag] * len(images)))
images_overrides = {
'images': {
'tags': tags_overrides
}
}
overrides_updated = self._update_overrides(overrides, images_overrides)
return overrides_updated
def _get_endpoints_identity_overrides(self, service_name, users,
service_users=()):
# Returns overrides for admin and individual users
@@ -557,7 +579,7 @@ class OpenstackBaseHelm(FluxCDBaseHelm):
return uefi_config
def _get_ceph_client_overrides(self):
if self._is_rook_ceph():
if app_utils.is_rook_ceph_backend_available():
return {
'user_secret_name': constants.K8S_RBD_PROV_ADMIN_SECRET_NAME,
}
@@ -721,7 +743,7 @@ class OpenstackBaseHelm(FluxCDBaseHelm):
try:
kube = kubernetes.KubeOperator()
keyring = kube.kube_get_secret(constants.K8S_RBD_PROV_ADMIN_SECRET_NAME,
common.HELM_NS_STORAGE_PROVISIONER)
app_constants.HELM_NS_ROOK_CEPH)
return base64.b64decode(keyring.data['key']).decode('utf-8')
except Exception:
pass


@@ -14,6 +14,7 @@ from oslo_log import log as logging
from sysinv.api.controllers.v1 import utils
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import kubernetes
from sysinv.helm import common
from sysinv.helm import lifecycle_base as base
from sysinv.helm import lifecycle_utils as lifecycle_utils
@@ -23,6 +24,7 @@ from sysinv.helm.lifecycle_constants import LifecycleConstants
from k8sapp_openstack import utils as app_utils
from k8sapp_openstack.common import constants as app_constants
from k8sapp_openstack.helpers import ldap
from k8sapp_openstack.utils import is_rook_ceph_backend_available
LOG = logging.getLogger(__name__)
@@ -208,29 +210,42 @@ class OpenstackAppLifecycleOperator(base.AppLifecycleOperator):
lifecycle_utils.create_local_registry_secrets(app_op, app, hook_info)
try:
kube = kubernetes.KubeOperator()
# Create openstack namespace if it doesn't exist
# Copy the latest configmap with the ceph monitor information
# required by the application into the application namespace
if app_op._kube.kube_get_config_map(
if kube.kube_get_config_map(
self.APP_OPENSTACK_RESOURCE_CONFIG_MAP,
common.HELM_NS_OPENSTACK):
# Already have one. Delete it, in case it changed
app_op._kube.kube_delete_config_map(
kube.kube_delete_config_map(
self.APP_OPENSTACK_RESOURCE_CONFIG_MAP,
common.HELM_NS_OPENSTACK)
# Read rbd-storage-init config map and rename it to ceph-etc
config_map_body = app_op._kube.kube_read_config_map(
self.APP_KUBESYSTEM_RESOURCE_CONFIG_MAP,
common.HELM_NS_RBD_PROVISIONER)
if is_rook_ceph_backend_available():
# Read ceph-etc config map from rook-ceph namespace
config_map_name = self.APP_OPENSTACK_RESOURCE_CONFIG_MAP
config_map_ns = app_constants.HELM_NS_ROOK_CEPH
else:
# Read rbd-storage-init config map from kube-system namespace
config_map_name = self.APP_KUBESYSTEM_RESOURCE_CONFIG_MAP
config_map_ns = common.HELM_NS_RBD_PROVISIONER
config_map_body.metadata.resource_version = None
config_map_body.metadata.namespace = common.HELM_NS_OPENSTACK
config_map_body.metadata.name = self.APP_OPENSTACK_RESOURCE_CONFIG_MAP
config_map_body = kube.kube_read_config_map(config_map_name,
config_map_ns)
# Create configmap with correct name
app_op._kube.kube_create_config_map(
common.HELM_NS_OPENSTACK,
config_map_body)
if config_map_body:
config_map_body.metadata.resource_version = None
config_map_body.metadata.namespace = common.HELM_NS_OPENSTACK
config_map_body.metadata.name = self.APP_OPENSTACK_RESOURCE_CONFIG_MAP
# Create configmap with correct name
kube.kube_create_config_map(
common.HELM_NS_OPENSTACK,
config_map_body)
else:
raise exception.LifecycleMissingInfo(
f"Missing {self.APP_OPENSTACK_RESOURCE_CONFIG_MAP} config map")
# Perform pre apply LDAP-related actions.
self._pre_apply_ldap_actions(app)