diff --git a/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/cinder.py b/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/cinder.py
index b1871857..6206ca63 100644
--- a/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/cinder.py
+++ b/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/cinder.py
@@ -10,6 +10,7 @@ from k8sapp_openstack.helm import openstack
 import tsconfig.tsconfig as tsc
 from sysinv.common import constants
 from sysinv.common import exception
+from sysinv.common import utils
 from sysinv.common.storage_backend_conf import StorageBackendConfig
 from sysinv.helm import common
 
@@ -40,6 +41,15 @@ class CinderHelm(openstack.OpenstackBaseHelm):
         return overrides
 
     def get_overrides(self, namespace=None):
+        if self._is_rook_ceph():
+            cinder_override = self._get_conf_rook_cinder_overrides()
+            ceph_override = self._get_conf_rook_ceph_overrides()
+            backend_override = self._get_conf_rook_backends_overrides()
+        else:
+            cinder_override = self._get_conf_cinder_overrides()
+            ceph_override = self._get_conf_ceph_overrides()
+            backend_override = self._get_conf_backends_overrides()
+
         overrides = {
             common.HELM_NS_OPENSTACK: {
                 'pod': {
@@ -56,9 +66,9 @@ class CinderHelm(openstack.OpenstackBaseHelm):
                 }
             },
             'conf': {
-                'cinder': self._get_conf_cinder_overrides(),
-                'ceph': self._get_conf_ceph_overrides(),
-                'backends': self._get_conf_backends_overrides(),
+                'cinder': cinder_override,
+                'ceph': ceph_override,
+                'backends': backend_override,
             },
             'endpoints': self._get_endpoints_overrides(),
             'ceph_client': self._get_ceph_client_overrides()
@@ -269,3 +279,60 @@ class CinderHelm(openstack.OpenstackBaseHelm):
             return self.SERVICE_TYPE + 'v2'
         else:
             return service_type
+
+    def _get_conf_rook_cinder_overrides(self):
+        conf_cinder = {
+            'DEFAULT': {
+                'enabled_backends': 'ceph-store',
+                'default_volume_type': 'ceph-store'
+            },
+        }
+
+        return conf_cinder
+
+    def _get_conf_rook_ceph_overrides(self):
+        replication = 2
+        if utils.is_aio_simplex_system(self.dbapi):
+            replication = 1
+
+        pools = {
+            'cinder-volumes': {
+                'app_name': 'cinder-volumes',
+                'chunk_size': 8,
+                'crush_rule': 'kube-rbd',
+                'replication': replication,
+            },
+            'backup': {
+                'app_name': 'cinder-volumes',
+                'chunk_size': 8,
+                'crush_rule': 'kube-rbd',
+                'replication': replication,
+            },
+        }
+
+        ceph_override = {
+            'admin_keyring': self._get_rook_ceph_admin_keyring(),
+            'monitors': [],
+            'pools': pools,
+        }
+        return ceph_override
+
+    def _get_conf_rook_backends_overrides(self):
+        conf_backends = {}
+
+        # We don't use the chart's default backends.
+        conf_backends['rbd1'] = {
+            'volume_driver': ''
+        }
+
+        conf_backends['ceph-store'] = {
+            'image_volume_cache_enabled': 'True',
+            'volume_backend_name': 'ceph-store',
+            'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
+            'rbd_pool': 'cinder-volumes',
+            'rbd_user': 'cinder',
+            'rbd_ceph_conf':
+                (constants.CEPH_CONF_PATH +
+                 constants.SB_TYPE_CEPH_CONF_FILENAME),
+        }
+        return conf_backends
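Reviewer note on the cinder changes: when rook-ceph is detected, get_overrides() swaps in the three rook helpers added above. On an AIO-simplex system they resolve to 'conf' overrides roughly as follows. This is a sketch, not chart output; the keyring placeholder and the '/etc/ceph/ceph.conf' path assume the stock sysinv values of CEPH_CONF_PATH and SB_TYPE_CEPH_CONF_FILENAME:

    # Sketch of the rook-ceph 'conf' overrides on AIO-simplex (replication == 1)
    conf = {
        'cinder': {'DEFAULT': {'enabled_backends': 'ceph-store',
                               'default_volume_type': 'ceph-store'}},
        'ceph': {
            'admin_keyring': '<key from the rook admin secret>',
            'monitors': [],
            'pools': {
                'cinder-volumes': {'app_name': 'cinder-volumes', 'chunk_size': 8,
                                   'crush_rule': 'kube-rbd', 'replication': 1},
                'backup': {'app_name': 'cinder-volumes', 'chunk_size': 8,
                           'crush_rule': 'kube-rbd', 'replication': 1},
            },
        },
        'backends': {
            'rbd1': {'volume_driver': ''},  # blanks out the chart's default backend
            'ceph-store': {
                'image_volume_cache_enabled': 'True',
                'volume_backend_name': 'ceph-store',
                'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
                'rbd_pool': 'cinder-volumes',
                'rbd_user': 'cinder',
                'rbd_ceph_conf': '/etc/ceph/ceph.conf',  # assumed constant values
            },
        },
    }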
diff --git a/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/glance.py b/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/glance.py
index dc74edea..9bf920b7 100644
--- a/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/glance.py
+++ b/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/glance.py
@@ -9,6 +9,7 @@ from k8sapp_openstack.helm import openstack
 
 from sysinv.common import constants
 from sysinv.common import exception
+from sysinv.common import utils
 from sysinv.common.storage_backend_conf import StorageBackendConfig
 from sysinv.helm import common
 
@@ -28,6 +29,8 @@ class GlanceHelm(openstack.OpenstackBaseHelm):
     AUTH_USERS = ['glance']
 
     def get_overrides(self, namespace=None):
+        self._rook_ceph = self._is_rook_ceph()
+
         overrides = {
             common.HELM_NS_OPENSTACK: {
                 'pod': self._get_pod_overrides(),
@@ -90,8 +93,10 @@ class GlanceHelm(openstack.OpenstackBaseHelm):
         }
 
     def _get_storage_overrides(self):
-        ceph_backend = self._get_primary_ceph_backend()
+        if self._rook_ceph:
+            return "rbd"
 
+        ceph_backend = self._get_primary_ceph_backend()
         if not ceph_backend:
             return 'pvc'
 
@@ -109,22 +114,32 @@
 
     def _get_conf_overrides(self):
         ceph_backend = self._get_primary_ceph_backend()
-        if not ceph_backend:
+        if not ceph_backend and not self._rook_ceph:
            rbd_store_pool = ""
            rbd_store_user = ""
            replication = 1
+        elif self._rook_ceph:
+            rbd_store_pool = constants.CEPH_POOL_IMAGES_NAME
+            rbd_store_user = RBD_STORE_USER
+
+            replication = 2
+            if utils.is_aio_simplex_system(self.dbapi):
+                replication = 1
        else:
            rbd_store_pool = app_constants.CEPH_POOL_IMAGES_NAME
            rbd_store_user = RBD_STORE_USER
            replication, min_replication = \
                StorageBackendConfig.get_ceph_pool_replication(self.dbapi)
 
-        # Only the primary Ceph tier is used for the glance images pool
-        rule_name = "{0}{1}{2}".format(
-            constants.SB_TIER_DEFAULT_NAMES[
-                constants.SB_TIER_TYPE_CEPH],
-            constants.CEPH_CRUSH_TIER_SUFFIX,
-            "-ruleset").replace('-', '_')
+        if not self._rook_ceph:
+            # Only the primary Ceph tier is used for the glance images pool
+            rule_name = "{0}{1}{2}".format(
+                constants.SB_TIER_DEFAULT_NAMES[
+                    constants.SB_TIER_TYPE_CEPH],
+                constants.CEPH_CRUSH_TIER_SUFFIX,
+                "-ruleset").replace('-', '_')
+        else:
+            rule_name = "storage_tier_ruleset"
 
         conf = {
             'glance': {
@@ -145,6 +160,10 @@ class GlanceHelm(openstack.OpenstackBaseHelm):
 
         if ceph_backend:
             conf['ceph'] = self._get_ceph_overrides()
+        elif self._rook_ceph:
+            conf['ceph'] = {
+                'admin_keyring': self._get_rook_ceph_admin_keyring()
+            }
 
         return conf
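Reviewer note on the glance changes: with rook-ceph detected, _get_storage_overrides() short-circuits to the chart's rbd backend before probing sysinv storage backends, and _get_conf_overrides() takes the new elif branch. A sketch of the values that branch selects (constant names are assumed to resolve as in stock sysinv; dbapi stands in for self.dbapi):

    # Values chosen by the rook-ceph branch of _get_conf_overrides()
    storage = "rbd"                                   # from _get_storage_overrides()
    rbd_store_pool = constants.CEPH_POOL_IMAGES_NAME  # shared images pool
    rbd_store_user = RBD_STORE_USER                   # module constant, assumed 'glance'
    replication = 1 if utils.is_aio_simplex_system(dbapi) else 2
    rule_name = "storage_tier_ruleset"                # fixed CRUSH rule name under rook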
diff --git a/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/libvirt.py b/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/libvirt.py
index b8033536..9e7132f3 100644
--- a/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/libvirt.py
+++ b/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/libvirt.py
@@ -19,6 +19,10 @@ class LibvirtHelm(openstack.OpenstackBaseHelm):
     SERVICE_NAME = app_constants.HELM_CHART_LIBVIRT
 
     def get_overrides(self, namespace=None):
+        admin_keyring = 'null'
+        if self._is_rook_ceph():
+            admin_keyring = self._get_rook_ceph_admin_keyring()
+
         overrides = {
             common.HELM_NS_OPENSTACK: {
                 'conf': {
@@ -31,7 +35,10 @@
                        'cgroup_controllers': ["cpu", "cpuacct", "cpuset", "freezer", "net_cls", "perf_event"],
                        'namespaces': [],
                        'clear_emulator_capabilities': 0
-                    }
+                    },
+                    'ceph': {
+                        'admin_keyring': admin_keyring,
+                    },
                },
                'pod': {
                    'mounts': {
@@ -39,7 +46,7 @@
                            'libvirt': self._get_mount_uefi_overrides()
                        }
                    }
-                }
+                },
            }
        }
diff --git a/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/nova.py b/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/nova.py
index 50e3f455..cc0d8a85 100644
--- a/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/nova.py
+++ b/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/nova.py
@@ -82,6 +82,11 @@ class NovaHelm(openstack.OpenstackBaseHelm):
         self.rbd_config = {}
 
     def get_overrides(self, namespace=None):
+        self._rook_ceph = self._is_rook_ceph()
+        admin_keyring = 'null'
+        if self._rook_ceph:
+            admin_keyring = self._get_rook_ceph_admin_keyring()
+
         self.labels_by_hostid = self._get_host_labels()
         self.cpus_by_hostid = self._get_host_cpus()
         self.interfaces_by_hostid = self._get_host_interfaces()
@@ -118,7 +123,8 @@
             },
             'conf': {
                 'ceph': {
-                    'ephemeral_storage': self._get_rbd_ephemeral_storage()
+                    'ephemeral_storage': self._get_rbd_ephemeral_storage(),
+                    'admin_keyring': admin_keyring,
                 },
                 'nova': {
                     'libvirt': {
@@ -734,7 +740,38 @@
     def get_region_name(self):
         return self._get_service_region_name(self.SERVICE_NAME)
 
+    def _get_rook_ceph_rbd_ephemeral_storage(self):
+        ephemeral_storage_conf = {}
+        ephemeral_pools = []
+
+        # Get the values for replication and min replication from the storage
+        # backend attributes.
+        replication = 2
+        if utils.is_aio_simplex_system(self.dbapi):
+            replication = 1
+
+        # Form the dictionary with the info for the ephemeral pool.
+        # If needed, multiple pools can be specified.
+        ephemeral_pool = {
+            'rbd_pool_name': constants.CEPH_POOL_EPHEMERAL_NAME,
+            'rbd_user': RBD_POOL_USER,
+            'rbd_crush_rule': "storage_tier_ruleset",
+            'rbd_replication': replication,
+            'rbd_chunk_size': constants.CEPH_POOL_EPHEMERAL_PG_NUM
+        }
+        ephemeral_pools.append(ephemeral_pool)
+
+        ephemeral_storage_conf = {
+            'type': 'rbd',
+            'rbd_pools': ephemeral_pools
+        }
+
+        return ephemeral_storage_conf
+
     def _get_rbd_ephemeral_storage(self):
+        if self._rook_ceph:
+            return self._get_rook_ceph_rbd_ephemeral_storage()
+
         ephemeral_storage_conf = {}
         ephemeral_pools = []
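Reviewer note on the nova changes: _get_rbd_ephemeral_storage() now delegates to the rook variant, which builds a single ephemeral pool entry. Its return value looks roughly like this (a sketch; the pool name and PG count come from the stock sysinv constants, which are assumed here):

    # _get_rook_ceph_rbd_ephemeral_storage() on a multi-node system
    {
        'type': 'rbd',
        'rbd_pools': [{
            'rbd_pool_name': constants.CEPH_POOL_EPHEMERAL_NAME,
            'rbd_user': RBD_POOL_USER,
            'rbd_crush_rule': 'storage_tier_ruleset',
            'rbd_replication': 2,          # drops to 1 on AIO-simplex
            'rbd_chunk_size': constants.CEPH_POOL_EPHEMERAL_PG_NUM,
        }],
    }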
diff --git a/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/openstack.py b/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/openstack.py
index 40b70a6c..02770224 100644
--- a/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/openstack.py
+++ b/python-k8sapp-openstack/k8sapp_openstack/k8sapp_openstack/helm/openstack.py
@@ -18,6 +18,7 @@ from sqlalchemy.orm.exc import NoResultFound
 
 from sysinv.common import constants
 from sysinv.common import exception
+from sysinv.common import kubernetes
 from sysinv.common.storage_backend_conf import K8RbdProvisioner
 from sysinv.helm import base
 from sysinv.helm import common
@@ -491,6 +492,10 @@
         return uefi_config
 
     def _get_ceph_client_overrides(self):
+        if self._is_rook_ceph():
+            return {
+                'user_secret_name': constants.K8S_RBD_PROV_ADMIN_SECRET_NAME,
+            }
         # A secret is required by the chart for ceph client access. Use the
         # secret for the kube-rbd pool associated with the primary ceph tier
         return {
@@ -578,3 +583,27 @@
         """
         return super(OpenstackBaseHelm, self)._is_enabled(
             app_name, chart_name, namespace)
+
+    def _is_rook_ceph(self):
+        try:
+            # check function getLabels in rook/pkg/operator/ceph/cluster/mon/spec.go
+            # rook will assign label "mon_cluster=kube-system" to monitor pods
+            label = "mon_cluster=" + common.HELM_NS_STORAGE_PROVISIONER
+            kube = kubernetes.KubeOperator()
+            pods = kube.kube_get_pods_by_selector(common.HELM_NS_STORAGE_PROVISIONER, label, "")
+            if len(pods) > 0:
+                return True
+        except Exception:
+            pass
+        return False
+
+    def _get_rook_ceph_admin_keyring(self):
+        try:
+            kube = kubernetes.KubeOperator()
+            keyring = kube.kube_get_secret(constants.K8S_RBD_PROV_ADMIN_SECRET_NAME,
+                                           common.HELM_NS_STORAGE_PROVISIONER)
+            return keyring.data['key'].decode('base64', 'strict')
+        except Exception:
+            pass
+
+        return 'null'
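Reviewer note on the base-class helpers: _is_rook_ceph() detects rook by listing monitor pods via their mon_cluster label, and _get_rook_ceph_admin_keyring() pulls the admin key out of the rbd-provisioner admin secret. In outline (a sketch using the same KubeOperator calls as above; note that str.decode('base64', 'strict') is Python 2 only, so a Python 3 port would need base64.b64decode() instead):

    import base64

    # Detection: rook labels its monitor pods "mon_cluster=<namespace>"
    # (see getLabels in rook/pkg/operator/ceph/cluster/mon/spec.go).
    kube = kubernetes.KubeOperator()
    ns = common.HELM_NS_STORAGE_PROVISIONER
    rook_present = bool(kube.kube_get_pods_by_selector(ns, "mon_cluster=" + ns, ""))

    # Credentials: the base64-encoded admin key lives in the provisioner secret.
    secret = kube.kube_get_secret(constants.K8S_RBD_PROV_ADMIN_SECRET_NAME, ns)
    admin_keyring = base64.b64decode(secret.data['key'])  # py3-safe equivalent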