From 086a81deea855d51c65d70cc2288a266d671b9b4 Mon Sep 17 00:00:00 2001 From: Robert Church Date: Tue, 1 Dec 2020 21:43:12 -0500 Subject: [PATCH 01/15] Enable Zuul tox jobs for platform plugin validation NOTE: As part of enabling this, --use-deprecated legacy-resolver was required. This is a result of a new version of pip installed on Dec 3, 2020. A followup commit is required to remove this and deal with the fallout. Change-Id: I29d9484f1f3ffbb7b949c5d4177f95e7a60a99f4 Story: 2008162 Task: 41347 Related-Bug: #1907125 Signed-off-by: Robert Church --- .zuul.yaml | 82 +++++++++++++++++++ .../k8sapp_platform/tox.ini | 5 +- tox.ini | 12 +++ 3 files changed, 96 insertions(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index ba935dd..3651b37 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -3,13 +3,95 @@ check: jobs: - openstack-tox-linters + - k8sapp-platform-tox-py27 + - k8sapp-platform-tox-py36 + - k8sapp-platform-tox-flake8 + - k8sapp-platform-tox-pylint + - k8sapp-platform-tox-bandit gate: jobs: - openstack-tox-linters + - k8sapp-platform-tox-py27 + - k8sapp-platform-tox-py36 + - k8sapp-platform-tox-flake8 + - k8sapp-platform-tox-pylint + - k8sapp-platform-tox-bandit post: jobs: - stx-platform-armada-app-upload-git-mirror +- job: + name: k8sapp-platform-tox-py27 + parent: tox + description: | + Run py27 test for k8sapp_platform + nodeset: ubuntu-xenial + required-projects: + - starlingx/config + - starlingx/fault + - starlingx/update + - starlingx/utilities + files: + - python-k8sapp-platform/* + vars: + tox_envlist: py27 + tox_extra_args: -c python-k8sapp-platform/k8sapp_platform/tox.ini + +- job: + name: k8sapp-platform-tox-py36 + parent: tox + description: | + Run py36 test for k8sapp_platform + nodeset: ubuntu-bionic + required-projects: + - starlingx/config + - starlingx/fault + - starlingx/update + - starlingx/utilities + files: + - python-k8sapp-platform/* + vars: + tox_envlist: py36 + tox_extra_args: -c python-k8sapp-platform/k8sapp_platform/tox.ini + +- job: + name: k8sapp-platform-tox-flake8 + parent: tox + description: | + Run flake8 test for k8sapp_platform + files: + - python-k8sapp-platform/* + vars: + tox_envlist: flake8 + tox_extra_args: -c python-k8sapp-platform/k8sapp_platform/tox.ini + +- job: + name: k8sapp-platform-tox-pylint + parent: tox + description: | + Run pylint test for k8sapp_platform + required-projects: + - starlingx/config + - starlingx/fault + - starlingx/update + - starlingx/utilities + files: + - python-k8sapp-platform/* + vars: + tox_envlist: pylint + tox_extra_args: -c python-k8sapp-platform/k8sapp_platform/tox.ini + +- job: + name: k8sapp-platform-tox-bandit + parent: tox + description: | + Run bandit test for k8sapp_platform + files: + - python-k8sapp-platform/* + vars: + tox_envlist: bandit + tox_extra_args: -c python-k8sapp-platform/k8sapp_platform/tox.ini + - job: name: stx-platform-armada-app-upload-git-mirror parent: upload-git-mirror diff --git a/python-k8sapp-platform/k8sapp_platform/tox.ini b/python-k8sapp-platform/k8sapp_platform/tox.ini index 1f8e947..6881f9d 100644 --- a/python-k8sapp-platform/k8sapp_platform/tox.ini +++ b/python-k8sapp-platform/k8sapp_platform/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = flake8,py27,py36,pylint +envlist = flake8,py27,py36,pylint,bandit minversion = 1.6 # skipsdist = True #,pip-missing-reqs @@ -21,7 +21,7 @@ sitepackages = True whitelist_externals = bash find -install_command = pip install \ +install_command = pip install --use-deprecated legacy-resolver \ -v -v -v \ -c{toxinidir}/upper-constraints.txt \
-c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/stable/stein/upper-constraints.txt} \ @@ -43,7 +43,6 @@ setenv = VIRTUAL_ENV={envdir} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt - -e{[tox]stxdir}/config/controllerconfig/controllerconfig -e{[tox]stxdir}/config/sysinv/sysinv/sysinv -e{[tox]stxdir}/config/tsconfig/tsconfig -e{[tox]stxdir}/fault/fm-api diff --git a/tox.ini b/tox.ini index cede375..c1ff2ef 100644 --- a/tox.ini +++ b/tox.ini @@ -33,3 +33,15 @@ commands = [testenv:linters] commands = {[testenv:bashate]commands} + +[testenv:flake8] +basepython = python3 +description = Dummy environment to allow flake8 to be run in subdir tox + +[testenv:pylint] +basepython = python3 +description = Dummy environment to allow pylint to be run in subdir tox + +[testenv:bandit] +basepython = python3 +description = Dummy environment to allow bandit to be run in subdir tox From 3466dd67da245ebc678aa75c3c95c6c90257326e Mon Sep 17 00:00:00 2001 From: Daniel Safta Date: Mon, 19 Oct 2020 10:01:42 +0000 Subject: [PATCH 02/15] CephFS provisioner for k8sapp_platform This commit implements CephFS Provisioner as a helm chart in platform-integ-apps. CephFS Provisioner implements the same design as RBD Provisioner, but with CephFS Provisioner we can have mounts shared by different components, such as pods. It is activated automatically, the same as RBD Provisioner. Signed-off-by: Daniel Safta Story: 2008162 Task: 40908 Change-Id: Ic4270e401b2c3e51c3aecfab23af1e874e733831 --- .../armada/manifest_platform.py | 7 +- .../k8sapp_platform/common/constants.py | 23 +++ .../helm/ceph_fs_provisioner.py | 191 ++++++++++++++++++ .../k8sapp_platform/setup.cfg | 1 + .../centos/stx-platform-helm.spec | 1 + .../helm-charts/cephfs-provisioner/Chart.yaml | 11 + .../cephfs-provisioner/requirements.yaml | 9 + .../templates/bin/_storage_init.sh.tpl | 86 ++++++++ .../templates/conf/_ceph-conf.tpl | 19 ++ .../templates/config-provisioner.yaml | 88 ++++++++ .../templates/provisioner.yaml | 67 ++++++ .../templates/rbac-secrets.yaml | 149 ++++++++++++++ .../templates/storageclass.yaml | 30 +++ .../cephfs-provisioner/values.yaml | 142 +++++++++++++ .../stx-platform-helm/manifests/manifest.yaml | 30 +++ 15 files changed, 852 insertions(+), 2 deletions(-) create mode 100644 python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py create mode 100644 stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/Chart.yaml create mode 100644 stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/requirements.yaml create mode 100644 stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/bin/_storage_init.sh.tpl create mode 100644 stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/conf/_ceph-conf.tpl create mode 100644 stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/config-provisioner.yaml create mode 100644 stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/provisioner.yaml create mode 100644 stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/rbac-secrets.yaml create mode 100644 stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/storageclass.yaml create mode 100644 stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml diff --git a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/armada/manifest_platform.py
b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/armada/manifest_platform.py index efdc87e..624f8f9 100644 --- a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/armada/manifest_platform.py +++ b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/armada/manifest_platform.py @@ -10,6 +10,7 @@ from k8sapp_platform.helm.ceph_pools_audit import CephPoolsAuditHelm from k8sapp_platform.helm.rbd_provisioner import RbdProvisionerHelm +from k8sapp_platform.helm.ceph_fs_provisioner import CephFSProvisionerHelm from sysinv.common import constants from sysinv.helm import manifest_base as base @@ -23,12 +24,14 @@ class PlatformArmadaManifestOperator(base.ArmadaManifestOperator): CHART_GROUP_CEPH = 'starlingx-ceph-charts' CHART_GROUPS_LUT = { CephPoolsAuditHelm.CHART: CHART_GROUP_CEPH, - RbdProvisionerHelm.CHART: CHART_GROUP_CEPH + RbdProvisionerHelm.CHART: CHART_GROUP_CEPH, + CephFSProvisionerHelm.CHART: CHART_GROUP_CEPH } CHARTS_LUT = { CephPoolsAuditHelm.CHART: 'kube-system-ceph-pools-audit', - RbdProvisionerHelm.CHART: 'kube-system-rbd-provisioner' + RbdProvisionerHelm.CHART: 'kube-system-rbd-provisioner', + CephFSProvisionerHelm.CHART: 'kube-system-cephfs-provisioner' } def platform_mode_manifest_updates(self, dbapi, mode): diff --git a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/common/constants.py b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/common/constants.py index 04f5342..edd1ce9 100644 --- a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/common/constants.py +++ b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/common/constants.py @@ -6,6 +6,29 @@ # Helm: Supported charts: # These values match the names in the chart package's Chart.yaml +from sysinv.helm import common + HELM_CHART_RBD_PROVISIONER = 'rbd-provisioner' HELM_CHART_CEPH_POOLS_AUDIT = 'ceph-pools-audit' HELM_CHART_HELM_TOOLKIT = 'helm-toolkit' +HELM_CHART_CEPH_FS_PROVISIONER = 'cephfs-provisioner' +HELM_NS_CEPH_FS_PROVISIONER = common.HELM_NS_KUBE_SYSTEM + +HELM_CEPH_FS_PROVISIONER_CLAIM_ROOT = '/pvc-volumes' +HELM_CHART_CEPH_FS_PROVISIONER_NAME = 'ceph.com/cephfs' +K8S_CEPHFS_PROVISIONER_STOR_CLASS_NAME = 'cephfs' +K8S_CEPHFS_PROVISIONER_ADMIN_SECRET_NAME = 'ceph-secret-admin' +K8S_CEPHFS_PROVISIONER_ADMIN_SECRET_NAMESPACE = 'kube-system' +K8S_CEPHFS_PROVISIONER_USER_NAME = 'admin' + +K8S_CEPHFS_PROVISIONER_DEFAULT_NAMESPACE = 'kube-system' +K8S_CEPHFS_PROVISIONER_RBAC_CONFIG_NAME = 'cephfs-provisioner-keyring' + +# CephFS Provisioner backend +K8S_CEPHFS_PROV_STORAGECLASS_NAME = 'cephfs_storageclass_name' # Customer +K8S_CEPHFS_PROV_STOR_CLASS_NAME = 'cephfs' + +# Ceph FS constants for pools and fs +CEPHFS_DATA_POOL_KUBE_NAME = 'kube-cephfs-data' +CEPHFS_METADATA_POOL_KUBE_NAME = 'kube-cephfs-metadata' +CEPHFS_FS_KUBE_NAME = 'kube-cephfs' diff --git a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py new file mode 100644 index 0000000..c93ea75 --- /dev/null +++ b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py @@ -0,0 +1,191 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +from k8sapp_platform.common import constants as app_constants + +from sysinv.common import constants +from sysinv.common import exception + +from sysinv.helm import base + + +class K8CephFSProvisioner(object): + """ Utility methods for getting the k8 overrides for internal ceph + from a corresponding storage backend. + """ + + @staticmethod + def get_storage_class_name(bk): + """ Get the name of the storage class for a cephfs provisioner + :param bk: Ceph storage backend object + :returns: name of the storage class + """ + if bk['capabilities'].get(app_constants.K8S_CEPHFS_PROV_STORAGECLASS_NAME): + name = bk['capabilities'][app_constants.K8S_CEPHFS_PROV_STORAGECLASS_NAME] + elif bk.name == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]: + name = app_constants.K8S_CEPHFS_PROV_STOR_CLASS_NAME + else: + name = bk.name + '-' + app_constants.K8S_CEPHFS_PROV_STOR_CLASS_NAME + + return str(name) + + @staticmethod + def get_data_pool(bk): + """ Get the name of the ceph data pool for a cephfs provisioner + This naming convention is valid only for internal backends + :param bk: Ceph storage backend object + :returns: name of the data pool + """ + if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]: + return app_constants.CEPHFS_DATA_POOL_KUBE_NAME + else: + return str(app_constants.CEPHFS_DATA_POOL_KUBE_NAME + '-' + bk['name']) + + @staticmethod + def get_metadata_pool(bk): + """ Get the name of the ceph metadata pool for a cephfs provisioner + This naming convention is valid only for internal backends + :param bk: Ceph storage backend object + :returns: name of the metadata pool + """ + if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]: + return app_constants.CEPHFS_METADATA_POOL_KUBE_NAME + else: + return str(app_constants.CEPHFS_METADATA_POOL_KUBE_NAME + '-' + bk['name']) + + @staticmethod + def get_fs(bk): + """ Get the name of the ceph filesystem for a cephfs provisioner + This naming convention is valid only for internal backends + :param bk: Ceph storage backend object + :returns: name of the filesystem + """ + if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]: + return app_constants.CEPHFS_FS_KUBE_NAME + else: + return str(app_constants.CEPHFS_FS_KUBE_NAME + '-' + bk['name']) + + @staticmethod + def get_user_id(bk): + """ Get the non admin user name for a cephfs provisioner secret + :param bk: Ceph storage backend object + :returns: name of the ceph user + """ + if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]: + name = K8CephFSProvisioner.get_data_pool(bk) + else: + name = K8CephFSProvisioner.get_data_pool(bk) + + prefix = 'ceph-pool' + return str(prefix + '-' + name) + + @staticmethod + def get_user_secret_name(bk): + """ Get the name for the non admin secret key of a pool + :param bk: Ceph storage backend object + :returns: name of k8 secret + """ + if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]: + name = K8CephFSProvisioner.get_data_pool(bk) + else: + name = K8CephFSProvisioner.get_data_pool(bk) + + base_name = 'ceph-pool' + return str(base_name + '-' + name) + + +class CephFSProvisionerHelm(base.BaseHelm): + """Class to encapsulate helm operations for the cephfs-provisioner chart""" + + CHART = app_constants.HELM_CHART_CEPH_FS_PROVISIONER + SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \ + [app_constants.HELM_NS_CEPH_FS_PROVISIONER] + SUPPORTED_APP_NAMESPACES = { + constants.HELM_APP_PLATFORM: + base.BaseHelm.SUPPORTED_NAMESPACES +
[app_constants.HELM_NS_CEPH_FS_PROVISIONER], + } + + SERVICE_NAME = app_constants.HELM_CHART_CEPH_FS_PROVISIONER + SERVICE_PORT_MON = 6789 + + def execute_manifest_updates(self, operator): + # On application load this chart is enabled. Only disable if specified + # by the user + if not self._is_enabled(operator.APP, self.CHART, + app_constants.HELM_NS_CEPH_FS_PROVISIONER): + operator.chart_group_chart_delete( + operator.CHART_GROUPS_LUT[self.CHART], + operator.CHARTS_LUT[self.CHART]) + + def get_overrides(self, namespace=None): + + backends = self.dbapi.storage_backend_get_list() + ceph_bks = [bk for bk in backends if bk.backend == constants.SB_TYPE_CEPH] + + if not ceph_bks: + return {} # ceph is not configured + + def _skip_ceph_mon_2(name): + return name != constants.CEPH_MON_2 + + classdefaults = { + "monitors": self._get_formatted_ceph_monitor_ips( + name_filter=_skip_ceph_mon_2), + "adminId": app_constants.K8S_CEPHFS_PROVISIONER_USER_NAME, + "adminSecretName": app_constants.K8S_CEPHFS_PROVISIONER_ADMIN_SECRET_NAME + } + + # Get tier info. + tiers = self.dbapi.storage_tier_get_list() + + classes = [] + for bk in ceph_bks: + # Get the ruleset for the new kube-cephfs pools. + tier = next((t for t in tiers if t.forbackendid == bk.id), None) + if not tier: + raise Exception("No tier present for backend %s" % bk.name) + + rule_name = "{0}{1}{2}".format( + tier.name, + constants.CEPH_CRUSH_TIER_SUFFIX, + "-ruleset").replace('-', '_') + + cls = { + "name": K8CephFSProvisioner.get_storage_class_name(bk), + "data_pool_name": K8CephFSProvisioner.get_data_pool(bk), + "metadata_pool_name": K8CephFSProvisioner.get_metadata_pool(bk), + "fs_name": K8CephFSProvisioner.get_fs(bk), + "replication": int(bk.capabilities.get("replication")), + "crush_rule_name": rule_name, + "chunk_size": 64, + "userId": K8CephFSProvisioner.get_user_id(bk), + "userSecretName": K8CephFSProvisioner.get_user_secret_name(bk), + "claim_root": app_constants.HELM_CEPH_FS_PROVISIONER_CLAIM_ROOT, + "additionalNamespaces": ['default', 'kube-public'] + } + + classes.append(cls) + + global_settings = { + "replicas": self._num_provisioned_controllers(), + "defaultStorageClass": app_constants.K8S_CEPHFS_PROVISIONER_STOR_CLASS_NAME + } + + overrides = { + app_constants.HELM_NS_CEPH_FS_PROVISIONER: { + "classdefaults": classdefaults, + "classes": classes, + "global": global_settings + } + } + + if namespace in self.SUPPORTED_NAMESPACES: + return overrides[namespace] + elif namespace: + raise exception.InvalidHelmNamespace(chart=self.CHART, + namespace=namespace) + else: + return overrides diff --git a/python-k8sapp-platform/k8sapp_platform/setup.cfg b/python-k8sapp-platform/k8sapp_platform/setup.cfg index 29ed9de..95ced37 100644 --- a/python-k8sapp-platform/k8sapp_platform/setup.cfg +++ b/python-k8sapp-platform/k8sapp_platform/setup.cfg @@ -36,6 +36,7 @@ systemconfig.helm_plugins.platform_integ_apps = 001_helm-toolkit = k8sapp_platform.helm.helm_toolkit:HelmToolkitHelm 002_rbd-provisioner = k8sapp_platform.helm.rbd_provisioner:RbdProvisionerHelm 003_ceph-pools-audit = k8sapp_platform.helm.ceph_pools_audit:CephPoolsAuditHelm + 004_cephfs-provisioner = k8sapp_platform.helm.ceph_fs_provisioner:CephFSProvisionerHelm systemconfig.armada.manifest_ops = platform-integ-apps = k8sapp_platform.armada.manifest_platform:PlatformArmadaManifestOperator diff --git a/stx-platform-helm/centos/stx-platform-helm.spec b/stx-platform-helm/centos/stx-platform-helm.spec index dd816ca..38f0f48 100644 --- a/stx-platform-helm/centos/stx-platform-helm.spec +++ 
b/stx-platform-helm/centos/stx-platform-helm.spec @@ -47,6 +47,7 @@ helm repo add local http://localhost:8879/charts cd helm-charts make rbd-provisioner make ceph-pools-audit +make cephfs-provisioner # TODO (rchurch): remove make node-feature-discovery cd - diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/Chart.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/Chart.yaml new file mode 100644 index 0000000..320b492 --- /dev/null +++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/Chart.yaml @@ -0,0 +1,11 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +apiVersion: v1 +appVersion: "1.0" +description: CephFS provisioner for Kubernetes +name: cephfs-provisioner +version: 0.1.0 diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/requirements.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/requirements.yaml new file mode 100644 index 0000000..0674954 --- /dev/null +++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/requirements.yaml @@ -0,0 +1,9 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +dependencies: + - name: helm-toolkit + repository: http://localhost:8879/charts + version: 0.1.0 diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/bin/_storage_init.sh.tpl b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/bin/_storage_init.sh.tpl new file mode 100644 index 0000000..39da34a --- /dev/null +++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/bin/_storage_init.sh.tpl @@ -0,0 +1,86 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +#! /bin/bash +set -x + +{{ $classes := .Values.classes}} + +touch /etc/ceph/ceph.client.admin.keyring + +# Check if ceph is accessible +echo "====================================" +ceph -s +if [ $? -ne 0 ]; then + echo "Error: Ceph cluster is not accessible, check Pod logs for details." + exit 1 +fi + +set -ex +KEYRING=$(ceph auth get-or-create client.${USER_ID} mon "allow r" osd "allow rwx pool=${DATA_POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p') +# Set up pool key in Ceph format +CEPH_USER_KEYRING=/etc/ceph/ceph.client.${USER_ID}.keyring +echo $KEYRING >$CEPH_USER_KEYRING +set +ex + +if [ -n "${CEPH_USER_SECRET}" ]; then + kubectl get secret -n ${NAMESPACE} ${CEPH_USER_SECRET} 2>/dev/null + if [ $? -ne 0 ]; then + echo "Create ${CEPH_USER_SECRET} secret" + kubectl create secret generic -n ${NAMESPACE} ${CEPH_USER_SECRET} --type="kubernetes.io/cephfs" --from-literal=key=$KEYRING + if [ $? -ne 0 ]; then + echo "Error creating secret ${CEPH_USER_SECRET} in ${NAMESPACE}, exit" + exit 1 + fi + else + echo "Secret ${CEPH_USER_SECRET} already exists" + fi + + # Support creating namespaces and Ceph user secrets for additional + # namespaces other than the one in which the provisioner is installed. This + # allows the provisioner to set up and provide PVs for multiple + # applications across many namespaces. + if [ -n "${ADDITIONAL_NAMESPACES}" ]; then + for ns in $( + IFS=, + echo ${ADDITIONAL_NAMESPACES} + ); do + kubectl get namespace $ns 2>/dev/null + if [ $? -ne 0 ]; then + kubectl create namespace $ns + if [ $?
-ne 0 ]; then + echo "Error creating namespace $ns, exit" + continue + fi + fi + + kubectl get secret -n $ns ${CEPH_USER_SECRET} 2>/dev/null + if [ $? -ne 0 ]; then + echo "Creating secret ${CEPH_USER_SECRET} for namespace $ns" + kubectl create secret generic -n $ns ${CEPH_USER_SECRET} --type="kubernetes.io/cephfs" --from-literal=key=$KEYRING + if [ $? -ne 0 ]; then + echo "Error creating secret ${CEPH_USER_SECRET} in $ns, exit" + fi + else + echo "Secret ${CEPH_USER_SECRET} for namespace $ns already exists" + fi + done + fi +fi + +ceph osd pool stats ${DATA_POOL_NAME} || ceph osd pool create ${DATA_POOL_NAME} ${CHUNK_SIZE} +ceph osd pool application enable ${DATA_POOL_NAME} cephfs +ceph osd pool set ${DATA_POOL_NAME} size ${POOL_REPLICATION} +ceph osd pool set ${DATA_POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME} + +ceph osd pool stats ${METADATA_POOL_NAME} || ceph osd pool create ${METADATA_POOL_NAME} ${CHUNK_SIZE} +ceph osd pool application enable ${METADATA_POOL_NAME} cephfs +ceph osd pool set ${METADATA_POOL_NAME} size ${POOL_REPLICATION} +ceph osd pool set ${METADATA_POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME} + +ceph fs ls | grep ${FS_NAME} || ceph fs new ${FS_NAME} ${METADATA_POOL_NAME} ${DATA_POOL_NAME} + +ceph -s diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/conf/_ceph-conf.tpl b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/conf/_ceph-conf.tpl new file mode 100644 index 0000000..a6024ef --- /dev/null +++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/conf/_ceph-conf.tpl @@ -0,0 +1,19 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +[global] + # For version 0.55 and beyond, you must explicitly enable + # or disable authentication with "auth" entries in [global]. + auth_cluster_required = none + auth_service_required = none + auth_client_required = none + +{{ $defaults := .Values.classdefaults}} + +{{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}} +[mon.{{- $index }}] +mon_addr = {{ $element }} +{{- end }} diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/config-provisioner.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/config-provisioner.yaml new file mode 100644 index 0000000..8646da3 --- /dev/null +++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/config-provisioner.yaml @@ -0,0 +1,88 @@ +{{/* +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +*/}} + +{{- $defaults := .Values.classdefaults }} +{{- $cephfs_provisioner_storage_init := .Values.images.tags.cephfs_provisioner_storage_init }} + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ceph-config-file + namespace: {{ $defaults.cephFSNamespace }} +data: + ceph.conf: | +{{ tuple "conf/_ceph-conf.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cephfs-storage-init + namespace: {{ $defaults.cephFSNamespace }} +data: + storage-init.sh: | +{{ tuple "bin/_storage_init.sh.tpl" . 
| include "helm-toolkit.utils.template" | indent 4 }} + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: cephfs-storage-init + namespace: {{ $defaults.cephFSNamespace }} +spec: + template: + spec: + serviceAccountName: {{ $defaults.rbacConfigName }} + volumes: + - name: cephfs-storage-init + configMap: + name: cephfs-storage-init + defaultMode: 0555 + - name: ceph-config + configMap: + name: ceph-config-file + defaultMode: 0555 + containers: + {{- range $classConfig := .Values.classes }} + - name: storage-init-{{- $classConfig.name }} + image: {{ $cephfs_provisioner_storage_init | quote }} + command: ["/bin/bash", "/tmp/storage-init.sh"] + env: + - name: NAMESPACE + value: {{ $defaults.cephFSNamespace }} + - name: ADDITIONAL_NAMESPACES + value: {{ include "helm-toolkit.utils.joinListWithComma" $classConfig.additionalNamespaces | quote }} + - name: CEPH_USER_SECRET + value: {{ $defaults.adminSecretName }} + - name: USER_ID + value: {{ $classConfig.userId }} + - name: DATA_POOL_NAME + value: {{ $classConfig.data_pool_name }} + - name: METADATA_POOL_NAME + value: {{ $classConfig.metadata_pool_name }} + - name: FS_NAME + value: {{ $classConfig.fs_name }} + - name: CHUNK_SIZE + value: {{ $classConfig.chunk_size | quote }} + - name: POOL_REPLICATION + value: {{ $classConfig.replication | quote }} + - name: POOL_CRUSH_RULE_NAME + value: {{ $classConfig.crush_rule_name | quote }} + volumeMounts: + - name: cephfs-storage-init + mountPath: /tmp/storage-init.sh + subPath: storage-init.sh + readOnly: true + - name: ceph-config + mountPath: /etc/ceph/ceph.conf + subPath: ceph.conf + readOnly: true + {{- end }} + restartPolicy: Never + backoffLimit: 4 diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/provisioner.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/provisioner.yaml new file mode 100644 index 0000000..a7ccbcd --- /dev/null +++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/provisioner.yaml @@ -0,0 +1,67 @@ +{{/* +# +# Copyright (c) 2020 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +*/}} + +{{- $defaults := .Values.classdefaults }} +{{- $cephfs_provisioner_image := .Values.images.tags.cephfs_provisioner }} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ $defaults.provisionerConfigName }} + namespace: {{ $defaults.cephFSNamespace }} +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: {{ $defaults.provisionerConfigName }} + template: + metadata: + labels: + app: {{ $defaults.provisionerConfigName }} + spec: + {{- if (.Values.global.rbac) or (.Values.global.reuseRbac)}} + serviceAccount: {{ .Values.rbac.serviceAccount }} + {{- end }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - {{ .Values.global.name }} + topologyKey: kubernetes.io/hostname + containers: + - name: {{ $defaults.provisionerConfigName }} + image: {{ $cephfs_provisioner_image | quote }} + env: + - name: PROVISIONER_NAME + value: {{ $defaults.provisionerName }} + - name: PROVISIONER_SECRET_NAMESPACE + value: {{ $defaults.cephFSNamespace }} + command: + - "/usr/local/bin/{{ $defaults.provisionerConfigName }}" + args: + - "-id={{ $defaults.provisionerConfigName }}-1" + serviceAccount: {{ $defaults.provisionerConfigName }} +{{- if .Values.global.nodeSelector }} + nodeSelector: +{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }} +{{- end }} +{{- if .Values.global.tolerations }} + tolerations: +{{ .Values.global.tolerations | toYaml | trim | indent 8 }} +{{- end}} +{{- if .Values.global.resources }} + resources: +{{ .Values.global.resources | toYaml | trim | indent 8 }} +{{- end }} diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/rbac-secrets.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/rbac-secrets.yaml new file mode 100644 index 0000000..358a28d --- /dev/null +++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/rbac-secrets.yaml @@ -0,0 +1,149 @@ +{{/* +# +# Copyright (c) 2020 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +*/}} + +{{- $defaults := .Values.classdefaults }} + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $defaults.rbacConfigName }} + namespace: {{ $defaults.cephFSNamespace }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "create", "list", "update"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $defaults.rbacConfigName }} +subjects: + - kind: ServiceAccount + name: {{ $defaults.rbacConfigName }} + namespace: {{ $defaults.cephFSNamespace }} +roleRef: + kind: ClusterRole + name: {{ $defaults.rbacConfigName }} + apiGroup: rbac.authorization.k8s.io +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ $defaults.rbacConfigName }} + namespace: {{ $defaults.cephFSNamespace }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["create", "get", "delete"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "create", "list", "update"] +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ $defaults.rbacConfigName }} + namespace: {{ $defaults.cephFSNamespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ $defaults.rbacConfigName }} +subjects: +- kind: ServiceAccount + name: {{ $defaults.rbacConfigName }} +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ $defaults.rbacConfigName }} + namespace: {{ $defaults.cephFSNamespace }} +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $defaults.provisionerConfigName }} + namespace: {{ $defaults.cephFSNamespace }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["services"] + resourceNames: ["kube-dns","coredns"] + verbs: ["list", "get"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $defaults.provisionerConfigName }} +subjects: + - kind: ServiceAccount + name: {{ $defaults.provisionerConfigName }} + namespace: {{ $defaults.cephFSNamespace }} +roleRef: + kind: ClusterRole + name: {{ $defaults.provisionerConfigName }} + apiGroup: rbac.authorization.k8s.io +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ $defaults.provisionerConfigName }} + namespace: {{ $defaults.cephFSNamespace }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["create", "get", "delete"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ $defaults.provisionerConfigName }} + namespace: {{ $defaults.cephFSNamespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ $defaults.provisionerConfigName }} +subjects: +- kind: ServiceAccount + name: {{ $defaults.provisionerConfigName }} +--- + +apiVersion: v1 +kind: 
ServiceAccount +metadata: + name: {{ $defaults.provisionerConfigName }} + namespace: {{ $defaults.cephFSNamespace }} +imagePullSecrets: + - name: default-registry-key \ No newline at end of file diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/storageclass.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/storageclass.yaml new file mode 100644 index 0000000..0e474a5 --- /dev/null +++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/storageclass.yaml @@ -0,0 +1,30 @@ +{{/* +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +*/}} + +{{ $defaults := .Values.classdefaults }} +{{ $provisioner := .Values.global.provisioner_name }} +{{ $defaultSC := .Values.global.defaultStorageClass }} +{{- range $classConfig := .Values.classes }} + +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + {{- if eq $defaultSC $classConfig.name}} + annotations: + "storageclass.kubernetes.io/is-default-class": "true" + {{- end }} + name: {{ $classConfig.name }} +provisioner: {{ $provisioner }} +parameters: + monitors: "{{ $monitors := or $classConfig.monitors $defaults.monitors }}{{ join "," $monitors}}" + adminId: {{ or $classConfig.adminId $defaults.adminId }} + adminSecretName: {{ or $classConfig.adminSecretName $defaults.adminSecretName }} + adminSecretNamespace: {{ or $classConfig.adminSecretNamespace $defaults.adminSecretNamespace }} + claimRoot: {{ $classConfig.claim_root }} +--- +{{- end }} \ No newline at end of file diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml new file mode 100644 index 0000000..7e1b87a --- /dev/null +++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml @@ -0,0 +1,142 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# Global options. +# Defaults should be fine in most cases. +global: + # + # Defines the application name of the provisioner. + # + name: "cephfs-provisioner" + # + # Defines the name of the provisioner associated with a set of storage classes + # + provisioner_name: "ceph.com/cephfs" + # + # If configured, tolerations will add a toleration field to the Pod. + # + # Node tolerations for cephfs-provisioner scheduling to nodes with taints. + # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + # Example: + # [ + # { + # "key": "node-role.kubernetes.io/master", + # "operator": "Exists" + # } + # ] + # + tolerations: [] + # If configured, resources will set the requests/limits field to the Pod. + # Ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + # Example: + # { + # "limits": { + # "memory": "200Mi" + # }, + # "requests": { + # "cpu": "100m", + # "memory": "200Mi" + # } + # } + resources: {} + # + # Number of replicas to start when configured as deployment + # + replicas: 1 + # + # Node Selector + # + nodeSelector: { node-role.kubernetes.io/master: "" } +# +# RBAC options. +# Defaults should be fine in most cases. 
+rbac: + # + # Cluster Role name + # + clusterRole: cephfs-provisioner + # + # Cluster Role Binding name + # + clusterRoleBinding: cephfs-provisioner + # + # Role name + # + role: cephfs-provisioner + # + # Role Binding name + # + roleBinding: cephfs-provisioner + # + # Defines a name of the service account which Provisioner will use to communicate with API server. + # + serviceAccount: cephfs-provisioner + +# +# Configure storage classes. +# Defaults for storage classes. Update this if you have a single Ceph storage cluster. +# No need to add them to each class. +# +classdefaults: + # Ceph admin account + adminId: admin + # K8 secret name for the admin context + adminSecretName: ceph-secret-admin + adminSecretNamespace: kube-system + cephFSNamespace: kube-system + # Define ip addresses of Ceph Monitors + monitors: + - 192.168.204.2:6789 + provisionerConfigName: cephfs-provisioner + provisionerName: ceph.com/cephfs + rbacConfigName: cephfs-provisioner-keyring + +# Configure storage classes. +# This section should be tailored to your setup. It allows you to define multiple storage +# classes for the same cluster (e.g. if you have tiers of drives with different speeds). +# If you have multiple Ceph clusters take attributes from classdefaults and add them here. +classes: +- name: fast-cephfs # Name of storage class. + # Ceph pools name + data_pool_name: kube-cephfs-data + metadata_pool_name: kube-cephfs-metadata + # CephFS name + fs_name: kube-cephfs + # Ceph user name to access this pool + userId: ceph-pool-kube-cephfs-data + # K8 secret name with key for accessing the Ceph pool + userSecretName: ceph-pool-kube-cephfs-data + # Pool replication + replication: 1 + # Pool crush rule name + crush_rule_name: storage_tier_ruleset + # Pool chunk size / PG_NUM + chunk_size: 64 + # Additional namespace to allow storage class access (other than where + # installed) + claim_root: "/pvc-volumes" + additionalNamespaces: + - default + - kube-public + +# Defines: +# - Provisioner's image name including container registry. 
+# - CEPH helper image +# +images: + tags: + cephfs_provisioner: quay.io/external_storage/cephfs-provisioner:v2.1.0-k8s1.11 + cephfs_provisioner_storage_init: docker.io/starlingx/ceph-config-helper:v1.15.0 + pull_policy: "IfNotPresent" + local_registry: + active: false + exclude: + - dep_check + - image_repo_sync +manifests: + configmap_bin: true + diff --git a/stx-platform-helm/stx-platform-helm/manifests/manifest.yaml b/stx-platform-helm/stx-platform-helm/manifests/manifest.yaml index 0efc230..576b41a 100644 --- a/stx-platform-helm/stx-platform-helm/manifests/manifest.yaml +++ b/stx-platform-helm/stx-platform-helm/manifests/manifest.yaml @@ -45,6 +45,35 @@ data: - helm-toolkit --- schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: kube-system-cephfs-provisioner +data: + chart_name: cephfs-provisioner + release: cephfs-provisioner + namespace: kube-system + wait: + timeout: 1800 + labels: + app: cephfs-provisioner + install: + no_hooks: false + upgrade: + no_hooks: false + pre: + delete: + - type: job + labels: + app: cephfs-provisioner + source: + type: tar + location: http://172.17.0.1:8080/helm_charts/stx-platform/cephfs-provisioner-0.1.0.tgz + subpath: cephfs-provisioner + reference: master + dependencies: + - helm-toolkit +--- +schema: armada/Chart/v1 metadata: schema: metadata/Document/v1 name: kube-system-ceph-pools-audit @@ -83,6 +112,7 @@ data: chart_group: - kube-system-rbd-provisioner - kube-system-ceph-pools-audit + - kube-system-cephfs-provisioner --- schema: armada/Manifest/v1 metadata: From e2f6893ed0855b4f6b061ab0c7501d285d52c2f6 Mon Sep 17 00:00:00 2001 From: Daniel Safta Date: Fri, 18 Dec 2020 14:34:51 +0000 Subject: [PATCH 03/15] Fixed missing key in cephfs-provisioner chart A previous update broke the build of stx-platform-helm due to the missing defaultStorageClass key. This update defines this value in the cephfs-provisioner chart to fix the issue. This commit addresses a build issue with https://review.opendev.org/c/starlingx/platform-armada-app/+/758786 Signed-off-by: Daniel Safta Story: 2008162 Task: 40908 Change-Id: I0e70a5b504f8240e8d661415b27f6a86875c17dd --- .../helm-charts/cephfs-provisioner/values.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml index 7e1b87a..bc8f0f5 100644 --- a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml +++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml @@ -17,6 +17,10 @@ global: # provisioner_name: "ceph.com/cephfs" # + # Enable this storage class as the system default storage class + # + defaultStorageClass: fast-cephfs-disabled + # # If configured, tolerations will add a toleration field to the Pod. # # Node tolerations for cephfs-provisioner scheduling to nodes with taints. # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ From a7681c3f3246830cc4c6fb58d3a9da0b9777e4cf Mon Sep 17 00:00:00 2001 From: Don Penney Date: Wed, 6 Jan 2021 14:36:19 -0500 Subject: [PATCH 04/15] Remove empty package from python-k8sapp-platform Packages defined in a spec with no files do not result in an RPM produced by the build. On a rebuild, the build tools scan the spec, see the package defined but do not find a corresponding RPM, and so flag the package for a rebuild. This commit removes the empty package definition from the spec.
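A quick way to see the mismatch the build tools trip over is to compare the subpackages a spec declares against the RPMs a build actually produced; a hedged sketch, where the spec path is real but the workspace layout ($MY_WORKSPACE) is an assumption about a typical StarlingX build environment:

    # List the binary packages the spec declares (rpmspec ships with rpm-build)
    rpmspec -q --qf '%{name}\n' python-k8sapp-platform/centos/python-k8sapp-platform.spec
    # Compare with what the build emitted; a declared package with no matching
    # RPM is what keeps getting flagged for rebuild (assumed workspace path)
    ls $MY_WORKSPACE/std/rpmbuild/RPMS/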
Partial-Bug: 1910439 Signed-off-by: Don Penney Change-Id: I48275e45b5c5628564f9d40807ba7828510ff80a --- .../centos/python-k8sapp-platform.spec | 9 --------- 1 file changed, 9 deletions(-) diff --git a/python-k8sapp-platform/centos/python-k8sapp-platform.spec b/python-k8sapp-platform/centos/python-k8sapp-platform.spec index d770459..a43e790 100644 --- a/python-k8sapp-platform/centos/python-k8sapp-platform.spec +++ b/python-k8sapp-platform/centos/python-k8sapp-platform.spec @@ -19,15 +19,6 @@ BuildRequires: python2-wheel %description StarlingX sysinv extensions: Platform Integration K8S app -%package -n python2-%{pypi_name} -Summary: StarlingX sysinv extensions: Platform Integration K8S app - -Requires: python-pbr >= 2.0.0 -Requires: sysinv >= 1.0 - -%description -n python2-%{pypi_name} -StarlingX sysinv extensions: Platform Integration K8S app - %prep %setup # Remove bundled egg-info From 3f7acd5c3214fb93afc75ee23c0eabe6089d0c32 Mon Sep 17 00:00:00 2001 From: Daniel Safta Date: Tue, 5 Jan 2021 14:14:33 +0000 Subject: [PATCH 05/15] Fixed the default storageclass and replicas number A previous update that added a new storageclass using cephfs-provisioner affects the installation of some applications that try to use the default storageclass. When an application tries to use the default storageclass, it will find two default storageclasses. We avoid this problem by using a single default storageclass that can be changed by the user through helm overrides. This also fixes the replica count for the deployment: it was hardcoded to 1, but now it can be changed by the user. This commit addresses an application issue with https://review.opendev.org/c/starlingx/platform-armada-app/+/758786 Signed-off-by: Daniel Safta Story: 2008162 Task: 40908 Change-Id: Ib1e4db29397e674a17d45120f6c0b3babaf1880b --- .../k8sapp_platform/k8sapp_platform/common/constants.py | 1 - .../k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py | 1 - .../helm-charts/cephfs-provisioner/templates/provisioner.yaml | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/common/constants.py b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/common/constants.py index edd1ce9..97addfb 100644 --- a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/common/constants.py +++ b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/common/constants.py @@ -16,7 +16,6 @@ HELM_NS_CEPH_FS_PROVISIONER = common.HELM_NS_KUBE_SYSTEM HELM_CEPH_FS_PROVISIONER_CLAIM_ROOT = '/pvc-volumes' HELM_CHART_CEPH_FS_PROVISIONER_NAME = 'ceph.com/cephfs' -K8S_CEPHFS_PROVISIONER_STOR_CLASS_NAME = 'cephfs' K8S_CEPHFS_PROVISIONER_ADMIN_SECRET_NAME = 'ceph-secret-admin' K8S_CEPHFS_PROVISIONER_ADMIN_SECRET_NAMESPACE = 'kube-system' K8S_CEPHFS_PROVISIONER_USER_NAME = 'admin' diff --git a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py index c93ea75..1a7ef22 100644 --- a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py +++ b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py @@ -171,7 +171,6 @@ class CephFSProvisionerHelm(base.BaseHelm): global_settings = { "replicas": self._num_provisioned_controllers(), - "defaultStorageClass": app_constants.K8S_CEPHFS_PROVISIONER_STOR_CLASS_NAME } overrides = { diff --git 
a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/provisioner.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/provisioner.yaml index a7ccbcd..0ac21ea 100644 --- a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/provisioner.yaml +++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/provisioner.yaml @@ -16,7 +16,7 @@ metadata: name: {{ $defaults.provisionerConfigName }} namespace: {{ $defaults.cephFSNamespace }} spec: - replicas: 1 + replicas: {{ .Values.global.replicas }} strategy: type: Recreate selector: From 2aedbcda09915c0b21b4fc963901243e184e8035 Mon Sep 17 00:00:00 2001 From: Dan Voiculeasa Date: Mon, 9 Nov 2020 13:21:15 +0200 Subject: [PATCH 06/15] Introduce lifecycle operator to platform app A big chunk of logic is moved from the sysinv conductor to the application itself. The following hooks were necessary: pre-apply-rbd, pre-apply-resource, post-remove-rbd, post-remove-resource, auto-apply-semantic-check, post-armada-request. Change-Id: Ibe994411fee55c84fa86770fad5497040f13b78f Story: 2007960 Task: 41292 Signed-off-by: Dan Voiculeasa --- .../k8sapp_platform/lifecycle/__init__.py | 5 + .../lifecycle/lifecycle_platform.py | 123 ++++++++++++++++++ .../k8sapp_platform/setup.cfg | 3 + 3 files changed, 131 insertions(+) create mode 100644 python-k8sapp-platform/k8sapp_platform/k8sapp_platform/lifecycle/__init__.py create mode 100644 python-k8sapp-platform/k8sapp_platform/k8sapp_platform/lifecycle/lifecycle_platform.py diff --git a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/lifecycle/__init__.py b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/lifecycle/__init__.py new file mode 100644 index 0000000..6be15e8 --- /dev/null +++ b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/lifecycle/__init__.py @@ -0,0 +1,5 @@ +# +# Copyright (c) 2021 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# diff --git a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/lifecycle/lifecycle_platform.py b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/lifecycle/lifecycle_platform.py new file mode 100644 index 0000000..5c71324 --- /dev/null +++ b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/lifecycle/lifecycle_platform.py @@ -0,0 +1,123 @@ +# +# Copyright (c) 2021 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +# All Rights Reserved. 
+# + +""" System inventory App lifecycle operator.""" +# Temporary disable pylint for lifecycle hooks until Ic83fbd25d23ae34889cb288330ec448f920bda39 merges +# This will be reverted in a future commit +# pylint: disable=no-member +# pylint: disable=no-name-in-module +import os + +from oslo_log import log as logging +from sysinv.common import constants +from sysinv.common import exception +from sysinv.helm import lifecycle_base as base +from sysinv.helm import lifecycle_utils as lifecycle_utils +from sysinv.helm.lifecycle_constants import LifecycleConstants + +LOG = logging.getLogger(__name__) + + +class PlatformAppLifecycleOperator(base.AppLifecycleOperator): + def app_lifecycle_actions(self, context, conductor_obj, app_op, app, hook_info): + """ Perform lifecycle actions for an operation + + :param context: request context + :param conductor_obj: conductor object + :param app_op: AppOperator object + :param app: AppOperator.Application object + :param hook_info: LifecycleHookInfo object + + """ + # Semantic checks + if hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_SEMANTIC_CHECK: + if hook_info.mode == constants.APP_LIFECYCLE_MODE_AUTO and \ + hook_info.operation == constants.APP_APPLY_OP and \ + hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_PRE: + return self.pre_auto_apply_check(conductor_obj) + + # Rbd + elif hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_RBD: + if hook_info.operation == constants.APP_APPLY_OP and \ + hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_PRE: + return lifecycle_utils.create_rbd_provisioner_secrets(app_op, app, hook_info) + elif hook_info.operation == constants.APP_REMOVE_OP and \ + hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_POST: + return lifecycle_utils.delete_rbd_provisioner_secrets(app_op, app, hook_info) + + # Resources + elif hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_RESOURCE: + if hook_info.operation == constants.APP_APPLY_OP and \ + hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_PRE: + return lifecycle_utils.create_local_registry_secrets(app_op, app, hook_info) + elif hook_info.operation == constants.APP_REMOVE_OP and \ + hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_POST: + return lifecycle_utils.delete_local_registry_secrets(app_op, app, hook_info) + + # Armada apply retry + elif hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_ARMADA_REQUEST: + if hook_info.operation == constants.APP_APPLY_OP and \ + hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_POST: + return self.armada_apply_retry(app_op, app, hook_info) + + # Use the default behaviour for other hooks + super(PlatformAppLifecycleOperator, self).app_lifecycle_actions(context, conductor_obj, app_op, app, hook_info) + + def pre_auto_apply_check(self, conductor_obj): + """ Semantic check for auto-apply + + Check: + - ceph access + - ceph health + - crushmap applied + - replica count is non-zero so that manifest apply will not timeout + + :param conductor_obj: conductor object + + """ + crushmap_flag_file = os.path.join(constants.SYSINV_CONFIG_PATH, + constants.CEPH_CRUSH_MAP_APPLIED) + + if not os.path.isfile(crushmap_flag_file): + raise exception.LifecycleSemanticCheckException( + "Crush map not applied") + if not conductor_obj._ceph.have_ceph_monitor_access(): + raise exception.LifecycleSemanticCheckException( + "Monitor access error") + if not conductor_obj._ceph.ceph_status_ok(): + raise exception.LifecycleSemanticCheckException( + "Ceph status is not HEALTH_OK") + if 
conductor_obj.dbapi.count_hosts_matching_criteria( + personality=constants.CONTROLLER, + administrative=constants.ADMIN_UNLOCKED, + operational=constants.OPERATIONAL_ENABLED, + availability=[constants.AVAILABILITY_AVAILABLE, + constants.AVAILABILITY_DEGRADED], + vim_progress_status=constants.VIM_SERVICES_ENABLED) < 1: + raise exception.LifecycleSemanticCheckException( + "Not enough hosts in desired state") + + def armada_apply_retry(self, app_op, app, hook_info): + """Retry armada apply + + :param app_op: AppOperator object + :param app: AppOperator.Application object + :param hook_info: LifecycleHookInfo object + """ + if LifecycleConstants.EXTRA not in hook_info: + raise exception.LifecycleMissingInfo("Missing {}".format(LifecycleConstants.EXTRA)) + if LifecycleConstants.RETURN_CODE not in hook_info[LifecycleConstants.EXTRA]: + raise exception.LifecycleMissingInfo( + "Missing {} {}".format(LifecycleConstants.EXTRA, LifecycleConstants.RETURN_CODE)) + + # Raise a specific exception to be caught by the + # retry decorator and attempt a re-apply + if not hook_info[LifecycleConstants.EXTRA][LifecycleConstants.RETURN_CODE] and \ + not app_op.is_app_aborted(app.name): + LOG.info("%s app failed applying. Retrying." % str(app.name)) + raise exception.ApplicationApplyFailure(name=app.name) diff --git a/python-k8sapp-platform/k8sapp_platform/setup.cfg b/python-k8sapp-platform/k8sapp_platform/setup.cfg index 95ced37..bd99fa2 100644 --- a/python-k8sapp-platform/k8sapp_platform/setup.cfg +++ b/python-k8sapp-platform/k8sapp_platform/setup.cfg @@ -41,5 +41,8 @@ systemconfig.helm_plugins.platform_integ_apps = systemconfig.armada.manifest_ops = platform-integ-apps = k8sapp_platform.armada.manifest_platform:PlatformArmadaManifestOperator +systemconfig.app_lifecycle = + platform-integ-apps = k8sapp_platform.lifecycle.lifecycle_platform:PlatformAppLifecycleOperator + [wheel] universal = 1 From 3236e99affb558b0b25ea50ecdbea2072d61b7f9 Mon Sep 17 00:00:00 2001 From: Robert Church Date: Thu, 21 Jan 2021 20:37:38 +0200 Subject: [PATCH 07/15] Insert dependency between python-k8sapp and stx-app-helm A change in python-k8sapp-platform is not always detected when building stx-platform-helm. Make sure changes are detected. Keep the stx-platform-helm and python-k8sapp-platform versions in sync. Closes-Bug: 1912490 Signed-off-by: Dan Voiculeasa Change-Id: If2b4efb9213db5f621f55ffe9770253251e6fccc --- python-k8sapp-platform/centos/build_srpm.data | 1 + stx-platform-helm/centos/build_srpm.data | 2 ++ 2 files changed, 3 insertions(+) diff --git a/python-k8sapp-platform/centos/build_srpm.data b/python-k8sapp-platform/centos/build_srpm.data index 98cea4a..8bb4d6a 100644 --- a/python-k8sapp-platform/centos/build_srpm.data +++ b/python-k8sapp-platform/centos/build_srpm.data @@ -1,4 +1,5 @@ SRC_DIR="k8sapp_platform" +OPT_DEP_LIST="$STX_BASE/platform-armada-app/stx-platform-helm" # Bump the version to be one less than what the version was prior to decoupling # as this will align the GITREVCOUNT value to increment the version by one. 
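For context, the GITREVCOUNT mentioned in these comments is, roughly, a count of commits touching the package source plus anything listed in OPT_DEP_LIST; a sketch of the effect of this change (the git invocation and paths are illustrative assumptions, not lifted from the build tooling):

    # With the cross-dependency in place, a commit touching either directory
    # bumps the revision count used in both package versions
    git rev-list --count HEAD -- python-k8sapp-platform stx-platform-helm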
diff --git a/stx-platform-helm/centos/build_srpm.data b/stx-platform-helm/centos/build_srpm.data index fc61c75..cc5c926 100644 --- a/stx-platform-helm/centos/build_srpm.data +++ b/stx-platform-helm/centos/build_srpm.data @@ -4,6 +4,8 @@ COPY_LIST_TO_TAR="\ $STX_BASE/helm-charts/node-feature-discovery/node-feature-discovery/helm-charts \ " +OPT_DEP_LIST="$STX_BASE/platform-armada-app/python-k8sapp-platform" + # Bump the version to be one less than what the version was prior to decoupling # as this will align the GITREVCOUNT value to increment the version by one. # Remove this (i.e. reset to 0) on the next major version changes when From d155fb78ca24288a05f02e7fc068dbab6d7d0c62 Mon Sep 17 00:00:00 2001 From: Daniel Safta Date: Fri, 15 Jan 2021 15:49:55 +0000 Subject: [PATCH 08/15] Ceph-pools-audit job misses start time The Cronjob controller will check every 10s if there is a job to start, but if there are more than 100 retries to start the job, it will fail with "Cannot determine if job needs to be started. Too many missed start time (> 100). Set or decrease .spec.startingDeadlineSeconds or check clock skew." error. If the Cronjob controller happens to be down for a long period and 100 retries occurred since lastScheduledTime, after that it will NOT retry to run the job. Because concurrencyPolicy is set to Forbid and startingDeadlineSeconds was not set, the Cronjob controller will fail to start a new job when another job is already running and will increase an internal counter. If startingDeadlineSeconds is set, the Cronjob controller will count how many fails occurred in the last startingDeadlineSeconds seconds. Closes-bug: 1913057 Signed-off-by: Daniel Safta Change-Id: I3ca5ad04290580a8d0bf20f787cf5eefef6ac422 --- .../ceph-pools-audit/templates/job-ceph-pools-audit.yaml | 3 ++- .../stx-platform-helm/helm-charts/ceph-pools-audit/values.yaml | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/ceph-pools-audit/templates/job-ceph-pools-audit.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/ceph-pools-audit/templates/job-ceph-pools-audit.yaml index eff2d4f..40376ed 100644 --- a/stx-platform-helm/stx-platform-helm/helm-charts/ceph-pools-audit/templates/job-ceph-pools-audit.yaml +++ b/stx-platform-helm/stx-platform-helm/helm-charts/ceph-pools-audit/templates/job-ceph-pools-audit.yaml @@ -1,6 +1,6 @@ {{/* # -# Copyright (c) 2019 Wind River Systems, Inc. +# Copyright (c) 2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -31,6 +31,7 @@ spec: successfulJobsHistoryLimit: {{ .Values.jobs.job_ceph_pools_audit.history.success }} failedJobsHistoryLimit: {{ .Values.jobs.job_ceph_pools_audit.history.failed }} concurrencyPolicy: Forbid + startingDeadlineSeconds: {{ .Values.jobs.job_ceph_pools_audit.startingDeadlineSeconds }} jobTemplate: metadata: name: "{{$envAll.Release.Name}}" diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/ceph-pools-audit/values.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/ceph-pools-audit/values.yaml index f255d00..62b6b96 100644 --- a/stx-platform-helm/stx-platform-helm/helm-charts/ceph-pools-audit/values.yaml +++ b/stx-platform-helm/stx-platform-helm/helm-charts/ceph-pools-audit/values.yaml @@ -52,6 +52,7 @@ dependencies: jobs: job_ceph_pools_audit: cron: "*/5 * * * *" + startingDeadlineSeconds: 200 history: success: 3 failed: 1 From d30187c950cf622c7d6e65cc2d0995e51b41641f Mon Sep 17 00:00:00 2001 From: Dan Voiculeasa Date: Thu, 4 Feb 2021 12:12:54 +0200 Subject: [PATCH 09/15] Introduce metadata for app behavior control The app should achieve the applied state. Keep existing behavior when evaluating app reapplies. Note: Sibling commits modify metadata for other apps. Cert-manager and rook apps will no longer be evaluated strictly after platform-integ-apps. Story: 2007960 Task: 41754 Signed-off-by: Dan Voiculeasa Change-Id: Ia7bbca906e343ffffa019885a790befdf5ccb565 --- .../stx-platform-helm/files/metadata.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/stx-platform-helm/stx-platform-helm/files/metadata.yaml b/stx-platform-helm/stx-platform-helm/files/metadata.yaml index 49f72b5..33d3b2c 100644 --- a/stx-platform-helm/stx-platform-helm/files/metadata.yaml +++ b/stx-platform-helm/stx-platform-helm/files/metadata.yaml @@ -1,3 +1,15 @@ app_name: @APP_NAME@ app_version: @APP_VERSION@ helm_repo: @HELM_REPO@ +behavior: + platform_managed_app: yes + desired_state: applied + evaluate_reapply: + triggers: + - type: runtime-apply-puppet # TODO(dvoicule): optimize triggers + - type: host-availability-updated + filters: + - availability: services-enabled + - type: host-delete + filters: + - personality: controller From f48ac19d64072c52a69f38f63f131dc6396667d8 Mon Sep 17 00:00:00 2001 From: Daniel Safta Date: Thu, 4 Mar 2021 15:23:19 +0000 Subject: [PATCH 10/15] Fixed the restartPolicy and serviceAccount rules When cephfs-storage-init gets scheduled by k8s on a compute node, it cannot pull images because it was using the wrong serviceAccount. The serviceAccount, along with new restartPolicy and backoffLimit rules, is fixed in this commit. 
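One way to verify the fix on a running system, assuming the default kube-system install and release names, is to check which service account the init Job actually runs under:

    # Expect the provisioner account (cephfs-provisioner), not the keyring one
    kubectl -n kube-system get job cephfs-storage-init -o jsonpath='{.spec.template.spec.serviceAccountName}'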
This commit addresses an application issue with
https://review.opendev.org/c/starlingx/platform-armada-app/+/758786

Closes-bug: 1917788
Signed-off-by: Daniel Safta
Change-Id: I270c551d09c8b1c2870406cad68a8b5799cd9fb9
---
 .../templates/config-provisioner.yaml | 10 +++++---
 .../templates/provisioner.yaml        |  3 ---
 .../cephfs-provisioner/values.yaml    | 24 -------------------
 3 files changed, 7 insertions(+), 30 deletions(-)

diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/config-provisioner.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/config-provisioner.yaml
index 8646da3..1399401 100644
--- a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/config-provisioner.yaml
+++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/config-provisioner.yaml
@@ -36,9 +36,10 @@ metadata:
   name: cephfs-storage-init
   namespace: {{ $defaults.cephFSNamespace }}
 spec:
+  backoffLimit: 5
   template:
     spec:
-      serviceAccountName: {{ $defaults.rbacConfigName }}
+      serviceAccountName: {{ $defaults.provisionerConfigName }}
       volumes:
         - name: cephfs-storage-init
           configMap:
@@ -84,5 +85,8 @@ spec:
               subPath: ceph.conf
               readOnly: true
           {{- end }}
-      restartPolicy: Never
-  backoffLimit: 4
+      restartPolicy: OnFailure
+      {{- if .Values.global.nodeSelector }}
+      nodeSelector:
+        {{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
+      {{- end }}
\ No newline at end of file
diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/provisioner.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/provisioner.yaml
index 0ac21ea..6a4e864 100644
--- a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/provisioner.yaml
+++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/provisioner.yaml
@@ -27,9 +27,6 @@ spec:
       labels:
         app: {{ $defaults.provisionerConfigName }}
     spec:
-      {{- if (.Values.global.rbac) or (.Values.global.reuseRbac)}}
-      serviceAccount: {{ .Values.rbac.serviceAccount }}
-      {{- end }}
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml
index bc8f0f5..ee577f9 100644
--- a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml
+++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml
@@ -55,30 +55,6 @@ global:
   # Node Selector
   #
   nodeSelector: { node-role.kubernetes.io/master: "" }
-#
-# RBAC options.
-# Defaults should be fine in most cases.
-rbac:
-  #
-  # Cluster Role name
-  #
-  clusterRole: cephfs-provisioner
-  #
-  # Cluster Role Binding name
-  #
-  clusterRoleBinding: cephfs-provisioner
-  #
-  # Role name
-  #
-  role: cephfs-provisioner
-  #
-  # Role Binding name
-  #
-  roleBinding: cephfs-provisioner
-  #
-  # Defines a name of the service account which Provisioner will use to communicate with API server.
-  #
-  serviceAccount: cephfs-provisioner

 #
 # Configure storage classes.

From 88570d9f0539c6f3827fb708c65e5f497e5ce776 Mon Sep 17 00:00:00 2001
From: Daniel Safta
Date: Thu, 11 Mar 2021 13:32:42 +0000
Subject: [PATCH 11/15] Fixed cephfs-provisioner chart update

cephfs-provisioner fails to upgrade when overrides are changed, due to
missing Helm hook annotations.
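The missing piece is Helm hook metadata on the init job. A trimmed
sketch of the metadata this commit adds (the full diff follows); with
these two annotations Helm re-runs the job on upgrades and rollbacks
and removes the previous hook instance first:

metadata:
  name: cephfs-storage-init
  annotations:
    # Run the job after install and before upgrades/rollbacks.
    "helm.sh/hook": "post-install, pre-upgrade, pre-rollback"
    # Delete the previous hook job before creating the new one, so an
    # upgrade never collides with a leftover completed job.
    "helm.sh/hook-delete-policy": "before-hook-creation"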
This commit addresses an application issue with
https://review.opendev.org/c/starlingx/platform-armada-app/+/758786

Closes-bug: 1918674
Signed-off-by: Daniel Safta
Change-Id: I270c551d09c8b0c4440406cad68a8b5799cd9fb9
---
 .../cephfs-provisioner/templates/config-provisioner.yaml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/config-provisioner.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/config-provisioner.yaml
index 1399401..0a1c763 100644
--- a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/config-provisioner.yaml
+++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/templates/config-provisioner.yaml
@@ -35,6 +35,9 @@ kind: Job
 metadata:
   name: cephfs-storage-init
   namespace: {{ $defaults.cephFSNamespace }}
+  annotations:
+    "helm.sh/hook": "post-install, pre-upgrade, pre-rollback"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
 spec:
   backoffLimit: 5
   template:

From 45fd0a6b2cd2dbaaf7ff21ef989824377c5b17dc Mon Sep 17 00:00:00 2001
From: Robert Church
Date: Thu, 18 Mar 2021 11:43:21 -0400
Subject: [PATCH 12/15] Build: Isolate platform plugins to an app-specific
 directory

When building the stx-platform-helm RPM for platform-integ-apps, the
Helm plugins are installed in a location that could be populated with
other apps' plugins if their spec files are not set up properly.

Adjust the spec to provide an app-specific location for the plugins to
ensure that no other app's plugins are included in the application
tarball.

Closes-Bug: #1920066
Change-Id: Id24227cd100a3c29809f1dd01f61ea7174e9d779
Signed-off-by: Robert Church
---
 python-k8sapp-platform/centos/python-k8sapp-platform.spec | 5 +++--
 stx-platform-helm/centos/stx-platform-helm.spec | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/python-k8sapp-platform/centos/python-k8sapp-platform.spec b/python-k8sapp-platform/centos/python-k8sapp-platform.spec
index a43e790..5bc8fae 100644
--- a/python-k8sapp-platform/centos/python-k8sapp-platform.spec
+++ b/python-k8sapp-platform/centos/python-k8sapp-platform.spec
@@ -1,3 +1,4 @@
+%global app_name platform-integ-apps
 %global pypi_name k8sapp-platform
 %global sname k8sapp_platform
@@ -34,8 +35,8 @@ export PBR_VERSION=%{version}
 export PBR_VERSION=%{version}.%{tis_patch_ver}
 export SKIP_PIP_INSTALL=1
 %{__python2} setup.py install --skip-build --root %{buildroot}
-mkdir -p ${RPM_BUILD_ROOT}/plugins
-install -m 644 dist/*.whl ${RPM_BUILD_ROOT}/plugins/
+mkdir -p ${RPM_BUILD_ROOT}/plugins/%{app_name}
+install -m 644 dist/*.whl ${RPM_BUILD_ROOT}/plugins/%{app_name}/

 %files
 %{python2_sitelib}/%{sname}
diff --git a/stx-platform-helm/centos/stx-platform-helm.spec b/stx-platform-helm/centos/stx-platform-helm.spec
index 38f0f48..f3f29a3 100644
--- a/stx-platform-helm/centos/stx-platform-helm.spec
+++ b/stx-platform-helm/centos/stx-platform-helm.spec
@@ -74,7 +74,7 @@ sed -i 's/@HELM_REPO@/%{helm_repo}/g' %{app_staging}/metadata.yaml

 # Copy the plugins: installed in the buildroot
 mkdir -p %{app_staging}/plugins
-cp /plugins/*.whl %{app_staging}/plugins
+cp /plugins/%{app_name}/*.whl %{app_staging}/plugins

 # package it up
 find . -type f !
-name '*.md5' -print0 | xargs -0 md5sum > checksum.md5

From 1021d50142af6422c9a3f0853f4b7c525e724ab8 Mon Sep 17 00:00:00 2001
From: Daniel Safta
Date: Wed, 24 Mar 2021 12:56:29 +0000
Subject: [PATCH 13/15] Removed extra serviceAccount from cephfs-provisioner

cephfs-provisioner may need to create new resources in the Kubernetes
cluster. It was granted access to some of these resources, including
namespaces, but when
https://review.opendev.org/c/starlingx/platform-armada-app/+/778746
merged, the serviceAccount was changed.

I have updated the serviceAccount so that it can create new namespaces
and secrets. The serviceAccount that was initially used to create
namespaces and secrets is no longer needed, so I have removed it.
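The net effect is that the provisioner's own ClusterRole and Role
(named by provisionerConfigName, cephfs-provisioner by default) absorb
the secret and namespace rules that previously lived on the separate
rbacConfigName account. A trimmed sketch of the consolidated
ClusterRole, with the moved rules taken from the diff below; the
pre-existing rules are elided:

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner   # rendered from {{ $defaults.provisionerConfigName }}
rules:
  # ... existing provisioner rules (storageclasses, events, services) ...
  # Rules folded in from the removed cephfs-provisioner-keyring account:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "create", "list", "update"]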
apiGroups: [""] resources: ["endpoints"] verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "create", "list", "update"] + --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml index ee577f9..ba985a5 100644 --- a/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml +++ b/stx-platform-helm/stx-platform-helm/helm-charts/cephfs-provisioner/values.yaml @@ -73,7 +73,6 @@ classdefaults: - 192.168.204.2:6789 provisionerConfigName: cephfs-provisioner provisionerName: ceph.com/cephfs - rbacConfigName: cephfs-provisioner-keyring # Configure storage classes. # This section should be tailored to your setup. It allows you to define multiple storage From 9d45149d29b8c05808bcd7e5a129f135b3386931 Mon Sep 17 00:00:00 2001 From: Isac Souza Date: Mon, 5 Apr 2021 11:07:21 -0300 Subject: [PATCH 14/15] Use new method for setting num of replicas Use the new _num_replicas_for_platform_app method from the helm base class to set the number of replicas in the chart. The new method will return the number of provisioned controllers with a minimum of 1. Tested by building an ISO and installing the armada apps. Partial-Bug: 1922278 Signed-off-by: Isac Souza Change-Id: Idb3c93274a1cb5c410d885d459784382525427a0 --- .../k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py | 2 +- .../k8sapp_platform/k8sapp_platform/helm/rbd_provisioner.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py index 1a7ef22..bf1e5ee 100644 --- a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py +++ b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py @@ -170,7 +170,7 @@ class CephFSProvisionerHelm(base.BaseHelm): classes.append(cls) global_settings = { - "replicas": self._num_provisioned_controllers(), + "replicas": self._num_replicas_for_platform_app(), } overrides = { diff --git a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/rbd_provisioner.py b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/rbd_provisioner.py index d7cf12a..d060807 100644 --- a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/rbd_provisioner.py +++ b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/rbd_provisioner.py @@ -83,7 +83,7 @@ class RbdProvisionerHelm(base.BaseHelm): classes.append(cls) global_settings = { - "replicas": self._num_provisioned_controllers(), + "replicas": self._num_replicas_for_platform_app(), "defaultStorageClass": constants.K8S_RBD_PROV_STOR_CLASS_NAME } From 66fa48f04d926848a37f5fbb7689cf4b114cb3ba Mon Sep 17 00:00:00 2001 From: Pedro Henrique Linhares Date: Tue, 6 Apr 2021 21:45:28 -0300 Subject: [PATCH 15/15] Update helm charts config maps after sx-dx migration with new CEPH monitors This commit adds annotations that allows config maps to be recreated after ceph monitor IP changes due to DX migration so that existing StorageClasses can get a reference to the correct monitor. StorageClasses and provisioners are recreated during platform-integ-apps auto re-apply. 
Partial-Bug: 1922278
Signed-off-by: Isac Souza
Change-Id: Idb3c93274a1cb5c410d885d459784382525427a0
---
 .../k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py | 2 +-
 .../k8sapp_platform/k8sapp_platform/helm/rbd_provisioner.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py
index 1a7ef22..bf1e5ee 100644
--- a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py
+++ b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/ceph_fs_provisioner.py
@@ -170,7 +170,7 @@ class CephFSProvisionerHelm(base.BaseHelm):
         classes.append(cls)

         global_settings = {
-            "replicas": self._num_provisioned_controllers(),
+            "replicas": self._num_replicas_for_platform_app(),
         }

         overrides = {
diff --git a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/rbd_provisioner.py b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/rbd_provisioner.py
index d7cf12a..d060807 100644
--- a/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/rbd_provisioner.py
+++ b/python-k8sapp-platform/k8sapp_platform/k8sapp_platform/helm/rbd_provisioner.py
@@ -83,7 +83,7 @@ class RbdProvisionerHelm(base.BaseHelm):
         classes.append(cls)

         global_settings = {
-            "replicas": self._num_provisioned_controllers(),
+            "replicas": self._num_replicas_for_platform_app(),
             "defaultStorageClass": constants.K8S_RBD_PROV_STOR_CLASS_NAME
         }

From 66fa48f04d926848a37f5fbb7689cf4b114cb3ba Mon Sep 17 00:00:00 2001
From: Pedro Henrique Linhares
Date: Tue, 6 Apr 2021 21:45:28 -0300
Subject: [PATCH 15/15] Update helm chart config maps after SX-DX migration
 with new Ceph monitors

This commit adds annotations that allow config maps to be recreated
after the Ceph monitor IP changes due to an SX-DX migration, so that
existing StorageClasses can get a reference to the correct monitor.
StorageClasses and provisioners are recreated during the
platform-integ-apps automatic reapply.

Story: 2008587
Task: 42242
Signed-off-by: Pedro Linhares
Depends-On: https://review.opendev.org/c/starlingx/stx-puppet/+/783727
Change-Id: I9cedc70326e92796f03520deed7f0857e119257f
---
 .../templates/configmap-ceph-etc.yaml | 30 +++++++++++++++++++
 .../helm-charts/ceph-pools-audit/values.yaml | 5 ++--
 .../templates/config-provisioner.yaml | 7 +++--
 .../templates/pre-install-check-ceph.yaml | 9 ++++--
 4 files changed, 45 insertions(+), 6 deletions(-)
 create mode 100644 stx-platform-helm/stx-platform-helm/helm-charts/ceph-pools-audit/templates/configmap-ceph-etc.yaml

diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/ceph-pools-audit/templates/configmap-ceph-etc.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/ceph-pools-audit/templates/configmap-ceph-etc.yaml
new file mode 100644
index 0000000..1cdcc35
--- /dev/null
+++ b/stx-platform-helm/stx-platform-helm/helm-charts/ceph-pools-audit/templates/configmap-ceph-etc.yaml
@@ -0,0 +1,30 @@
+{{/*
+#
+# Copyright (c) 2021 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+*/}}
+
+{{- if .Values.manifests.configmap_ceph_conf }}
+{{- $envAll := . }}
+{{- $ceph := $envAll.Values.conf.ceph }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ $envAll.Values.ceph_client.configmap }}
+  namespace: {{ $envAll.Release.namespace }}
+  labels:
+    app: ceph-pools-audit
+  annotations:
+    "helm.sh/hook": "pre-upgrade, pre-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+data:
+  ceph.conf: |
+    [global]
+    auth_supported = none
+    {{ $monitors := $ceph.monitors }}{{ range $index, $element := $monitors}}
+    [mon.{{- $index }}]
+    mon_addr = {{ $element }}
+    {{- end }}
+{{- end }}
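For reference, assuming a single monitor at 192.168.204.2:6789 (the
sample address used in the cephfs-provisioner classdefaults earlier in
this series), the template above renders roughly this ConfigMap:

apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-etc-pools-audit   # from ceph_client.configmap in values.yaml
  labels:
    app: ceph-pools-audit
  annotations:
    # Recreated on every install/upgrade, so a monitor IP change after
    # an SX-DX migration is picked up on the next reapply.
    "helm.sh/hook": "pre-upgrade, pre-install"
    "helm.sh/hook-delete-policy": "before-hook-creation"
data:
  ceph.conf: |
    [global]
    auth_supported = none
    [mon.0]
    mon_addr = 192.168.204.2:6789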
| include "helm-toolkit.utils.template" | indent 4 }} @@ -92,4 +95,4 @@ spec: {{- if .Values.global.nodeSelector }} nodeSelector: {{ .Values.global.nodeSelector | toYaml | trim | indent 8 }} - {{- end }} \ No newline at end of file + {{- end }} diff --git a/stx-platform-helm/stx-platform-helm/helm-charts/rbd-provisioner/templates/pre-install-check-ceph.yaml b/stx-platform-helm/stx-platform-helm/helm-charts/rbd-provisioner/templates/pre-install-check-ceph.yaml index ec986b7..f57bec2 100644 --- a/stx-platform-helm/stx-platform-helm/helm-charts/rbd-provisioner/templates/pre-install-check-ceph.yaml +++ b/stx-platform-helm/stx-platform-helm/helm-charts/rbd-provisioner/templates/pre-install-check-ceph.yaml @@ -1,6 +1,6 @@ {{/* # -# Copyright (c) 2018 Wind River Systems, Inc. +# Copyright (c) 2018-2021 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -14,9 +14,11 @@ apiVersion: v1 kind: ConfigMap metadata: - creationTimestamp: 2016-02-18T19:14:38Z name: config-{{- $root.Values.global.name }} namespace: {{ $root.Release.Namespace }} + annotations: + "helm.sh/hook": "pre-upgrade, pre-install" + "helm.sh/hook-delete-policy": "before-hook-creation" data: ceph.conf: | {{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}} @@ -202,6 +204,9 @@ kind: ConfigMap metadata: name: ceph-etc namespace: {{ $root.Release.Namespace }} + annotations: + "helm.sh/hook": "pre-upgrade, pre-install" + "helm.sh/hook-delete-policy": "before-hook-creation" data: ceph.conf: | [global]