Merge remote-tracking branch 'origin/master' into f/centos8
Signed-off-by: Charles Short <charles.short@windriver.com>
Change-Id: I685ac993a1f5113fdf4cea14f24a5c1e04a941d0
commit f3bb236173

 .zuul.yaml | 82
@@ -3,13 +3,95 @@
    check:
      jobs:
        - openstack-tox-linters
        - k8sapp-platform-tox-py27
        - k8sapp-platform-tox-py36
        - k8sapp-platform-tox-flake8
        - k8sapp-platform-tox-pylint
        - k8sapp-platform-tox-bandit
    gate:
      jobs:
        - openstack-tox-linters
        - k8sapp-platform-tox-py27
        - k8sapp-platform-tox-py36
        - k8sapp-platform-tox-flake8
        - k8sapp-platform-tox-pylint
        - k8sapp-platform-tox-bandit
    post:
      jobs:
        - stx-platform-armada-app-upload-git-mirror

- job:
    name: k8sapp-platform-tox-py27
    parent: tox
    description: |
      Run py27 test for k8sapp_platform
    nodeset: ubuntu-xenial
    required-projects:
      - starlingx/config
      - starlingx/fault
      - starlingx/update
      - starlingx/utilities
    files:
      - python-k8sapp-platform/*
    vars:
      tox_envlist: py27
      tox_extra_args: -c python-k8sapp-platform/k8sapp_platform/tox.ini

- job:
    name: k8sapp-platform-tox-py36
    parent: tox
    description: |
      Run py36 test for k8sapp_platform
    nodeset: ubuntu-bionic
    required-projects:
      - starlingx/config
      - starlingx/fault
      - starlingx/update
      - starlingx/utilities
    files:
      - python-k8sapp-platform/*
    vars:
      tox_envlist: py36
      tox_extra_args: -c python-k8sapp-platform/k8sapp_platform/tox.ini

- job:
    name: k8sapp-platform-tox-flake8
    parent: tox
    description: |
      Run flake8 test for k8sapp_platform
    files:
      - python-k8sapp-platform/*
    vars:
      tox_envlist: flake8
      tox_extra_args: -c python-k8sapp-platform/k8sapp_platform/tox.ini

- job:
    name: k8sapp-platform-tox-pylint
    parent: tox
    description: |
      Run pylint test for k8sapp_platform
    required-projects:
      - starlingx/config
      - starlingx/fault
      - starlingx/update
      - starlingx/utilities
    files:
      - python-k8sapp-platform/*
    vars:
      tox_envlist: pylint
      tox_extra_args: -c python-k8sapp-platform/k8sapp_platform/tox.ini

- job:
    name: k8sapp-platform-tox-bandit
    parent: tox
    description: |
      Run bandit test for k8sapp_platform
    files:
      - python-k8sapp-platform/*
    vars:
      tox_envlist: bandit
      tox_extra_args: -c python-k8sapp-platform/k8sapp_platform/tox.ini

- job:
    name: stx-platform-armada-app-upload-git-mirror
    parent: upload-git-mirror
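The six k8sapp-platform jobs above differ only in tox_envlist and nodeset; tox_extra_args points tox at the plugin's own tox.ini. As a minimal sketch (not part of the change), any of them can be reproduced locally from the repo root, assuming tox is installed:

    # Hypothetical local equivalent of the k8sapp-platform-tox-pylint job:
    # invoke tox with the same envlist and config path the job passes.
    import subprocess

    subprocess.run(
        ["tox", "-e", "pylint",
         "-c", "python-k8sapp-platform/k8sapp_platform/tox.ini"],
        check=True,
    )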
@@ -1,4 +1,5 @@
 SRC_DIR="k8sapp_platform"
+OPT_DEP_LIST="$STX_BASE/platform-armada-app/stx-platform-helm"

 # Bump the version to be one less than what the version was prior to decoupling
 # as this will align the GITREVCOUNT value to increment the version by one.
@@ -1,3 +1,4 @@
+%global app_name platform-integ-apps
 %global pypi_name k8sapp-platform
 %global sname k8sapp_platform

@@ -19,15 +20,6 @@ BuildRequires: python3-wheel
 %description
 StarlingX sysinv extensions: Platform Integration K8S app

-%package -n python3-%{pypi_name}
-Summary: StarlingX sysinv extensions: Platform Integration K8S app
-
-Requires: python3-pbr >= 2.0.0
-Requires: sysinv >= 1.0
-
-%description -n python3-%{pypi_name}
-StarlingX sysinv extensions: Platform Integration K8S app
-
 %prep
 %setup
 # Remove bundled egg-info

@@ -42,8 +34,8 @@ export PBR_VERSION=%{version}
 export PBR_VERSION=%{version}.%{tis_patch_ver}
 export SKIP_PIP_INSTALL=1
 %{__python3} setup.py install --skip-build --root %{buildroot}
-mkdir -p ${RPM_BUILD_ROOT}/plugins
-install -m 644 dist/*.whl ${RPM_BUILD_ROOT}/plugins/
+mkdir -p ${RPM_BUILD_ROOT}/plugins/%{app_name}
+install -m 644 dist/*.whl ${RPM_BUILD_ROOT}/plugins/%{app_name}/

 %files
 %{python3_sitelib}/%{sname}
@@ -10,6 +10,7 @@

 from k8sapp_platform.helm.ceph_pools_audit import CephPoolsAuditHelm
 from k8sapp_platform.helm.rbd_provisioner import RbdProvisionerHelm
+from k8sapp_platform.helm.ceph_fs_provisioner import CephFSProvisionerHelm

 from sysinv.common import constants
 from sysinv.helm import manifest_base as base

@@ -23,12 +24,14 @@ class PlatformArmadaManifestOperator(base.ArmadaManifestOperator):
     CHART_GROUP_CEPH = 'starlingx-ceph-charts'
     CHART_GROUPS_LUT = {
         CephPoolsAuditHelm.CHART: CHART_GROUP_CEPH,
-        RbdProvisionerHelm.CHART: CHART_GROUP_CEPH
+        RbdProvisionerHelm.CHART: CHART_GROUP_CEPH,
+        CephFSProvisionerHelm.CHART: CHART_GROUP_CEPH
     }

     CHARTS_LUT = {
         CephPoolsAuditHelm.CHART: 'kube-system-ceph-pools-audit',
-        RbdProvisionerHelm.CHART: 'kube-system-rbd-provisioner'
+        RbdProvisionerHelm.CHART: 'kube-system-rbd-provisioner',
+        CephFSProvisionerHelm.CHART: 'kube-system-cephfs-provisioner'
     }

     def platform_mode_manifest_updates(self, dbapi, mode):
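The two lookup tables drive manifest edits: CHART_GROUPS_LUT maps a chart to its Armada chart group, CHARTS_LUT to the Armada chart document name. A reduced sketch of how a disabled chart is dropped, with a hypothetical operator stub in place of the manifest operator above:

    # Illustration only: disabling a chart removes it from its chart group,
    # mirroring what CephFSProvisionerHelm.execute_manifest_updates() does.
    CHART = 'cephfs-provisioner'
    CHART_GROUPS_LUT = {CHART: 'starlingx-ceph-charts'}
    CHARTS_LUT = {CHART: 'kube-system-cephfs-provisioner'}

    class StubOperator(object):
        def chart_group_chart_delete(self, group, chart_doc):
            print("drop %s from %s" % (chart_doc, group))

    StubOperator().chart_group_chart_delete(
        CHART_GROUPS_LUT[CHART], CHARTS_LUT[CHART])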
@@ -6,6 +6,28 @@

 # Helm: Supported charts:
 # These values match the names in the chart package's Chart.yaml
+from sysinv.helm import common
+
 HELM_CHART_RBD_PROVISIONER = 'rbd-provisioner'
 HELM_CHART_CEPH_POOLS_AUDIT = 'ceph-pools-audit'
 HELM_CHART_HELM_TOOLKIT = 'helm-toolkit'
+HELM_CHART_CEPH_FS_PROVISIONER = 'cephfs-provisioner'
+HELM_NS_CEPH_FS_PROVISIONER = common.HELM_NS_KUBE_SYSTEM
+
+HELM_CEPH_FS_PROVISIONER_CLAIM_ROOT = '/pvc-volumes'
+HELM_CHART_CEPH_FS_PROVISIONER_NAME = 'ceph.com/cephfs'
+K8S_CEPHFS_PROVISIONER_ADMIN_SECRET_NAME = 'ceph-secret-admin'
+K8S_CEPHFS_PROVISIONER_ADMIN_SECRET_NAMESPACE = 'kube-system'
+K8S_CEPHFS_PROVISIONER_USER_NAME = 'admin'
+
+K8S_CEPHFS_PROVISIONER_DEFAULT_NAMESPACE = 'kube-system'
+K8S_CEPHFS_PROVISIONER_RBAC_CONFIG_NAME = 'cephfs-provisioner-keyring'
+
+# CephFS Provisioner backend
+K8S_CEPHFS_PROV_STORAGECLASS_NAME = 'cephfs_storageclass_name'  # Customer
+K8S_CEPHFS_PROV_STOR_CLASS_NAME = 'cephfs'
+
+# Ceph FS constants for pools and fs
+CEPHFS_DATA_POOL_KUBE_NAME = 'kube-cephfs-data'
+CEPHFS_METADATA_POOL_KUBE_NAME = 'kube-cephfs-metadata'
+CEPHFS_FS_KUBE_NAME = 'kube-cephfs'
@@ -0,0 +1,190 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from k8sapp_platform.common import constants as app_constants

from sysinv.common import constants
from sysinv.common import exception

from sysinv.helm import base


class K8CephFSProvisioner(object):
    """ Utility methods for getting the k8 overrides for internal ceph
    from a corresponding storage backend.
    """

    @staticmethod
    def get_storage_class_name(bk):
        """ Get the name of the storage class for the cephfs provisioner
        :param bk: Ceph storage backend object
        :returns: name of the storage class
        """
        if bk['capabilities'].get(app_constants.K8S_CEPHFS_PROV_STORAGECLASS_NAME):
            name = bk['capabilities'][app_constants.K8S_CEPHFS_PROV_STORAGECLASS_NAME]
        elif bk.name == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
            name = app_constants.K8S_CEPHFS_PROV_STOR_CLASS_NAME
        else:
            name = bk.name + '-' + app_constants.K8S_CEPHFS_PROV_STOR_CLASS_NAME

        return str(name)

    @staticmethod
    def get_data_pool(bk):
        """ Get the name of the ceph data pool for the cephfs provisioner
        This naming convention is valid only for internal backends
        :param bk: Ceph storage backend object
        :returns: name of the data pool
        """
        if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
            return app_constants.CEPHFS_DATA_POOL_KUBE_NAME
        else:
            return str(app_constants.CEPHFS_DATA_POOL_KUBE_NAME + '-' + bk['name'])

    @staticmethod
    def get_metadata_pool(bk):
        """ Get the name of the ceph metadata pool for the cephfs provisioner
        This naming convention is valid only for internal backends
        :param bk: Ceph storage backend object
        :returns: name of the metadata pool
        """
        if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
            return app_constants.CEPHFS_METADATA_POOL_KUBE_NAME
        else:
            return str(app_constants.CEPHFS_METADATA_POOL_KUBE_NAME + '-' + bk['name'])

    @staticmethod
    def get_fs(bk):
        """ Get the name of the ceph filesystem for the cephfs provisioner
        This naming convention is valid only for internal backends
        :param bk: Ceph storage backend object
        :returns: name of the filesystem
        """
        if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
            return app_constants.CEPHFS_FS_KUBE_NAME
        else:
            return str(app_constants.CEPHFS_FS_KUBE_NAME + '-' + bk['name'])

    @staticmethod
    def get_user_id(bk):
        """ Get the non admin user name for a cephfs provisioner secret
        :param bk: Ceph storage backend object
        :returns: name of the cephfs provisioner user
        """
        # The user id is derived from the data pool name for both the
        # default backend and any additional backend.
        name = K8CephFSProvisioner.get_data_pool(bk)

        prefix = 'ceph-pool'
        return str(prefix + '-' + name)

    @staticmethod
    def get_user_secret_name(bk):
        """ Get the name for the non admin secret key of a pool
        :param bk: Ceph storage backend object
        :returns: name of k8 secret
        """
        name = K8CephFSProvisioner.get_data_pool(bk)

        base_name = 'ceph-pool'
        return str(base_name + '-' + name)


class CephFSProvisionerHelm(base.BaseHelm):
    """Class to encapsulate helm operations for the cephfs-provisioner chart"""

    CHART = app_constants.HELM_CHART_CEPH_FS_PROVISIONER
    SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \
        [app_constants.HELM_NS_CEPH_FS_PROVISIONER]
    SUPPORTED_APP_NAMESPACES = {
        constants.HELM_APP_PLATFORM:
            base.BaseHelm.SUPPORTED_NAMESPACES + [app_constants.HELM_NS_CEPH_FS_PROVISIONER],
    }

    SERVICE_NAME = app_constants.HELM_CHART_CEPH_FS_PROVISIONER
    SERVICE_PORT_MON = 6789

    def execute_manifest_updates(self, operator):
        # On application load this chart is enabled. Only disable if specified
        # by the user
        if not self._is_enabled(operator.APP, self.CHART,
                                app_constants.HELM_NS_CEPH_FS_PROVISIONER):
            operator.chart_group_chart_delete(
                operator.CHART_GROUPS_LUT[self.CHART],
                operator.CHARTS_LUT[self.CHART])

    def get_overrides(self, namespace=None):

        backends = self.dbapi.storage_backend_get_list()
        ceph_bks = [bk for bk in backends if bk.backend == constants.SB_TYPE_CEPH]

        if not ceph_bks:
            return {}  # ceph is not configured

        def _skip_ceph_mon_2(name):
            return name != constants.CEPH_MON_2

        classdefaults = {
            "monitors": self._get_formatted_ceph_monitor_ips(
                name_filter=_skip_ceph_mon_2),
            "adminId": app_constants.K8S_CEPHFS_PROVISIONER_USER_NAME,
            "adminSecretName": app_constants.K8S_CEPHFS_PROVISIONER_ADMIN_SECRET_NAME
        }

        # Get tier info.
        tiers = self.dbapi.storage_tier_get_list()

        classes = []
        for bk in ceph_bks:
            # Get the ruleset for the new kube-cephfs pools.
            tier = next((t for t in tiers if t.forbackendid == bk.id), None)
            if not tier:
                raise Exception("No tier present for backend %s" % bk.name)

            rule_name = "{0}{1}{2}".format(
                tier.name,
                constants.CEPH_CRUSH_TIER_SUFFIX,
                "-ruleset").replace('-', '_')

            cls = {
                "name": K8CephFSProvisioner.get_storage_class_name(bk),
                "data_pool_name": K8CephFSProvisioner.get_data_pool(bk),
                "metadata_pool_name": K8CephFSProvisioner.get_metadata_pool(bk),
                "fs_name": K8CephFSProvisioner.get_fs(bk),
                "replication": int(bk.capabilities.get("replication")),
                "crush_rule_name": rule_name,
                "chunk_size": 64,
                "userId": K8CephFSProvisioner.get_user_id(bk),
                "userSecretName": K8CephFSProvisioner.get_user_secret_name(bk),
                "claim_root": app_constants.HELM_CEPH_FS_PROVISIONER_CLAIM_ROOT,
                "additionalNamespaces": ['default', 'kube-public']
            }

            classes.append(cls)

        global_settings = {
            "replicas": self._num_replicas_for_platform_app(),
        }

        overrides = {
            app_constants.HELM_NS_CEPH_FS_PROVISIONER: {
                "classdefaults": classdefaults,
                "classes": classes,
                "global": global_settings
            }
        }

        if namespace in self.SUPPORTED_NAMESPACES:
            return overrides[namespace]
        elif namespace:
            raise exception.InvalidHelmNamespace(chart=self.CHART,
                                                 namespace=namespace)
        else:
            return overrides
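The naming helpers all follow one convention: the default Ceph backend keeps the bare kube-cephfs-* names, any additional backend gets its backend name appended, and the provisioner user/secret names are the data pool name with a ceph-pool- prefix. A standalone sketch (the backend names here are illustrative; the real default comes from constants.SB_DEFAULT_NAMES):

    DEFAULT_BK = 'ceph-store'   # assumed default backend name

    def data_pool(bk_name, default=DEFAULT_BK):
        # mirrors K8CephFSProvisioner.get_data_pool()
        base = 'kube-cephfs-data'
        return base if bk_name == default else base + '-' + bk_name

    def user_secret_name(bk_name):
        # mirrors K8CephFSProvisioner.get_user_secret_name()
        return 'ceph-pool-' + data_pool(bk_name)

    assert data_pool('ceph-store') == 'kube-cephfs-data'
    assert data_pool('ceph-gold') == 'kube-cephfs-data-ceph-gold'
    assert user_secret_name('ceph-store') == 'ceph-pool-kube-cephfs-data'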
@@ -83,7 +83,7 @@ class RbdProvisionerHelm(base.BaseHelm):
             classes.append(cls)

         global_settings = {
-            "replicas": self._num_provisioned_controllers(),
+            "replicas": self._num_replicas_for_platform_app(),
             "defaultStorageClass": constants.K8S_RBD_PROV_STOR_CLASS_NAME
         }
@@ -0,0 +1,5 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -0,0 +1,123 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# All Rights Reserved.
#

""" System inventory App lifecycle operator."""
# Temporarily disable pylint for lifecycle hooks until Ic83fbd25d23ae34889cb288330ec448f920bda39 merges
# This will be reverted in a future commit
# pylint: disable=no-member
# pylint: disable=no-name-in-module
import os

from oslo_log import log as logging
from sysinv.common import constants
from sysinv.common import exception
from sysinv.helm import lifecycle_base as base
from sysinv.helm import lifecycle_utils as lifecycle_utils
from sysinv.helm.lifecycle_constants import LifecycleConstants

LOG = logging.getLogger(__name__)


class PlatformAppLifecycleOperator(base.AppLifecycleOperator):
    def app_lifecycle_actions(self, context, conductor_obj, app_op, app, hook_info):
        """ Perform lifecycle actions for an operation

        :param context: request context
        :param conductor_obj: conductor object
        :param app_op: AppOperator object
        :param app: AppOperator.Application object
        :param hook_info: LifecycleHookInfo object

        """
        # Semantic checks
        if hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_SEMANTIC_CHECK:
            if hook_info.mode == constants.APP_LIFECYCLE_MODE_AUTO and \
                    hook_info.operation == constants.APP_APPLY_OP and \
                    hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_PRE:
                return self.pre_auto_apply_check(conductor_obj)

        # Rbd
        elif hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_RBD:
            if hook_info.operation == constants.APP_APPLY_OP and \
                    hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_PRE:
                return lifecycle_utils.create_rbd_provisioner_secrets(app_op, app, hook_info)
            elif hook_info.operation == constants.APP_REMOVE_OP and \
                    hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_POST:
                return lifecycle_utils.delete_rbd_provisioner_secrets(app_op, app, hook_info)

        # Resources
        elif hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_RESOURCE:
            if hook_info.operation == constants.APP_APPLY_OP and \
                    hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_PRE:
                return lifecycle_utils.create_local_registry_secrets(app_op, app, hook_info)
            elif hook_info.operation == constants.APP_REMOVE_OP and \
                    hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_POST:
                return lifecycle_utils.delete_local_registry_secrets(app_op, app, hook_info)

        # Armada apply retry
        elif hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_ARMADA_REQUEST:
            if hook_info.operation == constants.APP_APPLY_OP and \
                    hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_POST:
                return self.armada_apply_retry(app_op, app, hook_info)

        # Use the default behaviour for other hooks
        super(PlatformAppLifecycleOperator, self).app_lifecycle_actions(context, conductor_obj, app_op, app, hook_info)

    def pre_auto_apply_check(self, conductor_obj):
        """ Semantic check for auto-apply

        Check:
        - ceph access
        - ceph health
        - crushmap applied
        - replica count is non-zero so that manifest apply will not timeout

        :param conductor_obj: conductor object

        """
        crushmap_flag_file = os.path.join(constants.SYSINV_CONFIG_PATH,
                                          constants.CEPH_CRUSH_MAP_APPLIED)

        if not os.path.isfile(crushmap_flag_file):
            raise exception.LifecycleSemanticCheckException(
                "Crush map not applied")
        if not conductor_obj._ceph.have_ceph_monitor_access():
            raise exception.LifecycleSemanticCheckException(
                "Monitor access error")
        if not conductor_obj._ceph.ceph_status_ok():
            raise exception.LifecycleSemanticCheckException(
                "Ceph status is not HEALTH_OK")
        if conductor_obj.dbapi.count_hosts_matching_criteria(
                personality=constants.CONTROLLER,
                administrative=constants.ADMIN_UNLOCKED,
                operational=constants.OPERATIONAL_ENABLED,
                availability=[constants.AVAILABILITY_AVAILABLE,
                              constants.AVAILABILITY_DEGRADED],
                vim_progress_status=constants.VIM_SERVICES_ENABLED) < 1:
            raise exception.LifecycleSemanticCheckException(
                "Not enough hosts in desired state")

    def armada_apply_retry(self, app_op, app, hook_info):
        """Retry armada apply

        :param app_op: AppOperator object
        :param app: AppOperator.Application object
        :param hook_info: LifecycleHookInfo object
        """
        if LifecycleConstants.EXTRA not in hook_info:
            raise exception.LifecycleMissingInfo("Missing {}".format(LifecycleConstants.EXTRA))
        if LifecycleConstants.RETURN_CODE not in hook_info[LifecycleConstants.EXTRA]:
            raise exception.LifecycleMissingInfo(
                "Missing {} {}".format(LifecycleConstants.EXTRA, LifecycleConstants.RETURN_CODE))

        # Raise a specific exception to be caught by the
        # retry decorator and attempt a re-apply
        if not hook_info[LifecycleConstants.EXTRA][LifecycleConstants.RETURN_CODE] and \
                not app_op.is_app_aborted(app.name):
            LOG.info("%s app failed applying. Retrying." % str(app.name))
            raise exception.ApplicationApplyFailure(name=app.name)
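Every hook is dispatched on the same three axes: lifecycle_type, operation, and relative_timing (plus mode for the auto-apply semantic check). A reduced sketch of that dispatch shape, with a hypothetical hook object standing in for LifecycleHookInfo and strings standing in for the sysinv constants:

    # Illustration only; the real operator compares against sysinv constants.
    class Hook(object):
        def __init__(self, lifecycle_type, operation, timing):
            self.lifecycle_type = lifecycle_type
            self.operation = operation
            self.relative_timing = timing

    def dispatch(hook):
        if hook.lifecycle_type == 'rbd':
            if (hook.operation, hook.relative_timing) == ('apply', 'pre'):
                return 'create_rbd_provisioner_secrets'
            if (hook.operation, hook.relative_timing) == ('remove', 'post'):
                return 'delete_rbd_provisioner_secrets'
        return 'default behaviour'

    assert dispatch(Hook('rbd', 'apply', 'pre')) == 'create_rbd_provisioner_secrets'
    assert dispatch(Hook('rbd', 'apply', 'post')) == 'default behaviour'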
@@ -36,9 +36,13 @@ systemconfig.helm_plugins.platform_integ_apps =
     001_helm-toolkit = k8sapp_platform.helm.helm_toolkit:HelmToolkitHelm
     002_rbd-provisioner = k8sapp_platform.helm.rbd_provisioner:RbdProvisionerHelm
     003_ceph-pools-audit = k8sapp_platform.helm.ceph_pools_audit:CephPoolsAuditHelm
+    004_cephfs-provisioner = k8sapp_platform.helm.ceph_fs_provisioner:CephFSProvisionerHelm

 systemconfig.armada.manifest_ops =
     platform-integ-apps = k8sapp_platform.armada.manifest_platform:PlatformArmadaManifestOperator

+systemconfig.app_lifecycle =
+    platform-integ-apps = k8sapp_platform.lifecycle.lifecycle_platform:PlatformAppLifecycleOperator
+
 [wheel]
 universal = 1
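sysinv discovers these plugins through setuptools entry points; the numeric prefixes (001_, 002_, ...) suggest ordered loading. A minimal sketch of that discovery using pkg_resources (the loading mechanics are an assumption; sysinv's actual loader may differ):

    # Illustration only: enumerate and load the helm plugins for this app.
    import pkg_resources

    for ep in sorted(
            pkg_resources.iter_entry_points(
                'systemconfig.helm_plugins.platform_integ_apps'),
            key=lambda ep: ep.name):
        cls = ep.load()  # e.g. 004_cephfs-provisioner -> CephFSProvisionerHelm
        print(ep.name, cls.__name__)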
@@ -1,5 +1,5 @@
 [tox]
-envlist = flake8,py27,py36,pylint
+envlist = flake8,py27,py36,pylint,bandit
 minversion = 1.6
 # skipsdist = True
 #,pip-missing-reqs

@@ -21,7 +21,7 @@ sitepackages = True
 whitelist_externals = bash
                       find

-install_command = pip install \
+install_command = pip install --use-deprecated legacy-resolver \
    -v -v -v \
    -c{toxinidir}/upper-constraints.txt \
    -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/stable/stein/upper-constraints.txt} \

@@ -43,7 +43,6 @@ setenv = VIRTUAL_ENV={envdir}

 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
        -e{[tox]stxdir}/config/controllerconfig/controllerconfig
        -e{[tox]stxdir}/config/sysinv/sysinv/sysinv
        -e{[tox]stxdir}/config/tsconfig/tsconfig
        -e{[tox]stxdir}/fault/fm-api
@@ -4,6 +4,8 @@ COPY_LIST_TO_TAR="\
 $STX_BASE/helm-charts/node-feature-discovery/node-feature-discovery/helm-charts \
 "

+OPT_DEP_LIST="$STX_BASE/platform-armada-app/python-k8sapp-platform"
+
 # Bump the version to be one less than what the version was prior to decoupling
 # as this will align the GITREVCOUNT value to increment the version by one.
 # Remove this (i.e. reset to 0) on the next major version changes when
@@ -47,6 +47,7 @@ helm repo add local http://localhost:8879/charts
 cd helm-charts
 make rbd-provisioner
 make ceph-pools-audit
+make cephfs-provisioner
 # TODO (rchurch): remove
 make node-feature-discovery
 cd -

@@ -73,7 +74,7 @@ sed -i 's/@HELM_REPO@/%{helm_repo}/g' %{app_staging}/metadata.yaml

 # Copy the plugins: installed in the buildroot
 mkdir -p %{app_staging}/plugins
-cp /plugins/*.whl %{app_staging}/plugins
+cp /plugins/%{app_name}/*.whl %{app_staging}/plugins

 # package it up
 find . -type f ! -name '*.md5' -print0 | xargs -0 md5sum > checksum.md5
@@ -1,3 +1,15 @@
 app_name: @APP_NAME@
 app_version: @APP_VERSION@
 helm_repo: @HELM_REPO@
+behavior:
+  platform_managed_app: yes
+  desired_state: applied
+  evaluate_reapply:
+    triggers:
+      - type: runtime-apply-puppet  # TODO(dvoicule): optimize triggers
+      - type: host-availability-updated
+        filters:
+          - availability: services-enabled
+      - type: host-delete
+        filters:
+          - personality: controller
@@ -0,0 +1,30 @@
{{/*
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- if .Values.manifests.configmap_ceph_conf }}
{{- $envAll := . }}
{{- $ceph := $envAll.Values.conf.ceph }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $envAll.Values.ceph_client.configmap }}
  namespace: {{ $envAll.Release.Namespace }}
  labels:
    app: ceph-pools-audit
  annotations:
    "helm.sh/hook": "pre-upgrade, pre-install"
    "helm.sh/hook-delete-policy": "before-hook-creation"
data:
  ceph.conf: |
    [global]
    auth_supported = none
    {{ $monitors := $ceph.monitors }}{{ range $index, $element := $monitors}}
    [mon.{{- $index }}]
    mon_addr = {{ $element }}
    {{- end }}
{{- end }}
@@ -1,6 +1,6 @@
 {{/*
 #
-# Copyright (c) 2019 Wind River Systems, Inc.
+# Copyright (c) 2020 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -31,6 +31,7 @@ spec:
   successfulJobsHistoryLimit: {{ .Values.jobs.job_ceph_pools_audit.history.success }}
   failedJobsHistoryLimit: {{ .Values.jobs.job_ceph_pools_audit.history.failed }}
   concurrencyPolicy: Forbid
+  startingDeadlineSeconds: {{ .Values.jobs.job_ceph_pools_audit.startingDeadlineSeconds }}
   jobTemplate:
     metadata:
       name: "{{$envAll.Release.Name}}"
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2019 Wind River Systems, Inc.
+# Copyright (c) 2019-2021 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -13,7 +13,7 @@ labels:

 name: ceph-pools-audit
 ceph_client:
-  configmap: ceph-etc
+  configmap: ceph-etc-pools-audit

 conf:
   ceph:
@@ -52,6 +52,7 @@ dependencies:
 jobs:
   job_ceph_pools_audit:
     cron: "*/5 * * * *"
+    startingDeadlineSeconds: 200
     history:
       success: 3
       failed: 1
@@ -67,3 +68,4 @@ affinity: {}
 manifests:
   job_ceph_pools_audit: true
   configmap_bin: true
+  configmap_ceph_conf: true
@@ -0,0 +1,11 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

apiVersion: v1
appVersion: "1.0"
description: CephFS provisioner for Kubernetes
name: cephfs-provisioner
version: 0.1.0
@@ -0,0 +1,9 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
dependencies:
  - name: helm-toolkit
    repository: http://localhost:8879/charts
    version: 0.1.0
@@ -0,0 +1,86 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

#! /bin/bash
set -x

{{ $classes := .Values.classes}}

touch /etc/ceph/ceph.client.admin.keyring

# Check if ceph is accessible
echo "===================================="
ceph -s
if [ $? -ne 0 ]; then
    echo "Error: Ceph cluster is not accessible, check Pod logs for details."
    exit 1
fi

set -ex
KEYRING=$(ceph auth get-or-create client.${USER_ID} mon "allow r" osd "allow rwx pool=${DATA_POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
# Set up pool key in Ceph format
CEPH_USER_KEYRING=/etc/ceph/ceph.client.${USER_ID}.keyring
echo $KEYRING >$CEPH_USER_KEYRING
set +ex

if [ -n "${CEPH_USER_SECRET}" ]; then
    kubectl get secret -n ${NAMESPACE} ${CEPH_USER_SECRET} 2>/dev/null
    if [ $? -ne 0 ]; then
        echo "Create ${CEPH_USER_SECRET} secret"
        kubectl create secret generic -n ${NAMESPACE} ${CEPH_USER_SECRET} --type="kubernetes.io/cephfs" --from-literal=key=$KEYRING
        if [ $? -ne 0 ]; then
            echo "Error creating secret ${CEPH_USER_SECRET} in ${NAMESPACE}, exit"
            exit 1
        fi
    else
        echo "Secret ${CEPH_USER_SECRET} already exists"
    fi

    # Support creating namespaces and Ceph user secrets for additional
    # namespaces other than the one in which the provisioner is installed. This
    # allows the provisioner to set up and provide PVs for multiple
    # applications across many namespaces.
    if [ -n "${ADDITIONAL_NAMESPACES}" ]; then
        for ns in $(
            IFS=,
            echo ${ADDITIONAL_NAMESPACES}
        ); do
            kubectl get namespace $ns 2>/dev/null
            if [ $? -ne 0 ]; then
                kubectl create namespace $ns
                if [ $? -ne 0 ]; then
                    echo "Error creating namespace $ns, exit"
                    continue
                fi
            fi

            kubectl get secret -n $ns ${CEPH_USER_SECRET} 2>/dev/null
            if [ $? -ne 0 ]; then
                echo "Creating secret ${CEPH_USER_SECRET} for namespace $ns"
                kubectl create secret generic -n $ns ${CEPH_USER_SECRET} --type="kubernetes.io/cephfs" --from-literal=key=$KEYRING
                if [ $? -ne 0 ]; then
                    echo "Error creating secret ${CEPH_USER_SECRET} in $ns, exit"
                fi
            else
                echo "Secret ${CEPH_USER_SECRET} for namespace $ns already exists"
            fi
        done
    fi
fi

ceph osd pool stats ${DATA_POOL_NAME} || ceph osd pool create ${DATA_POOL_NAME} ${CHUNK_SIZE}
ceph osd pool application enable ${DATA_POOL_NAME} cephfs
ceph osd pool set ${DATA_POOL_NAME} size ${POOL_REPLICATION}
ceph osd pool set ${DATA_POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}

ceph osd pool stats ${METADATA_POOL_NAME} || ceph osd pool create ${METADATA_POOL_NAME} ${CHUNK_SIZE}
ceph osd pool application enable ${METADATA_POOL_NAME} cephfs
ceph osd pool set ${METADATA_POOL_NAME} size ${POOL_REPLICATION}
ceph osd pool set ${METADATA_POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}

ceph fs ls | grep ${FS_NAME} || ceph fs new ${FS_NAME} ${METADATA_POOL_NAME} ${DATA_POOL_NAME}

ceph -s
@@ -0,0 +1,19 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

[global]
# For version 0.55 and beyond, you must explicitly enable
# or disable authentication with "auth" entries in [global].
auth_cluster_required = none
auth_service_required = none
auth_client_required = none

{{ $defaults := .Values.classdefaults}}

{{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
[mon.{{- $index }}]
mon_addr = {{ $element }}
{{- end }}
@@ -0,0 +1,98 @@
{{/*
#
# Copyright (c) 2020-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- $defaults := .Values.classdefaults }}
{{- $cephfs_provisioner_storage_init := .Values.images.tags.cephfs_provisioner_storage_init }}

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-config-file
  namespace: {{ $defaults.cephFSNamespace }}
  annotations:
    "helm.sh/hook": "pre-upgrade, pre-install"
    "helm.sh/hook-delete-policy": "before-hook-creation"
data:
  ceph.conf: |
{{ tuple "conf/_ceph-conf.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: cephfs-storage-init
  namespace: {{ $defaults.cephFSNamespace }}
data:
  storage-init.sh: |
{{ tuple "bin/_storage_init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

---
apiVersion: batch/v1
kind: Job
metadata:
  name: cephfs-storage-init
  namespace: {{ $defaults.cephFSNamespace }}
  annotations:
    "helm.sh/hook": "post-install, pre-upgrade, pre-rollback"
    "helm.sh/hook-delete-policy": "before-hook-creation"
spec:
  backoffLimit: 5
  template:
    spec:
      serviceAccountName: {{ $defaults.provisionerConfigName }}
      volumes:
        - name: cephfs-storage-init
          configMap:
            name: cephfs-storage-init
            defaultMode: 0555
        - name: ceph-config
          configMap:
            name: ceph-config-file
            defaultMode: 0555
      containers:
        {{- range $classConfig := .Values.classes }}
        - name: storage-init-{{- $classConfig.name }}
          image: {{ $cephfs_provisioner_storage_init | quote }}
          command: ["/bin/bash", "/tmp/storage-init.sh"]
          env:
            - name: NAMESPACE
              value: {{ $defaults.cephFSNamespace }}
            - name: ADDITIONAL_NAMESPACES
              value: {{ include "helm-toolkit.utils.joinListWithComma" $classConfig.additionalNamespaces | quote }}
            - name: CEPH_USER_SECRET
              value: {{ $defaults.adminSecretName }}
            - name: USER_ID
              value: {{ $classConfig.userId }}
            - name: DATA_POOL_NAME
              value: {{ $classConfig.data_pool_name }}
            - name: METADATA_POOL_NAME
              value: {{ $classConfig.metadata_pool_name }}
            - name: FS_NAME
              value: {{ $classConfig.fs_name }}
            - name: CHUNK_SIZE
              value: {{ $classConfig.chunk_size | quote }}
            - name: POOL_REPLICATION
              value: {{ $classConfig.replication | quote }}
            - name: POOL_CRUSH_RULE_NAME
              value: {{ $classConfig.crush_rule_name | quote }}
          volumeMounts:
            - name: cephfs-storage-init
              mountPath: /tmp/storage-init.sh
              subPath: storage-init.sh
              readOnly: true
            - name: ceph-config
              mountPath: /etc/ceph/ceph.conf
              subPath: ceph.conf
              readOnly: true
        {{- end }}
      restartPolicy: OnFailure
      {{- if .Values.global.nodeSelector }}
      nodeSelector:
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
      {{- end }}
@@ -0,0 +1,64 @@
{{/*
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- $defaults := .Values.classdefaults }}
{{- $cephfs_provisioner_image := .Values.images.tags.cephfs_provisioner }}

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ $defaults.provisionerConfigName }}
  namespace: {{ $defaults.cephFSNamespace }}
spec:
  replicas: {{ .Values.global.replicas }}
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: {{ $defaults.provisionerConfigName }}
  template:
    metadata:
      labels:
        app: {{ $defaults.provisionerConfigName }}
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - {{ .Values.global.name }}
              topologyKey: kubernetes.io/hostname
      containers:
        - name: {{ $defaults.provisionerConfigName }}
          image: {{ $cephfs_provisioner_image | quote }}
          env:
            - name: PROVISIONER_NAME
              value: {{ $defaults.provisionerName }}
            - name: PROVISIONER_SECRET_NAMESPACE
              value: {{ $defaults.cephFSNamespace }}
          command:
            - "/usr/local/bin/{{ $defaults.provisionerConfigName }}"
          args:
            - "-id={{ $defaults.provisionerConfigName }}-1"
      serviceAccount: {{ $defaults.provisionerConfigName }}
      {{- if .Values.global.nodeSelector }}
      nodeSelector:
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
      {{- end }}
      {{- if .Values.global.tolerations }}
      tolerations:
{{ .Values.global.tolerations | toYaml | trim | indent 8 }}
      {{- end}}
      {{- if .Values.global.resources }}
      resources:
{{ .Values.global.resources | toYaml | trim | indent 8 }}
      {{- end }}
@@ -0,0 +1,93 @@
{{/*
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- $defaults := .Values.classdefaults }}

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ $defaults.provisionerConfigName }}
  namespace: {{ $defaults.cephFSNamespace }}
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns","coredns"]
    verbs: ["list", "get"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "create", "list", "update"]
---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ $defaults.provisionerConfigName }}
subjects:
  - kind: ServiceAccount
    name: {{ $defaults.provisionerConfigName }}
    namespace: {{ $defaults.cephFSNamespace }}
roleRef:
  kind: ClusterRole
  name: {{ $defaults.provisionerConfigName }}
  apiGroup: rbac.authorization.k8s.io
---

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ $defaults.provisionerConfigName }}
  namespace: {{ $defaults.cephFSNamespace }}
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "create", "list", "update"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ $defaults.provisionerConfigName }}
  namespace: {{ $defaults.cephFSNamespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ $defaults.provisionerConfigName }}
subjects:
  - kind: ServiceAccount
    name: {{ $defaults.provisionerConfigName }}
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ $defaults.provisionerConfigName }}
  namespace: {{ $defaults.cephFSNamespace }}
imagePullSecrets:
  - name: default-registry-key
@@ -0,0 +1,30 @@
{{/*
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{ $defaults := .Values.classdefaults }}
{{ $provisioner := .Values.global.provisioner_name }}
{{ $defaultSC := .Values.global.defaultStorageClass }}
{{- range $classConfig := .Values.classes }}

kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
{{- if eq $defaultSC $classConfig.name}}
  annotations:
    "storageclass.kubernetes.io/is-default-class": "true"
{{- end }}
  name: {{ $classConfig.name }}
provisioner: {{ $provisioner }}
parameters:
  monitors: "{{ $monitors := or $classConfig.monitors $defaults.monitors }}{{ join "," $monitors}}"
  adminId: {{ or $classConfig.adminId $defaults.adminId }}
  adminSecretName: {{ or $classConfig.adminSecretName $defaults.adminSecretName }}
  adminSecretNamespace: {{ or $classConfig.adminSecretNamespace $defaults.adminSecretNamespace }}
  claimRoot: {{ $classConfig.claim_root }}
---
{{- end }}
@@ -0,0 +1,121 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

#
# Global options.
# Defaults should be fine in most cases.
global:
  #
  # Defines the application name of the provisioner.
  #
  name: "cephfs-provisioner"
  #
  # Defines the name of the provisioner associated with a set of storage classes
  #
  provisioner_name: "ceph.com/cephfs"
  #
  # Enable this storage class as the system default storage class
  #
  defaultStorageClass: fast-cephfs-disabled
  #
  # If configured, tolerations will add a toleration field to the Pod.
  #
  # Node tolerations for cephfs-provisioner scheduling to nodes with taints.
  # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  # Example:
  #  [
  #    {
  #      "key": "node-role.kubernetes.io/master",
  #      "operator": "Exists"
  #    }
  #  ]
  #
  tolerations: []
  # If configured, resources will set the requests/limits field to the Pod.
  # Ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
  # Example:
  #  {
  #    "limits": {
  #      "memory": "200Mi"
  #    },
  #    "requests": {
  #      "cpu": "100m",
  #      "memory": "200Mi"
  #    }
  #  }
  resources: {}
  #
  # Number of replicas to start when configured as deployment
  #
  replicas: 1
  #
  # Node Selector
  #
  nodeSelector: { node-role.kubernetes.io/master: "" }

#
# Configure storage classes.
# Defaults for storage classes. Update this if you have a single Ceph storage cluster.
# No need to add them to each class.
#
classdefaults:
  # Ceph admin account
  adminId: admin
  # K8 secret name for the admin context
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: kube-system
  cephFSNamespace: kube-system
  # Define ip addresses of Ceph Monitors
  monitors:
    - 192.168.204.2:6789
  provisionerConfigName: cephfs-provisioner
  provisionerName: ceph.com/cephfs

# Configure storage classes.
# This section should be tailored to your setup. It allows you to define multiple storage
# classes for the same cluster (e.g. if you have tiers of drives with different speeds).
# If you have multiple Ceph clusters take attributes from classdefaults and add them here.
classes:
  - name: fast-cephfs  # Name of storage class.
    # Ceph pools name
    data_pool_name: kube-cephfs-data
    metadata_pool_name: kube-cephfs-metadata
    # CephFS name
    fs_name: kube-cephfs
    # Ceph user name to access this pool
    userId: ceph-pool-kube-cephfs-data
    # K8 secret name with key for accessing the Ceph pool
    userSecretName: ceph-pool-kube-cephfs-data
    # Pool replication
    replication: 1
    # Pool crush rule name
    crush_rule_name: storage_tier_ruleset
    # Pool chunk size / PG_NUM
    chunk_size: 64
    # Additional namespaces to allow storage class access (other than where
    # installed)
    claim_root: "/pvc-volumes"
    additionalNamespaces:
      - default
      - kube-public

# Defines:
# - Provisioner's image name including container registry.
# - CEPH helper image
#
images:
  tags:
    cephfs_provisioner: quay.io/external_storage/cephfs-provisioner:v2.1.0-k8s1.11
    cephfs_provisioner_storage_init: docker.io/starlingx/ceph-config-helper:v1.15.0
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
manifests:
  configmap_bin: true
@@ -1,6 +1,6 @@
 {{/*
 #
-# Copyright (c) 2018 Wind River Systems, Inc.
+# Copyright (c) 2018-2021 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -14,9 +14,11 @@
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  creationTimestamp: 2016-02-18T19:14:38Z
   name: config-{{- $root.Values.global.name }}
   namespace: {{ $root.Release.Namespace }}
+  annotations:
+    "helm.sh/hook": "pre-upgrade, pre-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
 data:
   ceph.conf: |
{{ $monitors := $defaults.monitors }}{{ range $index, $element := $monitors}}
@@ -202,6 +204,9 @@ kind: ConfigMap
 metadata:
   name: ceph-etc
   namespace: {{ $root.Release.Namespace }}
+  annotations:
+    "helm.sh/hook": "pre-upgrade, pre-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
 data:
   ceph.conf: |
     [global]
@@ -45,6 +45,35 @@ data:
       - helm-toolkit
 ---
+schema: armada/Chart/v1
+metadata:
+  schema: metadata/Document/v1
+  name: kube-system-cephfs-provisioner
+data:
+  chart_name: cephfs-provisioner
+  release: cephfs-provisioner
+  namespace: kube-system
+  wait:
+    timeout: 1800
+    labels:
+      app: cephfs-provisioner
+  install:
+    no_hooks: false
+  upgrade:
+    no_hooks: false
+    pre:
+      delete:
+        - type: job
+          labels:
+            app: cephfs-provisioner
+  source:
+    type: tar
+    location: http://172.17.0.1:8080/helm_charts/stx-platform/cephfs-provisioner-0.1.0.tgz
+    subpath: cephfs-provisioner
+    reference: master
+  dependencies:
+    - helm-toolkit
+---
 schema: armada/Chart/v1
 metadata:
   schema: metadata/Document/v1
   name: kube-system-ceph-pools-audit

@@ -83,6 +112,7 @@ data:
     chart_group:
       - kube-system-rbd-provisioner
       - kube-system-ceph-pools-audit
+      - kube-system-cephfs-provisioner
 ---
 schema: armada/Manifest/v1
 metadata:
 tox.ini | 12
@@ -33,3 +33,15 @@ commands =
 [testenv:linters]
 commands =
     {[testenv:bashate]commands}
+
+[testenv:flake8]
+basepython = python3
+description = Dummy environment to allow flake8 to be run in subdir tox
+
+[testenv:pylint]
+basepython = python3
+description = Dummy environment to allow pylint to be run in subdir tox
+
+[testenv:bandit]
+basepython = python3
+description = Dummy environment to allow bandit to be run in subdir tox