Remove the Ceph related charts from the stx-openstack application
This will remove the rbd-provisioner and ceph-pools-audit charts from the
stx-openstack application and enable it to use the default platform storage
provisioner. Changes include:

- Update the rbd-provisioner and ceph-pools-audit helm plugins to provide
  overrides for the namespace defined by HELM_NS_STORAGE_PROVISIONER
  (currently: kube-system).
- Update the cinder, glance, gnocchi, and nova helm plugins to use the
  existing ceph-pool-kube-rbd secret for Ceph client access. This allows
  removing the pvc-ceph-client-key generation from the rbd-provisioner chart.
- Add functions to kube_app.py to create/delete the required Ceph user secret
  for all namespaces of a supported application. This provides support for
  PVCs within the application's namespace(s). In the case of stx-openstack,
  this covers any claims made from the 'openstack' namespace.
- Add functions to kube_app.py to support creating and deleting app-specific
  resources that are not handled by the application charts. Using this
  enables copying the 'ceph-etc' configmap from the provisioner namespace to
  the openstack namespace for application use.
- Add support through the kubernetes API to copy a secret from one namespace
  to another.
- Add support through the kubernetes API to get, create, delete, and copy
  configmaps.
- Remove the rbd-provisioner and ceph-pools-audit stevedore plugins from the
  stx-openstack application. Also, re-number the plugins.
- Update the RBD provisioner to support creating namespaces and Ceph user
  secrets for namespaces other than the one in which the provisioner is
  installed. Also, enable PVCs for default namespaces (default and
  kube-public) against the 'general' storageclass. An illustration follows
  the commit metadata below.

Change-Id: I387e315545d2c99a1b6baa90d30bdb2a4e08f315
Depends-On: I67dba3f1a3a6e7c8169719ee622ddd533c69be31
Story: 2005424
Task: 30679
Signed-off-by: Robert Church <robert.church@windriver.com>
parent 0fe4655be5
commit a8b23796fe
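To illustrate the last bullet, a minimal sketch (not part of this change; the
claim name is hypothetical) of a PVC in the 'default' namespace binding
against the platform 'general' storage class via the kubernetes Python client:

    from kubernetes import client, config

    config.load_kube_config()
    core_v1 = client.CoreV1Api()

    # 'demo-claim' is a hypothetical name; 'general' is the storage class
    # served by the platform rbd-provisioner.
    claim = client.V1PersistentVolumeClaim(
        metadata=client.V1ObjectMeta(name="demo-claim"),
        spec=client.V1PersistentVolumeClaimSpec(
            access_modes=["ReadWriteOnce"],
            storage_class_name="general",
            resources=client.V1ResourceRequirements(
                requests={"storage": "1Gi"}),
        ),
    )
    core_v1.create_namespaced_persistent_volume_claim("default", claim)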
@@ -1,3 +1,3 @@
 SRC_DIR="stx-openstack-helm"
-COPY_LIST_TO_TAR="$PKG_BASE/../../../helm-charts/rbd-provisioner $PKG_BASE/../../../helm-charts/garbd $PKG_BASE/../../../helm-charts/ceph-pools-audit"
-TIS_PATCH_VER=12
+COPY_LIST_TO_TAR="$PKG_BASE/../../../helm-charts/garbd"
+TIS_PATCH_VER=13
@@ -57,9 +57,7 @@ helm repo add local http://localhost:8879/charts

 # Make the charts. These produce a tgz file
 make nova-api-proxy
-make rbd-provisioner
 make garbd
-make ceph-pools-audit
 make keystone-api-proxy

 # terminate helm server (the last backgrounded task)
@@ -106,64 +106,6 @@ data:
     - helm-toolkit
----
-schema: armada/Chart/v1
-metadata:
-  schema: metadata/Document/v1
-  name: openstack-rbd-provisioner
-data:
-  chart_name: rbd-provisioner
-  release: openstack-rbd-provisioner
-  namespace: openstack
-  wait:
-    timeout: 1800
-    labels:
-      app: rbd-provisioner
-  install:
-    no_hooks: false
-  upgrade:
-    no_hooks: false
-    pre:
-      delete:
-        - type: job
-          labels:
-            app: rbd-provisioner
-  source:
-    type: tar
-    location: http://172.17.0.1/helm_charts/starlingx/rbd-provisioner-0.1.0.tgz
-    subpath: rbd-provisioner
-    reference: master
-  dependencies:
-    - helm-toolkit
----
-schema: armada/Chart/v1
-metadata:
-  schema: metadata/Document/v1
-  name: openstack-ceph-pools-audit
-data:
-  chart_name: ceph-pools-audit
-  release: openstack-ceph-pools-audit
-  namespace: openstack
-  wait:
-    timeout: 1800
-    labels:
-      app: rbd-provisioner
-  install:
-    no_hooks: false
-  upgrade:
-    no_hooks: false
-    pre:
-      delete:
-        - type: job
-          labels:
-            app: osh-openstack-ceph-pools-audit
-  source:
-    type: tar
-    location: http://172.17.0.1/helm_charts/starlingx/ceph-pools-audit-0.1.0.tgz
-    subpath: ceph-pools-audit
-    reference: master
-  dependencies:
-    - helm-toolkit
 ---
 schema: armada/Chart/v1
 metadata:
   schema: metadata/Document/v1
   name: openstack-mariadb
@@ -713,10 +655,10 @@ data:
   namespace: openstack
   # If we deploy ovs-dpdk on the host, ovs pod will not be created.
   # commenting out the wait block until a solution can be implemented
-  #wait:
-  #  timeout: 1800
-  #  labels:
-  #    release_group: osh-openstack-openvswitch
+  # wait:
+  #   timeout: 1800
+  #   labels:
+  #     release_group: osh-openstack-openvswitch
   install:
     no_hooks: false
   upgrade:
@@ -976,7 +918,7 @@ data:
         metrics:
           required: false
         workarounds:
-          enable_numa_live_migration: True
+          enable_numa_live_migration: true
         hypervisor:
           address_search_enabled: false
         network:
@@ -2986,17 +2928,6 @@ data:
     - openstack-ingress
----
-schema: armada/ChartGroup/v1
-metadata:
-  schema: metadata/Document/v1
-  name: provisioner
-data:
-  description: "Provisioner"
-  sequenced: true
-  chart_group:
-    - openstack-rbd-provisioner
-    - openstack-ceph-pools-audit
 ---
 schema: armada/ChartGroup/v1
 metadata:
   schema: metadata/Document/v1
   name: openstack-mariadb
@@ -3133,7 +3064,6 @@ data:
   chart_groups:
     - kube-system-ingress
     - openstack-ingress
-    - provisioner
     - openstack-mariadb
     - openstack-memcached
     - openstack-rabbitmq
@@ -0,0 +1,9 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+dependencies:
+  - name: helm-toolkit
+    repository: http://localhost:8879/charts
+    version: 0.1.0
@@ -34,4 +34,7 @@ rules:
   - apiGroups: [""]
     resources: ["secrets"]
     verbs: ["get", "create", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "create", "list", "update"]
 {{- end}}
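The added rule lets the provisioner's service account manage namespaces. A
minimal sketch (not part of this change) for verifying the expanded
permissions from inside the pod with the kubernetes Python client:

    from kubernetes import client, config

    config.load_incluster_config()  # running inside the provisioner pod
    authz = client.AuthorizationV1Api()

    review = client.V1SelfSubjectAccessReview(
        spec=client.V1SelfSubjectAccessReviewSpec(
            resource_attributes=client.V1ResourceAttributes(
                verb="create", resource="namespaces")))
    result = authz.create_self_subject_access_review(review)
    print("can create namespaces:", result.status.allowed)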
@@ -30,19 +30,15 @@ data:
     # Copy from read only mount to Ceph config folder
     cp {{ $mount -}}/ceph.conf /etc/ceph/

-    if [ ! -z $CEPH_ADMIN_SECRET ]; then
-      kubectl get secret -n $NAMESPACE | grep $CEPH_ADMIN_SECRET
-      ret=$?
-      if [ $ret -ne 0 ]; then
-        msg="Create $CEPH_ADMIN_SECRET secret"
-        echo "$msg"
-        kubectl create secret generic $CEPH_ADMIN_SECRET --type="kubernetes.io/rbd" --from-literal=key= --namespace=$NAMESPACE
-        ret=$?
-        if [ $ret -ne 0 ]; then
-          msg="Error creating secret $CEPH_ADMIN_SECRET, exit"
-          echo "$msg"
-          exit $ret
-        fi
+    if [ -n "${CEPH_ADMIN_SECRET}" ]; then
+      kubectl get secret -n ${NAMESPACE} | grep ${CEPH_ADMIN_SECRET}
+      if [ $? -ne 0 ]; then
+        echo "Create ${CEPH_ADMIN_SECRET} secret"
+        kubectl create secret generic ${CEPH_ADMIN_SECRET} --type="kubernetes.io/rbd" --from-literal=key= --namespace=${NAMESPACE}
+        if [ $? -ne 0 ]; then
+          echo "Error creating secret ${CEPH_ADMIN_SECRET}, exit"
+          exit 1
+        fi
       fi
     fi

@@ -51,49 +47,80 @@ data:
     # Check if ceph is accessible
     echo "===================================="
     ceph -s
-    ret=$?
-    if [ $ret -ne 0 ]; then
-      msg="Error: Ceph cluster is not accessible, check Pod logs for details."
-      echo "$msg"
-      exit $ret
+    if [ $? -ne 0 ]; then
+      echo "Error: Ceph cluster is not accessible, check Pod logs for details."
+      exit 1
     fi

     set -ex

     # Make sure the pool exists.
-    ceph osd pool stats $POOL_NAME || ceph osd pool create $POOL_NAME $POOL_CHUNK_SIZE
+    ceph osd pool stats ${POOL_NAME} || ceph osd pool create ${POOL_NAME} ${POOL_CHUNK_SIZE}
     # Set pool configuration.
     ceph osd pool application enable $POOL_NAME rbd
-    ceph osd pool set $POOL_NAME size $POOL_REPLICATION
-    ceph osd pool set $POOL_NAME crush_rule $POOL_CRUSH_RULE_NAME
+    ceph osd pool set ${POOL_NAME} size ${POOL_REPLICATION}
+    ceph osd pool set ${POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}
     set +ex

-    if [[ -z $USER_ID && -z $CEPH_USER_SECRET ]]; then
-      msg="No need to create secrets for pool $POOL_NAME"
-      echo "$msg"
+    if [[ -z "${USER_ID}" && -z "${CEPH_USER_SECRET}" ]]; then
+      echo "No need to create secrets for pool ${POOL_NAME}"
       exit 0
     fi

-    KEYRING=$(ceph auth get-or-create client.$USER_ID mon "allow r" osd "allow rwx pool=${POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
+    set -ex
+    KEYRING=$(ceph auth get-or-create client.${USER_ID} mon "allow r" osd "allow rwx pool=${POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
     # Set up pool key in Ceph format
-    CEPH_USER_KEYRING=/etc/ceph/ceph.client.$USER_ID.keyring
+    CEPH_USER_KEYRING=/etc/ceph/ceph.client.${USER_ID}.keyring
     echo $KEYRING > $CEPH_USER_KEYRING
-
-    kubectl create secret generic $CEPH_USER_SECRET --type="kubernetes.io/rbd" --from-literal=key=$KEYRING --namespace=$NAMESPACE
-
     set +ex

+    if [ -n "${CEPH_USER_SECRET}" ]; then
+      kubectl get secret -n ${NAMESPACE} ${CEPH_USER_SECRET} 2>/dev/null
+      if [ $? -ne 0 ]; then
+        echo "Create ${CEPH_USER_SECRET} secret"
+        kubectl create secret generic -n ${NAMESPACE} ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
+        if [ $? -ne 0 ]; then
+          echo "Error creating secret ${CEPH_USER_SECRET} in ${NAMESPACE}, exit"
+          exit 1
+        fi
+      fi
+
+      # Support creating namespaces and Ceph user secrets for additional
+      # namespaces other than that which the provisioner is installed. This
+      # allows the provisioner to set up and provide PVs for multiple
+      # applications across many namespaces.
+      if [ -n "${ADDITIONAL_NAMESPACES}" ]; then
+        for ns in $(IFS=,; echo ${ADDITIONAL_NAMESPACES}); do
+          kubectl get namespace $ns 2>/dev/null
+          if [ $? -ne 0 ]; then
+            kubectl create namespace $ns
+            if [ $? -ne 0 ]; then
+              echo "Error creating namespace $ns, exit"
+              continue
+            fi
+          fi
+
+          kubectl get secret -n $ns ${CEPH_USER_SECRET} 2>/dev/null
+          if [ $? -ne 0 ]; then
+            echo "Creating secret ${CEPH_USER_SECRET} for namespace $ns"
+            kubectl create secret generic -n $ns ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
+            if [ $? -ne 0 ]; then
+              echo "Error creating secret ${CEPH_USER_SECRET} in $ns, exit"
+            fi
+          else
+            echo "Secret ${CEPH_USER_SECRET} for namespace $ns already exists"
+          fi
+        done
+      fi
+    fi

     # Check if pool is accessible using provided credentials
     echo "====================================="
-    rbd -p $POOL_NAME --user $USER_ID ls -K $CEPH_USER_KEYRING
-    ret=$?
-    if [ $ret -ne 0 ]; then
-      msg="Error: Ceph pool $POOL_NAME is not accessible using \
-    credentials for user $USER_ID, check Pod logs for details."
-      echo "$msg"
-      exit $ret
+    rbd -p ${POOL_NAME} --user ${USER_ID} ls -K $CEPH_USER_KEYRING
+    if [ $? -ne 0 ]; then
+      echo "Error: Ceph pool ${POOL_NAME} is not accessible using credentials for user ${USER_ID}, check Pod logs for details."
+      exit 1
     else
-      msg="Pool $POOL_NAME accessible"
-      echo "$msg"
+      echo "Pool ${POOL_NAME} accessible"
     fi

     ceph -s
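For reference, a rough Python equivalent of the new namespace/secret bootstrap
loop (illustrative only; the chart itself performs this with kubectl as shown
above, and the keyring value here is a placeholder):

    from kubernetes import client, config
    from kubernetes.client.rest import ApiException

    def ensure_namespace_secret(core_v1, ns, secret_name, keyring):
        # Create the namespace if it does not already exist.
        try:
            core_v1.read_namespace(ns)
        except ApiException as e:
            if e.status != 404:
                raise
            core_v1.create_namespace(
                client.V1Namespace(metadata=client.V1ObjectMeta(name=ns)))
        # Create the Ceph user secret if it does not already exist.
        try:
            core_v1.read_namespaced_secret(secret_name, ns)
        except ApiException as e:
            if e.status != 404:
                raise
            core_v1.create_namespaced_secret(ns, client.V1Secret(
                metadata=client.V1ObjectMeta(name=secret_name),
                type="kubernetes.io/rbd",
                string_data={"key": keyring}))

    config.load_incluster_config()
    v1 = client.CoreV1Api()
    for ns in "default,kube-public".split(","):  # ADDITIONAL_NAMESPACES
        ensure_namespace_secret(v1, ns, "ceph-pool-kube-rbd", "<keyring>")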
@@ -102,7 +129,7 @@ data:
 apiVersion: batch/v1
 kind: Job
 metadata:
-  name: rbd-provisioner-storage-init
+  name: storage-init-{{- $root.Values.global.name }}
   namespace: {{ $root.Release.Namespace }}
   labels:
     heritage: {{$root.Release.Service | quote }}
@@ -134,6 +161,8 @@ spec:
           env:
             - name: NAMESPACE
               value: {{ $root.Release.Namespace }}
+            - name: ADDITIONAL_NAMESPACES
+              value: {{ include "helm-toolkit.utils.joinListWithComma" $classConfig.additionalNamespaces | quote }}
            - name: CEPH_ADMIN_SECRET
              value: {{ $defaults.adminSecretName }}
            - name: CEPH_USER_SECRET
@@ -168,13 +197,4 @@ data:
     [mon.{{- $index }}]
     mon_addr = {{ $element }}
 {{- end }}
----
-# Create the pvc-ceph-client-key. We need this here as we're not launching
-# Ceph using the Helm chart.
-apiVersion: v1
-kind: Secret
-type: kubernetes.io/rbd
-metadata:
-  name: pvc-ceph-client-key
-  namespace: {{ $root.Release.Namespace }}
 {{- end }}
@@ -16,4 +16,7 @@ rules:
   - apiGroups: [""]
     resources: ["secrets"]
     verbs: ["get", "create", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "create", "list", "update"]
 {{- end}}
@@ -141,6 +141,11 @@ classes:
     crush_rule_name: storage_tier_ruleset
     # Pool chunk size / PG_NUM
     chunk_size: 8
+    # Additional namespaces to allow storage class access (other than where
+    # installed)
+    additionalNamespaces:
+      - default
+      - kube-public
     # Configuration data for the ephemeral pool(s)
     ephemeral_pools:
     - chunk_size: 8
@@ -5,4 +5,4 @@ $PKG_BASE/../../../helm-charts/node-feature-discovery \
 $PKG_BASE/../../../helm-charts/rbd-provisioner \
 $PKG_BASE/../../../helm-charts/ceph-pools-audit"

-TIS_PATCH_VER=3
+TIS_PATCH_VER=4
@@ -36,15 +36,6 @@ data:
           - type: job
             labels:
               app: rbd-provisioner
-    values:
-      global:
-        # TODO (rchurch): Remove after enabling the stx-openstack application to
-        # use the default system provisioner.
-        provisioner_name: "ceph.com/rbd-platform"
-      rbac:
-        clusterRole: stx-rbd-provisioner
-        clusterRoleBinding: stx-rbd-provisioner
-        serviceAccount: stx-rbd-provisioner
     source:
       type: tar
       location: http://172.17.0.1:8080/helm_charts/stx-platform/rbd-provisioner-0.1.0.tgz
@@ -1,2 +1,2 @@
 SRC_DIR="sysinv"
-TIS_PATCH_VER=316
+TIS_PATCH_VER=317
@@ -84,29 +84,27 @@ systemconfig.helm_plugins.platform_integ_apps =

 systemconfig.helm_plugins.stx_openstack =
     001_ingress = sysinv.helm.ingress:IngressHelm
-    002_rbd-provisioner = sysinv.helm.rbd_provisioner:RbdProvisionerHelm
-    003_ceph-pools-audit = sysinv.helm.ceph_pools_audit:CephPoolsAuditHelm
-    004_mariadb = sysinv.helm.mariadb:MariadbHelm
-    005_garbd = sysinv.helm.garbd:GarbdHelm
-    006_rabbitmq = sysinv.helm.rabbitmq:RabbitmqHelm
-    007_memcached = sysinv.helm.memcached:MemcachedHelm
-    008_keystone = sysinv.helm.keystone:KeystoneHelm
-    009_heat = sysinv.helm.heat:HeatHelm
-    010_horizon = sysinv.helm.horizon:HorizonHelm
-    011_glance = sysinv.helm.glance:GlanceHelm
-    012_openvswitch = sysinv.helm.openvswitch:OpenvswitchHelm
-    013_libvirt = sysinv.helm.libvirt:LibvirtHelm
-    014_neutron = sysinv.helm.neutron:NeutronHelm
-    015_nova = sysinv.helm.nova:NovaHelm
-    016_nova-api-proxy = sysinv.helm.nova_api_proxy:NovaApiProxyHelm
-    017_cinder = sysinv.helm.cinder:CinderHelm
-    018_gnocchi = sysinv.helm.gnocchi:GnocchiHelm
-    019_ceilometer = sysinv.helm.ceilometer:CeilometerHelm
-    020_panko = sysinv.helm.panko:PankoHelm
-    021_aodh = sysinv.helm.aodh:AodhHelm
-    022_helm-toolkit = sysinv.helm.helm_toolkit:HelmToolkitHelm
-    023_barbican = sysinv.helm.barbican:BarbicanHelm
-    024_keystone-api-proxy = sysinv.helm.keystone_api_proxy:KeystoneApiProxyHelm
+    002_mariadb = sysinv.helm.mariadb:MariadbHelm
+    003_garbd = sysinv.helm.garbd:GarbdHelm
+    004_rabbitmq = sysinv.helm.rabbitmq:RabbitmqHelm
+    005_memcached = sysinv.helm.memcached:MemcachedHelm
+    006_keystone = sysinv.helm.keystone:KeystoneHelm
+    007_heat = sysinv.helm.heat:HeatHelm
+    008_horizon = sysinv.helm.horizon:HorizonHelm
+    009_glance = sysinv.helm.glance:GlanceHelm
+    010_openvswitch = sysinv.helm.openvswitch:OpenvswitchHelm
+    011_libvirt = sysinv.helm.libvirt:LibvirtHelm
+    012_neutron = sysinv.helm.neutron:NeutronHelm
+    013_nova = sysinv.helm.nova:NovaHelm
+    014_nova-api-proxy = sysinv.helm.nova_api_proxy:NovaApiProxyHelm
+    015_cinder = sysinv.helm.cinder:CinderHelm
+    016_gnocchi = sysinv.helm.gnocchi:GnocchiHelm
+    017_ceilometer = sysinv.helm.ceilometer:CeilometerHelm
+    018_panko = sysinv.helm.panko:PankoHelm
+    019_aodh = sysinv.helm.aodh:AodhHelm
+    020_helm-toolkit = sysinv.helm.helm_toolkit:HelmToolkitHelm
+    021_barbican = sysinv.helm.barbican:BarbicanHelm
+    022_keystone-api-proxy = sysinv.helm.keystone_api_proxy:KeystoneApiProxyHelm

 sysinv.agent.lldp.drivers =
     lldpd = sysinv.agent.lldp.drivers.lldpd.driver:SysinvLldpdAgentDriver
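The renumbered entries are standard stevedore entry points; a sketch of
enumerating them in registration order (assuming stevedore is installed and
the package's entry points are registered):

    from stevedore import extension

    mgr = extension.ExtensionManager(
        namespace='systemconfig.helm_plugins.stx_openstack',
        invoke_on_load=False)
    # The numeric prefixes keep the sorted order deterministic.
    for ext in sorted(mgr.extensions, key=lambda e: e.name):
        print(ext.name, '->', ext.plugin)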
@@ -127,6 +127,17 @@ class KubeOperator(object):
                       "%s" % (body['metadata']['name'], namespace, e))
             raise

+    def kube_copy_secret(self, name, src_namespace, dst_namespace):
+        c = self._get_kubernetesclient()
+        try:
+            body = c.read_namespaced_secret(name, src_namespace, export=True)
+            body.metadata.namespace = dst_namespace
+            c.create_namespaced_secret(dst_namespace, body)
+        except Exception as e:
+            LOG.error("Failed to copy Secret %s from Namespace %s to Namespace "
+                      "%s: %s" % (name, src_namespace, dst_namespace, e))
+            raise
+
     def kube_delete_persistent_volume_claim(self, namespace, **kwargs):
         c = self._get_kubernetesclient()
         try:
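Hypothetical usage from conductor code (assuming an existing KubeOperator
instance named kube), mirroring how kube_app.py replicates the provisioner's
Ceph user secret further down:

    # Copy the provisioner-managed secret into an application namespace so
    # that PVCs made there can authenticate against Ceph.
    kube.kube_copy_secret(
        'ceph-pool-kube-rbd',  # secret maintained by the platform provisioner
        'kube-system',         # HELM_NS_STORAGE_PROVISIONER
        'openstack')           # application namespace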
@@ -177,3 +188,60 @@ class KubeOperator(object):
         except Exception as e:
             LOG.error("Kubernetes exception in kube_delete_namespace: %s" % e)
             raise
+
+    def kube_get_config_map(self, name, namespace):
+        c = self._get_kubernetesclient()
+        try:
+            c.read_namespaced_config_map(name, namespace)
+            return True
+        except ApiException as e:
+            if e.status == httplib.NOT_FOUND:
+                return False
+            else:
+                LOG.error("Failed to get ConfigMap %s under "
+                          "Namespace %s: %s" % (name, namespace, e.body))
+                raise
+        except Exception as e:
+            LOG.error("Kubernetes exception in kube_get_config_map: %s" % e)
+            raise
+
+    def kube_create_config_map(self, namespace, body):
+        c = self._get_kubernetesclient()
+        try:
+            c.create_namespaced_config_map(namespace, body)
+        except Exception as e:
+            LOG.error("Failed to create ConfigMap %s under Namespace %s: "
+                      "%s" % (body['metadata']['name'], namespace, e))
+            raise
+
+    def kube_copy_config_map(self, name, src_namespace, dst_namespace):
+        c = self._get_kubernetesclient()
+        try:
+            body = c.read_namespaced_config_map(name, src_namespace, export=True)
+            body.metadata.namespace = dst_namespace
+            c.create_namespaced_config_map(dst_namespace, body)
+        except Exception as e:
+            LOG.error("Failed to copy ConfigMap %s from Namespace %s to Namespace "
+                      "%s: %s" % (name, src_namespace, dst_namespace, e))
+            raise
+
+    def kube_delete_config_map(self, name, namespace, **kwargs):
+        body = {}
+
+        if kwargs:
+            body.update(kwargs)
+
+        c = self._get_kubernetesclient()
+        try:
+            c.delete_namespaced_config_map(name, namespace, body)
+        except ApiException as e:
+            if e.status == httplib.NOT_FOUND:
+                LOG.warn("ConfigMap %s under Namespace %s "
+                         "not found." % (name, namespace))
+            else:
+                LOG.error("Failed to clean up ConfigMap %s under "
+                          "Namespace %s: %s" % (name, namespace, e.body))
+                raise
+        except Exception as e:
+            LOG.error("Kubernetes exception in kube_delete_config_map: %s" % e)
+            raise
|
|||
from sysinv.common import exception
|
||||
from sysinv.common import kubernetes
|
||||
from sysinv.common import utils as cutils
|
||||
from sysinv.common.storage_backend_conf import K8RbdProvisioner
|
||||
from sysinv.helm import common
|
||||
from sysinv.helm import helm
|
||||
|
||||
|
@@ -121,6 +122,8 @@ Chart = namedtuple('Chart', 'name namespace location')
 class AppOperator(object):
     """Class to encapsulate Kubernetes App operations for System Inventory"""

+    APP_OPENSTACK_RESOURCE_CONFIG_MAP = 'ceph-etc'
+
     def __init__(self, dbapi):
         self._dbapi = dbapi
         self._docker = DockerHelper(self._dbapi)
@@ -663,6 +666,73 @@ class AppOperator(object):
         if null_labels:
             self._update_kubernetes_labels(host.hostname, null_labels)

+    def _create_storage_provisioner_secrets(self, app_name):
+        """ Provide access to the system persistent storage provisioner.
+
+        The rbd-provisioner is installed as part of system provisioning and
+        has created secrets for all common default namespaces. Copy the
+        secret to this application's namespace(s) to provide resolution for
+        PVCs.
+
+        :param app_name: Name of the application
+        """
+
+        # Only set up a secret for the default storage pool (i.e. ignore
+        # additional storage tiers)
+        pool_secret = K8RbdProvisioner.get_user_secret_name({
+            'name': constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]})
+        app_ns = self._helm.get_helm_application_namespaces(app_name)
+        namespaces = \
+            list(set([ns for ns_list in app_ns.values() for ns in ns_list]))
+        for ns in namespaces:
+            if (ns in [common.HELM_NS_HELM_TOOLKIT,
+                       common.HELM_NS_STORAGE_PROVISIONER] or
+                    self._kube.kube_get_secret(pool_secret, ns)):
+                # Secret already exists
+                continue
+
+            try:
+                if not self._kube.kube_get_namespace(ns):
+                    self._kube.kube_create_namespace(ns)
+                self._kube.kube_copy_secret(
+                    pool_secret, common.HELM_NS_STORAGE_PROVISIONER, ns)
+            except Exception as e:
+                LOG.error(e)
+                raise
+
+    def _delete_storage_provisioner_secrets(self, app_name):
+        """ Remove access to the system persistent storage provisioner.
+
+        As part of launching a supported application, secrets were created to
+        allow access to the provisioner from the application namespaces. This
+        will remove those created secrets.
+
+        :param app_name: Name of the application
+        """
+
+        # Only set up a secret for the default storage pool (i.e. ignore
+        # additional storage tiers)
+        pool_secret = K8RbdProvisioner.get_user_secret_name({
+            'name': constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]})
+        app_ns = self._helm.get_helm_application_namespaces(app_name)
+        namespaces = \
+            list(set([ns for ns_list in app_ns.values() for ns in ns_list]))
+
+        for ns in namespaces:
+            if (ns == common.HELM_NS_HELM_TOOLKIT or
+                    ns == common.HELM_NS_STORAGE_PROVISIONER):
+                continue
+
+            try:
+                LOG.info("Deleting Secret %s under Namespace "
+                         "%s ..." % (pool_secret, ns))
+                self._kube.kube_delete_secret(
+                    pool_secret, ns, grace_period_seconds=0)
+                LOG.info("Secret %s under Namespace %s delete "
+                         "completed." % (pool_secret, ns))
+            except Exception as e:
+                LOG.error(e)
+                raise
+
     def _create_local_registry_secrets(self, app_name):
         # Temporary function to create default registry secret
         # which would be used by kubernetes to pull images from
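A small illustration of the namespace flattening above (mapping values are
hypothetical): get_helm_application_namespaces() returns a per-chart mapping,
which is flattened and de-duplicated before the secrets are copied:

    # Hypothetical per-chart namespace mapping for stx-openstack.
    app_ns = {
        'ingress': ['kube-system', 'openstack'],
        'mariadb': ['openstack'],
    }
    namespaces = list(set(ns for ns_list in app_ns.values() for ns in ns_list))
    # namespaces -> ['kube-system', 'openstack'] (set order not guaranteed)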
@@ -925,6 +995,63 @@ class AppOperator(object):
             monitor.kill()
             return rc

+    def _create_app_specific_resources(self, app_name):
+        """Add application specific k8s resources.
+
+        Some applications may need resources created outside of the existing
+        charts to properly integrate with the current capabilities of the
+        system. Create these resources here.
+
+        :param app_name: Name of the application.
+        """
+
+        if app_name == constants.HELM_APP_OPENSTACK:
+            try:
+                # Copy the latest configmap with the ceph monitor information
+                # required by the application into the application namespace
+                if self._kube.kube_get_config_map(
+                        self.APP_OPENSTACK_RESOURCE_CONFIG_MAP,
+                        common.HELM_NS_OPENSTACK):
+                    # Already have one. Delete it, in case it changed
+                    self._kube.kube_delete_config_map(
+                        self.APP_OPENSTACK_RESOURCE_CONFIG_MAP,
+                        common.HELM_NS_OPENSTACK)
+
+                # Copy the latest config map
+                self._kube.kube_copy_config_map(
+                    self.APP_OPENSTACK_RESOURCE_CONFIG_MAP,
+                    common.HELM_NS_STORAGE_PROVISIONER,
+                    common.HELM_NS_OPENSTACK)
+            except Exception as e:
+                LOG.error(e)
+                raise
+
+    def _delete_app_specific_resources(self, app_name):
+        """Remove application specific k8s resources.
+
+        Some applications may need resources created outside of the existing
+        charts to properly integrate with the current capabilities of the
+        system. Remove these resources here.
+
+        :param app_name: Name of the application.
+        """
+
+        if app_name == constants.HELM_APP_OPENSTACK:
+            self._delete_persistent_volume_claim(common.HELM_NS_OPENSTACK)
+
+            try:
+                # Remove the configmap with the ceph monitor information
+                # required by the application from the application namespace
+                self._kube.kube_delete_config_map(
+                    self.APP_OPENSTACK_RESOURCE_CONFIG_MAP,
+                    common.HELM_NS_OPENSTACK)
+            except Exception as e:
+                LOG.error(e)
+                raise
+
+            self._delete_namespace(common.HELM_NS_OPENSTACK)
+
     def perform_app_upload(self, rpc_app, tarfile):
         """Process application upload request
@@ -1020,6 +1147,8 @@ class AppOperator(object):
             app.charts = self._get_list_of_charts(app.armada_mfile_abs)
             if app.system_app:
                 self._create_local_registry_secrets(app.name)
+                self._create_storage_provisioner_secrets(app.name)
+                self._create_app_specific_resources(app.name)
             self._update_app_status(
                 app, new_progress=constants.APP_PROGRESS_GENERATE_OVERRIDES)
             LOG.info("Generating application overrides...")
@@ -1086,11 +1215,8 @@ class AppOperator(object):

         try:
             self._delete_local_registry_secrets(app.name)
-            # TODO (rchurch): Clean up needs to be conditional based on
-            # the application. For now only clean up the stx-openstack.
-            if app.name == constants.HELM_APP_OPENSTACK:
-                self._delete_persistent_volume_claim(common.HELM_NS_OPENSTACK)
-                self._delete_namespace(common.HELM_NS_OPENSTACK)
+            self._delete_storage_provisioner_secrets(app.name)
+            self._delete_app_specific_resources(app.name)
         except Exception as e:
             self._abort_operation(app, constants.APP_REMOVE_OP)
             LOG.exception(e)
@@ -19,13 +19,10 @@ class CephPoolsAuditHelm(base.BaseHelm):

     CHART = constants.HELM_CHART_CEPH_POOLS_AUDIT
     SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \
-        [common.HELM_NS_OPENSTACK,
-         common.HELM_NS_KUBE_SYSTEM]
+        [common.HELM_NS_STORAGE_PROVISIONER]
     SUPPORTED_APP_NAMESPACES = {
-        constants.HELM_APP_OPENSTACK:
-            base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_OPENSTACK],
         constants.HELM_APP_PLATFORM:
-            base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_KUBE_SYSTEM],
+            base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_STORAGE_PROVISIONER],
     }

     SERVICE_NAME = 'ceph-pools'
@@ -69,18 +66,7 @@ class CephPoolsAuditHelm(base.BaseHelm):
             tiers_cfg.append(tier_cfg)

         overrides = {
-            # TODO (rchurch): Support running in both namespaces for the near
-            # term. A future commit will remove this from the stx-openstack
-            # application
-            common.HELM_NS_OPENSTACK: {
-                'conf': {
-                    'ceph': {
-                        'monitors': monitors,
-                        'storage_tiers': tiers_cfg
-                    }
-                }
-            },
-            common.HELM_NS_KUBE_SYSTEM: {
+            common.HELM_NS_STORAGE_PROVISIONER: {
                 'conf': {
                     'ceph': {
                         'monitors': monitors,
@@ -41,6 +41,7 @@ class CinderHelm(openstack.OpenstackBaseHelm):
                     'backends': self._get_conf_backends_overrides(),
                 },
                 'endpoints': self._get_endpoints_overrides(),
+                'ceph_client': self._get_ceph_client_overrides()
             }
         }

@@ -29,6 +29,9 @@ HELM_NS_NFS = 'nfs'
 HELM_NS_OPENSTACK = 'openstack'
 HELM_NS_HELM_TOOLKIT = 'helm-toolkit'

+# Namespaces: for system functions
+HELM_NS_STORAGE_PROVISIONER = HELM_NS_KUBE_SYSTEM
+
 # Services
 # Matches configassistant.py value => Should change to STARLINGX
 SERVICE_ADMIN = 'CGCS'
@@ -35,7 +35,8 @@ class GlanceHelm(openstack.OpenstackBaseHelm):
                 'endpoints': self._get_endpoints_overrides(),
                 'storage': self._get_storage_overrides(),
                 'conf': self._get_conf_overrides(),
-                'bootstrap': self._get_bootstrap_overrides()
+                'bootstrap': self._get_bootstrap_overrides(),
+                'ceph_client': self._get_ceph_client_overrides(),
             }
         }

@@ -26,6 +26,7 @@ class GnocchiHelm(openstack.OpenstackBaseHelm):
             common.HELM_NS_OPENSTACK: {
                 'pod': self._get_pod_overrides(),
                 'endpoints': self._get_endpoints_overrides(),
+                'ceph_client': self._get_ceph_client_overrides(),
             }
         }

@@ -40,7 +40,6 @@ class KeystoneApiProxyHelm(openstack.OpenstackBaseHelm):
             'chart_groups': [
                 'kube-system-ingress',
                 'openstack-ingress',
-                'provisioner',
                 'openstack-mariadb',
                 'openstack-memcached',
                 'openstack-rabbitmq',
@@ -106,7 +106,8 @@ class NovaHelm(openstack.OpenstackBaseHelm):
                     'sshd': {
                         'from_subnet': self._get_ssh_subnet(),
                     }
-                }
+                },
+                'ceph_client': self._get_ceph_client_overrides(),
             }
         }

@@ -16,6 +16,7 @@ from oslo_log import log
 from oslo_serialization import jsonutils
 from sysinv.common import constants
 from sysinv.common import exception
+from sysinv.common.storage_backend_conf import K8RbdProvisioner
 from sqlalchemy.orm.exc import NoResultFound

 LOG = log.getLogger(__name__)
@@ -425,3 +426,12 @@ class OpenstackBaseHelm(base.BaseHelm):
             ]
         }
         return uefi_config
+
+    def _get_ceph_client_overrides(self):
+        # A secret is required by the chart for ceph client access. Use the
+        # secret for the kube-rbd pool associated with the primary ceph tier.
+        return {
+            'user_secret_name':
+                K8RbdProvisioner.get_user_secret_name({
+                    'name': constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]})
+        }
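For the default Ceph backend this resolves to the secret name the provisioner
already maintains, so each chart's override reduces to roughly (name per the
commit message; a sketch):

    # What the cinder/glance/gnocchi/nova plugins now inject per chart.
    ceph_client_overrides = {'user_secret_name': 'ceph-pool-kube-rbd'}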
@@ -4,8 +4,6 @@
 # SPDX-License-Identifier: Apache-2.0
 #

-import copy
-
 from sysinv.common import constants
 from sysinv.common import exception
 from sysinv.common.storage_backend_conf import K8RbdProvisioner
@@ -22,13 +20,10 @@ class RbdProvisionerHelm(base.BaseHelm):

     CHART = constants.HELM_CHART_RBD_PROVISIONER
     SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \
-        [common.HELM_NS_OPENSTACK,
-         common.HELM_NS_KUBE_SYSTEM]
+        [common.HELM_NS_STORAGE_PROVISIONER]
     SUPPORTED_APP_NAMESPACES = {
-        constants.HELM_APP_OPENSTACK:
-            base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_OPENSTACK],
         constants.HELM_APP_PLATFORM:
-            base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_KUBE_SYSTEM],
+            base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_STORAGE_PROVISIONER],
     }

     SERVICE_NAME = 'rbd-provisioner'
@@ -64,48 +59,28 @@ class RbdProvisionerHelm(base.BaseHelm):
                 "-ruleset").replace('-', '_')

             cls = {
-                "name": K8RbdProvisioner.get_storage_class_name(bk),
-                "pool_name": K8RbdProvisioner.get_pool(bk),
-                "replication": int(bk.capabilities.get("replication")),
-                "crush_rule_name": rule_name,
-                "chunk_size": 64,
-                "userId": K8RbdProvisioner.get_user_id(bk),
-                "userSecretName": K8RbdProvisioner.get_user_secret_name(bk)
-            }
+                "name": K8RbdProvisioner.get_storage_class_name(bk),
+                "pool_name": K8RbdProvisioner.get_pool(bk),
+                "replication": int(bk.capabilities.get("replication")),
+                "crush_rule_name": rule_name,
+                "chunk_size": 64,
+                "userId": K8RbdProvisioner.get_user_id(bk),
+                "userSecretName": K8RbdProvisioner.get_user_secret_name(bk),
+                "additionalNamespaces": ['default', 'kube-public'],
+            }
             classes.append(cls)

         global_settings = {
             "replicas": self._num_controllers()
         }

-        overrides = {}
-        # TODO(rchurch): Multiple rbd-provsioners can be run in the k8s cluster.
-        # This will be the case for the near term until an update is provided to
-        # the stx-openstack application to support using the default system
-        # provisioner which will be installed in the kube-system namespace.
-        overrides.update({
-            common.HELM_NS_OPENSTACK: {
-                "classdefaults": copy.deepcopy(classdefaults),
-                "classes": copy.deepcopy(classes),
-                "global": global_settings
-            }
-        })
-
-        # TODO(rchurch): For the near term ensure, provisioner isolation
-        classdefaults["adminId"] += '-platform'
-        classdefaults["adminSecretName"] += '-platform'
-        for c in classes:
-            c["name"] += '-platform'
-            c["pool_name"] += '-platform'
-            c["userId"] += '-platform'
-            c["userSecretName"] += '-platform'
-        overrides.update({
-            common.HELM_NS_KUBE_SYSTEM: {
+        overrides = {
+            common.HELM_NS_STORAGE_PROVISIONER: {
                 "classdefaults": classdefaults,
                 "classes": classes,
                 "global": global_settings
-            }
-        })
+            }
+        }

         if namespace in self.SUPPORTED_NAMESPACES:
             return overrides[namespace]
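After this change only the provisioner namespace override remains; its shape
is roughly the following (values marked illustrative are assumptions; the
rest follow names used elsewhere in this change):

    example_overrides = {
        'kube-system': {  # common.HELM_NS_STORAGE_PROVISIONER
            'classdefaults': {
                'adminId': 'admin',               # illustrative
                'adminSecretName': 'ceph-admin',  # illustrative
            },
            'classes': [{
                'name': 'general',
                'pool_name': 'kube-rbd',          # illustrative
                'replication': 2,                 # illustrative
                'crush_rule_name': 'storage_tier_ruleset',
                'chunk_size': 64,
                'userId': 'ceph-pool-kube-rbd',   # illustrative
                'userSecretName': 'ceph-pool-kube-rbd',
                'additionalNamespaces': ['default', 'kube-public'],
            }],
            'global': {'replicas': 2},            # illustrative
        },
    }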