Align Rook-Ceph with bare metal Ceph
Update rook-ceph to use upstream code and its FluxCD manifests to align
with bare metal Ceph. Changes include:

- Create a migration-rook-ceph-helm package to align to the upstream
  helm charts
- Delete previously copied upstream helm charts from stx-rook-ceph
- Rename stx-rook-ceph to stx-migration-rook-ceph-helm following
  existing application patterns
- Add new python plugin package support with
  python3-k8sapp-migration-rook-ceph

Test Plan:
PASS - Run all tox tests locally
PASS - Build all application packages
PASS - Deploy on AIO-SX validating that ceph cluster is operational
       - Followed: https://wiki.openstack.org/wiki/StarlingX/Containers/Applications/app-rook-ceph#Testing

Change-Id: I99e0d3a61c6169e5aae7091dd0202350d4c3e3c9
Story: 2011055
Task: 49625
Co-Authored-By: Robert Church <robert.church@windriver.com>
Signed-off-by: Caio Correa <caio.correa@windriver.com>
commit 88fa7920ef (parent aa7b233198)
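The Test Plan above includes deploying on AIO-SX and validating that the Ceph cluster is operational. A minimal sketch of that check is shown below; it assumes kubectl access to the deployment and that the upstream rook-ceph-tools toolbox deployment is present (the toolbox name is an assumption, not something defined by this change).

# Sketch only: confirm the Rook-Ceph cluster reports HEALTH_OK after the app
# is applied. Assumes kubectl is configured and a rook-ceph-tools deployment
# exists in the rook-ceph namespace (an assumption, not part of this change).
import json
import subprocess

def ceph_health(namespace="rook-ceph"):
    # Run `ceph status` inside the toolbox pod and read the health field.
    out = subprocess.run(
        ["kubectl", "-n", namespace, "exec", "deploy/rook-ceph-tools", "--",
         "ceph", "status", "--format", "json"],
        check=True, capture_output=True, text=True).stdout
    return json.loads(out)["health"]["status"]

if __name__ == "__main__":
    print(ceph_health())  # expect "HEALTH_OK" on a healthy AIO-SX deployment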
.zuul.yaml (30 lines changed)
@ -26,7 +26,7 @@
|
||||
name: k8sapp-rook-ceph-tox-py39
|
||||
parent: openstack-tox-py39
|
||||
description: |
|
||||
Run py39 test for k8sapp_rook_ceph
|
||||
Run py39 test for k8sapp_migration_rook_ceph
|
||||
nodeset: debian-bullseye
|
||||
required-projects:
|
||||
- starlingx/config
|
||||
@ -35,18 +35,18 @@
|
||||
- starlingx/utilities
|
||||
- starlingx/root
|
||||
files:
|
||||
- python3-k8sapp-rook/*
|
||||
-python3-k8sapp-migration-rook-ceph/*
|
||||
vars:
|
||||
python_version: 3.9
|
||||
tox_envlist: py39
|
||||
tox_extra_args: -c python3-k8sapp-rook/k8sapp_rook/tox.ini
|
||||
tox_extra_args: -c python3-k8sapp-migration-rook-ceph/k8sapp_migration_rook_ceph/tox.ini
|
||||
tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/starlingx/root/build-tools/requirements/debian/upper-constraints.txt'
|
||||
|
||||
- job:
|
||||
name: k8sapp-rook-ceph-tox-flake8
|
||||
parent: tox
|
||||
description: |
|
||||
Run flake8 test for k8sapp_rook_ceph
|
||||
Run flake8 test for k8sapp_migration_rook_ceph
|
||||
nodeset: debian-bullseye
|
||||
required-projects:
|
||||
- starlingx/config
|
||||
@ -55,17 +55,17 @@
|
||||
- starlingx/utilities
|
||||
- starlingx/root
|
||||
files:
|
||||
- python3-k8sapp-rook/*
|
||||
-python3-k8sapp-migration-rook-ceph/*
|
||||
vars:
|
||||
tox_envlist: flake8
|
||||
tox_extra_args: -c python3-k8sapp-rook/k8sapp_rook/tox.ini
|
||||
tox_extra_args: -c python3-k8sapp-migration-rook-ceph/k8sapp_migration_rook_ceph/tox.ini
|
||||
tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/starlingx/root/build-tools/requirements/debian/upper-constraints.txt'
|
||||
|
||||
- job:
|
||||
name: k8sapp-rook-ceph-tox-pylint
|
||||
parent: tox
|
||||
description: |
|
||||
Run pylint test for k8sapp_rook_ceph
|
||||
Run pylint test for k8sapp_migration_rook_ceph
|
||||
nodeset: debian-bullseye
|
||||
required-projects:
|
||||
- starlingx/config
|
||||
@ -74,17 +74,17 @@
|
||||
- starlingx/utilities
|
||||
- starlingx/root
|
||||
files:
|
||||
- python3-k8sapp-rook/*
|
||||
-python3-k8sapp-migration-rook-ceph/*
|
||||
vars:
|
||||
tox_envlist: pylint
|
||||
tox_extra_args: -c python3-k8sapp-rook/k8sapp_rook/tox.ini
|
||||
tox_extra_args: -c python3-k8sapp-migration-rook-ceph/k8sapp_migration_rook_ceph/tox.ini
|
||||
tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/starlingx/root/build-tools/requirements/debian/upper-constraints.txt'
|
||||
|
||||
- job:
|
||||
name: k8sapp-rook-ceph-tox-metadata
|
||||
parent: tox
|
||||
description: |
|
||||
Run metadata test for k8sapp_rook_ceph
|
||||
Run metadata test for k8sapp_migration_rook_ceph
|
||||
nodeset: debian-bullseye
|
||||
required-projects:
|
||||
- starlingx/config
|
||||
@ -93,17 +93,17 @@
|
||||
- starlingx/utilities
|
||||
- starlingx/root
|
||||
files:
|
||||
- python3-k8sapp-rook/*
|
||||
-python3-k8sapp-migration-rook-ceph/*
|
||||
vars:
|
||||
tox_envlist: metadata
|
||||
tox_extra_args: -c python3-k8sapp-rook/k8sapp_rook/tox.ini
|
||||
tox_extra_args: -c python3-k8sapp-migration-rook-ceph/k8sapp_migration_rook_ceph/tox.ini
|
||||
tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/starlingx/root/build-tools/requirements/debian/upper-constraints.txt'
|
||||
|
||||
- job:
|
||||
name: k8sapp-rook-ceph-tox-bandit
|
||||
parent: tox
|
||||
description: |
|
||||
Run bandit test for k8sapp_rook_ceph
|
||||
Run bandit test for k8sapp_migration_rook_ceph
|
||||
nodeset: debian-bullseye
|
||||
required-projects:
|
||||
- starlingx/config
|
||||
@ -112,10 +112,10 @@
|
||||
- starlingx/utilities
|
||||
- starlingx/root
|
||||
files:
|
||||
- python3-k8sapp-rook/*
|
||||
-python3-k8sapp-migration-rook-ceph/*
|
||||
vars:
|
||||
tox_envlist: bandit
|
||||
tox_extra_args: -c python3-k8sapp-rook/k8sapp_rook/tox.ini
|
||||
tox_extra_args: -c python3-k8sapp-migration-rook-ceph/k8sapp_migration_rook_ceph/tox.ini
|
||||
tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/starlingx/root/build-tools/requirements/debian/upper-constraints.txt'
|
||||
|
||||
- job:
|
||||
|
@ -1,2 +1 @@
|
||||
#stx-rook-ceph
|
||||
#stx-rook-ceph (not being added to the iso)
|
||||
#stx-migration-rook-ceph-helm
|
@ -1,2 +1,3 @@
|
||||
python3-k8sapp-rook
|
||||
stx-rook-ceph
|
||||
migration-rook-ceph-helm
|
||||
python3-k8sapp-migration-rook-ceph
|
||||
stx-migration-rook-ceph-helm
|
||||
|
migration-rook-ceph-helm/debian/deb_folder/changelog (new file, 5 lines)
@ -0,0 +1,5 @@
|
||||
migration-rook-ceph-helm (1.7-11) unstable; urgency=medium
|
||||
|
||||
* Initial release.
|
||||
|
||||
-- Caio Correa <caio.correa@windriver.com> Wed, 11 Mar 2024 10:45:00 +0000
|
migration-rook-ceph-helm/debian/deb_folder/control (new file, 15 lines)
@ -0,0 +1,15 @@
|
||||
Source: migration-rook-ceph-helm
|
||||
Section: libs
|
||||
Priority: optional
|
||||
Maintainer: StarlingX Developers <starlingx-discuss@lists.starlingx.io>
|
||||
Build-Depends: debhelper-compat (= 13),
|
||||
helm
|
||||
Standards-Version: 4.5.1
|
||||
Homepage: https://www.starlingx.io
|
||||
|
||||
Package: migration-rook-ceph-helm
|
||||
Section: libs
|
||||
Architecture: any
|
||||
Depends: ${misc:Depends}
|
||||
Description: StarlingX Rook-Ceph Helm Charts
|
||||
This package contains helm charts for the Rook-Ceph application.
|
@ -1,9 +1,9 @@
|
||||
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
|
||||
Upstream-Name: python3-k8sapp-rook
|
||||
Upstream-Name: migration-rook-ceph-helm
|
||||
Source: https://opendev.org/starlingx/rook-ceph/
|
||||
|
||||
Files: *
|
||||
Copyright: (c) 2013-2021 Wind River Systems, Inc
|
||||
Copyright: (c) 2024 Wind River Systems, Inc
|
||||
License: Apache-2
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@ -23,7 +23,7 @@ License: Apache-2
|
||||
# If you want to use GPL v2 or later for the /debian/* files use
|
||||
# the following clauses, or change it to suit. Delete these two lines
|
||||
Files: debian/*
|
||||
Copyright: 2021 Wind River Systems, Inc
|
||||
Copyright: 2024 Wind River Systems, Inc
|
||||
License: Apache-2
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
@ -0,0 +1 @@
|
||||
usr/lib/helm/*
|
@ -0,0 +1,46 @@
|
||||
From 00715f72a3ad59f74889b98c2ad5ccb9b02bcd0c Mon Sep 17 00:00:00 2001
|
||||
From: jdeolive <Joao.DeOliveiraSilva@windriver.com>
|
||||
Date: Mon, 5 Feb 2024 14:48:56 -0300
|
||||
Subject: [PATCH] Removed cephobjectstore.yaml from templates folder.
|
||||
|
||||
This patch removes objectstore from the templates thus preventing its
|
||||
installation. It is unnecessary.
|
||||
|
||||
Signed-off-by: jdeolive <Joao.DeOliveiraSilva@windriver.com>
|
||||
---
|
||||
.../templates/cephobjectstore.yaml | 23 -------------------
|
||||
1 file changed, 23 deletions(-)
|
||||
delete mode 100644 cluster/charts/rook-ceph-cluster/templates/cephobjectstore.yaml
|
||||
|
||||
diff --git a/cluster/charts/rook-ceph-cluster/templates/cephobjectstore.yaml b/cluster/charts/rook-ceph-cluster/templates/cephobjectstore.yaml
|
||||
deleted file mode 100644
|
||||
index 21177f32b..000000000
|
||||
--- a/cluster/charts/rook-ceph-cluster/templates/cephobjectstore.yaml
|
||||
+++ /dev/null
|
||||
@@ -1,23 +0,0 @@
|
||||
-{{- $root := . -}}
|
||||
-{{- range $objectstore := .Values.cephObjectStores -}}
|
||||
----
|
||||
-apiVersion: ceph.rook.io/v1
|
||||
-kind: CephObjectStore
|
||||
-metadata:
|
||||
- name: {{ $objectstore.name }}
|
||||
-spec:
|
||||
-{{ toYaml $objectstore.spec | indent 2 }}
|
||||
----
|
||||
-{{- if default false $objectstore.storageClass.enabled }}
|
||||
-apiVersion: storage.k8s.io/v1
|
||||
-kind: StorageClass
|
||||
-metadata:
|
||||
- name: {{ $objectstore.storageClass.name }}
|
||||
-provisioner: {{ $root.Release.Namespace }}.ceph.rook.io/bucket
|
||||
-reclaimPolicy: {{ default "Delete" $objectstore.storageClass.reclaimPolicy }}
|
||||
-parameters:
|
||||
- objectStoreName: {{ $objectstore.name }}
|
||||
- objectStoreNamespace: {{ $root.Release.Namespace }}
|
||||
-{{ toYaml $objectstore.storageClass.parameters | indent 2 }}
|
||||
-{{ end }}
|
||||
-{{ end }}
|
||||
--
|
||||
2.25.1
|
||||
|
@ -0,0 +1,47 @@
|
||||
From 335b28d67df13d8d064fc8b654ccb7db2adf52ec Mon Sep 17 00:00:00 2001
|
||||
From: jdeolive <Joao.DeOliveiraSilva@windriver.com>
|
||||
Date: Wed, 7 Feb 2024 15:28:22 -0300
|
||||
Subject: [PATCH] Adding the correct version to rook_ceph and rook_ceph_cluster
|
||||
|
||||
Adds versioning to the charts. The upstream charts contain the spurious
|
||||
version number 0.0.1.
|
||||
|
||||
Signed-off-by: jdeolive <Joao.DeOliveiraSilva@windriver.com>
|
||||
---
|
||||
cluster/charts/rook-ceph-cluster/Chart.yaml | 4 ++--
|
||||
cluster/charts/rook-ceph/Chart.yaml | 4 ++--
|
||||
2 files changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/cluster/charts/rook-ceph-cluster/Chart.yaml b/cluster/charts/rook-ceph-cluster/Chart.yaml
|
||||
index b282613cd..8e622e0f2 100644
|
||||
--- a/cluster/charts/rook-ceph-cluster/Chart.yaml
|
||||
+++ b/cluster/charts/rook-ceph-cluster/Chart.yaml
|
||||
@@ -1,8 +1,8 @@
|
||||
apiVersion: v2
|
||||
description: Manages a single Ceph cluster namespace for Rook
|
||||
name: rook-ceph-cluster
|
||||
-version: 0.0.1
|
||||
-appVersion: 0.0.1
|
||||
+version: 1.7.11
|
||||
+appVersion: 1.7.11
|
||||
icon: https://rook.io/images/rook-logo.svg
|
||||
sources:
|
||||
- https://github.com/rook/rook
|
||||
diff --git a/cluster/charts/rook-ceph/Chart.yaml b/cluster/charts/rook-ceph/Chart.yaml
|
||||
index 6b19f642b..43059e601 100644
|
||||
--- a/cluster/charts/rook-ceph/Chart.yaml
|
||||
+++ b/cluster/charts/rook-ceph/Chart.yaml
|
||||
@@ -1,8 +1,8 @@
|
||||
apiVersion: v2
|
||||
description: File, Block, and Object Storage Services for your Cloud-Native Environment
|
||||
name: rook-ceph
|
||||
-version: 0.0.1
|
||||
-appVersion: 0.0.1
|
||||
+version: 1.7.11
|
||||
+appVersion: 1.7.11
|
||||
icon: https://rook.io/images/rook-logo.svg
|
||||
sources:
|
||||
- https://github.com/rook/rook
|
||||
--
|
||||
2.25.1
|
||||
|
@ -0,0 +1,29 @@
|
||||
From 12ce6fe529e690fc6e705a62d610679eb6b130f7 Mon Sep 17 00:00:00 2001
|
||||
From: jdeolive <Joao.DeOliveiraSilva@windriver.com>
|
||||
Date: Wed, 7 Feb 2024 13:45:11 -0300
|
||||
Subject: [PATCH] Removing unnecessary 0
|
||||
|
||||
Removes the zero from the cephfs data pool name to better match
|
||||
the current bare metal Ceph usage on Wind River systems.
|
||||
|
||||
Signed-off-by: jdeolive <Joao.DeOliveiraSilva@windriver.com>
|
||||
---
|
||||
cluster/charts/rook-ceph-cluster/templates/cephfilesystem.yaml | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/cluster/charts/rook-ceph-cluster/templates/cephfilesystem.yaml b/cluster/charts/rook-ceph-cluster/templates/cephfilesystem.yaml
|
||||
index d2dc1cd12..57d6c2f1a 100644
|
||||
--- a/cluster/charts/rook-ceph-cluster/templates/cephfilesystem.yaml
|
||||
+++ b/cluster/charts/rook-ceph-cluster/templates/cephfilesystem.yaml
|
||||
@@ -18,7 +18,7 @@ metadata:
|
||||
provisioner: {{ $root.Values.operatorNamespace }}.cephfs.csi.ceph.com
|
||||
parameters:
|
||||
fsName: {{ $filesystem.name }}
|
||||
- pool: {{ $filesystem.name }}-data0
|
||||
+ pool: {{ $filesystem.name }}-data
|
||||
clusterID: {{ $root.Release.Namespace }}
|
||||
{{ toYaml $filesystem.storageClass.parameters | indent 2 }}
|
||||
reclaimPolicy: {{ default "Delete" $filesystem.storageClass.reclaimPolicy }}
|
||||
--
|
||||
2.25.1
|
||||
|
@ -0,0 +1,134 @@
|
||||
From bc78e84d0814c8fbac48565946a77af980324bf2 Mon Sep 17 00:00:00 2001
|
||||
From: Caio Correa <caio.correa@windriver.com>
|
||||
Date: Tue, 5 Mar 2024 19:10:55 -0300
|
||||
Subject: [PATCH] Added chart for duplex preparation
|
||||
|
||||
This patch adds a pre-install hook that edits the endpoint for
|
||||
rook-ceph-mon. On a duplex this endpoint should be the floating IP
|
||||
to accomplish the roaming mon strategy.
|
||||
|
||||
Signed-off-by: Caio Correa <caio.correa@windriver.com>
|
||||
---
|
||||
.../pre-install-duplex-preparation.yaml | 82 +++++++++++++++++++
|
||||
cluster/charts/rook-ceph-cluster/values.yaml | 18 ++++
|
||||
2 files changed, 100 insertions(+)
|
||||
create mode 100644 cluster/charts/rook-ceph-cluster/templates/pre-install-duplex-preparation.yaml
|
||||
|
||||
diff --git a/cluster/charts/rook-ceph-cluster/templates/pre-install-duplex-preparation.yaml b/cluster/charts/rook-ceph-cluster/templates/pre-install-duplex-preparation.yaml
|
||||
new file mode 100644
|
||||
index 000000000..61e64c87b
|
||||
--- /dev/null
|
||||
+++ b/cluster/charts/rook-ceph-cluster/templates/pre-install-duplex-preparation.yaml
|
||||
@@ -0,0 +1,82 @@
|
||||
+{{/*
|
||||
+#
|
||||
+# Copyright (c) 2020 Intel Corporation, Inc.
|
||||
+#
|
||||
+# SPDX-License-Identifier: Apache-2.0
|
||||
+#
|
||||
+*/}}
|
||||
+
|
||||
+{{- if .Values.hook.duplexPreparation.enable }}
|
||||
+{{ $root := . }}
|
||||
+---
|
||||
+apiVersion: v1
|
||||
+kind: ConfigMap
|
||||
+metadata:
|
||||
+ name: config-rook-ceph-duplex-preparation
|
||||
+ namespace: {{ $root.Release.Namespace }}
|
||||
+ annotations:
|
||||
+ "helm.sh/hook": "pre-install"
|
||||
+ "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
|
||||
+data:
|
||||
+ rook_duplex_preparation.sh: |-
|
||||
+ #!/bin/bash
|
||||
+
|
||||
+ cat > endpoint.yaml << EOF
|
||||
+ apiVersion: v1
|
||||
+ kind: ConfigMap
|
||||
+ metadata:
|
||||
+ name: rook-ceph-mon-endpoints
|
||||
+ namespace: $NAMESPACE
|
||||
+ data:
|
||||
+ data: a=$FLOAT_IP:6789
|
||||
+ mapping: '{"node":{"a":{"Name":"$ACTIVE_CONTROLLER","Hostname":"$ACTIVE_CONTROLLER","Address":"$FLOAT_IP"}}}'
|
||||
+ maxMonId: "0"
|
||||
+ EOF
|
||||
+
|
||||
+ kubectl apply -f endpoint.yaml
|
||||
+
|
||||
+ rm -f endpoint.yaml
|
||||
+---
|
||||
+apiVersion: batch/v1
|
||||
+kind: Job
|
||||
+metadata:
|
||||
+ name: rook-ceph-duplex-preparation
|
||||
+ namespace: {{ $root.Release.Namespace }}
|
||||
+ labels:
|
||||
+ heritage: {{$root.Release.Service | quote }}
|
||||
+ release: {{$root.Release.Name | quote }}
|
||||
+ chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
|
||||
+ annotations:
|
||||
+ "helm.sh/hook": "pre-install"
|
||||
+ "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
|
||||
+spec:
|
||||
+ template:
|
||||
+ metadata:
|
||||
+ name: rook-ceph-duplex-preparation
|
||||
+ namespace: {{ $root.Release.Namespace }}
|
||||
+ labels:
|
||||
+ heritage: {{$root.Release.Service | quote }}
|
||||
+ release: {{$root.Release.Name | quote }}
|
||||
+ chart: "{{$root.Chart.Name}}-{{$root.Chart.Version}}"
|
||||
+ spec:
|
||||
+ serviceAccountName: rook-ceph-system
|
||||
+ restartPolicy: OnFailure
|
||||
+ volumes:
|
||||
+ - name: config-rook-ceph-duplex-preparation
|
||||
+ configMap:
|
||||
+ name: config-rook-ceph-duplex-preparation
|
||||
+ containers:
|
||||
+ - name: duplex-preparation
|
||||
+ image: {{ .Values.hook.image }}
|
||||
+ command: [ "/bin/bash", "/tmp/mount/rook_duplex_preparation.sh" ]
|
||||
+ env:
|
||||
+ - name: NAMESPACE
|
||||
+ value: {{ $root.Release.Namespace }}
|
||||
+ - name: ACTIVE_CONTROLLER
|
||||
+ value: {{ $root.Values.hook.duplexPreparation.activeController }}
|
||||
+ - name: FLOAT_IP
|
||||
+ value: {{ $root.Values.hook.duplexPreparation.floatIP }}
|
||||
+ volumeMounts:
|
||||
+ - name: config-rook-ceph-duplex-preparation
|
||||
+ mountPath: /tmp/mount
|
||||
+{{- end }}
|
||||
diff --git a/cluster/charts/rook-ceph-cluster/values.yaml b/cluster/charts/rook-ceph-cluster/values.yaml
|
||||
index ca29c52f7..9194cbac8 100644
|
||||
--- a/cluster/charts/rook-ceph-cluster/values.yaml
|
||||
+++ b/cluster/charts/rook-ceph-cluster/values.yaml
|
||||
@@ -424,3 +424,21 @@ cephObjectStores:
|
||||
parameters:
|
||||
# note: objectStoreNamespace and objectStoreName are configured by the chart
|
||||
region: us-east-1
|
||||
+
|
||||
+hook:
|
||||
+ image: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20220802
|
||||
+ duplexPreparation:
|
||||
+ enable: false
|
||||
+ activeController: controller-0
|
||||
+ floatIP: 192.188.204.1
|
||||
+ cleanup:
|
||||
+ enable: true
|
||||
+ cluster_cleanup: rook-ceph
|
||||
+ rbac:
|
||||
+ clusterRole: rook-ceph-cleanup
|
||||
+ clusterRoleBinding: rook-ceph-cleanup
|
||||
+ role: rook-ceph-cleanup
|
||||
+ roleBinding: rook-ceph-cleanup
|
||||
+ serviceAccount: rook-ceph-cleanup
|
||||
+ mon_hosts:
|
||||
+ - controller-0
|
||||
\ No newline at end of file
|
||||
--
|
||||
2.34.1
|
||||
|
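The duplex-preparation hook in the patch above seeds the rook-ceph-mon-endpoints ConfigMap with the management floating IP so the monitor can roam between controllers. A rough Python equivalent of what the bash hook does is sketched below for clarity; it assumes the kubernetes Python client and in-cluster credentials, and is illustrative only (the actual hook is the kubectl script shipped in the patch).

# Illustrative sketch of the duplex-preparation step: write the
# rook-ceph-mon-endpoints ConfigMap pointing mon "a" at the floating IP.
# Assumes the kubernetes Python client and in-cluster config.
from kubernetes import client, config
from kubernetes.client.rest import ApiException

def apply_mon_endpoints(namespace, active_controller, float_ip):
    config.load_incluster_config()
    body = client.V1ConfigMap(
        metadata=client.V1ObjectMeta(name="rook-ceph-mon-endpoints",
                                     namespace=namespace),
        data={
            "data": "a=%s:6789" % float_ip,
            "mapping": ('{"node":{"a":{"Name":"%s","Hostname":"%s",'
                        '"Address":"%s"}}}'
                        % (active_controller, active_controller, float_ip)),
            "maxMonId": "0",
        })
    api = client.CoreV1Api()
    try:
        api.create_namespaced_config_map(namespace, body)
    except ApiException as exc:
        if exc.status == 409:  # already exists -> replace, like `kubectl apply`
            api.replace_namespaced_config_map(
                "rook-ceph-mon-endpoints", namespace, body)
        else:
            raise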
@ -0,0 +1,4 @@
|
||||
0001-Removed-cephobjectstore.yaml-from-templates-folder.patch
|
||||
0002-Adding-the-correct-version-to-rook_ceph-and-rook_cep.patch
|
||||
0003-Removing-unnecessary-0.patch
|
||||
0004-Added-chart-for-duplex-preparation.patch
|
migration-rook-ceph-helm/debian/deb_folder/rules (new executable file, 26 lines)
@ -0,0 +1,26 @@
|
||||
#!/usr/bin/make -f
|
||||
export DH_VERBOSE = 1
|
||||
|
||||
export ROOT = debian/tmp
|
||||
export APP_FOLDER = $(ROOT)/usr/lib/helm
|
||||
|
||||
%:
|
||||
dh $@
|
||||
|
||||
override_dh_auto_build:
|
||||
|
||||
# Copy migration-rook-ceph-helm charts
|
||||
mkdir -p migration-rook-ceph-helm
|
||||
cp -r cluster/charts/* migration-rook-ceph-helm
|
||||
cp Makefile migration-rook-ceph-helm
|
||||
|
||||
cd migration-rook-ceph-helm && make rook-ceph
|
||||
cd migration-rook-ceph-helm && make rook-ceph-cluster
|
||||
|
||||
override_dh_auto_install:
|
||||
# Install the app tar file.
|
||||
install -d -m 755 $(APP_FOLDER)
|
||||
install -p -D -m 755 migration-rook-ceph-helm/rook-ceph-cluster*.tgz $(APP_FOLDER)
|
||||
install -p -D -m 755 migration-rook-ceph-helm/rook-ceph-[!c]*.tgz $(APP_FOLDER)
|
||||
|
||||
override_dh_auto_test:
|
migration-rook-ceph-helm/debian/meta_data.yaml (new file, 13 lines)
@ -0,0 +1,13 @@
|
||||
---
|
||||
debname: migration-rook-ceph-helm
|
||||
debver: 1.7-11
|
||||
dl_path:
|
||||
name: rook-ceph-1.7.11.tar.gz
|
||||
url: https://github.com/rook/rook/archive/refs/tags/v1.7.11.tar.gz
|
||||
sha256sum: 0654b293618ff33608d262b5dc42b7cdb3201e29a66a9a777403856ea4c1ef48
|
||||
src_files:
|
||||
- migration-rook-ceph-helm/files/Makefile
|
||||
revision:
|
||||
dist: $STX_DIST
|
||||
GITREVCOUNT:
|
||||
BASE_SRCREV: 10c623509a68acad945d4e0c06a86b3e8486ad5b
|
@ -0,0 +1,42 @@
|
||||
#
|
||||
# Copyright 2017 The Openstack-Helm Authors.
|
||||
#
|
||||
# Copyright (c) 2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# It's necessary to set this because some environments don't link sh -> bash.
|
||||
SHELL := /bin/bash
|
||||
TASK := build
|
||||
|
||||
EXCLUDES := helm-toolkit doc tests tools logs tmp
|
||||
CHARTS := helm-toolkit $(filter-out $(EXCLUDES), $(patsubst %/.,%,$(wildcard */.)))
|
||||
|
||||
.PHONY: $(EXCLUDES) $(CHARTS)
|
||||
|
||||
all: $(CHARTS)
|
||||
|
||||
$(CHARTS):
|
||||
@if [ -d $@ ]; then \
|
||||
echo; \
|
||||
echo "===== Processing [$@] chart ====="; \
|
||||
make $(TASK)-$@; \
|
||||
fi
|
||||
|
||||
init-%:
|
||||
if [ -f $*/Makefile ]; then make -C $*; fi
|
||||
|
||||
lint-%: init-%
|
||||
if [ -d $* ]; then helm lint $*; fi
|
||||
|
||||
build-%: lint-%
|
||||
if [ -d $* ]; then helm package $*; fi
|
||||
|
||||
clean:
|
||||
@echo "Clean all build artifacts"
|
||||
rm -f */templates/_partials.tpl */templates/_globals.tpl
|
||||
rm -f *tgz */charts/*tgz
|
||||
rm -rf */charts */tmpcharts
|
||||
|
||||
%:
|
||||
@:
|
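The Makefile above packages each chart directory with helm lint followed by helm package, which is how the debian rules file produces the .tgz artifacts it installs. A small Python sketch of the same flow, assuming only that the helm CLI is on PATH:

# Simplified sketch of the build flow in the Makefile above: lint each chart
# directory and package it into a .tgz (the Makefile's helm-toolkit special
# case and init hooks are omitted here).
import pathlib
import subprocess

EXCLUDES = {"helm-toolkit", "doc", "tests", "tools", "logs", "tmp"}

def build_charts(root="."):
    for chart in sorted(pathlib.Path(root).iterdir()):
        if chart.is_dir() and chart.name not in EXCLUDES:
            subprocess.run(["helm", "lint", str(chart)], check=True)
            subprocess.run(["helm", "package", str(chart)], check=True)

if __name__ == "__main__":
    build_charts()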
@ -1,4 +1,4 @@
|
||||
python3-k8sapp-rook (1.0-1) unstable; urgency=medium
|
||||
python3-k8sapp-migration-rook-ceph (1.0-1) unstable; urgency=medium
|
||||
|
||||
* Initial release.
|
||||
|
@ -1,4 +1,4 @@
|
||||
Source: python3-k8sapp-rook
|
||||
Source: python3-k8sapp-migration-rook-ceph
|
||||
Section: libs
|
||||
Priority: optional
|
||||
Maintainer: StarlingX Developers <starlingx-discuss@lists.starlingx.io>
|
||||
@ -11,14 +11,14 @@ Build-Depends: debhelper-compat (= 13),
|
||||
Standards-Version: 4.5.1
|
||||
Homepage: https://www.starlingx.io
|
||||
|
||||
Package: python3-k8sapp-rook
|
||||
Package: python3-k8sapp-migration-rook-ceph
|
||||
Section: libs
|
||||
Architecture: any
|
||||
Depends: ${misc:Depends}, ${python3:Depends}
|
||||
Description: StarlingX Sysinv Rook Ceph Extensions
|
||||
Sysinv plugins for the Rook Ceph K8S app.
|
||||
|
||||
Package: python3-k8sapp-rook-wheels
|
||||
Package: python3-k8sapp-migration-rook-ceph-wheels
|
||||
Section: libs
|
||||
Architecture: any
|
||||
Depends: ${misc:Depends}, ${python3:Depends}, python3-wheel
|
@ -0,0 +1,41 @@
|
||||
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
|
||||
Upstream-Name: python3-k8sapp-migration-rook-ceph
|
||||
Source: https://opendev.org/starlingx/rook-ceph/
|
||||
|
||||
Files: *
|
||||
Copyright: (c) 2013-2024 Wind River Systems, Inc
|
||||
License: Apache-2
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
.
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
.
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
.
|
||||
On Debian-based systems the full text of the Apache version 2.0 license
|
||||
can be found in `/usr/share/common-licenses/Apache-2.0'.
|
||||
|
||||
# If you want to use GPL v2 or later for the /debian/* files use
|
||||
# the following clauses, or change it to suit. Delete these two lines
|
||||
Files: debian/*
|
||||
Copyright: 2024 Wind River Systems, Inc
|
||||
License: Apache-2
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
.
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
.
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
.
|
||||
On Debian-based systems the full text of the Apache version 2.0 license
|
||||
can be found in `/usr/share/common-licenses/Apache-2.0'.
|
@ -1,7 +1,7 @@
|
||||
#!/usr/bin/make -f
|
||||
# export DH_VERBOSE = 1
|
||||
|
||||
export APP_NAME = rook-ceph-apps
|
||||
export APP_NAME = rook-ceph-migration
|
||||
export PYBUILD_NAME = k8sapp-rook
|
||||
|
||||
export DEB_VERSION = $(shell dpkg-parsechangelog | egrep '^Version:' | cut -f 2 -d ' ')
|
@ -1,7 +1,7 @@
|
||||
---
|
||||
debname: python3-k8sapp-rook
|
||||
debname: python3-k8sapp-migration-rook-ceph
|
||||
debver: 1.0-1
|
||||
src_path: k8sapp_rook
|
||||
src_path: k8sapp_migration_rook_ceph
|
||||
revision:
|
||||
dist: $STX_DIST
|
||||
GITREVCOUNT:
|
@ -0,0 +1,7 @@
|
||||
[run]
|
||||
branch = True
|
||||
source = k8sapp_migration_rook_ceph
|
||||
omit = k8sapp_migration_rook_ceph/tests/*
|
||||
|
||||
[report]
|
||||
ignore_errors = True
|
@ -0,0 +1,4 @@
|
||||
[DEFAULT]
|
||||
test_path=./k8sapp_migration_rook_ceph/tests
|
||||
top_dir=./k8sapp_migration_rook_ceph
|
||||
#parallel_class=True
|
@ -1,4 +1,4 @@
|
||||
k8sapp-rook-ceph
|
||||
k8sapp_migration_rook_ceph
|
||||
================
|
||||
|
||||
This project contains StarlingX Kubernetes application specific python plugins
|
@ -0,0 +1,34 @@
|
||||
#
|
||||
# Copyright (c) 2020 Intel Corporation, Inc.
|
||||
# Copyright (c) 2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
# Application Name
|
||||
HELM_NS_ROOK_CEPH = 'rook-ceph'
|
||||
HELM_APP_ROOK_CEPH = 'rook-ceph-migration'
|
||||
|
||||
# Helm: Supported charts:
|
||||
# These values match the names in the chart package's Chart.yaml
|
||||
HELM_CHART_ROOK_CEPH = 'rook-ceph'
|
||||
HELM_CHART_ROOK_CEPH_CLUSTER = 'rook-ceph-cluster'
|
||||
HELM_CHART_ROOK_CEPH_PROVISIONER = 'rook-ceph-provisioner'
|
||||
|
||||
# FluxCD
|
||||
FLUXCD_HELMRELEASE_ROOK_CEPH = 'rook-ceph'
|
||||
FLUXCD_HELMRELEASE_ROOK_CEPH_CLUSTER = 'rook-ceph-cluster'
|
||||
FLUXCD_HELMRELEASE_ROOK_CEPH_PROVISIONER = 'rook-ceph-provisioner'
|
||||
|
||||
ROOK_CEPH_CLUSTER_SECRET_NAMESPACE = 'rook-ceph'
|
||||
|
||||
ROOK_CEPH_RDB_SECRET_NAME = 'rook-csi-rbd-provisioner'
|
||||
ROOK_CEPH_RDB_NODE_SECRET_NAME = 'rook-csi-rbd-node'
|
||||
|
||||
ROOK_CEPH_FS_SECRET_NAME = 'rook-csi-cephfs-provisioner'
|
||||
ROOK_CEPH_FS_NODE_SECRET_NAME = 'rook-csi-cephfs-node'
|
||||
|
||||
ROOK_CEPH_CLUSTER_RDB_STORAGE_CLASS_NAME = 'general'
|
||||
ROOK_CEPH_CLUSTER_CEPHFS_STORAGE_CLASS_NAME = 'cephfs'
|
||||
|
||||
ROOK_CEPH_CLUSTER_CEPHFS_FILE_SYSTEM_NAME = 'kube-cephfs'
|
@ -0,0 +1,43 @@
|
||||
#
|
||||
# Copyright (c) 2021 Intel Corporation, Inc.
|
||||
# Copyright (c) 2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from k8sapp_migration_rook_ceph.common import constants as app_constants
|
||||
from k8sapp_migration_rook_ceph.helm import storage
|
||||
from sysinv.common import exception
|
||||
|
||||
|
||||
class RookCephHelm(storage.StorageBaseHelm):
|
||||
"""Class to encapsulate helm operations for the rook-operator chart"""
|
||||
CHART = app_constants.HELM_CHART_ROOK_CEPH
|
||||
HELM_RELEASE = app_constants.FLUXCD_HELMRELEASE_ROOK_CEPH
|
||||
|
||||
def get_overrides(self, namespace=None):
|
||||
secrets = [{"name": "default-registry-key"}]
|
||||
overrides = {
|
||||
app_constants.HELM_NS_ROOK_CEPH: {
|
||||
'operator': self._get_operator_override(),
|
||||
'imagePullSecrets': secrets,
|
||||
}
|
||||
}
|
||||
|
||||
if namespace in self.SUPPORTED_NAMESPACES:
|
||||
return overrides[namespace]
|
||||
elif namespace:
|
||||
raise exception.InvalidHelmNamespace(chart=self.CHART,
|
||||
namespace=namespace)
|
||||
else:
|
||||
return overrides
|
||||
|
||||
def _get_operator_override(self):
|
||||
operator = {
|
||||
'csi': {
|
||||
'enableRbdDriver': True
|
||||
},
|
||||
'enableFlexDriver': False,
|
||||
'logLevel': 'DEBUG',
|
||||
}
|
||||
return operator
|
@ -0,0 +1,196 @@
|
||||
|
||||
#
|
||||
# Copyright (c) 2018 Intel Corporation, Inc.
|
||||
# Copyright (c) 2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from k8sapp_migration_rook_ceph.common import constants as app_constants
|
||||
from k8sapp_migration_rook_ceph.helm import storage
|
||||
|
||||
import socket
|
||||
|
||||
from sysinv.common import constants
|
||||
from sysinv.common import exception
|
||||
from sysinv.common import utils as cutils
|
||||
|
||||
|
||||
class RookCephClusterHelm(storage.StorageBaseHelm):
|
||||
"""Class to encapsulate helm operations for the rook-ceph chart"""
|
||||
|
||||
CHART = app_constants.HELM_CHART_ROOK_CEPH_CLUSTER
|
||||
HELM_RELEASE = app_constants.FLUXCD_HELMRELEASE_ROOK_CEPH_CLUSTER
|
||||
|
||||
def get_overrides(self, namespace=None):
|
||||
overrides = {
|
||||
app_constants.HELM_NS_ROOK_CEPH: {
|
||||
'cephClusterSpec': self._get_cluster_override(),
|
||||
'cephFileSystems': self._get_cephfs_override(),
|
||||
'cephBlockPools': self._get_rdb_override(),
|
||||
'mds': self._get_mds_override(),
|
||||
'hook': self._get_hook_override(),
|
||||
}
|
||||
}
|
||||
|
||||
if namespace in self.SUPPORTED_NAMESPACES:
|
||||
return overrides[namespace]
|
||||
elif namespace:
|
||||
raise exception.InvalidHelmNamespace(chart=self.CHART,
|
||||
namespace=namespace)
|
||||
else:
|
||||
return overrides
|
||||
|
||||
def _get_cephfs_override(self):
|
||||
if cutils.is_aio_simplex_system(self.dbapi):
|
||||
replica = 1
|
||||
else:
|
||||
replica = 2
|
||||
|
||||
parameters = {
|
||||
'csi.storage.k8s.io/provisioner-secret-name': app_constants.ROOK_CEPH_FS_SECRET_NAME,
|
||||
'csi.storage.k8s.io/provisioner-secret-namespace': app_constants.ROOK_CEPH_CLUSTER_SECRET_NAMESPACE,
|
||||
'csi.storage.k8s.io/controller-expand-secret-name': app_constants.ROOK_CEPH_FS_SECRET_NAME,
|
||||
'csi.storage.k8s.io/controller-expand-secret-namespace': app_constants.ROOK_CEPH_CLUSTER_SECRET_NAMESPACE,
|
||||
'csi.storage.k8s.io/node-stage-secret-name': app_constants.ROOK_CEPH_FS_NODE_SECRET_NAME,
|
||||
'csi.storage.k8s.io/node-stage-secret-namespace': app_constants.ROOK_CEPH_CLUSTER_SECRET_NAMESPACE,
|
||||
'csi.storage.k8s.io/fstype': 'ext4'
|
||||
}
|
||||
|
||||
storage_class = {
|
||||
'enabled': True,
|
||||
'name': app_constants.ROOK_CEPH_CLUSTER_CEPHFS_STORAGE_CLASS_NAME,
|
||||
'isDefault': False,
|
||||
'allowVolumeExpansion': True,
|
||||
'reclaimPolicy': 'Delete',
|
||||
'parameters': parameters
|
||||
}
|
||||
|
||||
ceph_fs_config = [{
|
||||
'name': app_constants.ROOK_CEPH_CLUSTER_CEPHFS_FILE_SYSTEM_NAME,
|
||||
'spec': {
|
||||
'metadataPool': {
|
||||
'replicated':
|
||||
{'size': replica}},
|
||||
'metadataServer': {
|
||||
'activeCount': 1,
|
||||
'activeStandby': True},
|
||||
'dataPools': [{
|
||||
'failureDomain': 'host',
|
||||
'replicated':
|
||||
{'size': replica}}],
|
||||
},
|
||||
'storageClass': storage_class
|
||||
}]
|
||||
|
||||
return ceph_fs_config
|
||||
|
||||
def _get_rdb_override(self):
|
||||
if cutils.is_aio_simplex_system(self.dbapi):
|
||||
replica = 1
|
||||
else:
|
||||
replica = 2
|
||||
|
||||
parameters = {
|
||||
'imageFormat': '2',
|
||||
'imageFeatures': 'layering',
|
||||
'csi.storage.k8s.io/provisioner-secret-name': app_constants.ROOK_CEPH_RDB_SECRET_NAME,
|
||||
'csi.storage.k8s.io/provisioner-secret-namespace': app_constants.ROOK_CEPH_CLUSTER_SECRET_NAMESPACE,
|
||||
'csi.storage.k8s.io/controller-expand-secret-name': app_constants.ROOK_CEPH_RDB_SECRET_NAME,
|
||||
'csi.storage.k8s.io/controller-expand-secret-namespace': app_constants.ROOK_CEPH_CLUSTER_SECRET_NAMESPACE,
|
||||
'csi.storage.k8s.io/node-stage-secret-name': app_constants.ROOK_CEPH_RDB_NODE_SECRET_NAME,
|
||||
'csi.storage.k8s.io/node-stage-secret-namespace': app_constants.ROOK_CEPH_CLUSTER_SECRET_NAMESPACE,
|
||||
'csi.storage.k8s.io/fstype': 'ext4'
|
||||
}
|
||||
|
||||
storage_class = {
|
||||
'enabled': True,
|
||||
'name': app_constants.ROOK_CEPH_CLUSTER_RDB_STORAGE_CLASS_NAME,
|
||||
'isDefault': True,
|
||||
'allowVolumeExpansion': True,
|
||||
'reclaimPolicy': 'Delete',
|
||||
'mountOptions': [],
|
||||
'parameters': parameters
|
||||
}
|
||||
|
||||
rdb_config = [{
|
||||
'name': 'kube-rbd',
|
||||
'spec': {
|
||||
'failureDomain': 'host',
|
||||
'replicated': {'size': replica}
|
||||
},
|
||||
'storageClass': storage_class
|
||||
}]
|
||||
|
||||
return rdb_config
|
||||
|
||||
def _get_cluster_override(self):
|
||||
cluster = {
|
||||
'mon': {
|
||||
'count': self._get_mon_count(),
|
||||
},
|
||||
}
|
||||
|
||||
return cluster
|
||||
|
||||
def _get_mon_count(self):
|
||||
# Choose the mon count based on the deployment config:
|
||||
# AIO simplex/duplex have 1 mon, multi-node has 3 mons,
|
||||
# 2 controllers + first mon (and cannot reconfig)
|
||||
if cutils.is_aio_system(self.dbapi):
|
||||
return 1
|
||||
else:
|
||||
return 3
|
||||
|
||||
def _get_mds_override(self):
|
||||
if cutils.is_aio_simplex_system(self.dbapi):
|
||||
replica = 1
|
||||
else:
|
||||
replica = 2
|
||||
|
||||
mds = {
|
||||
'replica': replica,
|
||||
}
|
||||
|
||||
return mds
|
||||
|
||||
def _get_hook_override(self):
|
||||
hook = {
|
||||
'cleanup': {
|
||||
'mon_hosts': self._get_mon_hosts(),
|
||||
},
|
||||
'duplexPreparation': self._get_duplex_preparation(),
|
||||
}
|
||||
return hook
|
||||
|
||||
def _get_mon_hosts(self):
|
||||
ceph_mon_label = "ceph-mon-placement=enabled"
|
||||
mon_hosts = []
|
||||
|
||||
hosts = self.dbapi.ihost_get_list()
|
||||
for h in hosts:
|
||||
labels = self.dbapi.label_get_by_host(h.uuid)
|
||||
for label in labels:
|
||||
if (ceph_mon_label == str(label.label_key) + '=' + str(label.label_value)):
|
||||
mon_hosts.append(h.hostname.encode('utf8', 'strict'))
|
||||
|
||||
return mon_hosts
|
||||
|
||||
def _get_duplex_preparation(self):
|
||||
duplex = {
|
||||
'enable': cutils.is_aio_duplex_system(self.dbapi)
|
||||
}
|
||||
|
||||
if cutils.is_aio_duplex_system(self.dbapi):
|
||||
hosts = self.dbapi.ihost_get_by_personality(
|
||||
constants.CONTROLLER)
|
||||
for host in hosts:
|
||||
if host['hostname'] == socket.gethostname():
|
||||
duplex.update({'activeController': host['hostname'].encode('utf8', 'strict')})
|
||||
|
||||
pools = self.dbapi.address_pools_get_all()
|
||||
for pool in pools:
|
||||
if pool.name == 'management':
|
||||
duplex.update({'floatIP': pool.floating_address})
|
||||
|
||||
return duplex
|
@ -1,46 +1,28 @@
|
||||
#
|
||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
||||
# Copyright (c) 2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from k8sapp_rook.common import constants as app_constants
|
||||
from k8sapp_migration_rook_ceph.common import constants as app_constants
|
||||
from k8sapp_migration_rook_ceph.helm import storage
|
||||
|
||||
from kubernetes.client.rest import ApiException
|
||||
from oslo_log import log as logging
|
||||
from sysinv.common import constants
|
||||
from sysinv.common import exception
|
||||
from sysinv.common import kubernetes
|
||||
from sysinv.common import utils
|
||||
from sysinv.helm import base
|
||||
from sysinv.helm import common
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RookCephProvisionerHelm(base.BaseHelm):
|
||||
class RookCephClusterProvisionerHelm(storage.StorageBaseHelm):
|
||||
"""Class to encapsulate helm operations for the rook-ceph-provisioner chart"""
|
||||
|
||||
CHART = app_constants.HELM_CHART_ROOK_CEPH_PROVISIONER
|
||||
SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \
|
||||
[common.HELM_NS_STORAGE_PROVISIONER]
|
||||
SUPPORTED_APP_NAMESPACES = {
|
||||
constants.HELM_APP_ROOK_CEPH:
|
||||
base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_STORAGE_PROVISIONER],
|
||||
}
|
||||
|
||||
SERVICE_NAME = app_constants.HELM_CHART_ROOK_CEPH_PROVISIONER
|
||||
|
||||
def execute_manifest_updates(self, operator):
|
||||
# On application load this chart is enabled. Only disable if specified
|
||||
# by the user
|
||||
if not self._is_enabled(operator.APP, self.CHART,
|
||||
common.HELM_NS_STORAGE_PROVISIONER):
|
||||
operator.chart_group_chart_delete(
|
||||
operator.CHART_GROUPS_LUT[self.CHART],
|
||||
operator.CHARTS_LUT[self.CHART])
|
||||
|
||||
def get_namespaces(self):
|
||||
return self.SUPPORTED_NAMESPACES
|
||||
HELM_RELEASE = app_constants.FLUXCD_HELMRELEASE_ROOK_CEPH_PROVISIONER
|
||||
|
||||
def get_overrides(self, namespace=None):
|
||||
base_name = 'ceph-pool'
|
||||
@ -54,7 +36,7 @@ class RookCephProvisionerHelm(base.BaseHelm):
|
||||
audit = utils.is_aio_duplex_system(self.dbapi)
|
||||
|
||||
overrides = {
|
||||
common.HELM_NS_STORAGE_PROVISIONER: {
|
||||
app_constants.HELM_NS_ROOK_CEPH: {
|
||||
"global": {
|
||||
"job_ceph_mon_audit": audit,
|
||||
},
|
||||
@ -100,7 +82,7 @@ class RookCephProvisionerHelm(base.BaseHelm):
|
||||
mon_ip_name = 'rook-ceph-mon-endpoints'
|
||||
|
||||
configmap = kube.kube_read_config_map(mon_ip_name,
|
||||
common.HELM_NS_STORAGE_PROVISIONER)
|
||||
app_constants.HELM_NS_ROOK_CEPH)
|
||||
if configmap is not None:
|
||||
data = configmap.data['data']
|
||||
LOG.info('rook configmap data is %s' % data)
|
||||
@ -120,11 +102,9 @@ class RookCephProvisionerHelm(base.BaseHelm):
|
||||
|
||||
def _is_rook_ceph(self):
|
||||
try:
|
||||
# check function getLabels in rook/pkg/operator/ceph/cluster/mon/spec.go
|
||||
# rook will assign label "mon_cluster=kube-system" to monitor pods
|
||||
label = "mon_cluster=" + common.HELM_NS_STORAGE_PROVISIONER
|
||||
label = "mon_cluster=" + app_constants.HELM_NS_ROOK_CEPH
|
||||
kube = kubernetes.KubeOperator()
|
||||
pods = kube.kube_get_pods_by_selector(common.HELM_NS_STORAGE_PROVISIONER, label, "")
|
||||
pods = kube.kube_get_pods_by_selector(app_constants.HELM_NS_ROOK_CEPH, label, "")
|
||||
if len(pods) > 0:
|
||||
return True
|
||||
except ApiException as ae:
|
@ -0,0 +1,53 @@
|
||||
#
|
||||
# Copyright (c) 2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from sysinv.helm import base
|
||||
from k8sapp_migration_rook_ceph.common import constants as app_constants
|
||||
|
||||
|
||||
class BaseHelm(base.FluxCDBaseHelm):
|
||||
"""Class to encapsulate storage related service operations for helm"""
|
||||
|
||||
SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \
|
||||
[app_constants.HELM_NS_ROOK_CEPH]
|
||||
SUPPORTED_APP_NAMESPACES = {
|
||||
app_constants.HELM_APP_ROOK_CEPH: SUPPORTED_NAMESPACES,
|
||||
}
|
||||
|
||||
|
||||
class StorageBaseHelm(BaseHelm):
|
||||
"""Class to encapsulate storage service operations for helm"""
|
||||
|
||||
def _is_enabled(self, app_name, chart_name, namespace):
|
||||
"""
|
||||
Check if the chart is enabled at a system level
|
||||
|
||||
:param app_name: Application name
|
||||
:param chart_name: Chart supplied with the application
|
||||
:param namespace: Namespace where the chart will be executed
|
||||
|
||||
Returns true by default if an exception occurs as most charts are
|
||||
enabled.
|
||||
"""
|
||||
return super(StorageBaseHelm, self)._is_enabled(
|
||||
app_name, chart_name, namespace)
|
||||
|
||||
def execute_kustomize_updates(self, operator):
|
||||
"""
|
||||
Update the elements of FluxCD kustomize manifests.
|
||||
|
||||
This allows a helm chart plugin to use the FluxCDKustomizeOperator to
|
||||
make dynamic structural changes to the application manifest based on the
|
||||
current conditions in the platform
|
||||
|
||||
Changes currently include updates to the top level kustomize manifest to
|
||||
disable helm releases.
|
||||
|
||||
:param operator: an instance of the FluxCDKustomizeOperator
|
||||
"""
|
||||
if not self._is_enabled(operator.APP, self.CHART,
|
||||
app_constants.HELM_NS_ROOK_CEPH):
|
||||
operator.helm_release_resource_delete(self.HELM_RELEASE)
|
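StorageBaseHelm above centralizes the enable/disable handling for every chart plugin in this application: when a chart is disabled at the system level, execute_kustomize_updates() removes its HelmRelease from the FluxCD kustomize manifest. A minimal sketch of a chart plugin built on top of it follows; the chart and release names are hypothetical and only illustrate the pattern used by the real plugins in this change.

# Hypothetical chart plugin built on StorageBaseHelm; the chart/release
# names are illustrative only and are not part of this application.
from k8sapp_migration_rook_ceph.common import constants as app_constants
from k8sapp_migration_rook_ceph.helm import storage


class ExampleChartHelm(storage.StorageBaseHelm):
    """Declare the chart and its FluxCD HelmRelease, then supply
    per-namespace overrides; enable/disable handling is inherited."""

    CHART = "example-chart"          # hypothetical chart name
    HELM_RELEASE = "example-chart"   # hypothetical HelmRelease name

    def get_overrides(self, namespace=None):
        overrides = {
            app_constants.HELM_NS_ROOK_CEPH: {"logLevel": "INFO"},
        }
        if namespace in self.SUPPORTED_NAMESPACES:
            return overrides[namespace]
        return overrides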
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2020 Intel Corporation, Inc.
|
||||
# Copyright (c) 2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
@ -0,0 +1,29 @@
|
||||
#
|
||||
# Copyright (c) 2020 Intel Corporation, Inc.
|
||||
# Copyright (c) 2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
|
||||
""" System inventory Kustomization resource operator."""
|
||||
|
||||
from k8sapp_migration_rook_ceph.common import constants as app_constants
|
||||
from sysinv.helm import kustomize_base as base
|
||||
|
||||
|
||||
class RookCephFluxCDKustomizeOperator(base.FluxCDKustomizeOperator):
|
||||
|
||||
APP = app_constants.HELM_APP_ROOK_CEPH
|
||||
|
||||
def platform_mode_kustomize_updates(self, dbapi, mode):
|
||||
""" Update the top-level kustomization resource list
|
||||
|
||||
Make changes to the top-level kustomization resource list based on the
|
||||
platform mode
|
||||
|
||||
:param dbapi: DB api object
|
||||
:param mode: mode to control when to update the resource list
|
||||
"""
|
||||
pass
|
@ -0,0 +1,109 @@
|
||||
#
|
||||
# Copyright (c) 2021 Intel Corporation, Inc.
|
||||
# Copyright (c) 2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
|
||||
""" System inventory App lifecycle operator."""
|
||||
|
||||
from oslo_log import log as logging
|
||||
from sysinv.common import constants
|
||||
from sysinv.common import exception
|
||||
from sysinv.common import kubernetes
|
||||
from sysinv.common import utils as cutils
|
||||
from sysinv.helm import lifecycle_base as base
|
||||
from sysinv.helm.lifecycle_constants import LifecycleConstants
|
||||
from sysinv.helm import lifecycle_utils as lifecycle_utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RookCephAppLifecycleOperator(base.AppLifecycleOperator):
|
||||
def app_lifecycle_actions(self, context, conductor_obj, app_op, app, hook_info):
|
||||
""" Perform lifecycle actions for an operation
|
||||
|
||||
:param context: request context
|
||||
:param conductor_obj: conductor object
|
||||
:param app_op: AppOperator object
|
||||
:param app: AppOperator.Application object
|
||||
:param hook_info: LifecycleHookInfo object
|
||||
|
||||
"""
|
||||
# Fluxcd request
|
||||
if hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_FLUXCD_REQUEST:
|
||||
if (hook_info.operation == constants.APP_REMOVE_OP and
|
||||
hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_PRE):
|
||||
return self.remove_finalizers_crd()
|
||||
|
||||
# Resources
|
||||
if hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_RESOURCE:
|
||||
if hook_info.operation == constants.APP_APPLY_OP:
|
||||
if hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_PRE:
|
||||
return lifecycle_utils.create_local_registry_secrets(app_op, app, hook_info)
|
||||
elif (hook_info.operation == constants.APP_REMOVE_OP and
|
||||
hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_POST):
|
||||
return lifecycle_utils.delete_local_registry_secrets(app_op, app, hook_info)
|
||||
|
||||
# Operation
|
||||
elif hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_OPERATION:
|
||||
if (hook_info.operation == constants.APP_APPLY_OP and
|
||||
hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_POST):
|
||||
return self.post_apply(context, conductor_obj, app, hook_info)
|
||||
|
||||
# Use the default behaviour for other hooks
|
||||
super(RookCephAppLifecycleOperator, self).app_lifecycle_actions(context, conductor_obj, app_op, app, hook_info)
|
||||
|
||||
def post_apply(self, context, conductor_obj, app, hook_info):
|
||||
""" Post apply actions
|
||||
|
||||
:param context: request context
|
||||
:param conductor_obj: conductor object
|
||||
:param app: AppOperator.Application object
|
||||
:param hook_info: LifecycleHookInfo object
|
||||
|
||||
"""
|
||||
if LifecycleConstants.EXTRA not in hook_info:
|
||||
raise exception.LifecycleMissingInfo("Missing {}".format(LifecycleConstants.EXTRA))
|
||||
if LifecycleConstants.APP_APPLIED not in hook_info[LifecycleConstants.EXTRA]:
|
||||
raise exception.LifecycleMissingInfo(
|
||||
"Missing {} {}".format(LifecycleConstants.EXTRA, LifecycleConstants.APP_APPLIED))
|
||||
|
||||
if hook_info[LifecycleConstants.EXTRA][LifecycleConstants.APP_APPLIED]:
|
||||
# apply any runtime configurations that are needed for
|
||||
# rook_ceph application
|
||||
conductor_obj._update_config_for_rook_ceph(context)
|
||||
|
||||
def remove_finalizers_crd(self):
|
||||
""" Remove finalizers from CustomResourceDefinitions (CRDs)
|
||||
|
||||
This function removes finalizers from rook-ceph CRDs for application removal
|
||||
operation
|
||||
|
||||
"""
|
||||
# Get all CRDs related to rook-ceph
|
||||
cmd_crds = ["kubectl", "--kubeconfig", kubernetes.KUBERNETES_ADMIN_CONF, "get", "crd",
|
||||
"-o=jsonpath='{.items[?(@.spec.group==\"ceph.rook.io\")].metadata.name}'"]
|
||||
|
||||
stdout, stderr = cutils.trycmd(*cmd_crds)
|
||||
if not stderr:
|
||||
crds = stdout.replace("'", "").strip().split(" ")
|
||||
for crd_name in crds:
|
||||
# Get custom resources based on each rook-ceph CRD
|
||||
cmd_instances = ["kubectl", "--kubeconfig", kubernetes.KUBERNETES_ADMIN_CONF,
|
||||
"get", "-n", "rook-ceph", crd_name, "-o", "name"]
|
||||
stdout, stderr = cutils.trycmd(*cmd_instances)
|
||||
crd_instances = stdout.strip().split("\n")
|
||||
if not stderr and crd_instances:
|
||||
for crd_instance in crd_instances:
|
||||
if crd_instance:
|
||||
# Patch each custom resource to remove finalizers
|
||||
patch_cmd = ["kubectl", "--kubeconfig", kubernetes.KUBERNETES_ADMIN_CONF,
|
||||
"patch", "-n", "rook-ceph", crd_instance, "-p",
|
||||
"{\"metadata\":{\"finalizers\":null}}", "--type=merge"]
|
||||
stdout, stderr = cutils.trycmd(*patch_cmd)
|
||||
LOG.debug("{} \n stdout: {} \n stderr: {}".format(crd_instance, stdout, stderr))
|
||||
else:
|
||||
LOG.error("Error removing finalizers: {}".format(stderr))
|
@ -1,28 +1,25 @@
|
||||
#
|
||||
# Copyright (c) 2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from sysinv.common import constants
|
||||
from k8sapp_migration_rook_ceph.common import constants as app_constants
|
||||
from sysinv.tests.db import base as dbbase
|
||||
|
||||
|
||||
class K8SAppRookAppMixin(object):
|
||||
app_name = constants.HELM_APP_ROOK_CEPH
|
||||
class K8SAppMigrationRookAppMixin(object):
|
||||
app_name = app_constants.HELM_APP_ROOK_CEPH
|
||||
path_name = app_name + '.tgz'
|
||||
|
||||
def setUp(self): # pylint: disable=useless-super-delegation
|
||||
super(K8SAppRookAppMixin, self).setUp()
|
||||
|
||||
def test_stub(self):
|
||||
# This unit test stub should be removed when real
|
||||
# unit tests are added
|
||||
pass
|
||||
super(K8SAppMigrationRookAppMixin, self).setUp()
|
||||
|
||||
|
||||
# Test Configuration:
|
||||
# - Controller
|
||||
# - IPv6
|
||||
class K8SAppRookControllerTestCase(K8SAppRookAppMixin,
|
||||
class K8SAppMigrationRookControllerTestCase(K8SAppMigrationRookAppMixin,
|
||||
dbbase.BaseIPv6Mixin,
|
||||
dbbase.ControllerHostTestCase):
|
||||
pass
|
||||
@ -31,6 +28,6 @@ class K8SAppRookControllerTestCase(K8SAppRookAppMixin,
|
||||
# Test Configuration:
|
||||
# - AIO
|
||||
# - IPv4
|
||||
class K8SAppRookAIOTestCase(K8SAppRookAppMixin,
|
||||
class K8SAppMigrationRookAIOTestCase(K8SAppMigrationRookAppMixin,
|
||||
dbbase.AIOSimplexHostTestCase):
|
||||
pass
|
@ -0,0 +1,274 @@
|
||||
# Copyright (c) 2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
from k8sapp_migration_rook_ceph.common import constants as app_constants
|
||||
from k8sapp_migration_rook_ceph.tests import test_plugins
|
||||
|
||||
from sysinv.db import api as dbapi
|
||||
|
||||
from sysinv.tests.db import base as dbbase
|
||||
from sysinv.tests.db import utils as dbutils
|
||||
from sysinv.tests.helm import base
|
||||
|
||||
|
||||
class RookTestCase(test_plugins.K8SAppMigrationRookAppMixin,
|
||||
base.HelmTestCaseMixin):
|
||||
|
||||
def setUp(self):
|
||||
super(RookTestCase, self).setUp()
|
||||
self.app = dbutils.create_test_app(name=app_constants.HELM_APP_ROOK_CEPH)
|
||||
self.dbapi = dbapi.get_instance()
|
||||
|
||||
|
||||
class RookIPv4ControllerHostTestCase(RookTestCase,
|
||||
dbbase.ProvisionedControllerHostTestCase):
|
||||
|
||||
def test_rook_ceph_overrides(self):
|
||||
d_overrides = self.operator.get_helm_chart_overrides(
|
||||
app_constants.HELM_CHART_ROOK_CEPH,
|
||||
cnamespace=app_constants.HELM_NS_ROOK_CEPH)
|
||||
self.assertOverridesParameters(d_overrides, {
|
||||
'operator': {
|
||||
'csi': {
|
||||
'enableRbdDriver': True
|
||||
},
|
||||
'enableFlexDriver': False,
|
||||
'logLevel': 'DEBUG'},
|
||||
'imagePullSecrets': [
|
||||
{'name': 'default-registry-key'}
|
||||
]
|
||||
})
|
||||
|
||||
def test_rook_ceph_cluster_overrides(self):
|
||||
e_overrides = self.operator.get_helm_chart_overrides(
|
||||
app_constants.HELM_CHART_ROOK_CEPH_CLUSTER,
|
||||
cnamespace=app_constants.HELM_NS_ROOK_CEPH)
|
||||
|
||||
self.assertOverridesParameters(e_overrides, {
|
||||
'cephClusterSpec': {
|
||||
'mon': {
|
||||
'count': 3
|
||||
}
|
||||
},
|
||||
'cephFileSystems':
|
||||
[
|
||||
{
|
||||
'name': 'kube-cephfs',
|
||||
'spec':
|
||||
{
|
||||
'metadataPool': {'replicated': {'size': 2}},
|
||||
'metadataServer': {'activeCount': 1, 'activeStandby': True},
|
||||
'dataPools':
|
||||
[{'failureDomain': 'host', 'replicated': {'size': 2}}],
|
||||
},
|
||||
'storageClass':
|
||||
{
|
||||
'enabled': True,
|
||||
'name': 'cephfs',
|
||||
'isDefault': False,
|
||||
'allowVolumeExpansion': True,
|
||||
'reclaimPolicy': 'Delete',
|
||||
'parameters':
|
||||
{
|
||||
'csi.storage.k8s.io/provisioner-secret-name': 'rook-csi-cephfs-provisioner',
|
||||
'csi.storage.k8s.io/provisioner-secret-namespace': 'rook-ceph',
|
||||
'csi.storage.k8s.io/controller-expand-secret-name': 'rook-csi-cephfs-provisioner',
|
||||
'csi.storage.k8s.io/controller-expand-secret-namespace': 'rook-ceph',
|
||||
'csi.storage.k8s.io/node-stage-secret-name': 'rook-csi-cephfs-node',
|
||||
'csi.storage.k8s.io/node-stage-secret-namespace': 'rook-ceph',
|
||||
'csi.storage.k8s.io/fstype': 'ext4',
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
'cephBlockPools':
|
||||
[
|
||||
{
|
||||
'name': 'kube-rbd',
|
||||
'spec': {'failureDomain': 'host', 'replicated': {'size': 2}},
|
||||
'storageClass':
|
||||
{
|
||||
'enabled': True,
|
||||
'name': 'general',
|
||||
'isDefault': True,
|
||||
'allowVolumeExpansion': True,
|
||||
'reclaimPolicy': 'Delete',
|
||||
'mountOptions': [],
|
||||
'parameters':
|
||||
{
|
||||
'imageFormat': '2',
|
||||
'imageFeatures': 'layering',
|
||||
'csi.storage.k8s.io/provisioner-secret-name': 'rook-csi-rbd-provisioner',
|
||||
'csi.storage.k8s.io/provisioner-secret-namespace': 'rook-ceph',
|
||||
'csi.storage.k8s.io/controller-expand-secret-name': 'rook-csi-rbd-provisioner',
|
||||
'csi.storage.k8s.io/controller-expand-secret-namespace': 'rook-ceph',
|
||||
'csi.storage.k8s.io/node-stage-secret-name': 'rook-csi-rbd-node',
|
||||
'csi.storage.k8s.io/node-stage-secret-namespace': 'rook-ceph',
|
||||
'csi.storage.k8s.io/fstype': 'ext4',
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
'mds': {'replica': 2},
|
||||
'hook':
|
||||
{
|
||||
'cleanup': {'mon_hosts': []},
|
||||
'duplexPreparation': {'enable': False}
|
||||
}
|
||||
})
|
||||
|
||||
def test_rook_ceph_provisioner_overrides(self):
|
||||
f_overrides = self.operator.get_helm_chart_overrides(
|
||||
app_constants.HELM_CHART_ROOK_CEPH_PROVISIONER,
|
||||
cnamespace=app_constants.HELM_NS_ROOK_CEPH)
|
||||
|
||||
self.assertOverridesParameters(f_overrides, {
|
||||
'global': {'job_ceph_mon_audit': False},
|
||||
'provisionStorage':
|
||||
{
|
||||
'defaultStorageClass': 'general',
|
||||
'classdefaults':
|
||||
{'monitors': '', 'adminId': 'admin', 'adminSecretName': 'ceph-admin'},
|
||||
'classes':
|
||||
{
|
||||
'name': 'general',
|
||||
'pool':
|
||||
{
|
||||
'pool_name': 'kube-rbd',
|
||||
'replication': 2,
|
||||
'crush_rule_name': 'storage_tier_ruleset',
|
||||
'chunk_size': 64,
|
||||
},
|
||||
'secret':
|
||||
{'userId': 'kube-rbd', 'userSecretName': 'ceph-pool-kube-rbd'}, },
|
||||
},
|
||||
'host_provision': {'controller_hosts': [b'controller-0']},
|
||||
'ceph_audit_jobs': {},
|
||||
})
|
||||
|
||||
|
||||
class RookIPv6AIODuplexSystemTestCase(RookTestCase,
|
||||
dbbase.BaseIPv6Mixin,
|
||||
dbbase.ProvisionedAIODuplexSystemTestCase):
|
||||
|
||||
def test_rook_ceph_overrides(self):
|
||||
a_overrides = self.operator.get_helm_chart_overrides(
|
||||
app_constants.HELM_CHART_ROOK_CEPH,
|
||||
cnamespace=app_constants.HELM_NS_ROOK_CEPH)
|
||||
|
||||
self.assertOverridesParameters(a_overrides, {
|
||||
'operator':
|
||||
{
|
||||
'csi': {'enableRbdDriver': True},
|
||||
'enableFlexDriver': False,
|
||||
'logLevel': 'DEBUG',
|
||||
},
|
||||
'imagePullSecrets': [{'name': 'default-registry-key'}],
|
||||
})
|
||||
|
||||
def test_rook_ceph_cluster_overrides(self):
|
||||
b_overrides = self.operator.get_helm_chart_overrides(
|
||||
app_constants.HELM_CHART_ROOK_CEPH_CLUSTER,
|
||||
cnamespace=app_constants.HELM_NS_ROOK_CEPH)
|
||||
|
||||
self.assertOverridesParameters(b_overrides, {
|
||||
'cephClusterSpec': {'mon': {'count': 1}},
|
||||
'cephFileSystems':
|
||||
[
|
||||
{
|
||||
'name': 'kube-cephfs',
|
||||
'spec':
|
||||
{
|
||||
'metadataPool': {'replicated': {'size': 2}},
|
||||
'metadataServer': {'activeCount': 1, 'activeStandby': True},
|
||||
'dataPools':
|
||||
[{'failureDomain': 'host', 'replicated': {'size': 2}}],
|
||||
},
|
||||
'storageClass':
|
||||
{
|
||||
'enabled': True,
|
||||
'name': 'cephfs',
|
||||
'isDefault': False,
|
||||
'allowVolumeExpansion': True,
|
||||
'reclaimPolicy': 'Delete',
|
||||
'parameters':
|
||||
{
|
||||
'csi.storage.k8s.io/provisioner-secret-name': 'rook-csi-cephfs-provisioner',
|
||||
'csi.storage.k8s.io/provisioner-secret-namespace': 'rook-ceph',
|
||||
'csi.storage.k8s.io/controller-expand-secret-name': 'rook-csi-cephfs-provisioner',
|
||||
'csi.storage.k8s.io/controller-expand-secret-namespace': 'rook-ceph',
|
||||
'csi.storage.k8s.io/node-stage-secret-name': 'rook-csi-cephfs-node',
|
||||
'csi.storage.k8s.io/node-stage-secret-namespace': 'rook-ceph',
|
||||
'csi.storage.k8s.io/fstype': 'ext4',
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
'cephBlockPools':
|
||||
[
|
||||
{
|
||||
'name': 'kube-rbd',
|
||||
'spec': {'failureDomain': 'host', 'replicated': {'size': 2}},
|
||||
'storageClass':
|
||||
{
|
||||
'enabled': True,
|
||||
'name': 'general',
|
||||
'isDefault': True,
|
||||
'allowVolumeExpansion': True,
|
||||
'reclaimPolicy': 'Delete',
|
||||
'mountOptions': [],
|
||||
'parameters':
|
||||
{
|
||||
'imageFormat': '2',
|
||||
'imageFeatures': 'layering',
|
||||
'csi.storage.k8s.io/provisioner-secret-name': 'rook-csi-rbd-provisioner',
|
||||
'csi.storage.k8s.io/provisioner-secret-namespace': 'rook-ceph',
|
||||
'csi.storage.k8s.io/controller-expand-secret-name': 'rook-csi-rbd-provisioner',
|
||||
'csi.storage.k8s.io/controller-expand-secret-namespace': 'rook-ceph',
|
||||
'csi.storage.k8s.io/node-stage-secret-name': 'rook-csi-rbd-node',
|
||||
'csi.storage.k8s.io/node-stage-secret-namespace': 'rook-ceph',
|
||||
'csi.storage.k8s.io/fstype': 'ext4',
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
'mds': {'replica': 2},
|
||||
'hook':
|
||||
{
|
||||
'cleanup': {'mon_hosts': []},
|
||||
'duplexPreparation': {'enable': True, 'floatIP': 'fd01::2'},
|
||||
},
|
||||
})
|
||||
|
||||
def test_rook_ceph_provisioner_overrides(self):
|
||||
c_overrides = self.operator.get_helm_chart_overrides(
|
||||
app_constants.HELM_CHART_ROOK_CEPH_PROVISIONER,
|
||||
cnamespace=app_constants.HELM_NS_ROOK_CEPH)
|
||||
|
||||
self.assertOverridesParameters(c_overrides, {
|
||||
'global': {'job_ceph_mon_audit': True},
|
||||
'provisionStorage':
|
||||
{
|
||||
'defaultStorageClass': 'general',
|
||||
'classdefaults':
|
||||
{'monitors': '', 'adminId': 'admin', 'adminSecretName': 'ceph-admin'},
|
||||
'classes':
|
||||
{
|
||||
'name': 'general',
|
||||
'pool':
|
||||
{
|
||||
'pool_name': 'kube-rbd',
|
||||
'replication': 2,
|
||||
'crush_rule_name': 'storage_tier_ruleset',
|
||||
'chunk_size': 64,
|
||||
},
|
||||
'secret':
|
||||
{
|
||||
'userId': 'kube-rbd',
|
||||
'userSecretName': 'ceph-pool-kube-rbd'
|
||||
}, },
|
||||
},
|
||||
'host_provision': {'controller_hosts': [b'controller-0', b'controller-1']},
|
||||
'ceph_audit_jobs': {'floatIP': 'fd01::2'},
|
||||
})
|
@@ -0,0 +1,44 @@
[metadata]
name = k8sapp_migration_rook_ceph
summary = StarlingX sysinv extensions for rook-ceph-migration app
long_description = file: README.rst
long_description_content_type = text/x-rst
license = Apache 2.0
author = StarlingX
author-email = starlingx-discuss@lists.starlingx.io
home-page = https://www.starlingx.io/
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
Intended Audience :: System Administrators
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.9

[files]
packages =
k8sapp_migration_rook_ceph

[global]
setup-hooks =
pbr.hooks.setup_hook

[entry_points]
systemconfig.helm_applications =
rook-ceph-migration = systemconfig.helm_plugins.rook_ceph_apps

systemconfig.helm_plugins.rook_ceph_apps =
001_rook-ceph = k8sapp_migration_rook_ceph.helm.rook_ceph:RookCephHelm
002_rook-ceph-cluster = k8sapp_migration_rook_ceph.helm.rook_ceph_cluster:RookCephClusterHelm
003_rook-ceph-provisioner = k8sapp_migration_rook_ceph.helm.rook_ceph_provisioner:RookCephClusterProvisionerHelm

systemconfig.fluxcd.kustomize_ops =
rook-ceph-migration = k8sapp_migration_rook_ceph.kustomize.kustomize_rook_ceph:RookCephFluxCDKustomizeOperator

systemconfig.app_lifecycle =
rook-ceph-migration = k8sapp_migration_rook_ceph.lifecycle.lifecycle_rook_ceph:RookCephAppLifecycleOperator

[wheel]
universal = 1
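The entry points above are how sysinv discovers the migration app's Helm, kustomize, and lifecycle plugins once the python3-k8sapp-migration-rook-ceph wheel is installed. As an illustrative sketch only (not part of the package), the snippet below lists those entry-point groups in an environment where the wheel is installed, using the Python 3.9 importlib.metadata API that the tox jobs target; sysinv itself loads the plugins through its own machinery.

# Hypothetical helper, not shipped with the app: list the plugin entry points
# declared in the setup.cfg above to confirm the wheel registered them.
from importlib.metadata import entry_points

GROUPS = (
    "systemconfig.helm_applications",
    "systemconfig.helm_plugins.rook_ceph_apps",
    "systemconfig.fluxcd.kustomize_ops",
    "systemconfig.app_lifecycle",
)

def list_migration_plugins():
    eps = entry_points()  # Python 3.9: mapping of group name -> entry points
    for group in GROUPS:
        for ep in eps.get(group, []):
            print(f"{group}: {ep.name} = {ep.value}")

if __name__ == "__main__":
    list_migration_plugins()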
@@ -28,11 +28,11 @@ setenv = VIRTUAL_ENV={envdir}
PYTHONHASHSEED=0
PIP_RESOLVER_DEBUG=1
PYTHONDONTWRITEBYTECODE=1
OS_TEST_PATH=./k8sapp_rook/tests
OS_TEST_PATH=./k8sapp_migration_rook_ceph/tests
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
EVENTS_YAML=./k8sapp_rook/tests/events_for_testing.yaml
EVENTS_YAML=./k8sapp_migration_rook_ceph/tests/events_for_testing.yaml
SYSINV_TEST_ENV=True
TOX_WORK_DIR={toxworkdir}
PYLINTHOME={toxworkdir}
@@ -96,7 +96,7 @@ max-line-length=120
[testenv:flake8]
deps = -r{toxinidir}/test-requirements.txt
commands =
flake8 {posargs} ./k8sapp_rook
flake8 {posargs} ./k8sapp_migration_rook_ceph

[testenv:py39]
commands =
@@ -145,14 +145,14 @@ exclude = tests

[testenv:bandit]
deps = -r{toxinidir}/test-requirements.txt
commands = bandit --ini tox.ini -n 5 -r k8sapp_rook
commands = bandit --ini tox.ini -n 5 -r k8sapp_migration_rook_ceph

[testenv:pylint]
install_command = pip install -v -v -v \
-c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/starlingx/root/raw/branch/master/build-tools/requirements/debian/upper-constraints.txt} \
{opts} {packages}
commands =
pylint {posargs} k8sapp_rook --rcfile=./pylint.rc
pylint {posargs} k8sapp_migration_rook_ceph --rcfile=./pylint.rc

[testenv:cover]
# not sure if passenv is still needed
@@ -177,7 +177,7 @@ commands =
# of the requirements.txt files
deps = pip_missing_reqs
-rrequirements.txt
commands=pip-missing-reqs -d --ignore-file=/k8sapp_rook/tests k8sapp_rook
commands=pip-missing-reqs -d --ignore-file=/k8sapp_migration_rook_ceph/tests k8sapp_migration_rook_ceph

[testenv:metadata]
install_command = pip install -v -v -v \
@@ -185,4 +185,4 @@ install_command = pip install -v -v -v \
{opts} {packages}
# Pass top level app folder to 'sysinv-app tox' command.
commands =
bash -c "echo $(dirname $(dirname $(pwd))) | xargs -n 1 sysinv-app tox"
bash -c "echo $(dirname $(dirname $(pwd))) | xargs -n 1 sysinv-app tox"
@@ -1,7 +0,0 @@
[run]
branch = True
source = k8sapp_rook
omit = k8sapp_rook/tests/*

[report]
ignore_errors = True
@@ -1,4 +0,0 @@
[DEFAULT]
test_path=./k8sapp_rook/tests
top_dir=./k8sapp_rook
#parallel_class=True
@@ -1,11 +0,0 @@
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# Helm: Supported charts:
# These values match the names in the chart package's Chart.yaml
HELM_CHART_ROOK_OPERATOR = 'rook-operator'
HELM_CHART_ROOK_CEPH = 'rook-ceph'
HELM_CHART_ROOK_CEPH_PROVISIONER = 'rook-ceph-provisioner'
@ -1,128 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2018 Intel Corporation, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from k8sapp_rook.common import constants as app_constants
|
||||
|
||||
import socket
|
||||
|
||||
from sysinv.common import constants
|
||||
from sysinv.common import exception
|
||||
from sysinv.common import utils as cutils
|
||||
|
||||
from sysinv.helm import base
|
||||
from sysinv.helm import common
|
||||
|
||||
|
||||
class RookCephHelm(base.BaseHelm):
|
||||
"""Class to encapsulate helm operations for the rook-ceph chart"""
|
||||
|
||||
CHART = app_constants.HELM_CHART_ROOK_CEPH
|
||||
SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \
|
||||
[common.HELM_NS_STORAGE_PROVISIONER]
|
||||
SUPPORTED_APP_NAMESPACES = {
|
||||
constants.HELM_APP_ROOK_CEPH:
|
||||
base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_STORAGE_PROVISIONER],
|
||||
}
|
||||
|
||||
def execute_manifest_updates(self, operator):
|
||||
# On application load this chart is enabled. Only disable if specified
|
||||
# by the user
|
||||
if not self._is_enabled(operator.APP, self.CHART,
|
||||
common.HELM_NS_STORAGE_PROVISIONER):
|
||||
operator.chart_group_chart_delete(
|
||||
operator.CHART_GROUPS_LUT[self.CHART],
|
||||
operator.CHARTS_LUT[self.CHART])
|
||||
|
||||
def get_namespaces(self):
|
||||
return self.SUPPORTED_NAMESPACES
|
||||
|
||||
def get_overrides(self, namespace=None):
|
||||
overrides = {
|
||||
common.HELM_NS_STORAGE_PROVISIONER: {
|
||||
'cluster': self._get_cluster_override(),
|
||||
'mds': self._get_mds_override(),
|
||||
'hook': self._get_hook_override(),
|
||||
}
|
||||
}
|
||||
|
||||
if namespace in self.SUPPORTED_NAMESPACES:
|
||||
return overrides[namespace]
|
||||
elif namespace:
|
||||
raise exception.InvalidHelmNamespace(chart=self.CHART,
|
||||
namespace=namespace)
|
||||
else:
|
||||
return overrides
|
||||
|
||||
def _get_cluster_override(self):
|
||||
cluster = {
|
||||
'mon': {
|
||||
'count': self._get_mon_count(),
|
||||
},
|
||||
}
|
||||
|
||||
return cluster
|
||||
|
||||
def _get_mon_count(self):
|
||||
# change it with deployment configs:
|
||||
# AIO simplex/duplex have 1 mon, multi-node has 3 mons,
|
||||
# 2 controllers + first mon (and cannot reconfig)
|
||||
if cutils.is_aio_system(self.dbapi):
|
||||
return 1
|
||||
else:
|
||||
return 3
|
||||
|
||||
def _get_mds_override(self):
|
||||
if cutils.is_aio_simplex_system(self.dbapi):
|
||||
replica = 1
|
||||
else:
|
||||
replica = 2
|
||||
|
||||
mds = {
|
||||
'replica': replica,
|
||||
}
|
||||
|
||||
return mds
|
||||
|
||||
def _get_hook_override(self):
|
||||
hook = {
|
||||
'cleanup': {
|
||||
'mon_hosts': self._get_mon_hosts(),
|
||||
},
|
||||
'duplexPreparation': self._get_duplex_preparation(),
|
||||
}
|
||||
return hook
|
||||
|
||||
def _get_mon_hosts(self):
|
||||
ceph_mon_label = "ceph-mon-placement=enabled"
|
||||
mon_hosts = []
|
||||
|
||||
hosts = self.dbapi.ihost_get_list()
|
||||
for h in hosts:
|
||||
labels = self.dbapi.label_get_by_host(h.uuid)
|
||||
for label in labels:
|
||||
if (ceph_mon_label == str(label.label_key) + '=' + str(label.label_value)):
|
||||
mon_hosts.append(h.hostname.encode('utf8', 'strict'))
|
||||
|
||||
return mon_hosts
|
||||
|
||||
def _get_duplex_preparation(self):
|
||||
duplex = {
|
||||
'enable': cutils.is_aio_duplex_system(self.dbapi)
|
||||
}
|
||||
|
||||
if cutils.is_aio_duplex_system(self.dbapi):
|
||||
hosts = self.dbapi.ihost_get_by_personality(
|
||||
constants.CONTROLLER)
|
||||
for host in hosts:
|
||||
if host['hostname'] == socket.gethostname():
|
||||
duplex.update({'activeController': host['hostname'].encode('utf8', 'strict')})
|
||||
|
||||
pools = self.dbapi.address_pools_get_all()
|
||||
for pool in pools:
|
||||
if pool.name == 'management':
|
||||
duplex.update({'floatIP': pool.floating_address})
|
||||
|
||||
return duplex
|
@ -1,65 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2021 Intel Corporation, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from k8sapp_rook.common import constants as app_constants
|
||||
|
||||
from sysinv.common import constants
|
||||
from sysinv.common import exception
|
||||
|
||||
from sysinv.helm import base
|
||||
from sysinv.helm import common
|
||||
|
||||
|
||||
class RookOperatorHelm(base.BaseHelm):
|
||||
"""Class to encapsulate helm operations for the rook-operator chart"""
|
||||
|
||||
CHART = app_constants.HELM_CHART_ROOK_OPERATOR
|
||||
SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \
|
||||
[common.HELM_NS_STORAGE_PROVISIONER]
|
||||
SUPPORTED_APP_NAMESPACES = {
|
||||
constants.HELM_APP_ROOK_CEPH:
|
||||
base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_STORAGE_PROVISIONER],
|
||||
}
|
||||
|
||||
def execute_manifest_updates(self, operator):
|
||||
# On application load this chart is enabled. Only disable if specified
|
||||
# by the user
|
||||
if not self._is_enabled(operator.APP, self.CHART,
|
||||
common.HELM_NS_STORAGE_PROVISIONER):
|
||||
operator.chart_group_chart_delete(
|
||||
operator.CHART_GROUPS_LUT[self.CHART],
|
||||
operator.CHARTS_LUT[self.CHART])
|
||||
|
||||
def get_namespaces(self):
|
||||
return self.SUPPORTED_NAMESPACES
|
||||
|
||||
def get_overrides(self, namespace=None):
|
||||
secrets = [{"name": "default-registry-key"}]
|
||||
|
||||
overrides = {
|
||||
common.HELM_NS_STORAGE_PROVISIONER: {
|
||||
'operator': self._get_operator_override(),
|
||||
'imagePullSecrets': secrets,
|
||||
}
|
||||
}
|
||||
|
||||
if namespace in self.SUPPORTED_NAMESPACES:
|
||||
return overrides[namespace]
|
||||
elif namespace:
|
||||
raise exception.InvalidHelmNamespace(chart=self.CHART,
|
||||
namespace=namespace)
|
||||
else:
|
||||
return overrides
|
||||
|
||||
def _get_operator_override(self):
|
||||
operator = {
|
||||
'csi': {
|
||||
'enableRbdDriver': True
|
||||
},
|
||||
'enableFlexDriver': False,
|
||||
'logLevel': 'DEBUG',
|
||||
}
|
||||
return operator
|
@ -1,80 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2021 Intel Corporation, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
|
||||
""" System inventory App lifecycle operator."""
|
||||
# Temporary disable pylint for lifecycle hooks
|
||||
# This will be reverted in a future commit
|
||||
# pylint: disable=no-member
|
||||
# pylint: disable=no-name-in-module
|
||||
from oslo_log import log as logging
|
||||
from sysinv.common import constants
|
||||
from sysinv.common import exception
|
||||
from sysinv.helm import lifecycle_base as base
|
||||
from sysinv.helm.lifecycle_constants import LifecycleConstants
|
||||
from sysinv.helm import lifecycle_utils as lifecycle_utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RookCephAppLifecycleOperator(base.AppLifecycleOperator):
|
||||
def app_lifecycle_actions(self, context, conductor_obj, app_op, app, hook_info):
|
||||
""" Perform lifecycle actions for an operation
|
||||
|
||||
:param context: request context
|
||||
:param conductor_obj: conductor object
|
||||
:param app_op: AppOperator object
|
||||
:param app: AppOperator.Application object
|
||||
:param hook_info: LifecycleHookInfo object
|
||||
|
||||
"""
|
||||
# Rbd
|
||||
if hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_RBD:
|
||||
if hook_info.operation == constants.APP_APPLY_OP and \
|
||||
hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_PRE:
|
||||
return lifecycle_utils.create_rbd_provisioner_secrets(app_op, app, hook_info)
|
||||
elif hook_info.operation == constants.APP_REMOVE_OP and \
|
||||
hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_POST:
|
||||
return lifecycle_utils.delete_rbd_provisioner_secrets(app_op, app, hook_info)
|
||||
|
||||
# Resources
|
||||
elif hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_RESOURCE:
|
||||
if hook_info.operation == constants.APP_APPLY_OP and \
|
||||
hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_PRE:
|
||||
return lifecycle_utils.create_local_registry_secrets(app_op, app, hook_info)
|
||||
elif hook_info.operation == constants.APP_REMOVE_OP and \
|
||||
hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_POST:
|
||||
return lifecycle_utils.delete_local_registry_secrets(app_op, app, hook_info)
|
||||
|
||||
# Operation
|
||||
elif hook_info.lifecycle_type == constants.APP_LIFECYCLE_TYPE_OPERATION:
|
||||
if hook_info.operation == constants.APP_APPLY_OP and \
|
||||
hook_info.relative_timing == constants.APP_LIFECYCLE_TIMING_POST:
|
||||
return self.post_apply(context, conductor_obj, app, hook_info)
|
||||
|
||||
# Use the default behaviour for other hooks
|
||||
super(RookCephAppLifecycleOperator, self).app_lifecycle_actions(context, conductor_obj, app_op, app, hook_info)
|
||||
|
||||
def post_apply(self, context, conductor_obj, app, hook_info):
|
||||
""" Post apply actions
|
||||
|
||||
:param context: request context
|
||||
:param conductor_obj: conductor object
|
||||
:param app: AppOperator.Application object
|
||||
:param hook_info: LifecycleHookInfo object
|
||||
|
||||
"""
|
||||
if LifecycleConstants.EXTRA not in hook_info:
|
||||
raise exception.LifecycleMissingInfo("Missing {}".format(LifecycleConstants.EXTRA))
|
||||
if LifecycleConstants.APP_APPLIED not in hook_info[LifecycleConstants.EXTRA]:
|
||||
raise exception.LifecycleMissingInfo(
|
||||
"Missing {} {}".format(LifecycleConstants.EXTRA, LifecycleConstants.APP_APPLIED))
|
||||
|
||||
if hook_info[LifecycleConstants.EXTRA][LifecycleConstants.APP_APPLIED]:
|
||||
# apply any runtime configurations that are needed for
|
||||
# rook_ceph application
|
||||
conductor_obj._update_config_for_rook_ceph(context)
|
@ -1,46 +0,0 @@
|
||||
[metadata]
|
||||
name = k8sapp-rook
|
||||
summary = StarlingX sysinv extensions for rook-ceph-apps
|
||||
long_description = file: README.rst
|
||||
long_description_content_type = text/x-rst
|
||||
license = Apache 2.0
|
||||
author = StarlingX
|
||||
author-email = starlingx-discuss@lists.starlingx.io
|
||||
home-page = https://www.starlingx.io/
|
||||
classifier =
|
||||
Environment :: OpenStack
|
||||
Intended Audience :: Information Technology
|
||||
Intended Audience :: System Administrators
|
||||
License :: OSI Approved :: Apache Software License
|
||||
Operating System :: POSIX :: Linux
|
||||
Programming Language :: Python
|
||||
Programming Language :: Python :: 2
|
||||
Programming Language :: Python :: 2.7
|
||||
Programming Language :: Python :: 3
|
||||
Programming Language :: Python :: 3.5
|
||||
Programming Language :: Python :: 3.6
|
||||
Programming Language :: Python :: 3.7
|
||||
Programming Language :: Python :: 3.8
|
||||
|
||||
[files]
|
||||
packages =
|
||||
k8sapp_rook
|
||||
|
||||
[global]
|
||||
setup-hooks =
|
||||
pbr.hooks.setup_hook
|
||||
|
||||
[entry_points]
|
||||
systemconfig.helm_applications =
|
||||
rook-ceph-apps = systemconfig.helm_plugins.rook_ceph_apps
|
||||
|
||||
systemconfig.helm_plugins.rook_ceph_apps =
|
||||
001_rook-operator = k8sapp_rook.helm.rook_operator:RookOperatorHelm
|
||||
002_rook-ceph = k8sapp_rook.helm.rook_ceph:RookCephHelm
|
||||
003_rook-ceph-provisioner = k8sapp_rook.helm.rook_ceph_provisioner:RookCephProvisionerHelm
|
||||
|
||||
systemconfig.app_lifecycle =
|
||||
rook-ceph-apps = k8sapp_rook.lifecycle.lifecycle_rook_ceph:RookCephAppLifecycleOperator
|
||||
|
||||
[bdist_wheel]
|
||||
universal = 1
|
stx-migration-rook-ceph-helm/debian/deb_folder/changelog (new file, 5 lines)
@@ -0,0 +1,5 @@
stx-migration-rook-ceph-helm (2.0-1) unstable; urgency=medium

* Initial release.

-- Caio Cesar Correa <caio.correa@windriver.com> Fri, 20 Oct 2023 15:00:00 -0300
@@ -1,17 +1,15 @@
Source: stx-rook-ceph
Source: stx-migration-rook-ceph-helm
Section: admin
Priority: optional
Maintainer: StarlingX Developers <starlingx-discuss@lists.starlingx.io>
Build-Depends: debhelper-compat (= 13),
helm,
chartmuseum,
python3-k8sapp-rook,
python3-k8sapp-rook-wheels,
procps
migration-rook-ceph-helm,
python3-k8sapp-migration-rook-ceph-wheels,
Standards-Version: 4.1.2
Homepage: https://www.starlingx.io

Package: stx-rook-ceph
Package: stx-migration-rook-ceph-helm
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Description: StarlingX K8S application: Rook Ceph
@@ -1,10 +1,10 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: stx-rook-ceph
Upstream-Name: stx-migration-rook-ceph-helm
Source: https://opendev.org/starlingx/rook-ceph/

Files: *
Copyright:
(c) 2013-2021 Wind River Systems, Inc
(c) 2013-2024 Wind River Systems, Inc
(c) Others (See individual files for more details)
License: Apache-2
Licensed under the Apache License, Version 2.0 (the "License");
@@ -25,7 +25,7 @@ License: Apache-2
# If you want to use GPL v2 or later for the /debian/* files use
# the following clauses, or change it to suit. Delete these two lines
Files: debian/*
Copyright: 2021 Wind River Systems, Inc
Copyright: 2013-2024 Wind River Systems, Inc
License: Apache-2
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
stx-migration-rook-ceph-helm/debian/deb_folder/rules (new executable file, 66 lines)
@@ -0,0 +1,66 @@
#!/usr/bin/make -f
export DH_VERBOSE = 1

export ROOT = debian/tmp
export APP_FOLDER = $(ROOT)/usr/local/share/applications/helm
export INITRD_DIR = $(ROOT)/etc/init.d

export DEB_VERSION = $(shell dpkg-parsechangelog | egrep '^Version:' | cut -f 2 -d ' ')
export RELEASE = $(shell echo $(DEB_VERSION) | cut -f 1 -d '-')
export REVISION = $(shell echo $(DEB_VERSION) | cut -f 4 -d '.')

export APP_NAME = rook-ceph-migration
export APP_VERSION = $(RELEASE)-$(REVISION)
export APP_TARBALL_FLUXCD = $(APP_NAME)-$(APP_VERSION).tgz

export HELM_REPO = stx-platform
export HELM_FOLDER = /usr/lib/helm
export STAGING = staging-fluxcd

%:
dh $@

override_dh_auto_build:

# Create the TGZ file.
cd helm-charts && make rook-ceph-provisioner

# Setup staging
mkdir -p $(STAGING)
cp files/metadata.yaml $(STAGING)
cp -Rv fluxcd-manifests $(STAGING)

mkdir -p $(STAGING)/charts
cp helm-charts/*.tgz $(STAGING)/charts
cp $(HELM_FOLDER)/rook-ceph*.tgz $(STAGING)/charts

# Populate metadata
sed -i 's/APP_REPLACE_NAME/$(APP_NAME)/g' $(STAGING)/metadata.yaml
sed -i 's/APP_REPLACE_VERSION/$(APP_VERSION)/g' $(STAGING)/metadata.yaml
sed -i 's/HELM_REPLACE_REPO/$(HELM_REPO)/g' $(STAGING)/metadata.yaml

# Copy the plugins: installed in the buildroot
mkdir -p $(STAGING)/plugins
cp /plugins/*.whl $(STAGING)/plugins

# Package it up
cd $(STAGING)
find . -type f ! -name '*.md5' -print0 | xargs -0 md5sum > checksum.md5
tar -zcf $(APP_TARBALL_FLUXCD) -C $(STAGING)/ .

# Cleanup staging
rm -fr $(STAGING)

override_dh_auto_install:
# Install the app tar file
install -d -m 755 $(APP_FOLDER)
install -d -m 755 $(INITRD_DIR)
install -p -D -m 755 $(APP_TARBALL_FLUXCD) $(APP_FOLDER)
install -m 750 files/rook-mon-exit.sh $(INITRD_DIR)/rook-mon-exit

# Prevents dh_fixperms from changing the permissions defined in this file
override_dh_fixperms:
dh_fixperms --exclude etc/init.d/rook-mon-exit

override_dh_usrlocal:
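The checksum step in override_dh_auto_build above hashes every staged file (except *.md5) into checksum.md5 before the application tarball is created. Purely as an illustration, and assuming an already-built tarball name such as rook-ceph-migration-2.0-0.tgz (the real name depends on the build), the Python sketch below unpacks an app tarball and re-checks its contents against that file; it is not part of the packaging.

# Illustrative verification of the checksum.md5 produced above; the tarball
# name and work directory are assumptions, not values taken from the build.
import hashlib
import tarfile
from pathlib import Path

def verify_app_tarball(tarball="rook-ceph-migration-2.0-0.tgz", workdir="unpacked"):
    with tarfile.open(tarball) as tar:
        tar.extractall(workdir)
    recorded = {}
    for line in Path(workdir, "checksum.md5").read_text().splitlines():
        digest, name = line.split(maxsplit=1)
        recorded[name.lstrip("./")] = digest
    for path in Path(workdir).rglob("*"):
        if path.is_file() and path.suffix != ".md5":
            rel = str(path.relative_to(workdir))
            if recorded.get(rel) != hashlib.md5(path.read_bytes()).hexdigest():
                print(f"checksum mismatch: {rel}")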
@@ -0,0 +1 @@
3.0 (quilt)
@@ -1,2 +1,2 @@
usr/local/share/applications/helm/*
etc/rc.d/init.d/rook-mon-exit
etc/init.d/rook-mon-exit
@@ -1,11 +1,10 @@
---
debname: stx-rook-ceph
debver: 1.0-1
src_path: stx-rook-ceph
debname: stx-migration-rook-ceph-helm
debver: 2.0-0
src_path: stx-migration-rook-ceph-helm
src_files:
- files
revision:
dist: $STX_DIST
GITREVCOUNT:
SRC_DIR: ${MY_REPO}/stx/rook-ceph
BASE_SRCREV: 10c623509a68acad945d4e0c06a86b3e8486ad5b
@@ -1,6 +1,7 @@
#!/bin/bash
#
# Copyright (c) 2020 Intel Corporation, Inc.
# Copyright (c) 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -21,19 +22,19 @@ function stop {
pgrep ceph-mon
if [ x"$?" = x"0" ]; then
kubectl --kubeconfig=/etc/kubernetes/admin.conf delete \
deployments.apps -n kube-system rook-ceph-mon-a
deployments.apps -n rook-ceph rook-ceph-mon-a
kubectl --kubeconfig=/etc/kubernetes/admin.conf delete po \
-n kube-system --selector="app=rook-ceph-mon,mon=a"
-n rook-ceph --selector="app=rook-ceph-mon,mon=a"
fi

pgrep ceph-osd
if [ x"$?" = x"0" ]; then
kubectl --kubeconfig=/etc/kubernetes/admin.conf delete \
deployments.apps -n kube-system \
deployments.apps -n rook-ceph \
--selector="app=rook-ceph-osd,failure-domain=$(hostname)"
kubectl --kubeconfig=/etc/kubernetes/admin.conf delete po \
--selector="app=rook-ceph-osd,failure-domain=$(hostname)" \
-n kube-system
-n rook-ceph
fi
}
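The init script hunk above removes the mon-a and per-host OSD workloads with kubectl, now targeting the rook-ceph namespace instead of kube-system. For illustration only, a minimal Python equivalent of the mon-a cleanup using the official kubernetes client is sketched below; the shipped hook continues to use kubectl, and only the kubeconfig path, names, and namespace come from the script itself.

# Sketch only: the same mon-a cleanup as the kubectl calls above, expressed
# with the kubernetes Python client. Not used by the application itself.
from kubernetes import client, config

def delete_mon_a(kubeconfig="/etc/kubernetes/admin.conf"):
    config.load_kube_config(config_file=kubeconfig)
    client.AppsV1Api().delete_namespaced_deployment("rook-ceph-mon-a", "rook-ceph")
    client.CoreV1Api().delete_collection_namespaced_pod(
        "rook-ceph", label_selector="app=rook-ceph-mon,mon=a")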
@@ -3,10 +3,14 @@ app_version: APP_REPLACE_VERSION
helm_repo: HELM_REPLACE_REPO

helm_toolkit_required: false
maintain_user_overrides: true
maintain_attributes: true

upgrades:
auto_update: false

behavior:
platform_managed_app: yes
desired_state: uploaded
platform_managed_app: no
evaluate_reapply:
triggers:
- type: runtime-apply-puppet
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2022 Wind River Systems, Inc.
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2022 Wind River Systems, Inc.
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2022 Wind River Systems, Inc.
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
@ -7,4 +7,4 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: kube-system
|
||||
name: rook-ceph
|
@ -1,14 +1,14 @@
|
||||
#
|
||||
# Copyright (c) 2022 Wind River Systems, Inc.
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
namespace: kube-system
|
||||
namespace: rook-ceph
|
||||
resources:
|
||||
- base
|
||||
- rook-ceph
|
||||
- rook-operator
|
||||
- rook-ceph-cluster
|
||||
- rook-ceph-provisioner
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2022 Wind River Systems, Inc.
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
@ -7,20 +7,22 @@
|
||||
apiVersion: "helm.toolkit.fluxcd.io/v2beta1"
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: rook-operator
|
||||
name: rook-ceph-cluster
|
||||
labels:
|
||||
chart_group: starlingx-rook-charts
|
||||
spec:
|
||||
releaseName: rook-operator
|
||||
releaseName: rook-ceph-cluster
|
||||
chart:
|
||||
spec:
|
||||
chart: rook-operator
|
||||
version: 0.1.0
|
||||
chart: rook-ceph-cluster
|
||||
version: 1.7.11
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: stx-platform
|
||||
interval: 5m
|
||||
timeout: 30m
|
||||
dependsOn:
|
||||
- name: rook-ceph
|
||||
test:
|
||||
enable: false
|
||||
install:
|
||||
@ -28,11 +30,11 @@ spec:
|
||||
upgrade:
|
||||
disableHooks: false
|
||||
uninstall:
|
||||
disableHooks: true
|
||||
disableHooks: false
|
||||
valuesFrom:
|
||||
- kind: Secret
|
||||
name: rook-operator-static-overrides
|
||||
valuesKey: rook-operator-static-overrides.yaml
|
||||
name: rook-ceph-cluster-static-overrides
|
||||
valuesKey: rook-ceph-cluster-static-overrides.yaml
|
||||
- kind: Secret
|
||||
name: rook-operator-system-overrides
|
||||
valuesKey: rook-operator-system-overrides.yaml
|
||||
name: rook-ceph-cluster-system-overrides
|
||||
valuesKey: rook-ceph-cluster-system-overrides.yaml
|
@ -0,0 +1,18 @@
|
||||
#
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
namespace: rook-ceph
|
||||
resources:
|
||||
- helmrelease.yaml
|
||||
secretGenerator:
|
||||
- name: rook-ceph-cluster-static-overrides
|
||||
files:
|
||||
- rook-ceph-cluster-static-overrides.yaml
|
||||
- name: rook-ceph-cluster-system-overrides
|
||||
files:
|
||||
- rook-ceph-cluster-system-overrides.yaml
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true
|
@ -0,0 +1,242 @@
|
||||
#
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
# Default values for ceph-cluster
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
configOverride: |
|
||||
[global]
|
||||
osd_journal_size = 1024
|
||||
osd_pool_default_size = 1
|
||||
osd_pool_default_min_size = 1
|
||||
osd_pool_default_pg_num = 64
|
||||
osd_pool_default_pgp_num = 64
|
||||
|
||||
osd_crush_chooseleaf_type = 1
|
||||
setuser match path = /var/lib/ceph/$type/$cluster-$id
|
||||
mon_osd_min_down_reporters = 1
|
||||
osd_mon_report_interval_max = 120
|
||||
mon_max_pg_per_osd = 2048
|
||||
osd_max_pg_per_osd_hard_ratio = 1.2
|
||||
ms_bind_ipv6 = false
|
||||
|
||||
[osd]
|
||||
osd_mkfs_type = xfs
|
||||
osd_mkfs_options_xfs = "-f"
|
||||
osd_mount_options_xfs = "rw,noatime,inode64,logbufs=8,logbsize=256k"
|
||||
|
||||
[mon]
|
||||
mon warn on legacy crush tunables = false
|
||||
mon pg warn max per osd = 2048
|
||||
mon pg warn max object skew = 0
|
||||
mon clock drift allowed = .1
|
||||
mon warn on pool no redundancy = false
|
||||
|
||||
operatorNamespace: rook-ceph
|
||||
cephClusterSpec:
|
||||
dataDirHostPath: /var/lib/ceph
|
||||
cephVersion:
|
||||
image: quay.io/ceph/ceph:v14.2.22
|
||||
allowUnsupported: true
|
||||
network:
|
||||
provider: host
|
||||
mon:
|
||||
count: 1
|
||||
allowMultiplePerNode: false
|
||||
mgr:
|
||||
count: 1
|
||||
allowMultiplePerNode: false
|
||||
modules:
|
||||
# Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
|
||||
# are already enabled by other settings in the cluster CR.
|
||||
- name: pg_autoscaler
|
||||
enabled: false
|
||||
dashboard:
|
||||
enabled: false
|
||||
crashCollector:
|
||||
disable: true
|
||||
#deviceFilter:
|
||||
healthCheck:
|
||||
daemonHealth:
|
||||
mon:
|
||||
interval: 45s
|
||||
timeout: 600s
|
||||
disruptionManagement:
|
||||
managePodBudgets: true
|
||||
storage:
|
||||
useAllNodes: false
|
||||
useAllDevices: false
|
||||
|
||||
placement:
|
||||
all:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
key: node-role.kubernetes.io/master
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
key: node-role.kubernetes.io/control-plane
|
||||
mon:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: ceph-mon-placement
|
||||
operator: In
|
||||
values:
|
||||
- enabled
|
||||
mgr:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: ceph-mgr-placement
|
||||
operator: In
|
||||
values:
|
||||
- enabled
|
||||
|
||||
toolbox:
|
||||
enabled: true
|
||||
image: rook/ceph:v1.7.11
|
||||
|
||||
pspEnable: false
|
||||
|
||||
monitoring:
|
||||
# requires Prometheus to be pre-installed
|
||||
# enabling will also create RBAC rules to allow Operator to create ServiceMonitors
|
||||
enabled: false
|
||||
|
||||
|
||||
cephFileSystems:
|
||||
- name: kube-cephfs
|
||||
# see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem-crd.md#filesystem-settings for available configuration
|
||||
spec:
|
||||
metadataPool:
|
||||
replicated:
|
||||
size: 1
|
||||
dataPools:
|
||||
- failureDomain: host # TODO
|
||||
replicated:
|
||||
size: 1
|
||||
metadataServer:
|
||||
activeCount: 1
|
||||
activeStandby: true
|
||||
storageClass:
|
||||
enabled: true
|
||||
isDefault: false
|
||||
name: cephfs
|
||||
reclaimPolicy: Delete
|
||||
allowVolumeExpansion: true
|
||||
mountOptions: []
|
||||
# see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem.md#provision-storage for available configuration
|
||||
parameters:
|
||||
# The secrets contain Ceph admin credentials.
|
||||
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
|
||||
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
|
||||
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
|
||||
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
|
||||
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
|
||||
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
|
||||
# Specify the filesystem type of the volume. If not specified, csi-provisioner
|
||||
# will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
|
||||
# in hyperconverged settings where the volume is mounted on the same node as the osds.
|
||||
csi.storage.k8s.io/fstype: ext4
|
||||
|
||||
cephBlockPools:
|
||||
- name: kube-rbd
|
||||
# see https://github.com/rook/rook/blob/master/Documentation/ceph-pool-crd.md#spec for available configuration
|
||||
spec:
|
||||
failureDomain: host
|
||||
replicated:
|
||||
size: 1
|
||||
storageClass:
|
||||
enabled: true
|
||||
name: general
|
||||
isDefault: true
|
||||
reclaimPolicy: Delete
|
||||
allowVolumeExpansion: true
|
||||
mountOptions: []
|
||||
# see https://github.com/rook/rook/blob/master/Documentation/ceph-block.md#provision-storage for available configuration
|
||||
parameters:
|
||||
# (optional) mapOptions is a comma-separated list of map options.
|
||||
# For krbd options refer
|
||||
# https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
|
||||
# For nbd options refer
|
||||
# https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
|
||||
# mapOptions: lock_on_read,queue_depth=1024
|
||||
|
||||
# (optional) unmapOptions is a comma-separated list of unmap options.
|
||||
# For krbd options refer
|
||||
# https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
|
||||
# For nbd options refer
|
||||
# https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
|
||||
# unmapOptions: force
|
||||
|
||||
# RBD image format. Defaults to "2".
|
||||
imageFormat: "2"
|
||||
# RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
|
||||
imageFeatures: layering
|
||||
# The secrets contain Ceph admin credentials.
|
||||
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
|
||||
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
|
||||
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
|
||||
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
|
||||
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
|
||||
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
|
||||
# Specify the filesystem type of the volume. If not specified, csi-provisioner
|
||||
# will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
|
||||
# in hyperconverged settings where the volume is mounted on the same node as the osds.
|
||||
csi.storage.k8s.io/fstype: ext4
|
||||
|
||||
cephObjectStores:
|
||||
- name: ceph-objectstore
|
||||
# see https://github.com/rook/rook/blob/master/Documentation/ceph-object-store-crd.md#object-store-settings for available configuration
|
||||
spec:
|
||||
metadataPool:
|
||||
failureDomain: host
|
||||
replicated:
|
||||
size: 0
|
||||
dataPool:
|
||||
failureDomain: host
|
||||
erasureCoded:
|
||||
dataChunks: 0
|
||||
codingChunks: 0
|
||||
preservePoolsOnDelete: false
|
||||
gateway:
|
||||
port: 80
|
||||
# securePort: 443
|
||||
# sslCertificateRef:
|
||||
instances: 0
|
||||
healthCheck:
|
||||
bucket:
|
||||
interval: 60s
|
||||
storageClass:
|
||||
enabled: false
|
||||
|
||||
mds:
|
||||
name: cephfs
|
||||
replica: 2
|
||||
|
||||
imagePullSecrets:
|
||||
- name: default-registry-key
|
||||
|
||||
hook:
|
||||
image: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20220802
|
||||
duplexPreparation:
|
||||
enable: false
|
||||
activeController: controller-0
|
||||
floatIP: 192.188.204.1
|
||||
cleanup:
|
||||
enable: true
|
||||
cluster_cleanup: rook-ceph
|
||||
rbac:
|
||||
clusterRole: rook-ceph-cleanup
|
||||
clusterRoleBinding: rook-ceph-cleanup
|
||||
role: rook-ceph-cleanup
|
||||
roleBinding: rook-ceph-cleanup
|
||||
serviceAccount: rook-ceph-cleanup
|
||||
mon_hosts:
|
||||
- controller-0
|
@ -0,0 +1,6 @@
|
||||
#
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2022 Wind River Systems, Inc.
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
@ -15,14 +15,14 @@ spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: rook-ceph-provisioner
|
||||
version: 0.1.0
|
||||
version: 1.1.0
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: stx-platform
|
||||
interval: 5m
|
||||
timeout: 30m
|
||||
dependsOn:
|
||||
- name: rook-ceph
|
||||
- name: rook-ceph-cluster
|
||||
test:
|
||||
enable: false
|
||||
install:
|
||||
@ -30,7 +30,7 @@ spec:
|
||||
upgrade:
|
||||
disableHooks: false
|
||||
uninstall:
|
||||
disableHooks: true
|
||||
disableHooks: false
|
||||
valuesFrom:
|
||||
- kind: Secret
|
||||
name: rook-ceph-provisioner-static-overrides
|
@ -1,10 +1,10 @@
|
||||
#
|
||||
# Copyright (c) 2022 Wind River Systems, Inc.
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
namespace: kube-system
|
||||
namespace: rook-ceph
|
||||
resources:
|
||||
- helmrelease.yaml
|
||||
secretGenerator:
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2022 Wind River Systems, Inc.
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
@ -40,7 +40,7 @@ images:
|
||||
|
||||
provisionStorage:
|
||||
# Defines the name of the provisioner associated with a set of storage classes
|
||||
provisioner_name: kube-system.rbd.csi.ceph.com
|
||||
provisioner_name: rook-ceph.rbd.csi.ceph.com
|
||||
# Enable this storage class as the system default storage class
|
||||
defaultStorageClass: rook-ceph
|
||||
# Configure storage classes.
|
||||
@ -65,22 +65,24 @@ provisionStorage:
|
||||
# Ceph user name to access this pool
|
||||
userId: kube
|
||||
pool:
|
||||
pool_name: kube
|
||||
pool_name: kube-rbd
|
||||
replication: 1
|
||||
crush_rule_name: storage_tier_ruleset
|
||||
chunk_size: 8
|
||||
|
||||
|
||||
cephfsStorage:
|
||||
provisioner_name: kube-system.cephfs.csi.ceph.com
|
||||
fs_name: stxfs
|
||||
pool_name: stxfs-data0
|
||||
provisioner_name: rook-ceph.cephfs.csi.ceph.com
|
||||
fs_name: kube-cephfs
|
||||
pool_name: kube-cephfs-data
|
||||
|
||||
|
||||
host_provision:
|
||||
controller_hosts:
|
||||
- controller-0
|
||||
|
||||
imagePullSecrets:
|
||||
- name: default-registry-key
|
||||
|
||||
ceph_audit_jobs:
|
||||
floatIP: 192.168.204.2
|
||||
@ -90,3 +92,19 @@ ceph_audit_jobs:
|
||||
history:
|
||||
success: 1
|
||||
failed: 1
|
||||
hook:
|
||||
image: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20220802
|
||||
duplexPreparation:
|
||||
enable: true
|
||||
activeController: controller-0
|
||||
floatIP: 192.188.204.1
|
||||
cleanup:
|
||||
enable: true
|
||||
rbac:
|
||||
clusterRole: rook-ceph-cleanup
|
||||
clusterRoleBinding: rook-ceph-cleanup
|
||||
role: rook-ceph-cleanup/
|
||||
roleBinding: rook-ceph-cleanup
|
||||
serviceAccount: rook-ceph-cleanup
|
||||
mon_hosts:
|
||||
- controller-0
|
@ -0,0 +1,6 @@
|
||||
#
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2022 Wind River Systems, Inc.
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
@ -15,14 +15,12 @@ spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: rook-ceph
|
||||
version: 0.1.0
|
||||
version: 1.7.11
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: stx-platform
|
||||
interval: 5m
|
||||
timeout: 30m
|
||||
dependsOn:
|
||||
- name: rook-operator
|
||||
test:
|
||||
enable: false
|
||||
install:
|
@ -1,12 +1,13 @@
|
||||
#
|
||||
# Copyright (c) 2022 Wind River Systems, Inc.
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
namespace: kube-system
|
||||
namespace: rook-ceph
|
||||
resources:
|
||||
- helmrelease.yaml
|
||||
- service-account-default.yaml
|
||||
secretGenerator:
|
||||
- name: rook-ceph-static-overrides
|
||||
files:
|
@ -0,0 +1,103 @@
|
||||
#
|
||||
# Copyright (c) 2022-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
image:
|
||||
prefix: rook
|
||||
repository: rook/ceph
|
||||
tag: v1.7.11
|
||||
pullPolicy: IfNotPresent
|
||||
app.starlingx.io/component: platform
|
||||
nodeSelector: {node-role.kubernetes.io/control-plane : ""}
|
||||
|
||||
# In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
|
||||
# Disable it here if you have similar issues.
|
||||
# For more details see https://github.com/rook/rook/issues/2417
|
||||
enableSelinuxRelabeling: true
|
||||
# Writing to the hostPath is required for the Ceph mon and osd pods. Given the restricted permissions in OpenShift with SELinux,
|
||||
# the pod must be running privileged in order to write to the hostPath volume, this must be set to true then.
|
||||
hostpathRequiresPrivileged: false
|
||||
# Disable automatic orchestration when new devices are discovered.
|
||||
disableDeviceHotplug: false
|
||||
# Blacklist certain disks according to the regex provided.
|
||||
discoverDaemonUdev:
|
||||
enableDiscoveryDaemon: false
|
||||
# Tolerations for the rook-ceph-operator to allow it to run on nodes with particular taints
|
||||
|
||||
pspEnable: false
|
||||
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
||||
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
||||
|
||||
crds:
|
||||
# Whether the helm chart should create and update the CRDs. If false, the CRDs must be
|
||||
# managed independently with cluster/examples/kubernetes/ceph/crds.yaml.
|
||||
# **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
|
||||
# If the CRDs are deleted in this case, see the disaster recovery guide to restore them.
|
||||
# https://rook.github.io/docs/rook/master/ceph-disaster-recovery.html#restoring-crds-after-deletion
|
||||
enabled: true
|
||||
|
||||
currentNamespaceOnly: false
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 600m
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 200m
|
||||
memory: 256Mi
|
||||
|
||||
# imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
|
||||
imagePullSecrets:
|
||||
- name: default-registry-key
|
||||
|
||||
csi:
|
||||
cephcsi:
|
||||
image: quay.io/cephcsi/cephcsi:v3.4.0
|
||||
registrar:
|
||||
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0
|
||||
provisioner:
|
||||
image: registry.k8s.io/sig-storage/csi-provisioner:v3.5.0
|
||||
snapshotter:
|
||||
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2
|
||||
attacher:
|
||||
image: registry.k8s.io/sig-storage/csi-attacher:v4.3.0
|
||||
resizer:
|
||||
image: registry.k8s.io/sig-storage/csi-resizer:v1.8.0
|
||||
|
||||
# kubeletDirPath: /var/lib/kubelet
|
||||
pluginTolerations:
|
||||
- operator: "Exists"
|
||||
|
||||
enableRbdDriver: true
|
||||
enableCephfsDriver: true
|
||||
enableGrpcMetrics: true
|
||||
enableSnapshotter: true
|
||||
|
||||
provisionerTolerations:
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
||||
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
||||
admissionController:
|
||||
# Set tolerations and nodeAffinity for admission controller pod.
|
||||
# The admission controller would be best to start on the same nodes as other ceph daemons.
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
||||
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
@ -0,0 +1,6 @@
|
||||
#
|
||||
# Copyright (c) 2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
@ -0,0 +1,13 @@
|
||||
#
|
||||
# Copyright (c) 2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
imagePullSecrets:
|
||||
- name: default-registry-key
|
||||
metadata:
|
||||
name: default
|
||||
namespace: rook-ceph
|
@ -1,7 +1,7 @@
|
||||
#
|
||||
# Copyright 2017 The Openstack-Helm Authors.
|
||||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
# Copyright (c) 2019-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
@ -1,11 +1,11 @@
|
||||
#
|
||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
||||
# Copyright (c) 2018,2024 Wind River Systems, Inc.
|
||||
# Copyright (c) 2020 Intel, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
apiVersion: v1
|
||||
appVersion: "1.0"
|
||||
appVersion: "1.1"
|
||||
description: A Helm chart for Kubernetes
|
||||
name: rook-ceph-provisioner
|
||||
version: 0.1.0
|
||||
version: 1.1.0
|
@ -117,7 +117,7 @@ def osd_audit():
|
||||
kube = KubeOperator()
|
||||
group = "ceph.rook.io"
|
||||
version = "v1"
|
||||
namespace = "kube-system"
|
||||
namespace = "rook-ceph"
|
||||
plural = "cephclusters"
|
||||
name = "cephclusters.ceph.rook.io.ceph-cluster"
|
||||
|
@ -1,6 +1,6 @@
|
||||
{{/*
|
||||
#
|
||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
||||
# Copyright (c) 2018,2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
@ -1,6 +1,6 @@
|
||||
{{/*
|
||||
#
|
||||
# Copyright (c) 2018 Wind River Systems, Inc.
|
||||
# Copyright (c) 2018,2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
@ -16,6 +16,13 @@ spec:
|
||||
labels:
|
||||
app: stx-ceph-manager
|
||||
spec:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
key: node-role.kubernetes.io/master
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
key: node-role.kubernetes.io/control-plane
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
serviceAccountName: {{ .Values.rbac.serviceAccount }}
|
||||
volumes:
|
@ -1,6 +1,6 @@
|
||||
{{/*
|
||||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
# Copyright (c) 2019,2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
@ -29,7 +29,7 @@ data:
|
||||
active=$node
|
||||
fi
|
||||
|
||||
controller_node=$(kubectl get pods -n kube-system --selector=app="rook-ceph-mon,ceph_daemon_id=a" -o wide | awk '/Running.*controller/ {print $7}')
|
||||
controller_node=$(kubectl get pods -n rook-ceph --selector=app="rook-ceph-mon,ceph_daemon_id=a" -o wide | awk '/Running.*controller/ {print $7}')
|
||||
if [ x"$active" = x"$controller_node" ]; then
|
||||
echo "mon-a pod is running on active controler"
|
||||
|
||||
@ -53,10 +53,10 @@ data:
|
||||
rm -f endpoint.yaml
|
||||
|
||||
# delete mon-a deployment and pod
|
||||
kubectl delete deployments.apps -n kube-system rook-ceph-mon-a
|
||||
kubectl delete pods -n kube-system --selector="app=rook-ceph-mon,ceph_daemon_id=a"
|
||||
kubectl delete deployments.apps -n rook-ceph rook-ceph-mon-a
|
||||
kubectl delete pods -n rook-ceph --selector="app=rook-ceph-mon,ceph_daemon_id=a"
|
||||
|
||||
kubectl delete po -n kube-system --selector="app=rook-ceph-operator"
|
||||
kubectl delete po -n rook-ceph --selector="app=rook-ceph-operator"
|
||||
---
|
||||
apiVersion: batch/v1
|
||||
kind: CronJob
|
@ -1,6 +1,6 @@
|
||||
{{/*
|
||||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
# Copyright (c) 2019,2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
@ -38,12 +38,19 @@ spec:
|
||||
labels:
|
||||
app: ceph-osd-audit
|
||||
spec:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
key: node-role.kubernetes.io/master
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
key: node-role.kubernetes.io/control-plane
|
||||
serviceAccountName: {{ .Values.rbac.serviceAccount }}
|
||||
restartPolicy: OnFailure
|
||||
hostNetwork: true
|
||||
{{- if .Values.global.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ .Values.global.nodeSelector | toYaml | trim | indent 10 }}
|
||||
{{ .Values.global.nodeSelector | toYaml | trim | indent 12 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: ceph-osd-audit-bin
|
@ -1,6 +1,6 @@
|
||||
{{/*
|
||||
#
|
||||
# Copyright (c) 2018-2022 Wind River Systems, Inc.
|
||||
# Copyright (c) 2018-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
@ -62,6 +62,8 @@ data:
|
||||
DNS.2 = controller-1
|
||||
EOF
|
||||
|
||||
openssl req -new -nodes -x509 -subj /O=IT/CN=controller -days 3650 -config /tmp/controller -out /tmp/controller.crt -keyout /tmp/controller.key -extensions v3_ca
|
||||
|
||||
# Exec_retry - wait for the cluster to create osd pools
|
||||
retries=25 # 4 minutes
|
||||
retry_count=1
|
||||
@ -80,10 +82,9 @@ data:
|
||||
|
||||
if [ $retry_count -gt $retries ]; then
|
||||
echo "Error: File /tmp/controller.key was not created."
|
||||
# exit 1
|
||||
exit 1
|
||||
fi
|
||||
|
||||
openssl req -new -nodes -x509 -subj /O=IT/CN=controller -days 3650 -config /tmp/controller -out /tmp/controller.crt -keyout /tmp/controller.key -extensions v3_ca
|
||||
|
||||
for i in "a" "controller-0" "controller-1"
|
||||
do
|
||||
@ -117,6 +118,13 @@ spec:
|
||||
release: {{ .Release.Name | quote }}
|
||||
chart: "{{.Chart.Name}}-{{.Chart.Version}}"
|
||||
spec:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
key: node-role.kubernetes.io/master
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
key: node-role.kubernetes.io/control-plane
|
||||
restartPolicy: OnFailure
|
||||
volumes:
|
||||
- name: ceph-mgr-provision-bin
|
@ -1,7 +1,7 @@
|
||||
{{/*
|
||||
#
|
||||
# Copyright (c) 2020 Intel Corporation, Inc.
|
||||
# Copyright (c) 2018-2022 Wind River Systems, Inc.
|
||||
# Copyright (c) 2018-2024 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|