Merge "Partial FluxCD version of stx-openstack: compute"

Authored by Zuul on 2022-09-27 17:51:33 +00:00, committed by Gerrit Code Review
commit 27d34a0336
41 changed files with 1166 additions and 873 deletions


@@ -33,11 +33,10 @@ Patch12: 0012-Replace-deprecated-Nova-VNC-configurations.patch
Patch13: 0013-Remove-TLS-from-openstack-services.patch
Patch14: 0014-Remove-mariadb-and-rabbit-tls.patch
Patch15: 0015-Decrease-terminationGracePeriodSeconds-on-glance-api.patch
Patch16: 0016-Network-Resources-Cleanup-before-OpenStack-Removal.patch
Patch17: 0017-Update-RBAC-authorization-api-to-v1.patch
Patch18: 0018-Fixing-cinder-helm-release-hooks-weights-helmv3.patch
Patch19: 0019-Fixing-placement-helm-release-hooks.patch
Patch20: 0020-Fixing-nova-helm-release-hooks-and-weights.patch
Patch16: 0016-Update-RBAC-authorization-api-to-v1.patch
Patch17: 0017-Fixing-cinder-helm-release-hooks-weights-helmv3.patch
Patch18: 0018-Fixing-placement-helm-release-hooks.patch
Patch19: 0019-Fixing-nova-helm-release-hooks-and-weights.patch
BuildRequires: helm
BuildRequires: openstack-helm-infra
@@ -67,7 +66,6 @@ Openstack Helm charts
%patch17 -p1
%patch18 -p1
%patch19 -p1
%patch20 -p1
%build
# Stage helm-toolkit in the local repo


@@ -1,431 +0,0 @@
From 26035d478bc2e70182446658f3677b079818305e Mon Sep 17 00:00:00 2001
From: rferraz <RogerioOliveira.Ferraz@windriver.com>
Date: Wed, 25 May 2022 05:49:04 -0300
Subject: [PATCH] Network Resources Cleanup before OpenStack Removal
This patch introduces a new job to clean up
network resources before OpenStack removal.
Changes:
- new file: neutron/templates/bin/_neutron-resources-cleanup.sh.tpl
- new file: neutron/templates/job-resources-cleanup.yaml
- modified: neutron/templates/configmap-bin.yaml
- modified: neutron/values.yaml
Signed-off-by: rferraz <RogerioOliveira.Ferraz@windriver.com>
---
.../bin/_neutron-resources-cleanup.sh.tpl | 220 ++++++++++++++++++
neutron/templates/configmap-bin.yaml | 2 +
neutron/templates/job-resources-cleanup.yaml | 81 +++++++
neutron/values.yaml | 31 +++
4 files changed, 334 insertions(+)
create mode 100644 neutron/templates/bin/_neutron-resources-cleanup.sh.tpl
create mode 100644 neutron/templates/job-resources-cleanup.yaml
diff --git a/neutron/templates/bin/_neutron-resources-cleanup.sh.tpl b/neutron/templates/bin/_neutron-resources-cleanup.sh.tpl
new file mode 100644
index 00000000..8d38373d
--- /dev/null
+++ b/neutron/templates/bin/_neutron-resources-cleanup.sh.tpl
@@ -0,0 +1,220 @@
+#!/bin/bash
+
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+function cleanup_network_trunks()
+{
+ TRUNKS=$(openstack network trunk list -c ID -f value)
+ PORTS=$(openstack network trunk list -c "Parent Port" -f value)
+
+ for TRUNK in ${TRUNKS}; do
+ openstack network trunk delete ${TRUNK}
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete trunk ${TRUNK}"
+ return ${RET}
+ fi
+ done
+
+ for PORT in ${PORTS}; do
+ openstack port delete ${PORT}
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete port ${PORT}"
+ return ${RET}
+ fi
+ done
+ return 0
+}
+
+function cleanup_vm_instances()
+{
+ local VMLIST=""
+ local ID=""
+ local RETRY=0
+
+ VMLIST=$(openstack server list --all-projects -c ID -f value)
+ for VM in ${VMLIST}; do
+ openstack server delete ${VM} --wait
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete VM ${ID}"
+ return ${RET}
+ fi
+ done
+
+ return 0
+}
+
+function cleanup_floating_ips()
+{
+ local IPLIST=""
+ local IP=""
+
+ IPLIST=$(openstack floating ip list | grep -E "[0-9]+.[0-9]+.[0-9]+.[0-9]" | awk '{ print $2; }')
+ for IP in ${IPLIST}; do
+ openstack floating ip delete ${IP}
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete floating ip ${IP}"
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+function cleanup_manual_ports()
+{
+ PORTS=$(openstack port list --device-owner=compute:manual | grep -E "^\|\s\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\s\|" | awk '{ print $2; }')
+ for PORT in ${PORTS}; do
+ openstack port delete ${PORT}
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete manual port ${PORT}"
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+function cleanup_routers()
+{
+ local ROUTERLIST=""
+ local ID=""
+
+ ROUTERLIST=$(openstack router list -c ID -f value)
+ for ID in ${ROUTERLIST}; do
+ openstack router set ${ID} --no-route
+ openstack router unset --external-gateway ${ID}
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to clear gateway on router ${ID}"
+ return 1
+ fi
+
+ PORTS=$(openstack port list --router ${ID} -c ID -f value)
+ for PORT in ${PORTS}; do
+ openstack router remove port ${ID} ${PORT}
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete interface ${PORT} from router ${ID}"
+ return ${RET}
+ fi
+ done
+
+ openstack router delete ${ID}
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete router ${ID}"
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+function cleanup_application_ports()
+{
+ NETS=$(openstack network list -c ID -f value)
+ for NET in $NETS; do
+ NET_PORTS=$(openstack port list --network $NET -c ID -f value)
+ for NET_PORT in $NET_PORTS; do
+ openstack port delete $NET_PORT
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete port ${NET_PORT}"
+ return 1
+ fi
+ done
+ done
+
+ return 0
+}
+
+function cleanup_networks()
+{
+ local ID=""
+ NETLIST=$(openstack network list -c ID -f value)
+ for ID in ${NETLIST}; do
+ openstack network delete ${ID}
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete network ${ID}"
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+date
+echo "Cleaning up network resources..."
+
+echo "Cleaning up network trunks"
+cleanup_network_trunks
+RET=$?
+if [ ${RET} -ne 0 ]; then
+ echo "Failed to cleanup network trunks"
+fi
+
+echo "Cleaning up VM instances"
+cleanup_vm_instances
+RET=$?
+if [ ${RET} -ne 0 ]; then
+ echo "Failed to cleanup VM instances"
+fi
+
+echo "Cleaning up floating IP addresses"
+cleanup_floating_ips
+RET=$?
+if [ ${RET} -ne 0 ]; then
+ echo "Failed to cleanup floating IP addresses"
+fi
+
+echo "Cleaning up manual ports"
+cleanup_manual_ports
+RET=$?
+if [ ${RET} -ne 0 ]; then
+ echo "Failed to cleanup manual ports"
+fi
+
+echo "Cleaning up routers"
+cleanup_routers
+RET=$?
+if [ ${RET} -ne 0 ]; then
+ echo "Failed to cleanup routers"
+fi
+
+echo "Cleaning up application ports"
+cleanup_application_ports
+RET=$?
+if [ ${RET} -ne 0 ]; then
+ echo "Failed to cleanup shared networks"
+fi
+
+echo "Cleaning up networks"
+cleanup_networks
+RET=$?
+if [ ${RET} -ne 0 ]; then
+ echo "Failed to cleanup networks"
+fi
+
+date
+echo "Cleanup finished"
+
+exit 0
diff --git a/neutron/templates/configmap-bin.yaml b/neutron/templates/configmap-bin.yaml
index 2a6b9cff..647762c4 100644
--- a/neutron/templates/configmap-bin.yaml
+++ b/neutron/templates/configmap-bin.yaml
@@ -95,6 +95,8 @@ data:
{{- include "helm-toolkit.scripts.rabbit_init" . | indent 4 }}
neutron-test-force-cleanup.sh: |
{{ tuple "bin/_neutron-test-force-cleanup.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+ neutron-resources-cleanup.sh: |
+{{ tuple "bin/_neutron-resources-cleanup.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- if ( has "tungstenfabric" .Values.network.backend ) }}
tf-plugin.pth: |
/opt/plugin/site-packages
diff --git a/neutron/templates/job-resources-cleanup.yaml b/neutron/templates/job-resources-cleanup.yaml
new file mode 100644
index 00000000..9870305f
--- /dev/null
+++ b/neutron/templates/job-resources-cleanup.yaml
@@ -0,0 +1,81 @@
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if .Values.manifests.job_resources_cleanup }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "neutron-resources-cleanup" }}
+{{ tuple $envAll "resources_cleanup" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ $serviceAccountName }}
+ labels:
+{{ tuple $envAll "neutron" "resources_cleanup" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+ annotations:
+{{- if .Values.helm3_hook }}
+ "helm.sh/hook": pre-delete
+ "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
+{{- end }}
+{{- if .Values.helm2_hook }}
+ "helm.sh/hook": pre-delete
+ "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
+{{- end }}
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
+spec:
+ backoffLimit: 2
+ activeDeadlineSeconds: 1500
+ template:
+ metadata:
+ labels:
+{{ tuple $envAll "neutron" "resources_cleanup" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+ spec:
+ serviceAccountName: {{ $serviceAccountName }}
+{{ dict "envAll" $envAll "application" "neutron_resources_cleanup" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+ restartPolicy: OnFailure
+{{ if .Values.pod.tolerations.neutron.enabled }}
+{{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
+{{ end }}
+ nodeSelector:
+ {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
+ initContainers:
+{{ tuple $envAll "resources_cleanup" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+ containers:
+ - name: {{ $serviceAccountName }}
+{{ tuple $envAll "neutron_resources_cleanup" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll .Values.pod.resources.jobs.resources_cleanup | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+{{ dict "envAll" $envAll "application" "neutron_resources_cleanup" "container" "neutron_resources_cleanup" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
+ env:
+{{- with $env := dict "ksUserSecret" .Values.secrets.identity.admin "useCA" .Values.manifests.certificates}}
+{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 12 }}
+{{- end }}
+ command:
+ - /tmp/{{ $serviceAccountName }}.sh
+ volumeMounts:
+ - name: pod-tmp
+ mountPath: /tmp
+ - name: neutron-bin
+ mountPath: /tmp/{{ $serviceAccountName }}.sh
+ subPath: {{ $serviceAccountName }}.sh
+{{- dict "enabled" .Values.manifests.certificates "name" .Values.secrets.tls.network.server.public | include "helm-toolkit.snippets.tls_volume_mount" | indent 12 }}
+ volumes:
+ - name: pod-tmp
+ emptyDir: {}
+ - name: neutron-bin
+ configMap:
+ name: neutron-bin
+ defaultMode: 0555
+{{- dict "enabled" .Values.manifests.certificates "name" .Values.secrets.tls.network.server.public | include "helm-toolkit.snippets.tls_volume" | indent 8 }}
+{{- end }}
diff --git a/neutron/values.yaml b/neutron/values.yaml
index dc73b68a..4be350e8 100644
--- a/neutron/values.yaml
+++ b/neutron/values.yaml
@@ -42,6 +42,7 @@ images:
neutron_bagpipe_bgp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
neutron_ironic_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
neutron_netns_cleanup_cron: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
+ neutron_resources_cleanup: docker.io/openstackhelm/heat:stein-ubuntu_bionic
dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
image_repo_sync: docker.io/docker:17.07.0
pull_policy: "IfNotPresent"
@@ -326,6 +327,21 @@ dependencies:
service: oslo_cache
- endpoint: internal
service: identity
+ resources_cleanup:
+ jobs:
+ - neutron-db-sync
+ - neutron-rabbit-init
+ services:
+ - endpoint: internal
+ service: oslo_messaging
+ - endpoint: internal
+ service: oslo_db
+ - endpoint: internal
+ service: identity
+ - endpoint: internal
+ service: compute
+ - endpoint: internal
+ service: network
tests:
services:
- endpoint: internal
@@ -547,6 +563,12 @@ pod:
neutron_netns_cleanup_cron:
readOnlyRootFilesystem: true
privileged: true
+ neutron_resources_cleanup:
+ pod:
+ runAsUser: 42424
+ container:
+ neutron_resources_cleanup:
+ readOnlyRootFilesystem: true
affinity:
anti:
type:
@@ -836,6 +858,13 @@ pod:
limits:
memory: "1024Mi"
cpu: "2000m"
+ resources_cleanup:
+ requests:
+ memory: "128Mi"
+ cpu: "100m"
+ limits:
+ memory: "1024Mi"
+ cpu: "2000m"
conf:
rally_tests:
@@ -2522,6 +2551,7 @@ network_policy:
egress:
- {}
+helm2_hook: true
helm3_hook: true
manifests:
@@ -2549,6 +2579,7 @@ manifests:
job_ks_service: true
job_ks_user: true
job_rabbit_init: true
+ job_resources_cleanup: true
pdb_server: true
pod_rally_test: true
network_policy: false
--
2.25.1
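For context, the job defined in the patch removed above is wired in as a Helm pre-delete hook, so it runs when the neutron release is uninstalled. A minimal sketch of how such a hook is triggered, assuming the chart is deployed under the release name and namespace used elsewhere in this change (osh-openstack-neutron in openstack):

# Hypothetical invocation, not part of this commit: a pre-delete hook runs
# its Job (here, neutron-resources-cleanup) and waits for it to finish
# before Helm deletes the rest of the release's resources.
helm uninstall osh-openstack-neutron --namespace openstack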


@@ -12,8 +12,7 @@
0013-Remove-TLS-from-openstack-services.patch
0014-Remove-mariadb-and-rabbit-tls.patch
0015-Decrease-terminationGracePeriodSeconds-on-glance-api.patch
0016-Network-Resources-Cleanup-before-OpenStack-Removal.patch
0017-Update-RBAC-authorization-api-to-v1.patch
0018-Fixing-cinder-helm-release-hooks-weights-helmv3.patch
0019-Fixing-placement-helm-release-hooks.patch
0020-Fixing-nova-helm-release-hooks-and-weights.patch
0016-Update-RBAC-authorization-api-to-v1.patch
0017-Fixing-cinder-helm-release-hooks-weights-helmv3.patch
0018-Fixing-placement-helm-release-hooks.patch
0019-Fixing-nova-helm-release-hooks-and-weights.patch


@@ -1,431 +0,0 @@
From 26035d478bc2e70182446658f3677b079818305e Mon Sep 17 00:00:00 2001
From: rferraz <RogerioOliveira.Ferraz@windriver.com>
Date: Wed, 25 May 2022 05:49:04 -0300
Subject: [PATCH] Network Resources Cleanup before OpenStack Removal
This patch introduces a new job to clean up
network resources before OpenStack removal.
Changes:
- new file: neutron/templates/bin/_neutron-resources-cleanup.sh.tpl
- new file: neutron/templates/job-resources-cleanup.yaml
- modified: neutron/templates/configmap-bin.yaml
- modified: neutron/values.yaml
Signed-off-by: rferraz <RogerioOliveira.Ferraz@windriver.com>
---
.../bin/_neutron-resources-cleanup.sh.tpl | 220 ++++++++++++++++++
neutron/templates/configmap-bin.yaml | 2 +
neutron/templates/job-resources-cleanup.yaml | 81 +++++++
neutron/values.yaml | 31 +++
4 files changed, 334 insertions(+)
create mode 100644 neutron/templates/bin/_neutron-resources-cleanup.sh.tpl
create mode 100644 neutron/templates/job-resources-cleanup.yaml
diff --git a/neutron/templates/bin/_neutron-resources-cleanup.sh.tpl b/neutron/templates/bin/_neutron-resources-cleanup.sh.tpl
new file mode 100644
index 00000000..8d38373d
--- /dev/null
+++ b/neutron/templates/bin/_neutron-resources-cleanup.sh.tpl
@@ -0,0 +1,220 @@
+#!/bin/bash
+
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+function cleanup_network_trunks()
+{
+ TRUNKS=$(openstack network trunk list -c ID -f value)
+ PORTS=$(openstack network trunk list -c "Parent Port" -f value)
+
+ for TRUNK in ${TRUNKS}; do
+ openstack network trunk delete ${TRUNK}
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete trunk ${TRUNK}"
+ return ${RET}
+ fi
+ done
+
+ for PORT in ${PORTS}; do
+ openstack port delete ${PORT}
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete port ${PORT}"
+ return ${RET}
+ fi
+ done
+ return 0
+}
+
+function cleanup_vm_instances()
+{
+ local VMLIST=""
+ local ID=""
+ local RETRY=0
+
+ VMLIST=$(openstack server list --all-projects -c ID -f value)
+ for VM in ${VMLIST}; do
+ openstack server delete ${VM} --wait
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete VM ${ID}"
+ return ${RET}
+ fi
+ done
+
+ return 0
+}
+
+function cleanup_floating_ips()
+{
+ local IPLIST=""
+ local IP=""
+
+ IPLIST=$(openstack floating ip list | grep -E "[0-9]+.[0-9]+.[0-9]+.[0-9]" | awk '{ print $2; }')
+ for IP in ${IPLIST}; do
+ openstack floating ip delete ${IP}
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete floating ip ${IP}"
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+function cleanup_manual_ports()
+{
+ PORTS=$(openstack port list --device-owner=compute:manual | grep -E "^\|\s\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\s\|" | awk '{ print $2; }')
+ for PORT in ${PORTS}; do
+ openstack port delete ${PORT}
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete manual port ${PORT}"
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+function cleanup_routers()
+{
+ local ROUTERLIST=""
+ local ID=""
+
+ ROUTERLIST=$(openstack router list -c ID -f value)
+ for ID in ${ROUTERLIST}; do
+ openstack router set ${ID} --no-route
+ openstack router unset --external-gateway ${ID}
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to clear gateway on router ${ID}"
+ return 1
+ fi
+
+ PORTS=$(openstack port list --router ${ID} -c ID -f value)
+ for PORT in ${PORTS}; do
+ openstack router remove port ${ID} ${PORT}
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete interface ${PORT} from router ${ID}"
+ return ${RET}
+ fi
+ done
+
+ openstack router delete ${ID}
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete router ${ID}"
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+function cleanup_application_ports()
+{
+ NETS=$(openstack network list -c ID -f value)
+ for NET in $NETS; do
+ NET_PORTS=$(openstack port list --network $NET -c ID -f value)
+ for NET_PORT in $NET_PORTS; do
+ openstack port delete $NET_PORT
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete port ${NET_PORT}"
+ return 1
+ fi
+ done
+ done
+
+ return 0
+}
+
+function cleanup_networks()
+{
+ local ID=""
+ NETLIST=$(openstack network list -c ID -f value)
+ for ID in ${NETLIST}; do
+ openstack network delete ${ID}
+ RET=$?
+ if [ ${RET} -ne 0 ]; then
+ echo "Failed to delete network ${ID}"
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+date
+echo "Cleaning up network resources..."
+
+echo "Cleaning up network trunks"
+cleanup_network_trunks
+RET=$?
+if [ ${RET} -ne 0 ]; then
+ echo "Failed to cleanup network trunks"
+fi
+
+echo "Cleaning up VM instances"
+cleanup_vm_instances
+RET=$?
+if [ ${RET} -ne 0 ]; then
+ echo "Failed to cleanup VM instances"
+fi
+
+echo "Cleaning up floating IP addresses"
+cleanup_floating_ips
+RET=$?
+if [ ${RET} -ne 0 ]; then
+ echo "Failed to cleanup floating IP addresses"
+fi
+
+echo "Cleaning up manual ports"
+cleanup_manual_ports
+RET=$?
+if [ ${RET} -ne 0 ]; then
+ echo "Failed to cleanup manual ports"
+fi
+
+echo "Cleaning up routers"
+cleanup_routers
+RET=$?
+if [ ${RET} -ne 0 ]; then
+ echo "Failed to cleanup routers"
+fi
+
+echo "Cleaning up application ports"
+cleanup_application_ports
+RET=$?
+if [ ${RET} -ne 0 ]; then
+ echo "Failed to cleanup shared networks"
+fi
+
+echo "Cleaning up networks"
+cleanup_networks
+RET=$?
+if [ ${RET} -ne 0 ]; then
+ echo "Failed to cleanup networks"
+fi
+
+date
+echo "Cleanup finished"
+
+exit 0
diff --git a/neutron/templates/configmap-bin.yaml b/neutron/templates/configmap-bin.yaml
index 2a6b9cff..647762c4 100644
--- a/neutron/templates/configmap-bin.yaml
+++ b/neutron/templates/configmap-bin.yaml
@@ -95,6 +95,8 @@ data:
{{- include "helm-toolkit.scripts.rabbit_init" . | indent 4 }}
neutron-test-force-cleanup.sh: |
{{ tuple "bin/_neutron-test-force-cleanup.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+ neutron-resources-cleanup.sh: |
+{{ tuple "bin/_neutron-resources-cleanup.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- if ( has "tungstenfabric" .Values.network.backend ) }}
tf-plugin.pth: |
/opt/plugin/site-packages
diff --git a/neutron/templates/job-resources-cleanup.yaml b/neutron/templates/job-resources-cleanup.yaml
new file mode 100644
index 00000000..9870305f
--- /dev/null
+++ b/neutron/templates/job-resources-cleanup.yaml
@@ -0,0 +1,81 @@
+{{/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if .Values.manifests.job_resources_cleanup }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "neutron-resources-cleanup" }}
+{{ tuple $envAll "resources_cleanup" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ $serviceAccountName }}
+ labels:
+{{ tuple $envAll "neutron" "resources_cleanup" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+ annotations:
+{{- if .Values.helm3_hook }}
+ "helm.sh/hook": pre-delete
+ "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
+{{- end }}
+{{- if .Values.helm2_hook }}
+ "helm.sh/hook": pre-delete
+ "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
+{{- end }}
+ {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
+spec:
+ backoffLimit: 2
+ activeDeadlineSeconds: 1500
+ template:
+ metadata:
+ labels:
+{{ tuple $envAll "neutron" "resources_cleanup" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+ spec:
+ serviceAccountName: {{ $serviceAccountName }}
+{{ dict "envAll" $envAll "application" "neutron_resources_cleanup" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
+ restartPolicy: OnFailure
+{{ if .Values.pod.tolerations.neutron.enabled }}
+{{ tuple $envAll "neutron" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
+{{ end }}
+ nodeSelector:
+ {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
+ initContainers:
+{{ tuple $envAll "resources_cleanup" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+ containers:
+ - name: {{ $serviceAccountName }}
+{{ tuple $envAll "neutron_resources_cleanup" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll .Values.pod.resources.jobs.resources_cleanup | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+{{ dict "envAll" $envAll "application" "neutron_resources_cleanup" "container" "neutron_resources_cleanup" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
+ env:
+{{- with $env := dict "ksUserSecret" .Values.secrets.identity.admin "useCA" .Values.manifests.certificates}}
+{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 12 }}
+{{- end }}
+ command:
+ - /tmp/{{ $serviceAccountName }}.sh
+ volumeMounts:
+ - name: pod-tmp
+ mountPath: /tmp
+ - name: neutron-bin
+ mountPath: /tmp/{{ $serviceAccountName }}.sh
+ subPath: {{ $serviceAccountName }}.sh
+{{- dict "enabled" .Values.manifests.certificates "name" .Values.secrets.tls.network.server.public | include "helm-toolkit.snippets.tls_volume_mount" | indent 12 }}
+ volumes:
+ - name: pod-tmp
+ emptyDir: {}
+ - name: neutron-bin
+ configMap:
+ name: neutron-bin
+ defaultMode: 0555
+{{- dict "enabled" .Values.manifests.certificates "name" .Values.secrets.tls.network.server.public | include "helm-toolkit.snippets.tls_volume" | indent 8 }}
+{{- end }}
diff --git a/neutron/values.yaml b/neutron/values.yaml
index dc73b68a..4be350e8 100644
--- a/neutron/values.yaml
+++ b/neutron/values.yaml
@@ -42,6 +42,7 @@ images:
neutron_bagpipe_bgp: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
neutron_ironic_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
neutron_netns_cleanup_cron: docker.io/openstackhelm/neutron:stein-ubuntu_bionic
+ neutron_resources_cleanup: docker.io/openstackhelm/heat:stein-ubuntu_bionic
dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
image_repo_sync: docker.io/docker:17.07.0
pull_policy: "IfNotPresent"
@@ -326,6 +327,21 @@ dependencies:
service: oslo_cache
- endpoint: internal
service: identity
+ resources_cleanup:
+ jobs:
+ - neutron-db-sync
+ - neutron-rabbit-init
+ services:
+ - endpoint: internal
+ service: oslo_messaging
+ - endpoint: internal
+ service: oslo_db
+ - endpoint: internal
+ service: identity
+ - endpoint: internal
+ service: compute
+ - endpoint: internal
+ service: network
tests:
services:
- endpoint: internal
@@ -547,6 +563,12 @@ pod:
neutron_netns_cleanup_cron:
readOnlyRootFilesystem: true
privileged: true
+ neutron_resources_cleanup:
+ pod:
+ runAsUser: 42424
+ container:
+ neutron_resources_cleanup:
+ readOnlyRootFilesystem: true
affinity:
anti:
type:
@@ -836,6 +858,13 @@ pod:
limits:
memory: "1024Mi"
cpu: "2000m"
+ resources_cleanup:
+ requests:
+ memory: "128Mi"
+ cpu: "100m"
+ limits:
+ memory: "1024Mi"
+ cpu: "2000m"
conf:
rally_tests:
@@ -2522,6 +2551,7 @@ network_policy:
egress:
- {}
+helm2_hook: true
helm3_hook: true
manifests:
@@ -2549,6 +2579,7 @@ manifests:
job_ks_service: true
job_ks_user: true
job_rabbit_init: true
+ job_resources_cleanup: true
pdb_server: true
pod_rally_test: true
network_policy: false
--
2.25.1


@@ -23,4 +23,11 @@ resources:
- glance
- cinder
- ceph-rgw
- placement
- nova
- nova-api-proxy
- neutron
- libvirt
- pci-irq-affinity-agent
- openvswitch
...


@@ -0,0 +1,41 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
apiVersion: "helm.toolkit.fluxcd.io/v2beta1"
kind: HelmRelease
metadata:
name: libvirt
labels:
chart_group: openstack-libvirt
spec:
releaseName: osh-openstack-libvirt
chart:
spec:
chart: libvirt
version: 0.1.7
sourceRef:
kind: HelmRepository
name: starlingx
interval: 5m
timeout: 30m
test:
enable: false
install:
disableHooks: false
upgrade:
disableHooks: false
dependsOn:
- name: placement
namespace: openstack
valuesFrom:
- kind: Secret
name: libvirt-static-overrides
valuesKey: libvirt-static-overrides.yaml
- kind: Secret
name: libvirt-system-overrides
valuesKey: libvirt-system-overrides.yaml
...


@@ -0,0 +1,20 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
namespace: openstack
resources:
- helmrelease.yaml
secretGenerator:
- name: libvirt-static-overrides
files:
- libvirt-static-overrides.yaml
- name: libvirt-system-overrides
files:
- libvirt-system-overrides.yaml
generatorOptions:
disableNameSuffixHash: true
...
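The kustomization above packages the two libvirt override files into Secrets via secretGenerator and pulls in the HelmRelease. A minimal sketch of how its output could be previewed locally, assuming it is saved as kustomization.yaml in a directory named libvirt alongside the listed override files:

# Hypothetical preview, not part of this commit: render the HelmRelease plus
# the two generated override Secrets without applying anything to a cluster.
kubectl kustomize ./libvirt
# Equivalent with the standalone kustomize CLI:
kustomize build ./libvirt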


@@ -0,0 +1,39 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
release_group: osh-openstack-libvirt
ceph_client:
user_secret_name: cinder-volume-rbd-keyring
labels:
agent:
libvirt:
node_selector_key: openstack-compute-node
node_selector_value: enabled
conf:
ceph:
enabled: true
kubernetes:
cgroup: "k8s-infra"
libvirt:
listen_addr: "::"
pod:
tolerations:
libvirt:
enabled: true
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: openstack-compute-node
operator: Exists
effect: NoSchedule
images:
tags:
ceph_config_helper: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20201223
image_repo_sync: null
libvirt: docker.io/starlingx/stx-libvirt:master-centos-stable-latest
...


@@ -0,0 +1,41 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
apiVersion: "helm.toolkit.fluxcd.io/v2beta1"
kind: HelmRelease
metadata:
name: neutron
labels:
chart_group: openstack-neutron
spec:
releaseName: osh-openstack-neutron
chart:
spec:
chart: neutron
version: 0.2.9
sourceRef:
kind: HelmRepository
name: starlingx
interval: 5m
timeout: 30m
test:
enable: false
install:
disableHooks: false
upgrade:
disableHooks: false
dependsOn:
- name: placement
namespace: openstack
valuesFrom:
- kind: Secret
name: neutron-static-overrides
valuesKey: neutron-static-overrides.yaml
- kind: Secret
name: neutron-system-overrides
valuesKey: neutron-system-overrides.yaml
...
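Once these manifests are applied, FluxCD's helm-controller owns the neutron release. A minimal sketch of how it could be inspected and re-reconciled with the Flux CLI, assuming the flux binary is installed and the resources live in the openstack namespace as declared above:

# Hypothetical commands, not part of this commit: list the HelmReleases that
# FluxCD manages in the namespace, then force an immediate reconciliation of
# the neutron release defined above.
flux get helmreleases --namespace openstack
flux reconcile helmrelease neutron --namespace openstack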


@@ -0,0 +1,20 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
namespace: openstack
resources:
- helmrelease.yaml
secretGenerator:
- name: neutron-static-overrides
files:
- neutron-static-overrides.yaml
- name: neutron-system-overrides
files:
- neutron-system-overrides.yaml
generatorOptions:
disableNameSuffixHash: true
...


@@ -0,0 +1,243 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
release_group: osh-openstack-neutron
endpoints:
oslo_messaging:
statefulset:
name: osh-openstack-rabbitmq-rabbitmq
identity:
force_public_endpoint: true
pod:
replicas:
server: 2
security_context:
neutron_dhcp_agent:
pod:
runAsUser: 0
neutron_l2gw_agent:
pod:
runAsUser: 0
neutron_bagpipe_bgp:
pod:
runAsUser: 0
neutron_l3_agent:
pod:
runAsUser: 0
neutron_lb_agent:
pod:
runAsUser: 0
neutron_metadata_agent:
pod:
runAsUser: 0
neutron_ovs_agent:
pod:
runAsUser: 0
neutron_server:
pod:
runAsUser: 0
neutron_sriov_agent:
pod:
runAsUser: 0
affinity:
anti:
type:
default: requiredDuringSchedulingIgnoredDuringExecution
tolerations:
neutron:
enabled: true
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: openstack-compute-node
operator: Exists
effect: NoSchedule
# Probe failures cause a long delay and eventual failure of the armada
# application apply. Need to determine the fix to re-enable these.
probes:
dhcp_agent:
dhcp_agent:
readiness:
enabled: false
liveness:
enabled: false
l3_agent:
l3_agent:
readiness:
enabled: false
liveness:
enabled: false
lb_agent:
lb_agent:
readiness:
enabled: false
liveness:
enabled: false
metadata_agent:
metadata_agent:
readiness:
enabled: false
liveness:
enabled: false
ovs_agent:
ovs_agent:
readiness:
enabled: false
liveness:
enabled: false
sriov_agent:
sriov_agent:
readiness:
enabled: false
liveness:
enabled: false
labels:
agent:
dhcp:
node_selector_key: openstack-compute-node
node_selector_value: enabled
l3:
node_selector_key: openstack-compute-node
node_selector_value: enabled
metadata:
node_selector_key: openstack-compute-node
node_selector_value: enabled
l2gw:
node_selector_key: openstack-compute-node
node_selector_value: enabled
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
lb:
node_selector_key: linuxbridge
node_selector_value: enabled
# ovs is a special case, requiring a special
# label that can apply to both control hosts
# and compute hosts, until we get more sophisticated
# with our daemonset scheduling
ovs:
node_selector_key: openvswitch
node_selector_value: enabled
server:
node_selector_key: openstack-control-plane
node_selector_value: enabled
test:
node_selector_key: openstack-control-plane
node_selector_value: enabled
images:
tags:
bootstrap: docker.io/starlingx/stx-heat:master-centos-stable-latest
db_init: docker.io/starlingx/stx-heat:master-centos-stable-latest
db_drop: docker.io/starlingx/stx-heat:master-centos-stable-latest
image_repo_sync: null
ks_user: docker.io/starlingx/stx-heat:master-centos-stable-latest
ks_service: docker.io/starlingx/stx-heat:master-centos-stable-latest
ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
neutron_db_sync: docker.io/starlingx/stx-neutron:master-centos-stable-latest
neutron_dhcp: docker.io/starlingx/stx-neutron:master-centos-stable-latest
neutron_l3: docker.io/starlingx/stx-neutron:master-centos-stable-latest
neutron_l2gw: docker.io/starlingx/stx-neutron:master-centos-stable-latest
neutron_openvswitch_agent: docker.io/starlingx/stx-neutron:master-centos-stable-latest
neutron_linuxbridge_agent: docker.io/starlingx/stx-neutron:master-centos-stable-latest
neutron_metadata: docker.io/starlingx/stx-neutron:master-centos-stable-latest
neutron_server: docker.io/starlingx/stx-neutron:master-centos-stable-latest
neutron_sriov_agent: docker.io/starlingx/stx-neutron:master-centos-stable-latest
neutron_sriov_agent_init: docker.io/starlingx/stx-neutron:master-centos-stable-latest
test: null
purge_test: null
neutron_bagpipe_bgp: docker.io/starlingx/stx-neutron:master-centos-stable-latest
neutron_ironic_agent: docker.io/starlingx/stx-neutron:master-centos-stable-latest
neutron_netns_cleanup_cron: docker.io/starlingx/stx-neutron:master-centos-stable-latest
network:
interface:
tunnel: docker0
backend:
- openvswitch
- sriov
dependencies:
static:
ovs_agent:
pod: null
conf:
neutron:
DEFAULT:
l3_ha: false
min_l3_agents_per_router: 1
max_l3_agents_per_router: 1
l3_ha_network_type: vxlan
dhcp_agents_per_network: 1
max_overflow: 64
max_pool_size: 1
idle_timeout: 60
rpc_response_max_timeout: 60
router_status_managed: true
vlan_transparent: true
wsgi_default_pool_size: 100
notify_nova_on_port_data_changes: true
notify_nova_on_port_status_changes: true
control_exchange: neutron
core_plugin: neutron.plugins.ml2.plugin.Ml2Plugin
state_path: /var/run/neutron
syslog_log_facility: local2
use_syslog: true
pnet_audit_enabled: false
driver: messagingv2
enable_proxy_headers_parsing: true
log_format: '[%(name)s] %(message)s'
policy_file: /etc/neutron/policy.json
service_plugins: router,network_segment_range
dns_domain: openstacklocal
enable_new_agents: false
allow_automatic_dhcp_failover: true
allow_automatic_l3agent_failover: true
# Increase from default of 75 seconds to avoid agents being declared
# down during controller swacts, reboots, etc...
agent_down_time: 180
bind_host: "::"
oslo_concurrency:
lock_path: /var/run/neutron/lock
vhost:
vhost_user_enabled: true
keystone_authtoken:
auth_uri: http://keystone.openstack.svc.cluster.local:80/v3
auth_url: http://keystone.openstack.svc.cluster.local:80/v3
nova:
auth_url: http://keystone.openstack.svc.cluster.local:80/v3
dhcp_agent:
DEFAULT:
enable_isolated_metadata: true
enable_metadata_network: false
interface_driver: openvswitch
resync_interval: 30
l3_agent:
DEFAULT:
agent_mode: dvr_snat
interface_driver: openvswitch
metadata_port: 80
plugins:
ml2_conf:
ml2:
mechanism_drivers: openvswitch,sriovnicswitch,l2population
path_mtu: 0
tenant_network_types: vlan,vxlan
type_drivers: flat,vlan,vxlan
ml2_type_vxlan:
vni_ranges: ''
vxlan_group: ''
ovs_driver:
vhost_user_enabled: true
securitygroup:
firewall_driver: openvswitch
openvswitch_agent:
agent:
tunnel_types: vxlan
ovs:
bridge_mappings: public:br-ex
securitygroup:
firewall_driver: openvswitch
...


@@ -0,0 +1,41 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
apiVersion: "helm.toolkit.fluxcd.io/v2beta1"
kind: HelmRelease
metadata:
name: nova-api-proxy
labels:
chart_group: openstack-nova-api-proxy
spec:
releaseName: osh-openstack-nova-api-proxy
chart:
spec:
chart: nova-api-proxy
version: 0.1.0
sourceRef:
kind: HelmRepository
name: starlingx
interval: 5m
timeout: 30m
test:
enable: false
install:
disableHooks: false
upgrade:
disableHooks: false
dependsOn:
- name: placement
namespace: openstack
valuesFrom:
- kind: Secret
name: nova-api-proxy-static-overrides
valuesKey: nova-api-proxy-static-overrides.yaml
- kind: Secret
name: nova-api-proxy-system-overrides
valuesKey: nova-api-proxy-system-overrides.yaml
...


@@ -0,0 +1,20 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
namespace: openstack
resources:
- helmrelease.yaml
secretGenerator:
- name: nova-api-proxy-static-overrides
files:
- nova-api-proxy-static-overrides.yaml
- name: nova-api-proxy-system-overrides
files:
- nova-api-proxy-system-overrides.yaml
generatorOptions:
disableNameSuffixHash: true
...


@@ -0,0 +1,51 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
release_group: osh-openstack-nova-api-proxy
images:
tags:
nova_api_proxy: docker.io/starlingx/stx-nova-api-proxy:master-centos-stable-latest
ks_endpoints: docker.io/starlingx/stx-heat:master-centos-stable-latest
endpoints:
identity:
force_public_endpoint: true
pod:
affinity:
anti:
type:
default: requiredDuringSchedulingIgnoredDuringExecution
tolerations:
nova_api_proxy:
enabled: true
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: openstack-compute-node
operator: Exists
effect: NoSchedule
nova:
enabled: true
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: openstack-compute-node
operator: Exists
effect: NoSchedule
conf:
nova_api_proxy:
DEFAULT:
osapi_proxy_listen: "::"
nfvi_compute_listen: "::"
osapi_compute_listen: nova-api-internal.openstack.svc.cluster.local
osapi_compute_listen_port: 80
keystone_authtoken:
interface: internal
auth_uri: http://keystone.openstack.svc.cluster.local:80/v3
auth_url: http://keystone.openstack.svc.cluster.local:80/v3
...


@@ -0,0 +1,41 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
apiVersion: "helm.toolkit.fluxcd.io/v2beta1"
kind: HelmRelease
metadata:
name: nova
labels:
chart_group: openstack-nova
spec:
releaseName: osh-openstack-nova
chart:
spec:
chart: nova
version: 0.2.21
sourceRef:
kind: HelmRepository
name: starlingx
interval: 5m
timeout: 30m
test:
enable: false
install:
disableHooks: false
upgrade:
disableHooks: false
dependsOn:
- name: placement
namespace: openstack
valuesFrom:
- kind: Secret
name: nova-static-overrides
valuesKey: nova-static-overrides.yaml
- kind: Secret
name: nova-system-overrides
valuesKey: nova-system-overrides.yaml
...


@@ -0,0 +1,20 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
namespace: openstack
resources:
- helmrelease.yaml
secretGenerator:
- name: nova-static-overrides
files:
- nova-static-overrides.yaml
- name: nova-system-overrides
files:
- nova-system-overrides.yaml
generatorOptions:
disableNameSuffixHash: true
...


@@ -0,0 +1,259 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
release_group: osh-openstack-nova
endpoints:
oslo_messaging:
statefulset:
name: osh-openstack-rabbitmq-rabbitmq
compute:
hosts:
public: nova-api-internal
manifests:
job_ks_endpoints: false
ingress_osapi: true
service_ingress_osapi: true
cron_job_cell_setup: false
statefulset_compute_ironic: false
deployment_placement: false
ingress_placement: false
job_db_init_placement: false
job_ks_placement_endpoints: false
job_ks_placement_service: false
job_ks_placement_user: false
pdb_placement: false
secret_keystone_placement: false
service_ingress_placement: false
service_placement: false
deployment_consoleauth: false
labels:
agent:
compute:
node_selector_key: openstack-compute-node
node_selector_value: enabled
compute_ironic:
node_selector_key: openstack-ironic
node_selector_value: enabled
api_metadata:
node_selector_key: openstack-control-plane
node_selector_value: enabled
conductor:
node_selector_key: openstack-control-plane
node_selector_value: enabled
consoleauth: