Remove helm status from deployment scripts for common

With the move to helm v3, helm status requires a namespace to be specified, but doing so breaks helm v2 compatibility. This change removes the usage of helm status in openstack-helm-infra's deployment scripts.

Change-Id: I8e035d70dd652d5253f534ad6b28042347158ff4
This commit is contained in:
jayonlau 2021-10-13 12:12:22 -04:00
parent 38f529faca
commit 4df5e23c06
17 changed files with 0 additions and 55 deletions

View File

@ -52,11 +52,6 @@ helm upgrade --install docker-registry ./registry \
#NOTE: Wait for deployments
./tools/deployment/common/wait-for-pods.sh docker-registry
#NOTE: Validate Deployment info
helm status docker-registry-nfs-provisioner
helm status docker-registry-redis
helm status docker-registry
# Delete the test pod if it still exists
kubectl delete pods -l application=redis,release_group=docker-registry-redis,component=test --namespace=docker-registry --ignore-not-found
#NOTE: Run helm tests

View File

@ -32,9 +32,6 @@ helm upgrade --install ingress-kube-system ./ingress \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh kube-system
#NOTE: Display info
helm status ingress-kube-system
#NOTE: Deploy namespace ingress
helm upgrade --install ingress-osh-infra ./ingress \
--namespace=osh-infra \
@ -43,6 +40,3 @@ helm upgrade --install ingress-osh-infra ./ingress \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
#NOTE: Display info
helm status ingress-osh-infra

View File

@ -30,6 +30,3 @@ helm upgrade --install nfs-provisioner \
#NOTE: Wait for deployment
./tools/deployment/common/wait-for-pods.sh nfs
#NOTE: Validate Deployment info
helm status nfs-provisioner

View File

@ -27,6 +27,3 @@ helm upgrade --install ldap ./ldap \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
#NOTE: Validate Deployment info
helm status ldap

View File

@ -26,6 +26,3 @@ helm upgrade --install prometheus-kube-state-metrics \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh kube-system
#NOTE: Validate Deployment info
helm status prometheus-kube-state-metrics

View File

@ -26,6 +26,3 @@ helm upgrade --install prometheus-node-exporter \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh kube-system
#NOTE: Validate Deployment info
helm status prometheus-node-exporter

View File

@ -26,6 +26,3 @@ helm upgrade --install prometheus-process-exporter \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh kube-system
#NOTE: Validate Deployment info
helm status prometheus-process-exporter

View File

@ -23,6 +23,3 @@ helm upgrade --install falco ./falco \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh kube-system
#NOTE: Validate Deployment info
helm status falco

View File

@ -23,6 +23,3 @@ helm upgrade --install prometheus-blackbox-exporter \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
#NOTE: Validate Deployment info
helm status prometheus-blackbox-exporter

View File

@ -48,9 +48,6 @@ until [[ $daemonjob_controller_status == 'Running' ]] || [ $NEXT_WAIT_TIME -eq 5
NEXT_WAIT_TIME=$((NEXT_WAIT_TIME+1))
done
#NOTE: Validate DaemonjobController Deployment info
helm status daemonjob-controller
#NOTE: Create sample-daemonjob.yaml
tee /tmp/sample-daemonjob.yaml << EOF
apiVersion: ctl.example.com/v1

View File

@ -27,6 +27,3 @@ helm upgrade --install fluentbit ./fluentbit \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
#NOTE: Validate Deployment info
helm status fluentbit

View File

@ -186,6 +186,3 @@ helm upgrade --install fluentd ./fluentd \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
#NOTE: Validate Deployment info
helm status fluentd

View File

@ -52,5 +52,3 @@ done
if test $COUNTER -eq 3; then
echo "crds created succesfully"
fi
helm status metacontroller

View File

@ -35,9 +35,6 @@ helm upgrade --install nagios ./nagios \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
#NOTE: Validate Deployment info
helm status nagios
#NOTE: Verify elasticsearch query clauses are functional by execing into pod
NAGIOS_POD=$(kubectl -n osh-infra get pods -l='application=nagios,component=monitoring' --output=jsonpath='{.items[0].metadata.name}')
kubectl exec $NAGIOS_POD -n osh-infra -c nagios -- cat /opt/nagios/etc/objects/query_es_clauses.json | python -m json.tool

View File

@ -33,6 +33,3 @@ helm upgrade --install kubernetes-node-problem-detector \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh kube-system
#NOTE: Validate Deployment info
helm status kubernetes-node-problem-detector

View File

@ -38,6 +38,3 @@ helm upgrade --install prometheus-openstack-exporter \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
helm status prometheus-openstack-exporter

View File

@ -32,6 +32,3 @@ helm upgrade --install postgresql ./postgresql \
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
#NOTE: Validate Deployment info
helm status postgresql