Add gating for helm release management

This adds gating for Helm release management by including a minimal
example workload phase, which consists of a HelmRelease for the nginx
ingress controller and a corresponding deployment script with
supporting validation logic.

Change-Id: Ia21a799030289c7e40a0e61292578987ea0f6c63
Relates-To: #351
Sean Eagan 2020-09-16 15:39:02 -05:00 committed by Kostyantyn Kalynovskyi
parent 2f5402ae80
commit eb40a5700f
10 changed files with 95 additions and 0 deletions
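
For context, the new phase is driven through airshipctl's phase runner; the
deployment script added below (35_deploy_workload.sh) invokes it as

    airshipctl phase run workload-target --debug

and then checks that the HelmRelease reports a released condition and that the
ingress controller's default backend is reachable.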


@@ -91,3 +91,15 @@ config:
apiVersion: airshipit.org/v1alpha1
kind: Clusterctl
name: clusterctl_move
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: workload-target
clusterName: target-cluster
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: KubernetesApply
name: kubernetes-apply
documentEntryPoint: manifests/site/test-site/target/workload


@@ -10,3 +10,4 @@ phaseGroups:
- name: controlplane-ephemeral
- name: initinfra-target
- name: workers-target
- name: workload-target


@@ -0,0 +1,2 @@
resources:
- ../../../../type/gating/target/workload


@@ -0,0 +1,32 @@
---
apiVersion: "helm.fluxcd.io/v1"
kind: HelmRelease
metadata:
name: ingress
spec:
wait: true
timeout: 600
# This chart doesn't have any helm tests, but this still runs a noop
# helm test flow, to ensure we can do this.
test:
enable: true
values:
defaultBackend:
enabled: true
controller:
service:
type: NodePort
nodePorts:
http: 30000
https: 30001
kind: DaemonSet
hostNetwork: "false"
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
chart:
repository: https://kubernetes-charts.storage.googleapis.com
name: nginx-ingress
version: 1.40.1
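
Since the enabled test block above only exercises a noop flow (the chart ships
no test hooks), the same behaviour can be spot-checked manually from a
workstation with cluster access. A sketch, assuming a Helm v3 client and that
the operator reconciling helm.fluxcd.io/v1 installs the release under its
HelmRelease name into the ingress namespace set by the kustomization below:

    helm test ingress --namespace ingress

The operator runs the equivalent test flow on its own whenever test.enable is
true, so this is only for ad-hoc verification.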


@@ -0,0 +1,4 @@
resources:
- namespace.yaml
- helmrelease.yaml
namespace: ingress


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: ingress


@@ -0,0 +1,2 @@
resources:
- ingress


@@ -28,6 +28,7 @@
- ./tools/deployment/32_cluster_init_target_node.sh
- ./tools/deployment/33_cluster_move_target_node.sh
- ./tools/deployment/34_deploy_worker_node.sh
- ./tools/deployment/35_deploy_workload.sh
- name: "Run gate scripts"
include_role:


@@ -0,0 +1,36 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xe
# Default wait timeout is 600 seconds
export TIMEOUT=${TIMEOUT:-600s}
export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
export TARGET_IP=${TARGET_IP:-"10.23.25.102"}
export TARGET_PORT=${TARGET_PORT:-"30000"}
echo "Deploy workload"
airshipctl phase run workload-target --debug
# TODO: Remove this after migrating to HelmRelease v2 (helm-controller) which
# supports a Ready status condition, which `airshipctl phase run` above will consume.
echo "Waiting $TIMEOUT for HelmReleases to become ready."
kubectl --timeout $TIMEOUT --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait hr --for condition=released --all --all-namespaces
echo "Ensure we can reach ingress controller default backend"
if "404" != "$(curl --head --write-out '%{http_code}' --silent --output /dev/null $TARGET_IP:$TARGET_PORT/should-404)"; then
echo -e "\nFailed to reach ingress controller default backend."
exit 1
fi
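
The script is wired into the gate playbooks touched by the two playbook hunks
in this change; a manual run against an already-deployed target cluster would
look roughly like the following from the repository root, relying on the
exported defaults above when the variables are unset:

    KUBECONFIG=$HOME/.airship/kubeconfig TARGET_IP=10.23.25.102 ./tools/deployment/35_deploy_workload.sh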


@@ -99,6 +99,7 @@
- ./tools/deployment/32_cluster_init_target_node.sh
- ./tools/deployment/33_cluster_move_target_node.sh
- ./tools/deployment/34_deploy_worker_node.sh
- ./tools/deployment/35_deploy_workload.sh
serve_dir: /srv/iso
serve_port: 8099
log_roles: