Add phase plan for airship-core gating

Uplift airshipctl to a version that includes the new phases.
Add the phases exercised by the Zuul job to the
deploy-airship-core-gating plan.

Relates-To: #166
Change-Id: I98b605d16940fab2a728e4646e52c7d1ad992c2d
Author: Vladislav Kuzmin
Date: 2021-06-09 18:42:21 +04:00
Parent: 1da8f2cc03
Commit: b6ce028dcc
23 changed files with 216 additions and 33 deletions
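
With this change the gate no longer chains the per-step deployment scripts; after pulling documents, generating secrets, and building images it runs the new plan. A minimal sketch of that final step (command and flag taken from 25_deploy_gating.sh below; airshipctl config and context setup from the earlier 2x_* scripts is assumed):

airshipctl plan list   # the new deploy-airship-core-gating plan should be listed
airshipctl plan run deploy-airship-core-gating --debug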

@@ -0,0 +1,29 @@
#!/bin/sh
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xe
hosts=$(kubectl \
          --context $KCTL_CONTEXT \
          --request-timeout 10s get nodes -o name)
# Annotate node for hostconfig-operator
for host in $hosts
do
  kubectl \
    --context $KCTL_CONTEXT \
    --request-timeout 10s \
    annotate --overwrite $host \
    secret=hco-ssh-auth
done
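
Once the phase has run, the annotation can be spot-checked with plain kubectl; a sketch that reuses the same $KCTL_CONTEXT variable the script relies on:

kubectl --context $KCTL_CONTEXT --request-timeout 10s get nodes \
  -o custom-columns=NODE:.metadata.name,SECRET:.metadata.annotations.secret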

@@ -0,0 +1,6 @@
configMapGenerator:
  - name: kubectl-annotate-node-for-hostconfig-operator
    options:
      disableNameSuffixHash: true
    files:
      - script=kubectl_annotate_node.sh
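
This configMapGenerator wraps kubectl_annotate_node.sh into a ConfigMap keyed `script`, with the name-suffix hash disabled so the executor can reference the object by its literal name. The generated ConfigMap can be previewed by building the function directly; a sketch, assuming the function lives at the phase-helpers path named in the plan comments:

kustomize build manifests/function/phase-helpers/annotate_node_for_hostconfig_operator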

@@ -1,2 +1,3 @@
resources:
- calicoctl
- calicoctl
- annotate_node_for_hostconfig_operator

@@ -0,0 +1,11 @@
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
  name: kubectl-annotate-node-for-hostconfig-operator
spec:
  image: quay.io/airshipit/toolbox:latest
  hostNetwork: true
configRef:
  kind: ConfigMap
  name: kubectl-annotate-node-for-hostconfig-operator
  apiVersion: v1
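
The GenericContainer executor runs the toolbox image on the host network and takes its payload from the ConfigMap referenced by configRef, i.e. the script generated above. As the plan comments note, the ConfigMap actually wired into a phase can be rendered with (phase name assumed from this change):

airshipctl phase render kubectl-annotate-node-for-hostconfig-operator-target --source config -k ConfigMap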

@@ -2,5 +2,9 @@ resources:
- ../../../../../airshipctl/manifests/phases/
- lma-infra-target.yaml
- validation-config.yaml
- ../../../function/phase-helper/
- executors.yaml
- phases.yaml
- plan.yaml
transformers:
- ../../../../../airshipctl/manifests/function/validator

@@ -0,0 +1,10 @@
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
  name: kubectl-annotate-node-for-hostconfig-operator-target
  clusterName: target-cluster
config:
  executorRef:
    apiVersion: airshipit.org/v1alpha1
    kind: GenericContainer
    name: kubectl-annotate-node-for-hostconfig-operator
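
Besides running as part of the plan, the phase can be exercised on its own with the same pattern the existing gate scripts use for individual phases (a sketch; assumes the target cluster context is already reachable):

airshipctl phase run --debug kubectl-annotate-node-for-hostconfig-operator-target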

@@ -0,0 +1,149 @@
apiVersion: airshipit.org/v1alpha1
kind: PhasePlan
metadata:
  name: deploy-airship-core-gating
description: "Phase plan for airship-core deployment on gating"
phases:
# Deploy ephemeral node using redfish with iso
- name: remotedirect-ephemeral
# Wait for apiserver to become available
# Scripts for this phase placed in manifests/function/phase-helpers/wait_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-node
- name: kubectl-wait-node-ephemeral
# Waiting for any pods to be available
# Scripts for this phase placed in manifests/function/phase-helpers/wait_pods_any/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-pods-any
- name: kubectl-wait-pods-any-ephemeral
# Deploy calico using tigera operator
- name: initinfra-networking-ephemeral
# Wait for Calico to be deployed using tigera
# Scripts for this phase placed in manifests/function/phase-helpers/wait_tigera/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-tigera
- name: kubectl-wait-tigera-ephemeral
# Deploy metal3.io components to ephemeral node
- name: initinfra-ephemeral
# Get pods as debug information
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-ephemeral
# Deploy cluster-api components to ephemeral node
- name: clusterctl-init-ephemeral
# Waiting for clusterapi pods to come up
# Scripts for this phase placed in manifests/function/phase-helpers/wait_deploy/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-deploy
- name: kubectl-wait-deploy-ephemeral
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-ephemeral
# TODO (dukov) this is needed because sushy-tools inserts a cdrom image into
# all VMs. It can be removed once sushy-tools is fixed.
# Scripts for this phase placed in manifests/function/phase-helpers/virsh-eject-cdrom-images/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name virsh-eject-cdrom-images
- name: virsh-eject-cdrom-images
# Create target k8s cluster resources
- name: controlplane-ephemeral
# List all nodes in target cluster
# Scripts for this phase placed in manifests/function/phase-helpers/get_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-node
- name: kubectl-get-node-target
# List all pods in target cluster
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-target
# Deploy calico using tigera operator
- name: initinfra-networking-target
# Wait for Calico to be deployed using tigera
# Scripts for this phase placed in manifests/function/phase-helpers/wait_tigera/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-tigera
- name: kubectl-wait-tigera-target
# Deploy infra to cluster
- name: initinfra-target
# List all pods
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-target
# Annotate node for hostconfig-operator
# Scripts for this phase placed in manifests/function/phase-helpers/annotate_node_for_hostconfig_operator/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-annotate-node-for-hostconfig-operator
- name: kubectl-annotate-node-for-hostconfig-operator-target
# Deploy CAPI components to target cluster
- name: clusterctl-init-target
# Waiting for pods to be ready
# Scripts for this phase placed in manifests/function/phase-helpers/wait_pods_ready/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-pods-ready
- name: kubectl-wait-pods-ready-target
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-target
# Move Cluster Object to Target Cluster
- name: clusterctl-move
# Waiting for pods to be ready
# Scripts for this phase placed in manifests/function/phase-helpers/wait_pods_ready/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-pods-ready
- name: kubectl-wait-pods-ready-target
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-target
# Wait till crds are created
# Scripts for this phase placed in manifests/function/phase-helpers/wait_cluster/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-cluster
- name: kubectl-wait-cluster-target
# (TODO) Need to add manifests for controlplane-target phase
# Create target k8s cluster resources
# - name: controlplane-target
# List all nodes in target cluster
# Scripts for this phase placed in manifests/function/phase-helpers/get_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-node
# - name: kubectl-get-node-target
# List all pods in target cluster
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
# - name: kubectl-get-pods-target
# TODO (dukov) this is needed because sushy-tools inserts a cdrom image into
# all VMs. It can be removed once sushy-tools is fixed.
# Scripts for this phase placed in manifests/function/phase-helpers/virsh-destroy-vms/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name virsh-destroy-vms
- name: virsh-destroy-vms
# Deploy worker node
- name: workers-target
# Waiting for node to be provisioned
# Scripts for this phase placed in manifests/function/phase-helpers/wait_label_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-label-node
- name: kubectl-wait-label-node-target
# Annotate node for hostconfig-operator
# Scripts for this phase placed in manifests/function/phase-helpers/annotate_node_for_hostconfig_operator/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-annotate-node-for-hostconfig-operator
- name: kubectl-annotate-node-for-hostconfig-operator-target
# Deploy workload
- name: workload-target
# Ensure we can reach ingress controller default backend
# Scripts for this phase placed in manifests/function/phase-helpers/check_ingress_ctrl/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-check-ingress-ctrl
- name: kubectl-check-ingress-ctrl-target
# (TODO) Need to verify these phases
# - name: lma-infra
# - name: lma-stack
# - name: lma-configs
# - name: deliver-network-policy
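
If a gate run fails partway through, individual phases from the plan can be re-run for debugging with the usual pattern (a sketch; phase names as defined above):

airshipctl phase run --debug kubectl-get-pods-target
airshipctl phase run --debug kubectl-wait-pods-ready-target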

@@ -25,14 +25,7 @@
- ./tools/deployment/airship-core/23_pull_documents.sh
- ./tools/deployment/airship-core/23_generate_secrets.sh
- ./tools/deployment/airship-core/24_build_images.sh
- ./tools/deployment/airship-core/25_deploy_ephemeral_node.sh
- ./tools/deployment/airship-core/26_deploy_capi_ephemeral_node.sh
- ./tools/deployment/airship-core/30_deploy_controlplane.sh
- ./tools/deployment/airship-core/31_deploy_initinfra_target_node.sh
- ./tools/deployment/airship-core/32_cluster_init_target_node.sh
- ./tools/deployment/airship-core/33_cluster_move_target_node.sh
- ./tools/deployment/airship-core/35_deploy_worker_node.sh
- ./tools/deployment/airship-core/36_deploy_workload.sh
- ./tools/deployment/airship-core/25_deploy_gating.sh
- name: "Run gate scripts"
include_role:

@@ -1 +0,0 @@
../common/25_deploy_ephemeral_node.sh

@@ -14,7 +14,4 @@
set -xe
echo "Deploying LMA stack"
airshipctl phase run --debug lma-infra
airshipctl phase run --debug lma-stack
airshipctl phase run --debug lma-configs
airshipctl plan run deploy-airship-core-gating --debug

@@ -1 +0,0 @@
../common/26_deploy_capi_ephemeral_node.sh

@@ -1 +0,0 @@
../common/30_deploy_controlplane.sh

@@ -1 +0,0 @@
../common/31_deploy_initinfra_target_node.sh

@@ -1 +0,0 @@
../common/32_cluster_init_target_node.sh

@@ -1 +0,0 @@
../common/33_cluster_move_target_node.sh

@@ -1 +0,0 @@
../common/34_deploy_controlplane_target.sh

@@ -1 +0,0 @@
../common/35_deploy_worker_node.sh

@@ -1 +0,0 @@
../common/36_deploy_workload.sh

@@ -1 +0,0 @@
../common/37_verify_hwcc_profiles.sh

@@ -1 +0,0 @@
../common/40_deploy_network_policies.sh

@@ -18,3 +18,4 @@ set -xe
cd ${AIRSHIPCTL_PROJECT}
./tools/deployment/25_deploy_ephemeral_node.sh

@@ -75,14 +75,7 @@
- ./tools/deployment/airship-core/23_pull_documents.sh
- ./tools/deployment/airship-core/23_generate_secrets.sh
- ./tools/deployment/airship-core/24_build_images.sh
- ./tools/deployment/airship-core/25_deploy_ephemeral_node.sh
- ./tools/deployment/airship-core/26_deploy_capi_ephemeral_node.sh
- ./tools/deployment/airship-core/30_deploy_controlplane.sh
- ./tools/deployment/airship-core/31_deploy_initinfra_target_node.sh
- ./tools/deployment/airship-core/32_cluster_init_target_node.sh
- ./tools/deployment/airship-core/33_cluster_move_target_node.sh
- ./tools/deployment/airship-core/35_deploy_worker_node.sh
- ./tools/deployment/airship-core/36_deploy_workload.sh
- ./tools/deployment/airship-core/25_deploy_gating.sh
serve_dir: /srv/images
serve_port: 8099
voting: true

@@ -14,7 +14,7 @@
vars:
# NOTE(drewwalters96): Set AIRSHIPCTL_REF to a commit SHA in order to pin
# the cloned version of airshipctl to a known/compatible reference.
AIRSHIPCTL_REF: "c7d65d4114bf0f97bfbce3f02b6bf3d80d5ffe3e"
AIRSHIPCTL_REF: "16da661959b69ae54be83266cf7db0920083a2d4"
sphinx_build_dir: docs/build
check:
jobs: