Browse Source

Switch gating deployment to phase plan

Add all phases needed for deployment to the deploy-gating
phase plan.

Relates-To: #517
Change-Id: I9d5af24b0877d90ab1ba24b31cca6ec7127e0f1d
changes/15/791715/31
Vladislav Kuzmin 1 year ago
parent
commit
16da661959
  1. 3
      manifests/function/phase-helpers/kustomization.yaml
  2. 5
      manifests/function/phase-helpers/wait_node/kubectl_wait_node.sh
  3. 38
      manifests/function/phase-helpers/wait_pods_any/kubectl_wait_pods_any.sh
  4. 4
      manifests/function/phase-helpers/wait_pods_any/kustomization.yaml
  5. 0
      manifests/function/phase-helpers/wait_pods_ready/kubectl_wait_pods_ready.sh
  6. 6
      manifests/function/phase-helpers/wait_pods_ready/kustomization.yaml
  7. 18
      manifests/phases/executors.yaml
  8. 2
      manifests/phases/kustomization.yaml
  9. 15
      manifests/phases/phases.yaml
  10. 26
      manifests/phases/plan.yaml
  11. 2
      manifests/type/gating/kustomization.yaml
  12. 143
      manifests/type/gating/plan.yaml
  13. 2
      pkg/phase/executors/clusterctl.go
  14. 19
      pkg/phase/executors/clusterctl_test.go
  15. 3
      pkg/phase/executors/container.go
  16. 10
      playbooks/airshipctl-gate-runner.yaml
  17. 10
      tools/airship-in-a-pod/runner/assets/entrypoint.sh
  18. 30
      tools/deployment/25_deploy_ephemeral_node.sh
  19. 18
      tools/deployment/25_deploy_gating.sh
  20. 59
      tools/deployment/26_deploy_capi_ephemeral_node.sh
  21. 39
      tools/deployment/30_deploy_controlplane.sh
  22. 33
      tools/deployment/31_deploy_initinfra_target_node.sh
  23. 29
      tools/deployment/32_cluster_init_target_node.sh
  24. 35
      tools/deployment/33_cluster_move_target_node.sh
  25. 30
      tools/deployment/34_deploy_controlplane_target.sh
  26. 30
      tools/deployment/35_deploy_worker_node.sh
  27. 30
      tools/deployment/36_deploy_workload.sh
  28. 10
      zuul.d/jobs.yaml

3
manifests/function/phase-helpers/kustomization.yaml

@ -4,7 +4,8 @@ resources:
- wait_tigera
- wait_deploy
- get_node
- wait_pods
- wait_pods_ready
- wait_pods_any
- pause_bmh
- wait_cluster
- virsh-eject-cdrom-images

5
manifests/function/phase-helpers/wait_node/kubectl_wait_node.sh

@ -19,7 +19,10 @@ MAX_RETRY=30
DELAY=60
until [ "$N" -ge ${MAX_RETRY} ]
do
if timeout 20 kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT get node 1>&2; then
if [ "$(timeout 20 \
kubectl --context $KCTL_CONTEXT \
get node -o name | wc -l)" -ge "1" ]; then
timeout 20 kubectl --context $KCTL_CONTEXT get node
break
fi

38
manifests/function/phase-helpers/wait_pods_any/kubectl_wait_pods_any.sh

@ -0,0 +1,38 @@
#!/bin/sh
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Wait until at least one pod (any namespace, any state) is visible in the
# cluster selected by $KCTL_CONTEXT. Retries up to MAX_RETRY times with
# DELAY seconds between attempts, and exits non-zero if no pod ever appears.
set -xe

N=0
MAX_RETRY=30
DELAY=60
until [ "$N" -ge ${MAX_RETRY} ]
do
  # Count pod names across all namespaces; the short request timeout keeps
  # an unresponsive apiserver from consuming the whole retry budget.
  if [ "$(kubectl --context $KCTL_CONTEXT \
      --request-timeout 10s \
      get pods \
      --all-namespaces -o name | wc -l)" -ge "1" ]; then
    # Emit the pod listing on stderr for the gate logs, then stop retrying.
    kubectl --context $KCTL_CONTEXT --request-timeout 10s get pods --all-namespaces 1>&2
    break
  fi
  N=$((N+1))
  echo "$N: Retrying to get any pods" 1>&2
  sleep ${DELAY}
done

# Retry budget exhausted without seeing a single pod: fail the phase.
if [ "$N" -ge ${MAX_RETRY} ]; then
  echo "Could not get any pods" 1>&2
  exit 1
fi

4
manifests/function/phase-helpers/wait_pods/kustomization.yaml → manifests/function/phase-helpers/wait_pods_any/kustomization.yaml

@ -1,6 +1,6 @@
configMapGenerator:
- name: kubectl-wait-pods
- name: kubectl-wait-pods-any
options:
disableNameSuffixHash: true
files:
- script=kubectl_wait_pods.sh
- script=kubectl_wait_pods_any.sh

0
manifests/function/phase-helpers/wait_pods/kubectl_wait_pods.sh → manifests/function/phase-helpers/wait_pods_ready/kubectl_wait_pods_ready.sh

6
manifests/function/phase-helpers/wait_pods_ready/kustomization.yaml

@ -0,0 +1,6 @@
# Packages kubectl_wait_pods_ready.sh as a ConfigMap so the generic-container
# executor can run it; the fixed name (no suffix hash) lets executors.yaml
# reference it as "kubectl-wait-pods-ready".
configMapGenerator:
- name: kubectl-wait-pods-ready
  options:
    disableNameSuffixHash: true
  files:
  - script=kubectl_wait_pods_ready.sh

18
manifests/phases/executors.yaml

@ -352,7 +352,7 @@ configRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: kubectl-wait-pods
name: kubectl-wait-pods-ready
labels:
airshipit.org/deploy-k8s: "false"
spec:
@ -361,7 +361,21 @@ spec:
hostNetwork: true
configRef:
kind: ConfigMap
name: kubectl-wait-pods
name: kubectl-wait-pods-ready
apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: kubectl-wait-pods-any
labels:
airshipit.org/deploy-k8s: "false"
spec:
image: localhost/toolbox
hostNetwork: true
configRef:
kind: ConfigMap
name: kubectl-wait-pods-any
apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1

2
manifests/phases/kustomization.yaml

@ -1,6 +1,6 @@
resources:
- phases.yaml
- plan.yaml
- ../type/gating
- executors.yaml
- cluster-map.yaml
- ../function/clusterctl

15
manifests/phases/phases.yaml

@ -358,13 +358,24 @@ config:
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: kubectl-wait-pods-target
name: kubectl-wait-pods-any-ephemeral
clusterName: ephemeral-cluster
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: kubectl-wait-pods-any
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: kubectl-wait-pods-ready-target
clusterName: target-cluster
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: kubectl-wait-pods
name: kubectl-wait-pods-ready
---
apiVersion: airshipit.org/v1alpha1
kind: Phase

26
manifests/phases/plan.yaml

@ -1,26 +0,0 @@
apiVersion: airshipit.org/v1alpha1
kind: PhasePlan
metadata:
name: phasePlan
description: "Default phase plan"
phases:
- name: initinfra-ephemeral
- name: initinfra-networking-ephemeral
- name: clusterctl-init-ephemeral
- name: controlplane-ephemeral
- name: clusterctl-init-target
- name: initinfra-target
- name: initinfra-networking-target
- name: workers-target
- name: workload-target
---
apiVersion: airshipit.org/v1alpha1
kind: PhasePlan
metadata:
name: iso
description: "Runs phases to build iso image"
phases:
- name: iso-cloud-init-data
- name: iso-build-image

2
manifests/type/gating/kustomization.yaml

@ -0,0 +1,2 @@
# Expose the gating phase plan (deploy-gating) to kustomize consumers.
resources:
- plan.yaml

143
manifests/type/gating/plan.yaml

@ -0,0 +1,143 @@
apiVersion: airshipit.org/v1alpha1
kind: PhasePlan
metadata:
  name: deploy-gating
description: "Phase plan for test-site deployment"
phases:
# Deploy ephemeral node using redfish with iso
- name: remotedirect-ephemeral
# Wait for apiserver to become available
# Scripts for this phase placed in manifests/function/phase-helpers/wait_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-node
- name: kubectl-wait-node-ephemeral
# Waiting for any pods to be available
# Scripts for this phase placed in manifests/function/phase-helpers/wait_pods_any/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-pods-any
- name: kubectl-wait-pods-any-ephemeral
# Deploy calico using tigera operator
- name: initinfra-networking-ephemeral
# Wait for Calico to be deployed using tigera
# Scripts for this phase placed in manifests/function/phase-helpers/wait_tigera/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-tigera
- name: kubectl-wait-tigera-ephemeral
# Deploy metal3.io components to ephemeral node
- name: initinfra-ephemeral
# Getting pods as debug information
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-ephemeral
# Deploy cluster-api components to ephemeral node
- name: clusterctl-init-ephemeral
# Waiting for clusterapi pods to come up
# Scripts for this phase placed in manifests/function/phase-helpers/wait_deploy/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-deploy
- name: kubectl-wait-deploy-ephemeral
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-ephemeral
# TODO (dukov) this is needed due to sushy tools inserts cdrom image to
# all vms. This can be removed once sushy tool is fixed
# Scripts for this phase placed in manifests/function/phase-helpers/virsh-eject-cdrom-images/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name virsh-eject-cdrom-images
- name: virsh-eject-cdrom-images
# Create target k8s cluster resources
- name: controlplane-ephemeral
# List all nodes in target cluster
# Scripts for this phase placed in manifests/function/phase-helpers/get_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-node
- name: kubectl-get-node-target
# List all pods in target cluster
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-target
# Deploy calico using tigera operator
- name: initinfra-networking-target
# Wait for Calico to be deployed using tigera
# Scripts for this phase placed in manifests/function/phase-helpers/wait_tigera/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-tigera
- name: kubectl-wait-tigera-target
# Deploy infra to cluster
- name: initinfra-target
# List all pods
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-target
# Deploy CAPI components to target cluster
- name: clusterctl-init-target
# Waiting for pods to be ready
# Scripts for this phase placed in manifests/function/phase-helpers/wait_pods_ready/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-pods-ready
- name: kubectl-wait-pods-ready-target
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-target
# Move Cluster Object to Target Cluster
- name: clusterctl-move
# Waiting for pods to be ready
# Scripts for this phase placed in manifests/function/phase-helpers/wait_pods_ready/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-pods-ready
- name: kubectl-wait-pods-ready-target
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-target
# Wait till crds are created
# Scripts for this phase placed in manifests/function/phase-helpers/wait_cluster/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-cluster
- name: kubectl-wait-cluster-target
# Create target k8s cluster resources
- name: controlplane-target
# List all nodes in target cluster
# Scripts for this phase placed in manifests/function/phase-helpers/get_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-node
- name: kubectl-get-node-target
# List all pods in target cluster
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-target
# TODO (dukov) this is needed due to sushy tools inserts cdrom image to
# all vms. This can be removed once sushy tool is fixed
# Scripts for this phase placed in manifests/function/phase-helpers/virsh-destroy-vms/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name virsh-destroy-vms
- name: virsh-destroy-vms
# Deploy worker node
- name: workers-target
# Waiting for node to be provisioned
# Scripts for this phase placed in manifests/function/phase-helpers/wait_label_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-label-node
- name: kubectl-wait-label-node-target
# Deploy workload
- name: workload-target
# Ensure we can reach ingress controller default backend
# Scripts for this phase placed in manifests/function/phase-helpers/check_ingress_ctrl/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-check-ingress-ctrl
- name: kubectl-check-ingress-ctrl-target
---
apiVersion: airshipit.org/v1alpha1
kind: PhasePlan
metadata:
  name: iso
description: "Runs phases to build iso image"
phases:
- name: iso-cloud-init-data
- name: iso-build-image

2
pkg/phase/executors/clusterctl.go

@ -178,7 +178,7 @@ func (c *ClusterctlExecutor) Validate() error {
return phaseerrors.ErrInvalidPhase{Reason: "ClusterctlExecutor.Action is empty"}
case airshipv1.Init:
if c.options.InitOptions.CoreProvider == "" {
return phaseerrors.ErrInvalidPhase{Reason: "ClusterctlExecutor.InitOptions.CoreProvider is empty"}
log.Printf("ClusterctlExecutor.InitOptions.CoreProvider is empty")
}
case airshipv1.Move:
default:

19
pkg/phase/executors/clusterctl_test.go

@ -76,19 +76,6 @@ providers:
versions:
v0.3.3: manifests/function/capi/v0.3.3`
executorConfigTmplBad = `
apiVersion: airshipit.org/v1alpha1
kind: Clusterctl
metadata:
name: clusterctl-v1
action: %s
somefield: %s
providers:
- name: "cluster-api"
type: "CoreProvider"
versions:
v0.3.2: functions/capi/infrastructure/v0.3.2`
renderedDocs = `---
apiVersion: v1
kind: Namespace
@ -246,12 +233,6 @@ func TestClusterctlExecutorValidate(t *testing.T) {
executorConfigTmpl: executorConfigTmplGood,
expectedErrString: "invalid phase: ClusterctlExecutor.Action is empty",
},
{
name: "Error empty init option",
actionType: "init",
executorConfigTmpl: executorConfigTmplBad,
expectedErrString: "invalid phase: ClusterctlExecutor.InitOptions.CoreProvider is empty",
},
}
for _, test := range testCases {
tt := test

3
pkg/phase/executors/container.go

@ -166,7 +166,8 @@ func bundleReader(bundle document.Bundle) (io.Reader, error) {
// Validate executor configuration and documents
func (c *ContainerExecutor) Validate() error {
return commonerrors.ErrNotImplemented{}
log.Printf("Method Validate() for container isn't implemented")
return nil
}
// Render executor documents

10
playbooks/airshipctl-gate-runner.yaml

@ -32,20 +32,12 @@
- name: "set default gate scripts"
set_fact:
gate_scripts_default:
- ./tools/deployment/01_install_kubectl.sh
- ./tools/deployment/21_systemwide_executable.sh
- ./tools/deployment/22_test_configs.sh
- ./tools/deployment/23_pull_documents.sh
- ./tools/deployment/23_generate_secrets.sh
- ./tools/deployment/24_build_images.sh
- ./tools/deployment/25_deploy_ephemeral_node.sh
- ./tools/deployment/26_deploy_capi_ephemeral_node.sh
- ./tools/deployment/30_deploy_controlplane.sh
- ./tools/deployment/31_deploy_initinfra_target_node.sh
- ./tools/deployment/32_cluster_init_target_node.sh
- ./tools/deployment/33_cluster_move_target_node.sh
- ./tools/deployment/35_deploy_worker_node.sh
- ./tools/deployment/36_deploy_workload.sh
- ./tools/deployment/25_deploy_gating.sh
- name: "Run gate scripts"
include_role:

10
tools/airship-in-a-pod/runner/assets/entrypoint.sh

@ -57,14 +57,6 @@ else
tar -czf "$ARTIFACTS_DIR/iso.tar.gz" --directory=/srv/images .
fi
./tools/deployment/25_deploy_ephemeral_node.sh
./tools/deployment/26_deploy_capi_ephemeral_node.sh
./tools/deployment/30_deploy_controlplane.sh
./tools/deployment/31_deploy_initinfra_target_node.sh
./tools/deployment/32_cluster_init_target_node.sh
./tools/deployment/33_cluster_move_target_node.sh
./tools/deployment/35_deploy_worker_node.sh
./tools/deployment/36_deploy_workload.sh
./tools/deployment/37_verify_hwcc_profiles.sh
./tools/deployment/25_deploy_gating.sh
/signal_complete runner

30
tools/deployment/25_deploy_ephemeral_node.sh

@ -1,30 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xe
echo "Deploy ephemeral node using redfish with iso"
airshipctl phase run remotedirect-ephemeral --debug
echo "Wait for apiserver to become available"
# Scripts for this phase placed in manifests/function/phase-helpers/wait_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-node
airshipctl phase run kubectl-wait-node-ephemeral --debug
echo "List all pods"
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
airshipctl phase run kubectl-get-pods-ephemeral --debug

18
tools/deployment/25_deploy_gating.sh

@ -0,0 +1,18 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Run the full "deploy-gating" phase plan (manifests/type/gating/plan.yaml),
# which replaces the previous per-step 25_..36_ deployment scripts.
set -xe

echo "Deploy gating"
airshipctl plan run deploy-gating --debug

59
tools/deployment/26_deploy_capi_ephemeral_node.sh

@ -1,59 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xe
export PROVIDER=${PROVIDER:-"metal3"}
if [ "$PROVIDER" = "metal3" ]; then
echo "Deploy calico using tigera operator"
airshipctl phase run initinfra-networking-ephemeral --debug
# "Wait for Calico to be deployed using tigera"
# Scripts for this phase placed in manifests/function/phase-helpers/wait_tigera/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait_tigera
airshipctl phase run kubectl-wait-tigera-ephemeral --debug
echo "Deploy metal3.io components to ephemeral node"
airshipctl phase run initinfra-ephemeral --debug
echo "Getting pods as debug information"
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
airshipctl phase run kubectl-get-pods-ephemeral --debug
echo "Deploy cluster-api components to ephemeral node"
airshipctl phase run clusterctl-init-ephemeral --debug
else
echo "Deploy cluster-api components to ephemeral node"
airshipctl phase run clusterctl-init-ephemeral --debug
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
airshipctl phase run kubectl-get-pods-ephemeral --debug
fi
echo "Waiting for clusterapi pods to come up"
# Scripts for this phase placed in manifests/function/phase-helpers/wait_deploy/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-deploy
airshipctl phase run kubectl-wait-deploy-ephemeral --debug
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
airshipctl phase run kubectl-get-pods-ephemeral --debug

39
tools/deployment/30_deploy_controlplane.sh

@ -1,39 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
EPHEMERAL_DOMAIN_NAME="air-ephemeral"
# TODO (dukov) this is needed due to sushy tools inserts cdrom image to
# all vms. This can be removed once sushy tool is fixed
# Scripts for this phase placed in manifests/function/phase-helpers/virsh-eject-cdrom-images/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name virsh-eject-cdrom-images
airshipctl phase run virsh-eject-cdrom-images --debug
echo "Create target k8s cluster resources"
airshipctl phase run controlplane-ephemeral --debug
echo "List all nodes in target cluster"
# Scripts for this phase placed in manifests/function/phase-helpers/wait_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-node
airshipctl phase run kubectl-get-node-target --debug
echo "List all pods in target cluster"
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
airshipctl phase run kubectl-get-pods-target --debug

33
tools/deployment/31_deploy_initinfra_target_node.sh

@ -1,33 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xe
echo "Deploy calico using tigera operator"
airshipctl phase run initinfra-networking-target --debug
# Wait for Calico to be deployed using tigera
# Scripts for this phase placed in manifests/function/phase-helpers/wait_tigera/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait_tigera
airshipctl phase run kubectl-wait-tigera-target --debug
echo "Deploy infra to cluster"
airshipctl phase run initinfra-target --debug
echo "List all pods"
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
airshipctl phase run kubectl-get-pods-target

29
tools/deployment/32_cluster_init_target_node.sh

@ -1,29 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xe
echo "Deploy CAPI components to target cluster"
airshipctl phase run clusterctl-init-target --debug
echo "Waiting for pods to be ready"
# Scripts for this phase placed in manifests/function/phase-helpers/wait_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-pods
airshipctl phase run kubectl-wait-pods-target --debug
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
airshipctl phase run kubectl-get-pods-target --debug

35
tools/deployment/33_cluster_move_target_node.sh

@ -1,35 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xe
echo "Move Cluster Object to Target Cluster"
airshipctl phase run clusterctl-move
# Waiting for pods to be ready
# Scripts for this phase placed in manifests/function/phase-helpers/wait_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-pods
airshipctl phase run kubectl-wait-pods-target --debug
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
airshipctl phase run kubectl-get-pods-target --debug
#Wait till crds are created
# Scripts for this phase placed in manifests/function/phase-helpers/wait_cluster/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-cluster
airshipctl phase run kubectl-wait-cluster-target --debug

30
tools/deployment/34_deploy_controlplane_target.sh

@ -1,30 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
echo "Create target k8s cluster resources"
airshipctl phase run controlplane-target --debug
echo "List all nodes in target cluster"
# Scripts for this phase placed in manifests/function/phase-helpers/wait_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-node
airshipctl phase run kubectl-get-node-target --debug
echo "List all pods in target cluster"
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
airshipctl phase run kubectl-get-pods-target --debug

30
tools/deployment/35_deploy_worker_node.sh

@ -1,30 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
# all vms. This can be removed once sushy tool is fixed
# Scripts for this phase placed in manifests/function/phase-helpers/virsh-destroy-vms/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name virsh-destroy-vms
airshipctl phase run virsh-destroy-vms --debug
echo "Deploy worker node"
airshipctl phase run workers-target --debug
# Waiting for node to be provisioned."
# Scripts for this phase placed in manifests/function/phase-helpers/wait_label_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-label-node
airshipctl phase run kubectl-wait-label-node-target --debug

30
tools/deployment/36_deploy_workload.sh

@ -1,30 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xe
echo "Deploy workload"
airshipctl phase run workload-target --debug
echo "List all pods in target cluster"
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
airshipctl phase run kubectl-get-pods-target --debug
# Ensure we can reach ingress controller default backend
# Scripts for this phase placed in manifests/function/phase-helpers/check_ingress_ctrl/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-check-ingress-ctrl
airshipctl phase run kubectl-check-ingress-ctrl-target --debug

10
zuul.d/jobs.yaml

@ -69,7 +69,6 @@
irrelevant-files: *noncodefiles
vars:
gate_scripts:
- ./tools/deployment/01_install_kubectl.sh
- ./tools/deployment/21_systemwide_executable.sh
- ./tools/deployment/22_test_configs.sh
- ./tools/validate_docs
@ -123,14 +122,7 @@
- ./tools/deployment/23_pull_documents.sh
- ./tools/deployment/23_generate_secrets.sh
- ./tools/deployment/24_build_images.sh
- ./tools/deployment/25_deploy_ephemeral_node.sh
- ./tools/deployment/26_deploy_capi_ephemeral_node.sh
- ./tools/deployment/30_deploy_controlplane.sh
- ./tools/deployment/31_deploy_initinfra_target_node.sh
- ./tools/deployment/32_cluster_init_target_node.sh
- ./tools/deployment/33_cluster_move_target_node.sh
- ./tools/deployment/35_deploy_worker_node.sh
- ./tools/deployment/36_deploy_workload.sh
- ./tools/deployment/25_deploy_gating.sh
serve_dir: /srv/images
serve_port: 8099
log_roles:

Loading…
Cancel
Save