apiVersion: airshipit.org/v1alpha1
kind: PhasePlan
metadata:
  name: deploy-gating
description: "Phase plan for test-site deployment"
phases:
  # Deploy the ephemeral node using redfish with an iso
  - name: remotedirect-ephemeral
  # Wait for the apiserver to become available
  # Scripts for this phase are placed in manifests/function/phase-helpers/wait_node/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-wait-node
  - name: kubectl-wait-node-ephemeral
  # Wait for any pods to become available
  # Scripts for this phase are placed in manifests/function/phase-helpers/wait_pods_any/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-wait-pods-any
  - name: kubectl-wait-pods-any-ephemeral
  # Deploy Calico using the Tigera operator
  - name: initinfra-networking-ephemeral
  # Deploy metal3.io components to the ephemeral node
  - name: initinfra-ephemeral
  # Get pods as debug information
  # Scripts for this phase are placed in manifests/function/phase-helpers/get_pods/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-get-pods
  - name: kubectl-get-pods-ephemeral
  # Deploy cluster-api components to the ephemeral node
  - name: clusterctl-init-ephemeral
  # Wait for the cluster-api pods to come up
  # Scripts for this phase are placed in manifests/function/phase-helpers/wait_deploy/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-wait-deploy
  - name: kubectl-wait-deploy-ephemeral
  # Scripts for this phase are placed in manifests/function/phase-helpers/get_pods/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-get-pods
  - name: kubectl-get-pods-ephemeral
  # TODO (dukov) This is needed because sushy-tools inserts a cdrom image into
  # all VMs. It can be removed once sushy-tools is fixed.
  # Scripts for this phase are placed in manifests/function/phase-helpers/virsh-eject-cdrom-images/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named virsh-eject-cdrom-images
  - name: virsh-eject-cdrom-images
  # Create the target k8s cluster resources
  - name: controlplane-ephemeral
  # Wait until the cluster is created
  # Scripts for this phase are placed in manifests/function/phase-helpers/wait_cluster/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-wait-cluster
  - name: kubectl-wait-cluster-ephemeral
  # List all nodes in the target cluster
  # Scripts for this phase are placed in manifests/function/phase-helpers/get_node/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-get-node
  - name: kubectl-get-node-target
  # List all pods in the target cluster
  # Scripts for this phase are placed in manifests/function/phase-helpers/get_pods/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-get-pods
  - name: kubectl-get-pods-target
  # Deploy Calico using the Tigera operator
  - name: initinfra-networking-target
  # Deploy infra to the cluster
  - name: initinfra-target
  # List all pods
  # Scripts for this phase are placed in manifests/function/phase-helpers/get_pods/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-get-pods
  - name: kubectl-get-pods-target
  # Deploy CAPI components to the target cluster
  - name: clusterctl-init-target
  # Wait for pods to be ready
  # Scripts for this phase are placed in manifests/function/phase-helpers/wait_pods_ready/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-wait-pods-ready
  - name: kubectl-wait-pods-ready-target
  # Scripts for this phase are placed in manifests/function/phase-helpers/get_pods/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-get-pods
  - name: kubectl-get-pods-target
  # Move the Cluster object to the target cluster
  - name: clusterctl-move
  # Wait for pods to be ready
  # Scripts for this phase are placed in manifests/function/phase-helpers/wait_pods_ready/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-wait-pods-ready
  - name: kubectl-wait-pods-ready-target
  # Scripts for this phase are placed in manifests/function/phase-helpers/get_pods/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-get-pods
  - name: kubectl-get-pods-target
  # Wait until the CRDs are created
  # Scripts for this phase are placed in manifests/function/phase-helpers/wait_cluster/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-wait-cluster
  - name: kubectl-wait-cluster-target
  # Eject virtual media from the ephemeral baremetal host to ensure it can be reused
  - name: eject-virtual-media-ephemeral
  # Power off the ephemeral baremetal host to avoid a DHCP conflict
  - name: power-off-ephemeral
  # Create the target k8s cluster resources
  - name: controlplane-target
  # List all nodes in the target cluster
  # Scripts for this phase are placed in manifests/function/phase-helpers/get_node/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-get-node
  - name: kubectl-get-node-target
  # Deploy the worker node
  - name: workers-target
  # Wait for the node to be provisioned
  # Scripts for this phase are placed in manifests/function/phase-helpers/wait_label_node/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-wait-label-node
  - name: kubectl-wait-label-node-target
  # Deploy the workload
  - name: workload-target
  # Ensure we can reach the ingress controller's default backend
  # Scripts for this phase are placed in manifests/function/phase-helpers/check_ingress_ctrl/
  # To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
  # and find the ConfigMap named kubectl-check-ingress-ctrl
  - name: kubectl-check-ingress-ctrl-target
validation:
  kindsToSkip:
    - Clusterctl
    - VariableCatalogue
  crdList:
    - airshipctl/manifests/function/airshipctl-schemas
    - airshipctl/manifests/function/baremetal-operator/v0.5.0/upstream/crd
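# The plan above can be run end-to-end, or one phase at a time while
# debugging. A minimal sketch, assuming airshipctl is already configured to
# point at this site's manifests (phase names are the ones defined above):
#
#   airshipctl plan run deploy-gating
#   airshipctl phase run remotedirect-ephemeral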
---
apiVersion: airshipit.org/v1alpha1
kind: PhasePlan
metadata:
  name: iso
description: "Runs phases to build the iso image"
phases:
  - name: iso-cloud-init-data
  - name: iso-build-image
validation:
  kindsToSkip:
    - RemoteDirectConfiguration
    - VariableCatalogue
  crdList:
    - airshipctl/manifests/function/airshipctl-schemas
    - airshipctl/manifests/function/baremetal-operator/v0.5.0/upstream/crd
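# Example usage, under the same assumptions as the deploy-gating sketch
# above. Per the comments in this file, this plan is expected to produce the
# iso that remotedirect-ephemeral boots the ephemeral node from:
#
#   airshipctl plan list
#   airshipctl plan run iso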