Merge "Fix validation doc gate"

This commit is contained in:
Zuul 2021-08-31 18:50:36 +00:00 committed by Gerrit Code Review
commit a64524867e
17 changed files with 206 additions and 166 deletions

View File

@ -1,4 +1,5 @@
resources:
- ../catalogues
- ../../../../type/airship-core/ephemeral/initinfra-networking
transformers:
- ../../../../type/airship-core/ephemeral/initinfra-networking/replacements

View File

@ -1,4 +1,5 @@
resources:
- ../catalogues
- ../../../../type/airship-core/target/initinfra-networking
transformers:
- ../../../../type/airship-core/target/initinfra-networking/replacements

View File

@ -1,4 +1,5 @@
resources:
- ../catalogues
- ../../../../type/multi-tenant/ephemeral/initinfra-networking
transformers:
- ../../../../type/multi-tenant/ephemeral/initinfra-networking/replacements

View File

@ -1,4 +1,5 @@
resources:
- ../catalogues
- ../../../../type/multi-tenant/target/initinfra-networking
commonLabels:

View File

@ -1,4 +1,5 @@
resources:
- ../../../../type/multi-tenant/ephemeral/initinfra-networking
- ../catalogues
transformers:
- ../../../../type/multi-tenant/ephemeral/initinfra-networking
- ../../../../type/multi-tenant/ephemeral/initinfra-networking/replacements

View File

@ -1,6 +1,9 @@
resources:
- ../kubeconfig
- ../../../type/multi-tenant/phases
- ../target/catalogues
transformers:
- ../../../../../airshipctl/manifests/function/clusterctl/replacements
## TODO Consider making a catalogue combined with variable substitution instead
patchesStrategicMerge:
- phase-patch.yaml

View File

@ -2,4 +2,4 @@ resources:
- ../../../../type/multi-tenant/target/initinfra-networking
- ../catalogues
transformers:
- ../../../../type/multi-tenant/target/initinfra-networking
- ../../../../type/multi-tenant/target/initinfra-networking/replacements

View File

@ -3,5 +3,6 @@ kind: Kustomization
resources:
- nodes
- ../../../../type/multi-tenant/target/workers
- ../catalogues
transformers:
- ../../../../type/multi-tenant/target/workers/replacements
- ../../../../type/multi-tenant/target/workers/replacements

View File

@ -3,9 +3,9 @@ kind: GenericContainer
metadata:
name: kubectl-annotate-node-for-hostconfig-operator
spec:
image: quay.io/airshipit/toolbox:latest
image: localhost/toolbox
hostNetwork: true
configRef:
kind: ConfigMap
name: kubectl-annotate-node-for-hostconfig-operator
apiVersion: v1
apiVersion: v1

View File

@ -105,7 +105,7 @@ phases:
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-cluster
- name: kubectl-wait-cluster-target
# (TODO) Need to add manifests for controlplane-targer phase
# (TODO) Need to add manifests for controlplane-target phase
# Create target k8s cluster resources
# - name: controlplane-target
# List all nodes in target cluster
@ -152,8 +152,6 @@ validation:
- Clusterctl
- VariableCatalogue
- StorageCatalogue
crdList:
- airshipctl/manifests/function/airshipctl-schemas
---
apiVersion: airshipit.org/v1alpha1
kind: PhasePlan

View File

@ -1,79 +0,0 @@
apiVersion: airshipit.org/v1alpha1
kind: KubevalOptions
metadata:
name: kubeval-options
labels:
airshipit.org/deploy-k8s: "false"
siteConfig:
strict: true
kubernetesVersion: "1.16.0"
ignoreMissingSchemas: false
planName: AIRSHIPCTL_CURRENT_PLAN
planConfigs:
phasePlan:
kindsToSkip:
- Clusterctl
- VariableCatalogue
- StorageCatalogue
crdList:
- function/airshipctl-schemas/airshipit.org_networkcatalogues.yaml
- function/airshipctl-schemas/airshipit.org_versionscatalogues.yaml
phaseName: AIRSHIPCTL_CURRENT_PHASE
phaseConfigs:
initinfra-ephemeral:
kindsToSkip:
- Clusterctl
- VariableCatalogue
- StorageCatalogue
crdList:
- function/airshipctl-schemas/airshipit.org_versionscatalogues.yaml
- function/airshipctl-schemas/airshipit.org_networkcatalogues.yaml
clusterctl-init-ephemeral:
crdList:
- function/cert-manager/v1.1.0/upstream/cert-manager.yaml
controlplane-ephemeral:
kindsToSkip:
- VariableCatalogue
- StorageCatalogue
crdList:
- function/airshipctl-schemas/airshipit.org_networkcatalogues.yaml
- function/airshipctl-schemas/airshipit.org_versionscatalogues.yaml
- function/capi/v0.3.7/crd/bases/cluster.x-k8s.io_clusters.yaml
- function/cacpk/v0.3.7/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml
- function/capm3/v0.4.0/crd/bases/infrastructure.cluster.x-k8s.io_metal3clusters.yaml
- function/capm3/v0.4.0/crd/bases/infrastructure.cluster.x-k8s.io_metal3machinetemplates.yaml
- global/crd/baremetal-operator/metal3.io_baremetalhosts_crd.yaml
clusterctl-init-target:
crdList:
- function/cert-manager/v1.1.0/upstream/cert-manager.yaml
initinfra-target:
kindsToSkip:
- Clusterctl
- VariableCatalogue
- StorageCatalogue
crdList:
- function/airshipctl-schemas/airshipit.org_networkcatalogues.yaml
- function/airshipctl-schemas/airshipit.org_versionscatalogues.yaml
workers-target:
crdList:
- global/crd/baremetal-operator/metal3.io_baremetalhosts_crd.yaml
workers-classification:
kindsToSkip:
- VariableCatalogue
- StorageCatalogue
crdList:
- function/airshipctl-schemas/airshipit.org_networkcatalogues.yaml
- function/airshipctl-schemas/airshipit.org_versionscatalogues.yaml
- function/cabpk/v0.3.7/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml
- function/capi/v0.3.7/crd/bases/cluster.x-k8s.io_machinedeployments.yaml
- function/capm3/v0.4.0/crd/bases/infrastructure.cluster.x-k8s.io_metal3machinetemplates.yaml
- function/hwcc/crd/bases/metal3.io_hardwareclassifications.yaml
workload-target:
kindsToSkip:
- VariableCatalogue
- StorageCatalogue
crdList:
- function/airshipctl-schemas/airshipit.org_networkcatalogues.yaml
- function/airshipctl-schemas/airshipit.org_versionscatalogues.yaml
- function/flux/helm-controller/upstream/crd/bases/helm.toolkit.fluxcd.io_helmreleases.yaml
- function/flux/source-controller/upstream/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml

View File

@ -25,3 +25,15 @@ configRef:
kind: ConfigMap
name: calicoctl-delete
apiVersion: v1
---
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: kubectl-annotate-node-for-hostconfig-operator
spec:
image: localhost/toolbox
hostNetwork: true
configRef:
kind: ConfigMap
name: kubectl-annotate-node-for-hostconfig-operator
apiVersion: v1

View File

@ -8,3 +8,4 @@ resources:
- ../../../function/phase-helper
patchesStrategicMerge:
- cluster_map_patch.yaml
- plan-patch.yaml

View File

@ -33,3 +33,14 @@ config:
kind: KubernetesApply
name: kubernetes-apply
documentEntryPoint: target/workload-config
---
apiVersion: airshipit.org/v1alpha1
kind: Phase
metadata:
name: kubectl-annotate-node-for-hostconfig-operator-target
clusterName: target-cluster
config:
executorRef:
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
name: kubectl-annotate-node-for-hostconfig-operator

View File

@ -0,0 +1,163 @@
apiVersion: airshipit.org/v1alpha1
kind: PhasePlan
metadata:
name: deploy-gating
description: "Phase plan for multi-tenant bare metal deployment"
phases:
# Deploy ephemeral node using redfish with iso
- name: remotedirect-ephemeral
# Wait for apiserver to become available
# Scripts for this phase placed in manifests/function/phase-helpers/wait_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-node
- name: kubectl-wait-node-ephemeral
# Waiting for any pods to be available
# Scripts for this phase placed in manifests/function/phase-helpers/wait_pods_any/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-pods-any
- name: kubectl-wait-pods-any-ephemeral
# Deploy calico using tigera operator
- name: initinfra-networking-ephemeral
# Wait for Calico to be deployed using tigera
# Scripts for this phase placed in manifests/function/phase-helpers/wait_tigera/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-tigera
- name: kubectl-wait-tigera-ephemeral
# Deploy metal3.io components to ephemeral node
- name: initinfra-ephemeral
# Getting pods as debug information
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-ephemeral
# Deploy cluster-api components to ephemeral node
- name: clusterctl-init-ephemeral
# Waiting for clusterapi pods to come up
# Scripts for this phase placed in manifests/function/phase-helpers/wait_deploy/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-deploy
- name: kubectl-wait-deploy-ephemeral
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-ephemeral
# TODO (dukov) this is needed due to sushy tools inserts cdrom image to
# all vms. This can be removed once sushy tool is fixed
# Scripts for this phase placed in manifests/function/phase-helpers/virsh-eject-cdrom-images/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name virsh-eject-cdrom-images
- name: virsh-eject-cdrom-images
# Create target k8s cluster resources
- name: controlplane-ephemeral
# List all nodes in target cluster
# Scripts for this phase placed in manifests/function/phase-helpers/get_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-node
- name: kubectl-get-node-target
# List all pods in target cluster
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-target
# Deploy calico using tigera operator
- name: initinfra-networking-target
# Wait for Calico to be deployed using tigera
# Scripts for this phase placed in manifests/function/phase-helpers/wait_tigera/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-tigera
- name: kubectl-wait-tigera-target
# Deploy infra to cluster
- name: initinfra-target
# List all pods
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-target
# Annotate node for hostconfig-operator
# Scripts for this phase placed in manifests/function/phase-helpers/annotate_node_for_hostconfig_operator/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-annotate-node-for-hostconfig-operator
- name: kubectl-annotate-node-for-hostconfig-operator-target
# Deploy CAPI components to target cluster
- name: clusterctl-init-target
# Waiting for pods to be ready
# Scripts for this phase placed in manifests/function/phase-helpers/wait_pods_ready/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-pods-ready
- name: kubectl-wait-pods-ready-target
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-target
# Move Cluster Object to Target Cluster
- name: clusterctl-move
# Waiting for pods to be ready
# Scripts for this phase placed in manifests/function/phase-helpers/wait_pods_ready/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-pods-ready
- name: kubectl-wait-pods-ready-target
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
- name: kubectl-get-pods-target
# Wait till crds are created
# Scripts for this phase placed in manifests/function/phase-helpers/wait_cluster/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-cluster
- name: kubectl-wait-cluster-target
# (TODO) Need to add manifests for controlplane-target phase
# Create target k8s cluster resources
# - name: controlplane-target
# List all nodes in target cluster
# Scripts for this phase placed in manifests/function/phase-helpers/get_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-node
# - name: kubectl-get-node-target
# List all pods in target cluster
# Scripts for this phase placed in manifests/function/phase-helpers/get_pods/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-get-pods
# - name: kubectl-get-pods-target
# TODO (dukov) this is needed due to sushy tools inserts cdrom image to
# all vms. This can be removed once sushy tool is fixed
# Scripts for this phase placed in manifests/function/phase-helpers/virsh-destroy-vms/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name virsh-destroy-vms
- name: virsh-destroy-vms
# Deploy worker node
- name: workers-target
# Waiting for node to be provisioned
# Scripts for this phase placed in manifests/function/phase-helpers/wait_label_node/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-wait-label-node
- name: kubectl-wait-label-node-target
# Annotate node for hostconfig-operator
# Scripts for this phase placed in manifests/function/phase-helpers/annotate_node_for_hostconfig_operator/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-annotate-node-for-hostconfig-operator
- name: kubectl-annotate-node-for-hostconfig-operator-target
# Deploy workload
- name: workload-target
# Ensure we can reach ingress controller default backend
# Scripts for this phase placed in manifests/function/phase-helpers/check_ingress_ctrl/
# To get ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
# and find ConfigMap with name kubectl-check-ingress-ctrl
- name: kubectl-check-ingress-ctrl-target
#- name: lma-infra
#- name: lma-stack
#- name: lma-configs
#- name: deliver-network-policy
validation:
kindsToSkip:
- Clusterctl
- VariableCatalogue
- StorageCatalogue
---
apiVersion: airshipit.org/v1alpha1
kind: PhasePlan
metadata:
name: iso
validation:
kindsToSkip:
- RemoteDirectConfiguration
- VariableCatalogue
- StorageCatalogue

View File

@ -1,79 +0,0 @@
apiVersion: airshipit.org/v1alpha1
kind: KubevalOptions
metadata:
name: kubeval-options
labels:
airshipit.org/deploy-k8s: "false"
siteConfig:
strict: true
kubernetesVersion: "1.16.0"
ignoreMissingSchemas: false
planName: AIRSHIPCTL_CURRENT_PLAN
planConfigs:
phasePlan:
kindsToSkip:
- Clusterctl
- VariableCatalogue
- StorageCatalogue
crdList:
- function/airshipctl-schemas/airshipit.org_networkcatalogues.yaml
- function/airshipctl-schemas/airshipit.org_versionscatalogues.yaml
phaseName: AIRSHIPCTL_CURRENT_PHASE
phaseConfigs:
initinfra-ephemeral:
kindsToSkip:
- Clusterctl
- VariableCatalogue
- StorageCatalogue
crdList:
- function/airshipctl-schemas/airshipit.org_versionscatalogues.yaml
- function/airshipctl-schemas/airshipit.org_networkcatalogues.yaml
clusterctl-init-ephemeral:
crdList:
- function/cert-manager/v1.1.0/upstream/cert-manager.yaml
controlplane-ephemeral:
kindsToSkip:
- VariableCatalogue
- StorageCatalogue
crdList:
- function/airshipctl-schemas/airshipit.org_networkcatalogues.yaml
- function/airshipctl-schemas/airshipit.org_versionscatalogues.yaml
- function/capi/v0.3.7/crd/bases/cluster.x-k8s.io_clusters.yaml
- function/cacpk/v0.3.7/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml
- function/capm3/v0.4.0/crd/bases/infrastructure.cluster.x-k8s.io_metal3clusters.yaml
- function/capm3/v0.4.0/crd/bases/infrastructure.cluster.x-k8s.io_metal3machinetemplates.yaml
- global/crd/baremetal-operator/metal3.io_baremetalhosts_crd.yaml
clusterctl-init-target:
crdList:
- function/cert-manager/v1.1.0/upstream/cert-manager.yaml
initinfra-target:
kindsToSkip:
- Clusterctl
- VariableCatalogue
- StorageCatalogue
crdList:
- function/airshipctl-schemas/airshipit.org_networkcatalogues.yaml
- function/airshipctl-schemas/airshipit.org_versionscatalogues.yaml
workers-target:
crdList:
- global/crd/baremetal-operator/metal3.io_baremetalhosts_crd.yaml
workers-classification:
kindsToSkip:
- VariableCatalogue
- StorageCatalogue
crdList:
- function/airshipctl-schemas/airshipit.org_networkcatalogues.yaml
- function/airshipctl-schemas/airshipit.org_versionscatalogues.yaml
- function/cabpk/v0.3.7/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml
- function/capi/v0.3.7/crd/bases/cluster.x-k8s.io_machinedeployments.yaml
- function/capm3/v0.4.0/crd/bases/infrastructure.cluster.x-k8s.io_metal3machinetemplates.yaml
- function/hwcc/crd/bases/metal3.io_hardwareclassifications.yaml
workload-target:
kindsToSkip:
- VariableCatalogue
- StorageCatalogue
crdList:
- function/airshipctl-schemas/airshipit.org_networkcatalogues.yaml
- function/airshipctl-schemas/airshipit.org_versionscatalogues.yaml
- function/flux/helm-controller/upstream/crd/bases/helm.toolkit.fluxcd.io_helmreleases.yaml
- function/flux/source-controller/upstream/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml

View File

@ -26,6 +26,10 @@
run: playbooks/airship-treasuremap-gate-runner.yaml
nodeset: airship-treasuremap-single-node
vars:
job_environment:
# Override AIRSHIPCTL default 'test-site' validation so gate jobs will run
# on all the sites present in TREASUREMAP repo
SITE: ""
gate_scripts:
- ./tools/deployment/common/01_install_essentials.sh
- ./tools/deployment/common/21_systemwide_executable.sh