Uplift provider components in test site

* uplift capi, cabpk, cacpk and capm3 in test site definition
  * uplift images in version catalogue
  * add api timeout in controlplane
  * add timeout for phases

Change-Id: I9de15c4e1979c12eeedb55fbe721b5e7adfe496b
Signed-off-by: Sreejith Punnapuzha <sreejith.punnapuzha@outlook.com>
This commit is contained in:
Sreejith Punnapuzha 2020-09-14 08:28:23 -05:00 committed by Sreejith Punnapuzha
parent cf1f55f3ca
commit f48456a69f
5 changed files with 22 additions and 17 deletions

View File

@ -26,13 +26,13 @@ images:
manager: quay.io/metal3-io/cluster-api-provider-metal3 manager: quay.io/metal3-io/cluster-api-provider-metal3
auth_proxy: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0 auth_proxy: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0
cacpk: cacpk:
manager: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-control-plane-controller:v0.3.3 manager: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-control-plane-controller:v0.3.7
auth_proxy: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 auth_proxy: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1
cabpk: cabpk:
manager: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-bootstrap-controller:v0.3.3 manager: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-bootstrap-controller:v0.3.7
auth_proxy: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 auth_proxy: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1
capi: capi:
manager: us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v0.3.3 manager: us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v0.3.7
auth_proxy: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 auth_proxy: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1
baremetal_operator: baremetal_operator:
ironic: # ironic Deployment ironic: # ironic Deployment

View File

@ -5,40 +5,40 @@ metadata:
airshipit.org/deploy-k8s: "false" airshipit.org/deploy-k8s: "false"
name: clusterctl-v1 name: clusterctl-v1
init-options: init-options:
core-provider: "cluster-api:v0.3.3" core-provider: "cluster-api:v0.3.7"
bootstrap-providers: bootstrap-providers:
- "kubeadm:v0.3.3" - "kubeadm:v0.3.7"
infrastructure-providers: infrastructure-providers:
- "metal3:v0.3.1" - "metal3:v0.3.2"
control-plane-providers: control-plane-providers:
- "kubeadm:v0.3.3" - "kubeadm:v0.3.7"
providers: providers:
- name: "metal3" - name: "metal3"
type: "InfrastructureProvider" type: "InfrastructureProvider"
variable-substitution: true variable-substitution: true
versions: versions:
v0.3.1: manifests/function/capm3/v0.3.1 v0.3.2: manifests/function/capm3/v0.3.2
- name: "kubeadm" - name: "kubeadm"
type: "BootstrapProvider" type: "BootstrapProvider"
variable-substitution: true variable-substitution: true
versions: versions:
v0.3.3: manifests/function/cabpk/v0.3.3 v0.3.7: manifests/function/cabpk/v0.3.7
- name: "cluster-api" - name: "cluster-api"
type: "CoreProvider" type: "CoreProvider"
variable-substitution: true variable-substitution: true
versions: versions:
v0.3.3: manifests/function/capi/v0.3.3 v0.3.7: manifests/function/capi/v0.3.7
- name: "kubeadm" - name: "kubeadm"
type: "ControlPlaneProvider" type: "ControlPlaneProvider"
variable-substitution: true variable-substitution: true
versions: versions:
v0.3.3: manifests/function/cacpk/v0.3.3 v0.3.7: manifests/function/cacpk/v0.3.7
# These default images can be overridden via the `replacements/` entrypoint # These default images can be overridden via the `replacements/` entrypoint
additional-vars: additional-vars:
CONTAINER_CAPM3_MANAGER: quay.io/metal3-io/cluster-api-provider-metal3:v0.3.1 CONTAINER_CAPM3_MANAGER: quay.io/metal3-io/cluster-api-provider-metal3:v0.3.2
CONTAINER_CACPK_MANAGER: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-control-plane-controller:v0.3.3 CONTAINER_CACPK_MANAGER: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-control-plane-controller:v0.3.7
CONTAINER_CABPK_MANAGER: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-bootstrap-controller:v0.3.3 CONTAINER_CABPK_MANAGER: us.gcr.io/k8s-artifacts-prod/cluster-api/kubeadm-bootstrap-controller:v0.3.7
CONTAINER_CAPI_MANAGER: us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v0.3.3 CONTAINER_CAPI_MANAGER: us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v0.3.7
CONTAINER_CAPM3_AUTH_PROXY: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0 CONTAINER_CAPM3_AUTH_PROXY: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0
CONTAINER_CACPK_AUTH_PROXY: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 CONTAINER_CACPK_AUTH_PROXY: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1
CONTAINER_CABPK_AUTH_PROXY: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 CONTAINER_CABPK_AUTH_PROXY: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1

View File

@ -10,6 +10,9 @@ spec:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
name: cluster-controlplane name: cluster-controlplane
kubeadmConfigSpec: kubeadmConfigSpec:
clusterConfiguration:
apiServer:
timeoutForControlPlane: 1000s
preKubeadmCommands: preKubeadmCommands:
- echo 'root:r00tme' | chpasswd - echo 'root:r00tme' | chpasswd
- echo 'ubuntu:r00tme' | chpasswd - echo 'ubuntu:r00tme' | chpasswd

View File

@ -17,6 +17,7 @@ set -ex
TARGET_IMAGE_DIR="/srv/iso" TARGET_IMAGE_DIR="/srv/iso"
EPHEMERAL_DOMAIN_NAME="air-ephemeral" EPHEMERAL_DOMAIN_NAME="air-ephemeral"
TARGET_IMAGE_URL="https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img" TARGET_IMAGE_URL="https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img"
export WAIT_TIMEOUT=${WAIT_TIMEOUT:-"2000s"}
# TODO (dukov) this is needed due to sushy tools inserts cdrom image to # TODO (dukov) this is needed due to sushy tools inserts cdrom image to
# all vms. This can be removed once sushy tool is fixed # all vms. This can be removed once sushy tool is fixed
@ -46,7 +47,7 @@ fi
md5sum /srv/iso/target-image.qcow2 | cut -d ' ' -f 1 > ${TARGET_IMAGE_DIR}/target-image.qcow2.md5sum md5sum /srv/iso/target-image.qcow2 | cut -d ' ' -f 1 > ${TARGET_IMAGE_DIR}/target-image.qcow2.md5sum
echo "Create target k8s cluster resources" echo "Create target k8s cluster resources"
airshipctl phase apply controlplane airshipctl phase apply controlplane --wait-timeout $WAIT_TIMEOUT --debug
echo "Get kubeconfig from secret" echo "Get kubeconfig from secret"
KUBECONFIG="" KUBECONFIG=""

View File

@ -17,6 +17,7 @@ set -xe
export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"} export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
export TIMEOUT=${TIMEOUT:-60} export TIMEOUT=${TIMEOUT:-60}
NODENAME="node01" NODENAME="node01"
export WAIT_TIMEOUT=${WAIT_TIMEOUT:-"2000s"}
# TODO need to run another config command after use-context to update kubeconfig # TODO need to run another config command after use-context to update kubeconfig
echo "Switch context to target cluster and set manifest" echo "Switch context to target cluster and set manifest"
@ -44,7 +45,7 @@ done
kubectl --kubeconfig $KUBECONFIG taint node $NODENAME node-role.kubernetes.io/master- kubectl --kubeconfig $KUBECONFIG taint node $NODENAME node-role.kubernetes.io/master-
echo "Deploy infra to cluster" echo "Deploy infra to cluster"
airshipctl phase apply initinfra --debug --wait-timeout 1000s airshipctl phase apply initinfra --debug --wait-timeout $WAIT_TIMEOUT
echo "List all pods" echo "List all pods"
kubectl --kubeconfig $KUBECONFIG get pods --all-namespaces kubectl --kubeconfig $KUBECONFIG get pods --all-namespaces