Integrate CAPD

* The manifests below integrate CAPD at v1alpha4

* Cluster templates updated to v1alpha4

Closes: #647

Change-Id: I53f25cb2dd1eeed901d8582ddb873f0894bc45b9
This commit is contained in:
Snehal 2021-10-01 13:06:33 +00:00
parent ebc4d99215
commit a20eb0b4ba
15 changed files with 49 additions and 129 deletions

View File

@ -41,7 +41,7 @@ spec:
capd:
manager:
repository: gcr.io/k8s-staging-cluster-api
tag: v20201019-v0.3.10-86-gc1647481f
tag: v0.4.2
auth_proxy:
repository: gcr.io/kubebuilder
tag: v0.4.0

View File

@ -1,27 +1,25 @@
---
apiVersion: cluster.x-k8s.io/v1alpha3
apiVersion: cluster.x-k8s.io/v1alpha4
kind: Cluster
metadata:
name: "target-cluster"
namespace: default
spec:
clusterNetwork:
services:
cidrBlocks: ["10.128.0.0/12"]
cidrBlocks: ["10.96.0.0/12"]
pods:
cidrBlocks: ["192.168.0.0/16"]
serviceDomain: "cluster.local"
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: DockerCluster
name: "target-cluster"
controlPlaneRef:
kind: KubeadmControlPlane
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
apiVersion: controlplane.cluster.x-k8s.io/v1alpha4
name: "target-cluster-control-plane"
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: DockerCluster
metadata:
name: "target-cluster"
namespace: default

View File

@ -1,9 +1,8 @@
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: DockerMachineTemplate
metadata:
name: "target-cluster-control-plane"
namespace: default
spec:
template:
spec:
@ -11,29 +10,41 @@ spec:
- containerPath: "/var/run/docker.sock"
hostPath: "/var/run/docker.sock"
---
apiVersion: controlplane.cluster.x-k8s.io/v1alpha4
kind: KubeadmControlPlane
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
metadata:
name: "target-cluster-control-plane"
namespace: default
spec:
replicas: ${ CONTROL_PLANE_MACHINE_COUNT }
infrastructureTemplate:
kind: DockerMachineTemplate
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
name: "target-cluster-control-plane"
version: v1.21.2
machineTemplate:
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: DockerMachineTemplate
name: "target-cluster-control-plane"
kubeadmConfigSpec:
clusterConfiguration:
controllerManager:
extraArgs: {enable-hostpath-provisioner: 'true'}
apiServer:
certSANs: [localhost, 127.0.0.1]
certSANs:
- localhost
- 127.0.0.1
- 0.0.0.0
controllerManager:
extraArgs:
enable-hostpath-provisioner: "true"
initConfiguration:
nodeRegistration:
criSocket: /var/run/containerd/containerd.sock
kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
kubeletExtraArgs:
# We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
# kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
cgroup-driver: cgroupfs
eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
joinConfiguration:
nodeRegistration:
criSocket: /var/run/containerd/containerd.sock
kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
version: "v1.21.2"
kubeletExtraArgs:
# We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
# kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
cgroup-driver: cgroupfs
eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%

View File

@ -1,25 +1,25 @@
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: DockerMachineTemplate
metadata:
name: "target-cluster-md-0"
namespace: default
spec:
template:
spec: {}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4
kind: KubeadmConfigTemplate
metadata:
name: "target-cluster-md-0"
namespace: default
spec:
template:
spec:
joinConfiguration:
nodeRegistration:
kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'}
kubeletExtraArgs:
cgroup-driver: cgroupfs
eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
---
apiVersion: cluster.x-k8s.io/v1alpha3
apiVersion: cluster.x-k8s.io/v1alpha4
kind: MachineDeployment
metadata:
name: "target-cluster-md-0"
@ -36,11 +36,9 @@ spec:
bootstrap:
configRef:
name: "target-cluster-md-0"
namespace: default
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4
kind: KubeadmConfigTemplate
infrastructureRef:
name: "target-cluster-md-0"
namespace: default
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: DockerMachineTemplate

View File

@ -9,7 +9,7 @@ namespace: target-infra
patchesJson6902:
- target:
group: controlplane.cluster.x-k8s.io
version: v1alpha3
version: v1alpha4
kind: KubeadmControlPlane
name: "target-cluster-control-plane"
path: machine_count.json

View File

@ -1,45 +0,0 @@
apiVersion: airshipit.org/v1alpha1
kind: Clusterctl
metadata:
name: clusterctl_init
init-options:
bootstrap-providers: kubeadm:v0.3.7
control-plane-providers: kubeadm:v0.3.7
core-provider: cluster-api:v0.3.7
providers:
- name: docker
type: InfrastructureProvider
url: airshipctl/manifests/function/capd/v0.3.11
- name: kubeadm
type: BootstrapProvider
url: airshipctl/manifests/function/cabpk/v0.3.7
- name: cluster-api
type: CoreProvider
url: airshipctl/manifests/function/capi/v0.3.7
- name: kubeadm
type: ControlPlaneProvider
url: airshipctl/manifests/function/cacpk/v0.3.7
images:
cert-manager:
repository: "quay.io/jetstack"
cluster-api/cluster-api-controller:
repository: "us.gcr.io/k8s-artifacts-prod/cluster-api"
tag: "v0.3.7"
cluster-api/kube-rbac-proxy:
repository: "gcr.io/kubebuilder"
tag: "v0.4.1"
bootstrap-kubeadm/kubeadm-bootstrap-controller:
repository: "us.gcr.io/k8s-artifacts-prod/cluster-api"
tag: "v0.3.7"
bootstrap-kubeadm/kube-rbac-proxy:
repository: "gcr.io/kubebuilder"
tag: "v0.4.1"
control-plane-kubeadm/kubeadm-control-plane-controller:
repository: "us.gcr.io/k8s-artifacts-prod/cluster-api"
tag: "v0.3.7"
control-plane-kubeadm/kube-rbac-proxy:
repository: "gcr.io/kubebuilder"
tag: "v0.4.1"

View File

@ -19,4 +19,4 @@ map:
clusterAPI:
clusterNamespacedName:
name: target-cluster
namespace: default
namespace: target-infra

View File

@ -1,6 +0,0 @@
apiVersion: airshipit.org/v1alpha1
kind: GenericContainer
metadata:
name: clusterctl
spec:
image: localhost/clusterctl:v0.3

View File

@ -3,5 +3,5 @@ kind: Clusterctl
metadata:
name: clusterctl_move
move-options:
namespace: default
namespace: target-infra
action: move

View File

@ -1,7 +1,7 @@
[{
"op": "replace",
"path": "/init-options/infrastructure-providers",
"value": "docker:v0.3.11"
"value": "docker:v0.4.2"
},
{
"op": "replace",
@ -9,7 +9,7 @@
"value": {
"name": "docker",
"type": "InfrastructureProvider",
"url": "airshipctl/manifests/function/capd/v0.3.11"
"url": "airshipctl/manifests/function/capd/v0.4.2"
}
}
]

View File

@ -12,13 +12,6 @@ patchesStrategicMerge:
- plan_patch.yaml
- cluster_map_patch.yaml
- executor_patch.yaml
# clusterctl_image_patch.yaml replaces the localhost/clusterctl:latest image with the clusterctl v0.3.22 utility image
# capi_provider_patch.yaml pins v1alpha3 CAPI components providers and images for docker-test-site
- clusterctl_image_patch.yaml
- capi_provider_patch.yaml
transformers:
# Commenting out clusterctl replacements to use v1alpha3 CAPI component images
# This needs to be uncommented again once the docker provider is upgraded to v1alpha4,
# and the above 2 patches can then be deleted
# - ../../../function/clusterctl/replacements
- ../../../function/clusterctl/replacements
- ../../../phases/replacements

View File

@ -15,6 +15,7 @@ phases:
- name: kubectl-get-pods-target
- name: clusterctl-init-target
- name: kubectl-wait-pods-any-ephemeral
- name: kubectl-wait-pods-ready-target
- name: clusterctl-move
- name: workers-target
validation:

View File

@ -1,31 +0,0 @@
apiVersion: airshipit.org/v1alpha1
kind: Clusterctl
metadata:
labels:
airshipit.org/deploy-k8s: "false"
name: clusterctl-v1
init-options:
core-provider: "cluster-api:v0.3.3"
bootstrap-providers:
- "kubeadm:v0.3.3"
infrastructure-providers:
- "docker:v0.3.7"
control-plane-providers:
- "kubeadm:v0.3.3"
providers:
- name: "docker"
type: "InfrastructureProvider"
versions:
v0.3.7: airshipctl/manifests/function/capd/v0.3.7
- name: "kubeadm"
type: "BootstrapProvider"
versions:
v0.3.3: airshipctl/manifests/function/cabpk/v0.3.3
- name: "cluster-api"
type: "CoreProvider"
versions:
v0.3.3: airshipctl/manifests/function/capi/v0.3.3
- name: "kubeadm"
type: "ControlPlaneProvider"
versions:
v0.3.3: airshipctl/manifests/function/cacpk/v0.3.3

View File

@ -3,10 +3,12 @@ kind: Kustomization
resources:
- ../../../../function/workers-capd
namespace: target-infra
patchesJson6902:
- target:
group: cluster.x-k8s.io
version: v1alpha3
version: v1alpha4
kind: MachineDeployment
name: "target-cluster-md-0"
path: machine_count.json

View File

@ -40,7 +40,6 @@ set -xe
: ${TIMEOUT:=3600}
: ${KIND_CONFIG:=""}
export KIND_EXPERIMENTAL_DOCKER_NETWORK=bridge
export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
echo "cluster name: $CLUSTER";