Integrate capi v0.4.2 and capm3 v0.5.0

The manifest changes below integrate capi v0.4.2 with capm3
v0.5.0. They update the required manifest files in
airshipctl, upgrading the capi API version to v1alpha4 and
the capm3 API version to v1alpha5.

Closes: #518 #560
Change-Id: Ia9ea82ad8052e55f0e70f1038497a919ac7b9270
SirishaGopigiri 2021-08-17 06:29:02 +00:00 committed by Ruslan Aliev
parent 61a316ce1c
commit ad555d4d24
22 changed files with 103 additions and 153 deletions
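
For orientation, the API group versions these manifests target after the bump, collected from the diffs below (note the worker Metal3MachineTemplate hunks reference infrastructure v1alpha4 rather than v1alpha5):

# cluster.x-k8s.io:                v1alpha3 -> v1alpha4  (capi v0.4.2)
# bootstrap.cluster.x-k8s.io:      v1alpha3 -> v1alpha4
# controlplane.cluster.x-k8s.io:   v1alpha3 -> v1alpha4
# infrastructure.cluster.x-k8s.io: v1alpha4 -> v1alpha5  (capm3 v0.5.0)
apiVersion: cluster.x-k8s.io/v1alpha4
kind: Cluster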

View File

@ -10,7 +10,7 @@ FROM ${PLUGINS_BUILD_IMAGE} as ctls
RUN apk update && apk add curl
COPY ./certs/* /usr/local/share/ca-certificates/
RUN update-ca-certificates
ARG CCTL_VERSION=0.3.23
ARG CCTL_VERSION=0.4.4
RUN curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/v${CCTL_VERSION}/clusterctl-linux-amd64 -o /clusterctl
RUN chmod +x /clusterctl

View File

@ -24,7 +24,7 @@ spec:
provisioningIp: "10.23.25.102"
dhcpRange: "10.23.25.200,10.23.25.250"
ironicAutomatedClean: ""
httpPort: ""
httpPort: "80"
ironicFastTrack: ""
deployKernelUrl: ""
deployRamdiskUrl: ""
@ -72,4 +72,4 @@ spec:
- 0.pool.ntp.org
- 1.pool.ntp.org
- 2.pool.ntp.org
- 3.pool.ntp.org
- 3.pool.ntp.org

View File

@ -26,27 +26,18 @@ spec:
capm3: # Images specific to the capm3 function; etc.
manager:
repository: quay.io/metal3-io
tag: v0.4.0
tag: v0.5.0
auth_proxy:
repository: gcr.io/kubebuilder
tag: v0.4.0
ipam-manager:
repository: quay.io/metal3-io
tag: v0.0.4
tag: v0.8.0
cacpk:
manager:
repository: us.gcr.io/k8s-artifacts-prod/cluster-api
tag: v0.3.7
auth_proxy:
repository: gcr.io/kubebuilder
tag: v0.4.1
repository: gcr.io/k8s-staging-cluster-api
tag: v0.4.2
cabpk:
manager:
repository: us.gcr.io/k8s-artifacts-prod/cluster-api
tag: v0.3.7
auth_proxy:
repository: gcr.io/kubebuilder
tag: v0.4.1
repository: gcr.io/k8s-staging-cluster-api
tag: v0.4.2
capd:
manager:
repository: gcr.io/k8s-staging-cluster-api
@ -70,11 +61,8 @@ spec:
tag: v0.4.1
capi:
manager:
repository: us.gcr.io/k8s-artifacts-prod/cluster-api
tag: v0.3.7
auth_proxy:
repository: gcr.io/kubebuilder
tag: v0.4.1
repository: gcr.io/k8s-staging-cluster-api
tag: v0.4.2
capz:
manager:
repository: gcr.io/k8s-staging-cluster-api-azure
@ -90,27 +78,33 @@ spec:
# <container>:
baremetal_operator:
ironic: # ironic Deployment
init_bootstrap:
image: quay.io/centos/centos:8.3.2011
init_images:
image: quay.io/airshipit/ipa:latest
image: quay.io/airshipit/ipa-wallaby:latest
qcow_bundle:
image: quay.io/airshipit/qcow-bundle:latest-ubuntu_focal
image: quay.io/airshipit/qcow-bundle:k8s-1.19-latest-ubuntu_focal
dnsmasq:
image: quay.io/metal3-io/ironic:capm3-v0.4.0
image: quay.io/metal3-io/ironic:capm3-v0.5.0
mariadb:
image: quay.io/metal3-io/ironic:capm3-v0.5.0
httpd:
image: quay.io/metal3-io/ironic:capm3-v0.4.0
ironic:
image: quay.io/metal3-io/ironic:capm3-v0.4.0
image: quay.io/metal3-io/ironic:capm3-v0.5.0
ironic_api:
image: quay.io/metal3-io/ironic:capm3-v0.5.0
ironic_conductor:
image: quay.io/metal3-io/ironic:capm3-v0.5.0
ironic_inspector:
image: quay.io/metal3-io/ironic-inspector:capm3-v0.4.0
image: quay.io/metal3-io/ironic:capm3-v0.5.0
ironic_log_watch:
image: quay.io/metal3-io/ironic:capm3-v0.5.0
ironic_inspector_log_watch:
image: quay.io/metal3-io/ironic:capm3-v0.5.0
ironic_endpoint_keepalived:
image: quay.io/metal3-io/keepalived:capm3-v0.5.0
metal3_baremetal_operator: # metal3-baremetal-operator Deployment
baremetal_operator:
image: quay.io/metal3-io/baremetal-operator:capm3-v0.4.0
ironic_proxy:
image: quay.io/airshipit/socat:1.7.4.1
ironic_inspector_proxy:
image: quay.io/airshipit/socat:1.7.4.1
image: quay.io/metal3-io/baremetal-operator:capm3-v0.5.0
kube_rbac_proxy:
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
calico_v3:
node: # calico-node DaemonSet
upgrade_ipam:
@ -158,9 +152,9 @@ spec:
image_builder:
image_builder:
image_builder:
image: quay.io/airshipit/image-builder:611fd18363c52ee322a83e9c68a8b56f863b8a79-ubuntu_focal
image: quay.io/airshipit/image-builder:k8s-1.19-latest-ubuntu_focal
kubernetes: v1.18.6
kubernetes: v1.19.14
image_repositories:
cni:

View File

@ -78,15 +78,14 @@ spec:
- tag
type: object
required:
- auth_proxy
- manager
type: object
description: capi_images defines collections of images used by cluster
API. The name of each key in this section should correspond to the
airshipctl function in which the images will be used, such as "capm3".
Each capi_image object must have a "manager" and "auth_proxy" object,
each of which must have "repository" and "tag" properties defined.
capi_images may also include an optional "ipam-manager" object,
Each capi_image object must have a "manager" object, which must have
"repository" and "tag" properties defined. capi_images may also include
an optional "ipam-manager" or "auth_proxy" object,
which must also have "repository" and "tag" properties defined.
type: object
charts:
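
For illustration, a minimal sketch of a capi_images entry that conforms to the relaxed schema, with values taken from the versions catalogue above:

capi_images:
  capi:
    manager:                   # required
      repository: gcr.io/k8s-staging-cluster-api
      tag: v0.4.2
  capm3:
    manager:
      repository: quay.io/metal3-io
      tag: v0.5.0
    auth_proxy:                # now optional
      repository: gcr.io/kubebuilder
      tag: v0.4.0
    ipam-manager:              # optional
      repository: quay.io/metal3-io
      tag: v0.8.0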

View File

@ -5,23 +5,23 @@ metadata:
airshipit.org/deploy-k8s: "false"
name: clusterctl_init
init-options:
core-provider: "cluster-api:v0.3.7"
bootstrap-providers: "kubeadm:v0.3.7"
infrastructure-providers: "metal3:v0.4.0"
control-plane-providers: "kubeadm:v0.3.7"
core-provider: "cluster-api:v0.4.2"
bootstrap-providers: "kubeadm:v0.4.2"
infrastructure-providers: "metal3:v0.5.0"
control-plane-providers: "kubeadm:v0.4.2"
providers:
- name: "metal3"
type: "InfrastructureProvider"
url: airshipctl/manifests/function/capm3/v0.4.0
url: airshipctl/manifests/function/capm3/v0.5.0
- name: "kubeadm"
type: "BootstrapProvider"
url: airshipctl/manifests/function/cabpk/v0.3.7
url: airshipctl/manifests/function/cabpk/v0.4.2
- name: "cluster-api"
type: "CoreProvider"
url: airshipctl/manifests/function/capi/v0.3.7
url: airshipctl/manifests/function/capi/v0.4.2
- name: "kubeadm"
type: "ControlPlaneProvider"
url: airshipctl/manifests/function/cacpk/v0.3.7
url: airshipctl/manifests/function/cacpk/v0.4.2
# The default image repository and tag for a specific component
# can be overridden here
@ -29,32 +29,20 @@ images:
cert-manager:
repository: "quay.io/jetstack"
cluster-api/cluster-api-controller:
repository: "us.gcr.io/k8s-artifacts-prod/cluster-api"
tag: "v0.3.7"
cluster-api/kube-rbac-proxy:
repository: "gcr.io/kubebuilder"
tag: "v0.4.1"
repository: "gcr.io/k8s-staging-cluster-api"
tag: "v0.4.2"
bootstrap-kubeadm/kubeadm-bootstrap-controller:
repository: "us.gcr.io/k8s-artifacts-prod/cluster-api"
tag: "v0.3.7"
bootstrap-kubeadm/kube-rbac-proxy:
repository: "gcr.io/kubebuilder"
tag: "v0.4.1"
repository: "gcr.io/k8s-staging-cluster-api"
tag: "v0.4.2"
control-plane-kubeadm/kubeadm-control-plane-controller:
repository: "us.gcr.io/k8s-artifacts-prod/cluster-api"
tag: "v0.3.7"
control-plane-kubeadm/kube-rbac-proxy:
repository: "gcr.io/kubebuilder"
tag: "v0.4.1"
repository: "gcr.io/k8s-staging-cluster-api"
tag: "v0.4.2"
infrastructure-metal3/cluster-api-provider-metal3:
repository: "quay.io/metal3-io"
tag: "v0.4.0"
tag: "v0.5.0"
infrastructure-metal3/kube-rbac-proxy:
repository: "gcr.io/kubebuilder"
tag: "v0.4.0"
infrastructure-metal3/ip-address-manager:
repository: "quay.io/metal3-io"
tag: "v0.4.0"
tag: "v0.8.0"
infrastructure-docker/capd-manager:
repository: "gcr.io/k8s-staging-cluster-api"
tag: "v20201019-v0.3.10-86-gc1647481f"

View File

@ -29,16 +29,6 @@ replacements:
kind: Clusterctl
name: clusterctl_init
fieldrefs: ["{.images.infrastructure-metal3/kube-rbac-proxy}"]
- source:
objref:
kind: VersionsCatalogue
name: versions-airshipctl
fieldref: "{.spec.capi_images.capm3.ipam-manager}"
target:
objref:
kind: Clusterctl
name: clusterctl_init
fieldrefs: ["{.images.infrastructure-metal3/ip-address-manager}"]
# Replace cacpk versions
- source:
objref:
@ -50,16 +40,6 @@ replacements:
kind: Clusterctl
name: clusterctl_init
fieldrefs: ["{.images.control-plane-kubeadm/kubeadm-control-plane-controller}"]
- source:
objref:
kind: VersionsCatalogue
name: versions-airshipctl
fieldref: "{.spec.capi_images.cacpk.auth_proxy}"
target:
objref:
kind: Clusterctl
name: clusterctl_init
fieldrefs: ["{.images.control-plane-kubeadm/kube-rbac-proxy}"]
# Replace cabpk versions
- source:
objref:
@ -71,16 +51,6 @@ replacements:
kind: Clusterctl
name: clusterctl_init
fieldrefs: ["{.images.bootstrap-kubeadm/kubeadm-bootstrap-controller}"]
- source:
objref:
kind: VersionsCatalogue
name: versions-airshipctl
fieldref: "{.spec.capi_images.cabpk.auth_proxy}"
target:
objref:
kind: Clusterctl
name: clusterctl_init
fieldrefs: ["{.images.bootstrap-kubeadm/kube-rbac-proxy}"]
# Replace capd versions
- source:
objref:
@ -155,16 +125,6 @@ replacements:
kind: Clusterctl
name: clusterctl_init
fieldrefs: ["{.images.cluster-api/cluster-api-controller}"]
- source:
objref:
kind: VersionsCatalogue
name: versions-airshipctl
fieldref: "{.spec.capi_images.capi.auth_proxy}"
target:
objref:
kind: Clusterctl
name: clusterctl_init
fieldrefs: ["{.images.cluster-api/kube-rbac-proxy}"]
# Replace capz versions
- source:
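
For reference, the shape of the replacement rules that remain after the auth_proxy rules are dropped; a sketch assembled from the surrounding hunks (the capi manager rule corresponds to the cluster-api-controller target above):

- source:
    objref:
      kind: VersionsCatalogue
      name: versions-airshipctl
    fieldref: "{.spec.capi_images.capi.manager}"
  target:
    objref:
      kind: Clusterctl
      name: clusterctl_init
    fieldrefs: ["{.images.cluster-api/cluster-api-controller}"]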

View File

@ -9,8 +9,8 @@ metadata:
hardwareProfile:
firmware:
sriovEnabled: false
virtualizationDisabled: false
simultaneousMultithreadingDisabled: false
virtualizationEnabled: false
simultaneousMultithreadingEnabled: false
raid:
hardwareRAIDVolumes:
- name: "VirtualDisk1"

View File

@ -20,11 +20,14 @@ values:
# See function/hardwareprofile-example for an example of how to do this.
hardwareProfiles:
default:
raid:
hardwareRAIDVolumes: []
softwareRAIDVolumes: []
# Reference: https://github.com/metal3-io/metal3-docs/blob/master/design/baremetal-operator/bios-config.md
firmware:
sriovEnabled: false
virtualizationDisabled: false
simultaneousMultithreadingDisabled: false
#firmware:
# sriovEnabled: false
# virtualizationEnabled: false
# simultaneousMultithreadingEnabled: false
template: |
{{- $envAll := . }}
@ -39,9 +42,6 @@ template: |
{{- if not $hardwareProfile -}}
{{- fail (printf "can't find hardwareProfile %s" $host.hardwareProfile) -}}
{{- end -}}
{{- if not $hardwareProfile.firmware -}}
{{- fail (printf "hardwareProfile %s doesn't have firmware field" $host.hardwareProfile) -}}
{{- end -}}
---
apiVersion: metal3.io/v1alpha1
kind: BareMetalHost
@ -63,10 +63,12 @@ template: |
address: {{ $host.bmcAddress }}
credentialsName: {{ $hostName }}-bmc-secret
disableCertificateVerification: {{ default false $host.disableCertificateVerification }}
{{- if $hardwareProfile.firmware }}
firmware:
{{ toYaml $hardwareProfile.firmware | indent 4 }}
{{- /* If no raid is defined for a host, simply skip. There is no default setting for raid */ -}}
{{- /* Reference for RAID: https://github.com/metal3-io/metal3-docs/pull/134 */ -}}
{{- end }}
{{- if $hardwareProfile.raid }}
raid:
{{ toYaml $hardwareProfile.raid | indent 4 }}
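
Since the firmware block is now optional and commented out by default, a site that wants explicit BIOS settings can opt back in per profile; a minimal sketch, using the field names from the referenced bios-config design doc:

hardwareProfiles:
  default:
    raid:
      hardwareRAIDVolumes: []
      softwareRAIDVolumes: []
    firmware:                  # omit this block entirely and no firmware section is rendered
      sriovEnabled: false
      virtualizationEnabled: false
      simultaneousMultithreadingEnabled: false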

View File

@ -21,7 +21,7 @@ spec:
- sh
- -c
- "cp /qcows/*.qcow2 /qcows/*.qcow2.md5sum /shared/html/images/"
image: "quay.io/sirishagopigiri/qcow-bundle:v1.19.1"
image: "quay.io/airshipit/qcow-bundle:k8s-1.19-latest-ubuntu_focal"
imagePullPolicy: Always
volumeMounts:
- mountPath: "/shared"

View File

@ -8,7 +8,7 @@ data:
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURWRENDQWp5Z0F3SUJBZ0lVTUNwc09vRXhyRzdnRTVMOVJSamdnT01UOG53d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0dURVhNQlVHQTFVRUF3d09TM1ZpWlhKdVpYUmxjeUJCVUVrd0hoY05NakF3T1RFMU1ERXdORE0zV2hjTgpNekF3T1RFek1ERXdORE0zV2pBWk1SY3dGUVlEVlFRRERBNUxkV0psY201bGRHVnpJRUZRU1RDQ0FTSXdEUVlKCktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUtBZFo0UWJHZmlLTExpTXNHcFJKS3d5ZkRGWVI5U0MKbGtVb3hlTU1BZVBkeVNNU0paTTlFMFBOaDM5TUtTVjNSZDRIZWt1eGdHK3J4em83WmcrZU1aY1hyNFk3ektQMwo1SW0vaERkMm1TYThsMEkxZTRwV3B0Z25vZjdvRWJpSXVIU2YxQmRhMU4wWm1EUUdtckxyQnFOZFE3c1BVenNWCllPejZVUFZlamNIeEFjMXBvMWZsQXYrWVNZejVXa28wRVRnTXZYRGtxT0hrWFc1WnhPcHBVbiszOVpvWTZMK3gKVmUwUHFQdHlmSVZ1M3dtcnZFNGd4SmxtWEk3dUxmdzZONHpwS2RuK0k0K1RJRWF5aE1EMWRRenNwQzRMM0IrcApYcHFPMWNWM2ZKMlBycS9mNU14SnIxWTVHUTZlQlZyTGVod1ZWTEhEMzF3ZWFpZ3UzeStyM3RVQ0F3RUFBYU9CCmt6Q0JrREFkQmdOVkhRNEVGZ1FVT1d5YTNFd2J5c25UUy9ZajFWTEtjMGh4aDRvd1ZBWURWUjBqQkUwd1M0QVUKT1d5YTNFd2J5c25UUy9ZajFWTEtjMGh4aDRxaEhhUWJNQmt4RnpBVkJnTlZCQU1NRGt0MVltVnlibVYwWlhNZwpRVkJKZ2hRd0ttdzZnVEdzYnVBVGt2MUZHT0NBNHhQeWZEQU1CZ05WSFJNRUJUQURBUUgvTUFzR0ExVWREd1FFCkF3SUJCakFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBTVp1U2tJbTdQdlA4MW5HSjlYOVZFOFVZTVdDSU5GMEEKYit1UURFaHRGc0dxdnZFZHhQcURUWUpwdlF1SUJlOVd0cmlWRzh0MENIL1NnZ0g2TlJod0wyYkJwMm5WaEFVVwphK3hZL1RpTmMzUEl5RHNFeEY3VHVENGJzaW1BQUJTZ2ZtbXRxV1dqajRyOStodS9vZ09jLzQyYk9JT0JWbHNkCi9VNzBiR3dZQjU5QXgvL2dIWVJmVDl3L3p0VHBvY2tzdEhhSjZsVDd5SFlqYUkzaU5EWnZNSnFRSWNxME4vTEMKcVBjWjBWQXBMUTZRUHRpMWpVSzBGM1VlZEF6TVc3ZFF4NkV3Qjd5UHo4NWdZS3ZJdWdyaStrc2YwbGMyeHVDRwpXTGg2YjFNWk9Cc1NZNkppVHpSUUpYdXNCRUdaTGN5VkRJSEU3Y0Q4NWhOQmZpdDAvejFmZlE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2QUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktZd2dnU2lBZ0VBQW9JQkFRQ2dIV2VFR3huNGlpeTQKakxCcVVTU3NNbnd4V0VmVWdwWkZLTVhqREFIajNja2pFaVdUUFJORHpZZC9UQ2tsZDBYZUIzcExzWUJ2cThjNgpPMllQbmpHWEY2K0dPOHlqOStTSnY0UTNkcGttdkpkQ05YdUtWcWJZSjZIKzZCRzRpTGgwbjlRWFd0VGRHWmcwCkJwcXk2d2FqWFVPN0QxTTdGV0RzK2xEMVhvM0I4UUhOYWFOWDVRTC9tRW1NK1ZwS05CRTRETDF3NUtqaDVGMXUKV2NUcWFWSi90L1dhR09pL3NWWHRENmo3Y255RmJ0OEpxN3hPSU1TWlpseU83aTM4T2plTTZTblovaU9Qa3lCRwpzb1RBOVhVTTdLUXVDOXdmcVY2YWp0WEZkM3lkajY2djMrVE1TYTlXT1JrT25nVmF5M29jRlZTeHc5OWNIbW9vCkx0OHZxOTdWQWdNQkFBRUNnZ0VBWjNRNFUySlRlSVNHK3NOa3BYMUNiY1M4L0FFbmdFYlVJMkdCNHY3NkphcEMKOE5jajBpdnZTNnI3OXFOV0hyQWZRNk9mUUZNelFuUkNhUHpDS0NzMXJZT1BWUE5FZVZtTm4vZFB6YXBpc0dYQQpjZll1bWFiOWJNTEc1L1k0cFB3cCtxamVtQ3lIUjBqblVBNUlYSHlCTUlMdFpXczBnd09BT2Y1TzJ3dTZHbW5CCnlDbk5FaFFjbmZUdnN0MVBINUlTeEZ0TjFtODlnQ2orNEhsdHkybUdOdUNEczZ2QnZyWmEyeEJRZUtTcTl1eXEKY1dsdDkrRzI2ZmZFRmw0UEwwWit2Y0wrVDZLd0I4NFl2ampUd25pNEMzb1BmV3lZeEpkSjF0U3NFK1lSM3NIOApEdmlobmMwak00c0Fxb3hrMk5UV2kvNHpUUkppdytRcGhyRnNJWFNvUVFLQmdRRFNRaU44djFEUFk4VzdJUkFXClp6MTN5SWdrRTJHZXUzc2tFMkhweDN6ZFdMZWdmazF6SW42eUtpN0RzVTZhV2xjQURpMm5mU3BVSCtZQ2M0dVkKcmlnZ0xFMG9iZHJLVEFGdm1YYjA1WFRhR3FtcFE4TGxiWmxMTXVycXp0aG5BWmN3M2NySmxMb25kNkZISW9icAp3QU50K2p5a3pTaGs1T3MwSi9XajZiL3prUUtCZ1FEQzhxTkdndWVIbGw5QVNKeHJmM2hTeGJXWFhhTnJqMTJGCkEwNHlnNTFkbHg1QksweUVsZ3VZQ3RtZW5mZWgySWw5U1E3Yng4Z0Z5OWdWeDI5d1Z4eFp2b0xMaU16WU16RGMKWU5UTW9ETEtITjA3dHZ6ZDVza3piWFA3cFUwc25SMDV2RkJ2SHZtTHhWVlkvVHd6ZTdpZDVlNGtISTRWRmUxWQpnUDBYV0ZITkJRS0JnQTRXWGxoU1hUQzRCNXlGRjVYWXJ2YWltZlNJMCthVnV5ZHNvUWZQMU43anZkSGtCSDV0ClZqM0xzN3hxMmRCZnN5cU95S0pMTVpYWFdVcmF3UVNtem90eFRHNGtCaCs5dmU3alFtUWdKNWNoYURLdUZwWFcKcFFtenpLZVUya3owZjFQSDJIbHZISlhlWHhEc0VFd0RFSGZDNTJOSFY2aUM2ZnRobmdTd2VhcnhBb0dBRVd0Ywo2NUFHNERhdmpDN3d5eW80dGl5MGJUSVF5Q3VuVDV0Y0FXZUJTRHVZbUhvbC9ETHNGa25oNkNwMVZpRGpLQzYvCkJTUjAydys3M3paUzN0YnAwWnNVVk51RWNrMGdzSkIyYzFKZE4zSWMwcGtuUHl6QURiaGFCTUpnZ3Z3SEFJR3oKTGpxMlVhYndXV05IWGRKUVRNdWUyOXN4VnZEK3BFbmlVNU93dTRFQ2dZQmptRkRMZEZuT0VtOGlOZEFNVFM0SQpXM0VnckgvQVhsZ0dIOTVJVElKZCtuTmM3QUJQcktYUGEzTTlPRytCTUlpVlJZZ2JMVitXWU1hbHUvZWFJeWFXClR3TGpxaW5nak8rQXhKWFJkT1M3NGpOamFSWGtmM0RWQ1F3WUd0T0xqdE1qRVhmTWdORnViQXZ4TFVOUmZFblIKa09nR0pWQkgwSyttcVVaNVFiVWUwZz09Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
---
apiVersion: cluster.x-k8s.io/v1alpha3
apiVersion: cluster.x-k8s.io/v1alpha4
kind: Cluster
metadata:
name: target-cluster
@ -22,15 +22,15 @@ spec:
cidrBlocks: ["192.168.0.0/18"]
serviceDomain: "cluster.local"
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
kind: Metal3Cluster
name: target-cluster
controlPlaneRef:
kind: KubeadmControlPlane
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
apiVersion: controlplane.cluster.x-k8s.io/v1alpha4
name: cluster-controlplane
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
kind: Metal3Cluster
metadata:
name: target-cluster

View File

@ -1,14 +1,15 @@
kind: KubeadmControlPlane
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
apiVersion: controlplane.cluster.x-k8s.io/v1alpha4
metadata:
name: cluster-controlplane
spec:
replicas: 1
version: v1.18.6
infrastructureTemplate:
kind: Metal3MachineTemplate
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
name: cluster-controlplane
version: v1.19.14
machineTemplate:
infrastructureRef:
kind: Metal3MachineTemplate
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
name: cluster-controlplane
kubeadmConfigSpec:
clusterConfiguration:
imageRepository: k8s.gcr.io
@ -21,7 +22,6 @@ spec:
service-cluster-ip-range: 10.0.0.0/20
service-node-port-range: 80-32767
enable-admission-plugins: NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,NodeRestriction
feature-gates: PodShareProcessNamespace=true
tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
tls-min-version: VersionTLS12
v: "2"
@ -93,7 +93,7 @@ spec:
- REPLACE_HOST_SSH_KEY
sudo: ALL=(ALL) NOPASSWD:ALL
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
kind: Metal3MachineTemplate
metadata:
name: cluster-controlplane
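
Reassembled from the hunk above, the resulting v1alpha4 control plane spec nests the infrastructure reference under machineTemplate:

kind: KubeadmControlPlane
apiVersion: controlplane.cluster.x-k8s.io/v1alpha4
metadata:
  name: cluster-controlplane
spec:
  replicas: 1
  version: v1.19.14
  machineTemplate:
    infrastructureRef:
      kind: Metal3MachineTemplate
      apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
      name: cluster-controlplane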

View File

@ -16,6 +16,7 @@ set -xe
export TIMEOUT=${TIMEOUT:-3600}
export CONDITION=${CONDITION:-"controlPlaneReady"}
export CHECK=${CHECK:-"true"}
end=$(($(date +%s) + $TIMEOUT))
echo "Waiting $TIMEOUT seconds for cluster to reach $CONDITION condition" 1>&2
@ -25,7 +26,7 @@ while true; do
--request-timeout 20s \
--context $KCTL_CONTEXT \
get -f $RENDERED_BUNDLE_PATH \
-o jsonpath={.status.$CONDITION})" == "true" ]
-o jsonpath={.status.$CONDITION})" == $CHECK ]
then
echo "Getting information about cluster" 1>&2
kubectl \

View File

@ -1,4 +1,4 @@
apiVersion: cluster.x-k8s.io/v1alpha3
apiVersion: cluster.x-k8s.io/v1alpha4
kind: MachineDeployment
metadata:
name: worker-1
@ -16,14 +16,14 @@ spec:
cluster.x-k8s.io/cluster-name: target-cluster
spec:
clusterName: target-cluster
version: v1.18.3
version: v1.19.14
bootstrap:
configRef:
name: worker-1
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4
kind: KubeadmConfigTemplate
infrastructureRef:
name: worker-1
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: Metal3MachineTemplate
---

View File

@ -1,4 +1,4 @@
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: Metal3MachineTemplate
metadata:
name: worker-1

View File

@ -286,7 +286,7 @@ spec:
- /bin/bash
- -c
- /usr/bin/local/entrypoint.sh 1>&2
image: quay.io/airshipit/image-builder:latest-ubuntu_focal
image: quay.io/airshipit/image-builder:k8s-1.19-latest-ubuntu_focal
mounts:
- type: bind
src: /srv/images
@ -459,7 +459,7 @@ spec:
hostNetwork: true
envVars:
- RESOURCE_GROUP_FILTER=cluster.x-k8s.io
- RESOURCE_VERSION_FILTER=v1alpha3
- RESOURCE_VERSION_FILTER=v1alpha4
- RESOURCE_KIND_FILTER=Cluster
configRef:
kind: ConfigMap
@ -475,9 +475,10 @@ spec:
hostNetwork: true
envVars:
- RESOURCE_GROUP_FILTER=cluster.x-k8s.io
- RESOURCE_VERSION_FILTER=v1alpha3
- RESOURCE_VERSION_FILTER=v1alpha4
- RESOURCE_KIND_FILTER=Cluster
- CONDITION=controlPlaneInitialized
- CONDITION=conditions[?(@.type=="ControlPlaneInitialized")].status
- CHECK=True
configRef:
kind: ConfigMap
name: kubectl-wait-cluster
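
In v1alpha4 the cluster's control-plane readiness is reported as a condition rather than the old top-level controlPlaneInitialized status field, which is why CONDITION switches to a JSONPath filter and CHECK compares against "True". An illustrative status that the filter matches:

status:
  conditions:
    - type: ControlPlaneInitialized
      status: "True"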
@ -596,4 +597,4 @@ spec:
configRef:
kind: ConfigMap
name: merge-kubeconfig
apiVersion: v1
apiVersion: v1

View File

@ -24,3 +24,4 @@ status:
hardwareProfile: ""
operationalStatus: ""
poweredOn: false
errorCount: 0

View File

@ -1,9 +1,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../../../function/baremetal-operator
- ../../../../function/baremetal-operator/v0.5.0
- ../../../../function/hwcc
- ../../../../function/clusterctl
- ../../../../function/ironic-deployment/v0.5.0
commonLabels:
airshipit.org/stage: initinfra

View File

@ -1,4 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../../../../function/baremetal-operator/replacements
- ../../../../../function/baremetal-operator/v0.5.0/replacements
- ../../../../../function/ironic-deployment/v0.5.0/replacements

View File

@ -144,4 +144,4 @@ validation:
- VariableCatalogue
crdList:
- airshipctl/manifests/function/airshipctl-schemas
- airshipctl/manifests/global/crd/baremetal-operator
- airshipctl/manifests/function/baremetal-operator/v0.5.0/upstream/crd

View File

@ -1,10 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../../../function/baremetal-operator
- ../../../../function/baremetal-operator/v0.5.0
- ../../../../function/hwcc
- ../../../../function/clusterctl
- ../../../../composite/flux-helm
- ../../../../function/ironic-deployment/v0.5.0
commonLabels:
airshipit.org/stage: initinfra

View File

@ -1,5 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../../../../function/baremetal-operator/replacements
- ../../../../../function/baremetal-operator/v0.5.0/replacements
- ../../../../../composite/flux-helm/replacements
- ../../../../../function/ironic-deployment/v0.5.0/replacements

View File

@ -73,7 +73,7 @@ type ImageURLSpec struct {
// CAPIImageProperties defines the spec for CAPI images
type CAPIImageProperties struct {
Manager ImageURLSpec `json:"manager"`
AuthProxy ImageURLSpec `json:"auth_proxy"`
AuthProxy ImageURLSpec `json:"auth_proxy,omitempty"`
IPAMManager ImageURLSpec `json:"ipam-manager,omitempty"`
}
@ -143,9 +143,9 @@ type VersionsCatalogueSpec struct {
// capi_images defines collections of images used by cluster API.
// The name of each key in this section should correspond to the airshipctl
// function in which the images will be used, such as "capm3". Each capi_image
// object must have a "manager" and "auth_proxy" object, each of which must have
// "repository" and "tag" properties defined. capi_images may also include an
// optional "ipam-manager" object, which must also have "repository" and "tag"
// object must have a "manager" object, which must have "repository" and "tag"
// properties defined. capi_images may also include an optional "ipam-manager"
// or "auth_proxy" object, which must also have "repository" and "tag"
// properties defined.
CAPIImages CAPIImageSpec `json:"capi_images,omitempty"`
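
With auth_proxy now omitempty, a catalogue entry that carries only a manager image, such as cacpk above, unmarshals cleanly; a minimal sketch:

capi_images:
  cacpk:
    manager:
      repository: gcr.io/k8s-staging-cluster-api
      tag: v0.4.2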