# airshipctl/manifests/function/k8scontrol/controlplane.yaml
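# Cluster API KubeadmControlPlane: 3 control plane replicas running Kubernetes
# v1.21.2, backed by Metal3 bare metal machines.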
kind: KubeadmControlPlane
apiVersion: controlplane.cluster.x-k8s.io/v1alpha4
metadata:
  name: cluster-controlplane
spec:
  replicas: 3
  version: v1.21.2
  machineTemplate:
    infrastructureRef:
      kind: Metal3MachineTemplate
      apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
      name: cluster-controlplane
  kubeadmConfigSpec:
    clusterConfiguration:
      imageRepository: k8s.gcr.io
      apiServer:
        timeoutForControlPlane: 1000s
        extraArgs:
          allow-privileged: "true"
          kubelet-preferred-address-types: InternalIP,ExternalIP,Hostname
          authorization-mode: Node,RBAC
          service-cluster-ip-range: 10.0.0.0/20
          service-node-port-range: 80-32767
          enable-admission-plugins: NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,NodeRestriction
          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
          tls-min-version: VersionTLS12
          v: "2"
          requestheader-group-headers: X-Remote-Group
          requestheader-username-headers: X-Remote-User
          requestheader-allowed-names: front-proxy-client
      controllerManager:
        extraArgs:
          cluster-cidr: 192.168.16.0/20
          node-monitor-period: 5s
          node-monitor-grace-period: 20s
          pod-eviction-timeout: 60s
          terminated-pod-gc-threshold: "1000"
          bind-address: 127.0.0.1
          port: "0"
          use-service-account-credentials: "true"
          configure-cloud-routes: "false"
          enable-hostpath-provisioner: "true"
          v: "2"
      networking:
        dnsDomain: cluster.local
        podSubnet: 192.168.16.0/20
        serviceSubnet: 10.0.0.0/20
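    # systemd drop-in so containerd inherits the proxy settings; the
    # REPLACEMENT_* placeholders are expected to be substituted at deployment time.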
    files:
      - path: "/etc/systemd/system/containerd.service.d/http-proxy.conf"
        content: |
          [Service]
          Environment="HTTP_PROXY=REPLACEMENT_HTTP_PROXY"
          Environment="HTTPS_PROXY=REPLACEMENT_HTTPS_PROXY"
          Environment="NO_PROXY=REPLACEMENT_NO_PROXY"
    # Since we are using containerd as the default CRI, we do not need
    # any adjustments or settings for docker anymore. However, if for
    # some reason docker has to be present in the system, we need to
    # create an http-proxy.conf for it as well.
    # TODO: add download sources to the versions catalogue
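    # The sed invocation below enables the systemd cgroup driver (SystemdCgroup = true)
    # for the runc runtime in the generated containerd config, matching the kubelet's
    # cgroup-driver=systemd setting further down.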
    preKubeadmCommands:
      # Restart containerd to apply any proxy settings
      - export HOME=/root
      - mkdir -p /etc/containerd
      - containerd config default | sed -r -e '/\[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc\]$/a\ SystemdCgroup = true' | tee /etc/containerd/config.toml
      - systemctl daemon-reload
      - systemctl restart containerd
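    # Node names and labels below are rendered on each host from cloud-init
    # datasource metadata ({{ ds.meta_data.* }}).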
    initConfiguration:
      nodeRegistration:
        name: '{{ ds.meta_data.local_hostname }}'
        kubeletExtraArgs:
          node-labels: 'metal3.io/uuid={{ ds.meta_data.uuid }},node-type=controlplane'
          cgroup-driver: "systemd"
          container-runtime: remote
        criSocket: "unix:///run/containerd/containerd.sock"
    joinConfiguration:
      controlPlane: {}
      nodeRegistration:
        name: '{{ ds.meta_data.local_hostname }}'
        kubeletExtraArgs:
          node-labels: 'metal3.io/uuid={{ ds.meta_data.uuid }},node-type=controlplane'
          cgroup-driver: "systemd"
          container-runtime: remote
        criSocket: "unix:///run/containerd/containerd.sock"
    ntp:
      servers:
        - 0.pool.ntp.org
        - 1.pool.ntp.org
        - 2.pool.ntp.org
        - 3.pool.ntp.org
    users:
      - name: deployer
        sshAuthorizedKeys:
          - REPLACE_HOST_SSH_KEY
        sudo: ALL=(ALL) NOPASSWD:ALL
---
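# Metal3MachineTemplate: the infrastructure template referenced by
# machineTemplate.infrastructureRef in the KubeadmControlPlane above.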
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
kind: Metal3MachineTemplate
metadata:
  name: cluster-controlplane
spec:
  template:
    spec:
      image:
        # NOTE (dukov) this should be overridden on lower levels
        url: https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img
        checksum: 2c890254ecbd4e6b4931f864ef72b337
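      # Control plane machines are scheduled only onto BareMetalHosts carrying
      # the airshipit.org/k8s-role: controlplane-host label.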
      hostSelector:
        matchLabels:
          airshipit.org/k8s-role: controlplane-host