Merge "Uprev the container networking images for k8s 1.22.5, 1.23.1"

This commit is contained in:
Zuul
2022-06-10 20:29:00 +00:00
committed by Gerrit Code Review
9 changed files with 5054 additions and 4 deletions

View File

@@ -0,0 +1,282 @@
---
# Multus Version v3.8
# Based on:
# https://raw.githubusercontent.com/intel/multus-cni/v3.8/images/
# multus-daemonset.yml
#
# This file is licensed under Apache 2.0. You can obtain a copy of the license at:
# https://github.com/k8snetworkplumbingwg/multus-cni/blob/v3.8/LICENSE
#
# The following modifications have been made:
#
# - The multus CNI configuration file has been explicitly specified to ensure
# it has a lower lexicographic order than the calico CNI configuration file.
# - The configMap has been modified to work with Calico rather than Flannel
# - The tuning plugin is used to update sysctl tcp_keepalive timers.
# - The portmap plugin is enabled to allow forwarding from one or more ports
# on the host to the container
# - The cnibin volume hostPath is made variable
# - An updateStrategy was added to the DaemonSet spec to allow controlled template
# updates of pods with "kubectl apply".
# - The attributes confDir, cniDir and binDir are added to the configmap of
# multus-cni-config.
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# CRD registering the NetworkAttachmentDefinition type (k8s.cni.cncf.io/v1),
# which Multus reads to attach pods to secondary networks.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: network-attachment-definitions.k8s.cni.cncf.io
spec:
  group: k8s.cni.cncf.io
  scope: Namespaced
  names:
    plural: network-attachment-definitions
    singular: network-attachment-definition
    kind: NetworkAttachmentDefinition
    shortNames:
      - net-attach-def
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing
            Working Group to express the intent for attaching pods to one or more logical or physical
            networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec'
          type: object
          properties:
            apiVersion:
              description: 'APIVersion defines the versioned schema of this represen
                tation of an object. Servers should convert recognized schemas to the
                latest internal value, and may reject unrecognized values. More info:
                https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
              type: string
            kind:
              description: 'Kind is a string value representing the REST resource this
                object represents. Servers may infer this from the endpoint the client
                submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
              type: string
            metadata:
              type: object
            spec:
              description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment'
              type: object
              properties:
                config:
                  description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
                  type: string
---
# RBAC for the multus service account: full access to the CNI CRDs, read/update
# of pod status (to record network-status annotations), and event creation.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: multus
rules:
  - apiGroups: ["k8s.cni.cncf.io"]
    resources:
      - '*'
    verbs:
      - '*'
  - apiGroups:
      - ""
    resources:
      - pods
      - pods/status
    verbs:
      - get
      - update
  - apiGroups:
      - ""
      - events.k8s.io
    resources:
      - events
    verbs:
      - create
      - patch
      - update
---
# Binds the multus ClusterRole to the multus ServiceAccount in kube-system.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: multus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: multus
subjects:
  - kind: ServiceAccount
    name: multus
    namespace: kube-system
---
# Service account under which the multus daemonset pods run.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: multus
  namespace: kube-system
---
# Multus CNI configuration, mounted into the daemonset pods and installed on
# each host as /etc/cni/net.d/05-multus.conf (see the daemonset below).
kind: ConfigMap
apiVersion: v1
metadata:
  name: multus-cni-config.v1
  namespace: kube-system
  labels:
    tier: node
    app: multus
data:
  # NOTE: This deployment passes the rendered file to the multus entrypoint
  # explicitly via "--multus-conf-file=.../05-multus.conf" rather than using
  # "--multus-conf-file=auto". The "05-" prefix ensures the file sorts
  # lexicographically before the calico CNI configuration in /etc/cni/net.d/
  # on each node; otherwise it would not be selected by the kubelet.
  cni-conf.json: |
    {
      "name": "multus-cni-network",
      "type": "multus",
      "cniVersion": "0.3.1",
      "confDir": "/etc/cni/net.d",
      "cniDir": "/var/lib/cni/multus",
      "binDir": "/opt/cni/bin",
      "logFile": "/var/log/multus.log",
      "logLevel": "debug",
      "capabilities": {
        "portMappings": true
      },
      "delegates": [
        {
          "cniVersion": "0.3.1",
          "name": "chain",
          "plugins": [
            {
              "cniVersion": "0.3.1",
              "name": "k8s-pod-network",
              "type": "calico",
              "masterplugin": true,
              "log_level": "info",
              "datastore_type": "kubernetes",
              "nodename": "__KUBERNETES_NODE_NAME__",
              "mtu": 1500,
              "ipam": {
                "type": "calico-ipam",
                "assign_ipv4": "{{ "true" if cluster_network_ipv4 else "false" }}",
                "assign_ipv6": "{{ "true" if cluster_network_ipv6 else "false" }}"
              },
              "policy": {
                "type": "k8s"
              },
              "kubernetes": {
                "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
              }
            },
            {
              "name": "sysctl-tuning",
              "type": "tuning",
              "sysctl": {
                "net.ipv4.tcp_keepalive_intvl": "1",
                "net.ipv4.tcp_keepalive_probes": "5",
                "net.ipv4.tcp_keepalive_time": "5"
              }
            },
            {
              "type": "portmap",
              "snat": true,
              "capabilities": {
                "portMappings": true
              }
            }
          ]
        }
      ],
      "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
    }
---
# Multus daemonset: renders 05-multus.conf (substituting the node name) and
# runs the multus entrypoint against it on every schedulable node.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-multus-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: multus
    name: multus
spec:
  selector:
    matchLabels:
      name: multus
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        tier: node
        app: multus
        name: multus
    spec:
      hostNetwork: true
      nodeSelector:
        kubernetes.io/arch: amd64
      tolerations:
        - operator: Exists
          effect: NoSchedule
        - operator: Exists
          effect: NoExecute
      serviceAccountName: multus
      imagePullSecrets:
        - name: registry-local-secret
      containers:
        - name: kube-multus
          image: "{{ local_registry }}/{{ multus_img }}"
          env:
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          command:
            - /bin/bash
            - -cex
            - |
              #!/bin/bash
              sed "s|__KUBERNETES_NODE_NAME__|${KUBERNETES_NODE_NAME}|g" /tmp/multus-conf/05-multus.conf > /usr/src/multus-cni/images/05-multus.conf
              {% if cluster_network_ipv6 -%}
              sed -i 's#//\${KUBERNETES_SERVICE_HOST}#//\[\${KUBERNETES_SERVICE_HOST}\]#' /entrypoint.sh
              {% endif -%}
              /entrypoint.sh --multus-conf-file=/usr/src/multus-cni/images/05-multus.conf
          resources:
            requests:
              memory: "50Mi"
            limits:
              memory: "50Mi"
          securityContext:
            privileged: true
          volumeMounts:
            - name: cni
              mountPath: /host/etc/cni/net.d
            - name: cnibin
              mountPath: /host/opt/cni/bin
            - name: multus-cfg
              mountPath: /tmp/multus-conf
      terminationGracePeriodSeconds: 10
      volumes:
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: cnibin
          hostPath:
            path: {{ kubelet_cni_bin_dir }}
        - name: multus-cfg
          configMap:
            name: multus-cni-config.v1
            items:
              - key: cni-conf.json
                path: 05-multus.conf
---

View File

@@ -0,0 +1,76 @@
# SRIOV-CNI Release v2
# Based on:
# https://github.com/k8snetworkplumbingwg/sriov-cni/blob/v2.6.2/images/k8s-v1.16/sriov-cni-daemonset.yaml
#
# This file is licensed under Apache 2.0. You can obtain a copy of the license at:
# https://github.com/k8snetworkplumbingwg/sriov-cni/blob/v2.6.2/LICENSE
#
# The following modifications have been made:
#
# - The daemonset is modified to tolerate all NoSchedule taints
# - The cnibin volume hostPath is made variable
# - An updateStrategy was added to the DaemonSet spec to allow controlled template
# updates of pods with "kubectl apply".
# - The image is set to a stable starlingX version
# - The 'imagePullPolicy: Never' is omitted
# - For k8s 1.19, the matchLabels are the same as the k8s 1.18 labels to
# allow a rolling update to succeed.
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
# SRIOV CNI daemonset: installs the sriov CNI binary into the host CNI bin
# directory on every schedulable amd64 node.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-sriov-cni-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: sriov-cni
spec:
  selector:
    matchLabels:
      tier: node
      app: sriov-cni
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        name: sriov-cni
        tier: node
        app: sriov-cni
    spec:
      nodeSelector:
        kubernetes.io/arch: amd64
      tolerations:
        - operator: Exists
          effect: NoSchedule
      imagePullSecrets:
        - name: registry-local-secret
      containers:
        - name: kube-sriov-cni
          image: "{{ local_registry }}/{{ sriov_cni_img }}"
          securityContext:
            allowPrivilegeEscalation: false
            privileged: false
            readOnlyRootFilesystem: true
            capabilities:
              drop:
                - ALL
          resources:
            requests:
              memory: "50Mi"
            limits:
              memory: "50Mi"
          volumeMounts:
            - name: cnibin
              mountPath: /host/opt/cni/bin
      volumes:
        - name: cnibin
          hostPath:
            path: {{ kubelet_cni_bin_dir }}

View File

@@ -0,0 +1,110 @@
# SRIOV device CNI plugin version 3.4.0
# Based on:
# https://github.com/k8snetworkplumbingwg/sriov-network-device-plugin/blob/v3.4.0/deployments/k8s-v1.16/sriovdp-daemonset.yaml
#
# This file is licensed under Apache 2.0. You can obtain a copy of the license at:
# https://github.com/k8snetworkplumbingwg/sriov-network-device-plugin/blob/v3.4.0/LICENSE
#
# The following modifications have been made:
#
# - A nodeSelector of 'sriovdp' has been added to ensure the sriov device plugin
# pods only run on appropriately labelled nodes.
# - The config hostPath is explicitly set to 'File'
# - The daemonset is modified to tolerate all NoSchedule taints
# - An updateStrategy was added to the DaemonSet spec to allow controlled template
# updates of pods with "kubectl apply".
# - The image is set to a stable starlingX version
# - The default configMap is not used. Rather, a hostPath to the config.json file
# is used, as resources are populated and based on datanetwork names.
# - For k8s 1.19, the matchLabels are the same as the k8s 1.18 labels to
# allow a rolling update to succeed.
# - Set CPU requests to 0
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
# Service account under which the SRIOV device plugin daemonset pods run.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: sriov-device-plugin
  namespace: kube-system
---
# SRIOV device plugin daemonset: advertises SR-IOV VFs to the kubelet. Runs
# only on nodes labelled sriovdp=enabled.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-sriov-device-plugin-amd64
  namespace: kube-system
  labels:
    tier: node
    app: sriovdp
spec:
  selector:
    matchLabels:
      tier: node
      app: sriovdp
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        name: sriov-device-plugin
        tier: node
        app: sriovdp
    spec:
      hostNetwork: true
      nodeSelector:
        # kubernetes.io/arch replaces beta.kubernetes.io/arch, deprecated since
        # k8s 1.14; the multus and sriov-cni daemonsets in this change already
        # use the stable label, and kubelet sets both on 1.22/1.23 nodes.
        kubernetes.io/arch: amd64
        sriovdp: enabled
      tolerations:
        - operator: Exists
          effect: NoSchedule
      serviceAccountName: sriov-device-plugin
      imagePullSecrets:
        - name: registry-local-secret
      containers:
        - name: kube-sriovdp
          image: "{{ local_registry }}/{{ sriov_network_device_img }}"
          args:
            - --log-dir=sriovdp
            - --log-level=10
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: "0"
              memory: "40Mi"
            limits:
              cpu: 1
              memory: "200Mi"
          volumeMounts:
            - name: devicesock
              mountPath: /var/lib/kubelet/
              readOnly: false
            - name: log
              mountPath: /var/log
            - name: config
              mountPath: /etc/pcidp/config.json
              readOnly: true
            - name: device-info
              mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp
      volumes:
        - name: devicesock
          hostPath:
            path: /var/lib/kubelet/
        - name: log
          hostPath:
            path: /var/log
        - name: device-info
          hostPath:
            path: /var/run/k8s.cni.cncf.io/devinfo/dp
            type: DirectoryOrCreate
        - name: config
          hostPath:
            path: /etc/pcidp/config.json
            type: File

View File

@@ -0,0 +1,31 @@
---
# System images that are pre-pulled and pushed to local registry
n3000_opae_img: docker.io/starlingx/n3000-opae:stx.6.0-v1.0.1
tiller_img: ghcr.io/helm/tiller:v2.16.9
armada_img: quay.io/airshipit/armada:ddbdd7256c20f138737f6cbd772312f7a19f58b8-ubuntu_bionic
kubernetes_entrypoint_img: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
# Container networking images (Calico, Multus, SRIOV). The four calico_* tags
# share one release version and should be upreved together.
calico_cni_img: quay.io/calico/cni:v3.22.2
calico_node_img: quay.io/calico/node:v3.22.2
calico_kube_controllers_img: quay.io/calico/kube-controllers:v3.22.2
calico_flexvol_img: quay.io/calico/pod2daemon-flexvol:v3.22.2
multus_img: ghcr.io/k8snetworkplumbingwg/multus-cni:v3.8
sriov_cni_img: ghcr.io/k8snetworkplumbingwg/sriov-cni:v2.6.2
sriov_network_device_img: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:v3.4.0
# Nginx images
nginx_ingress_controller_img: k8s.gcr.io/ingress-nginx/controller:v1.1.1
nginx_kube_webhook_certgen_img: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1
default_backend_img: k8s.gcr.io/defaultbackend:1.4
# Cert-manager images (all five cert_manager_* tags share one release version)
cert_manager_acmesolver_img: quay.io/jetstack/cert-manager-acmesolver:v1.7.1
cert_manager_cainjector_img: quay.io/jetstack/cert-manager-cainjector:v1.7.1
cert_manager_controller_img: quay.io/jetstack/cert-manager-controller:v1.7.1
cert_manager_webhook_img: quay.io/jetstack/cert-manager-webhook:v1.7.1
cert_manager_ctl_img: quay.io/jetstack/cert-manager-ctl:v1.7.1
# Keep the snapshot-controller image in sync with the one provided at:
# cluster/addons/volumesnapshots/volume-snapshot-controller/volume-snapshot-controller-deployment.yaml
# in the kubernetes github repo
snapshot_controller_img: quay.io/k8scsi/snapshot-controller:v2.0.0-rc2
rvmc_img: docker.io/starlingx/rvmc:stx.5.0-v1.0.0
pause_img: k8s.gcr.io/pause:3.4.1
# Flux controllers (helm-controller and source-controller version separately)
flux_helm_controller_img: docker.io/fluxcd/helm-controller:v0.15.0
flux_source_controller_img: docker.io/fluxcd/source-controller:v0.20.1