From 2ab874a5be951a6eba4f9d4f54c106bc0c53d9b1 Mon Sep 17 00:00:00 2001 From: Spyros Trigazis Date: Tue, 28 Aug 2018 15:25:51 +0200 Subject: [PATCH] [k8s] Make flannel self-hosted Similar to calico, deploy flannel as a DS. Flannel can use the kubernetes API to store data, so it doesn't need to contact the etcd server directly anymore. This patch drops two relatively large files for flannel's config, flannel-config-service.sh and write-flannel-config.sh. All required config is in the manifests. Additional options to the controller manager: --allocate-node-cidrs=true and --cluster-cidr. Change-Id: I4f1129e155e2602299394b5866165260f4ea0df8 story: 2002751 task: 24870 --- doc/source/user/index.rst | 18 +- .../fragments/configure-kubernetes-master.sh | 13 +- .../fragments/configure-kubernetes-minion.sh | 41 +-- .../fragments/flannel-config-service.sh | 73 ----- .../kubernetes/fragments/flannel-service.sh | 309 ++++++++++++------ .../kubernetes/fragments/make-cert-client.sh | 2 - .../fragments/write-flannel-config.sh | 28 -- .../fragments/write-heat-params-master.yaml | 1 + .../fragments/write-heat-params.yaml | 1 - .../drivers/heat/k8s_fedora_template_def.py | 2 +- .../templates/kubecluster.yaml | 12 +- .../templates/kubemaster.yaml | 26 +- .../templates/kubeminion.yaml | 12 - .../unit/drivers/test_template_definition.py | 4 + .../notes/flannel-cni-4a5c9f574325761e.yaml | 8 + 15 files changed, 260 insertions(+), 290 deletions(-) delete mode 100644 magnum/drivers/common/templates/kubernetes/fragments/flannel-config-service.sh delete mode 100644 magnum/drivers/common/templates/kubernetes/fragments/write-flannel-config.sh create mode 100644 releasenotes/notes/flannel-cni-4a5c9f574325761e.yaml diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst index dd5127316c..8773745ca0 100644 --- a/doc/source/user/index.rst +++ b/doc/source/user/index.rst @@ -323,6 +323,8 @@ the table are linked to more details elsewhere in the user guide. 
+---------------------------------------+--------------------+---------------+ | `flannel_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ +| `flannel_cni_tag`_ | see below | see below | ++---------------------------------------+--------------------+---------------+ | `heat_container_agent_tag`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `kube_dashboard_enabled`_ | - true | true | @@ -1132,10 +1134,20 @@ _`etcd_tag` _`flannel_tag` This label allows users to select `a specific flannel version, - based on its container tag - `_. - If unset, the current Magnum version's a default flannel version. + based on its container tag: + Queens `_ + Rocky `_ + If unset, the default version will be used. For queens, v0.9.0 + For stein, v0.10.0-amd64 + +_`flannel_cni_tag` + This label allows users to select `a specific flannel_cni version, + based on its container tag. This container adds the cni plugins on + the host under /opt/cni/bin + `_. + If unset, the current Magnum version's default flannel version. 
+ For stein, v0.3.0 _`heat_container_agent_tag` This label allows users to select `a specific heat_container_agent diff --git a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh index 3a97b355c4..f2d686321c 100644 --- a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh +++ b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh @@ -43,12 +43,6 @@ users: as-user-extra: {} EOF - -if [ "$NETWORK_DRIVER" = "flannel" ]; then - atomic install --storage ostree --system --system-package=no \ - --name=flanneld ${_prefix}flannel:${FLANNEL_TAG} -fi - sed -i ' /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/ /^KUBE_MASTER=/ s|=.*|="--master=http://127.0.0.1:8080"| @@ -131,6 +125,8 @@ sed -i ' # Add controller manager args KUBE_CONTROLLER_MANAGER_ARGS="--leader-elect=true" KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --cluster-name=${CLUSTER_UUID}" +KUBE_CONTROLLER_MANAGER_ARGS="${KUBE_CONTROLLER_MANAGER_ARGS} --allocate-node-cidrs=true" +KUBE_CONTROLLER_MANAGER_ARGS="${KUBE_CONTROLLER_MANAGER_ARGS} --cluster-cidr=${PODS_NETWORK_CIDR}" KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS $KUBECONTROLLER_OPTIONS" if [ -n "${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --service-account-private-key-file=$CERT_DIR/service_account_private.key --root-ca-file=$CERT_DIR/ca.crt" @@ -172,9 +168,7 @@ if [ -n "${INSECURE_REGISTRY_URL}" ]; then echo "INSECURE_REGISTRY='--insecure-registry ${INSECURE_REGISTRY_URL}'" >> /etc/sysconfig/docker fi -if [ "$NETWORK_DRIVER" = "calico" ]; then - KUBELET_ARGS="${KUBELET_ARGS} --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" -fi +KUBELET_ARGS="${KUBELET_ARGS} --network-plugin=cni --cni-conf-dir=/etc/cni/net.d 
--cni-bin-dir=/opt/cni/bin" KUBELET_ARGS="${KUBELET_ARGS} --register-with-taints=CriticalAddonsOnly=True:NoSchedule,dedicated=master:NoSchedule" KUBELET_ARGS="${KUBELET_ARGS} --node-labels=node-role.kubernetes.io/master=\"\"" @@ -245,3 +239,4 @@ sed -i ' /^KUBELET_HOSTNAME=/ s/=.*/=""/ /^KUBELET_ARGS=/ s|=.*|="'"\$(/etc/kubernetes/get_require_kubeconfig.sh) ${KUBELET_ARGS}"'"| ' /etc/kubernetes/kubelet + diff --git a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh index 2e961cd57c..951f954d39 100644 --- a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh +++ b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh @@ -7,10 +7,10 @@ echo "configuring kubernetes (minion)" _prefix=${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/} _addtl_mounts='' -if [ "$NETWORK_DRIVER" = "calico" ]; then - mkdir -p /opt/cni - _addtl_mounts=',{"type":"bind","source":"/opt/cni","destination":"/opt/cni","options":["bind","rw","slave","mode=777"]}' +mkdir -p /opt/cni +_addtl_mounts=',{"type":"bind","source":"/opt/cni","destination":"/opt/cni","options":["bind","rw","slave","mode=777"]}' +if [ "$NETWORK_DRIVER" = "calico" ]; then if [ "`systemctl status NetworkManager.service | grep -o "Active: active"`" = "Active: active" ]; then CALICO_NM=/etc/NetworkManager/conf.d/calico.conf [ -f ${CALICO_NM} ] || { @@ -168,9 +168,7 @@ fi EOF chmod +x /etc/kubernetes/get_require_kubeconfig.sh -if [ "$NETWORK_DRIVER" = "calico" ]; then - KUBELET_ARGS="${KUBELET_ARGS} --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" -fi +KUBELET_ARGS="${KUBELET_ARGS} --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" sed -i ' /^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/ @@ -183,37 +181,6 @@ cat > /etc/kubernetes/proxy << EOF KUBE_PROXY_ARGS="--kubeconfig=${PROXY_KUBECONFIG} 
--cluster-cidr=${PODS_NETWORK_CIDR}" EOF -if [ "$NETWORK_DRIVER" = "flannel" ]; then - atomic install --storage ostree --system --system-package=no \ - --name=flanneld ${_prefix}flannel:${FLANNEL_TAG} - if [ "$TLS_DISABLED" = "True" ]; then - FLANNEL_OPTIONS="" - ETCD_CURL_OPTIONS="" - else - FLANNEL_CERT_DIR=/etc/flanneld/certs - FLANNEL_OPTIONS="-etcd-cafile $FLANNEL_CERT_DIR/ca.crt" - FLANNEL_OPTIONS="$FLANNEL_OPTIONS -etcd-certfile $FLANNEL_CERT_DIR/proxy.crt" - FLANNEL_OPTIONS="$FLANNEL_OPTIONS -etcd-keyfile $FLANNEL_CERT_DIR/proxy.key" - ETCD_CURL_OPTIONS="--cacert $FLANNEL_CERT_DIR/ca.crt --cert $FLANNEL_CERT_DIR/proxy.crt --key $FLANNEL_CERT_DIR/proxy.key" - fi - FLANNELD_CONFIG=/etc/sysconfig/flanneld - - cat >> $FLANNELD_CONFIG <> /etc/environment < $FLANNEL_CONFIG_BIN <&2 - exit 1 -fi - -if [ -z "$FLANNEL_ETCD_ENDPOINTS" ] || [ -z "$FLANNEL_ETCD_PREFIX" ]; then - echo "ERROR: missing required configuration" >&2 - exit 1 -fi - -echo "creating flanneld config in etcd" -while ! curl -sf -L $ETCD_CURL_OPTIONS \ - $FLANNEL_ETCD_ENDPOINTS/v2/keys${FLANNEL_ETCD_PREFIX}/config \ - -X PUT --data-urlencode value@${FLANNEL_JSON}; do - echo "waiting for etcd" - sleep 1 -done -EOF - -cat > $FLANNEL_CONFIG_SERVICE < ${FLANNEL_DEPLOY} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + namespace: kube-system +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + 
namespace: kube-system + labels: + tier: node + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "$FLANNEL_NETWORK_CIDR", + "Subnetlen": $FLANNEL_NETWORK_SUBNETLEN, + "Backend": { + "Type": "$FLANNEL_BACKEND" + } + } + magnum-install-cni.sh: | + #!/bin/sh + set -e -x; + if [ -w "/host/opt/cni/bin/" ]; then + cp /opt/cni/bin/* /host/opt/cni/bin/; + echo "Wrote CNI binaries to /host/opt/cni/bin/"; + fi; +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-amd64 + namespace: kube-system + labels: + tier: node + app: flannel +spec: + template: + metadata: + labels: + tier: node + app: flannel + spec: + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: amd64 + tolerations: + # Make sure flannel gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. 
+ - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: flannel + initContainers: + - name: install-cni-plugins + image: ${_prefix}flannel-cni:${FLANNEL_CNI_TAG} + command: + - sh + args: + - /etc/kube-flannel/magnum-install-cni.sh + volumeMounts: + - name: host-cni-bin + mountPath: /host/opt/cni/bin/ + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + - name: install-cni + image: ${_prefix}flannel:${FLANNEL_TAG} + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: ${_prefix}flannel:${FLANNEL_TAG} + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: host-cni-bin + hostPath: + path: /opt/cni/bin + - name: run + hostPath: + path: /run + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg +EOF + } + + if [ "$MASTER_INDEX" = "0" ]; then + + until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ] + do + echo "Waiting for Kubernetes API..." 
+ sleep 5 + done + fi + + /usr/bin/kubectl apply -f "${FLANNEL_DEPLOY}" --namespace=kube-system fi - -SYSTEMD_UNITS_DIR=/etc/systemd/system/ -FLANNEL_DOCKER_BRIDGE_BIN=/usr/local/bin/flannel-docker-bridge -FLANNEL_DOCKER_BRIDGE_SERVICE=/etc/systemd/system/flannel-docker-bridge.service -FLANNEL_IPTABLES_FORWARD_ACCEPT_SERVICE=flannel-iptables-forward-accept.service -DOCKER_FLANNEL_CONF=/etc/systemd/system/docker.service.d/flannel.conf -FLANNEL_DOCKER_BRIDGE_CONF=/etc/systemd/system/flanneld.service.d/flannel-docker-bridge.conf - -mkdir -p /etc/systemd/system/docker.service.d -mkdir -p /etc/systemd/system/flanneld.service.d - -cat >> $FLANNEL_DOCKER_BRIDGE_BIN <&2 - exit 1 -fi - -# NOTE(mnaser): Since Docker 1.13, it does not set the default forwarding -# policy to ACCEPT which will cause CNI networking to fail. -iptables -P FORWARD ACCEPT - -mkdir -p /run/flannel/ -cat > /run/flannel/docker <> $FLANNEL_DOCKER_BRIDGE_SERVICE <> $DOCKER_FLANNEL_CONF <> $FLANNEL_DOCKER_BRIDGE_CONF <> "${SYSTEMD_UNITS_DIR}${FLANNEL_IPTABLES_FORWARD_ACCEPT_SERVICE}" < /etc/sysconfig/flanneld < $FLANNEL_JSON <