#!/bin/sh

step="calico-service"
printf "Starting to run %s\n" "${step}"

# heat-params provides the cluster parameters used below, including
# NETWORK_DRIVER, CONTAINER_INFRA_PREFIX, CALICO_TAG and CALICO_IPV4POOL.
. /etc/sysconfig/heat-params

if [ "$NETWORK_DRIVER" = "calico" ]; then
    _prefix=${CONTAINER_INFRA_PREFIX:-docker.io/calico/}
    CALICO_DEPLOY=/srv/magnum/kubernetes/manifests/calico-deploy.yaml

    [ -f "${CALICO_DEPLOY}" ] || {
        echo "Writing File: ${CALICO_DEPLOY}"
        mkdir -p "$(dirname "${CALICO_DEPLOY}")"
        cat << EOF > "${CALICO_DEPLOY}"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: magnum:podsecuritypolicy:calico
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: magnum:podsecuritypolicy:privileged
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system
---
# Calico Version v3.3.6
# https://docs.projectcalico.org/v3.3/releases#v3.3.6
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-node
rules:
  - apiGroups: [""]
    resources:
      - namespaces
      - serviceaccounts
    verbs:
      - get
      - list
      - watch
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - patch
  - apiGroups: [""]
    resources:
      - pods
    verbs:
      - get
      - list
      - watch
  - apiGroups: [""]
    resources:
      - services
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - update
      - watch
  - apiGroups: ["extensions"]
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - felixconfigurations
      - bgppeers
      - globalbgpconfigs
      - bgpconfigurations
      - ippools
      - globalnetworkpolicies
      - globalnetworksets
      - networkpolicies
      - clusterinformations
      - hostendpoints
    verbs:
      - create
      - get
      - list
      - update
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system
---
# Calico Version v3.3.6
# https://docs.projectcalico.org/v3.3/releases#v3.3.6
# This manifest includes the following component versions:
#   calico/node:v3.3.6
#   calico/cni:v3.3.6

# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas
  # below.  We recommend using Typha if you have more than 50 nodes.  Above 100 nodes it is
  # essential.
  typha_service_name: "none"

  # Configure the Calico backend to use.
  calico_backend: "bird"

  # Configure the MTU to use
  veth_mtu: "1440"

  # The CNI network configuration to install on each node.  The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
          "mtu": __CNI_MTU__,
          "ipam": {
            "type": "host-local",
            "subnet": "usePodCidr"
          },
          "policy": {
            "type": "k8s"
          },
          "kubernetes": {
            "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        }
      ]
    }

---
# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.
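#
# Typha is disabled in this file: typha_service_name is "none" in the
# calico-config ConfigMap above and the Deployment below has replicas: 0.
# To enable it, both values have to change together, e.g.
# typha_service_name: "calico-typha" and replicas: 3.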
apiVersion: v1
kind: Service
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  ports:
    - port: 5473
      protocol: TCP
      targetPort: calico-typha
      name: calico-typha
  selector:
    k8s-app: calico-typha

---
# This manifest creates a Deployment of Typha to back the above service.
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  # Number of Typha replicas.  To enable Typha, set this to a non-zero value *and* set the
  # typha_service_name variable in the calico-config ConfigMap above.
  #
  # We recommend using Typha if you have more than 50 nodes.  Above 100 nodes it is essential
  # (when using the Kubernetes datastore).  Use one replica for every 100-200 nodes.  In
  # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
  replicas: 0
  revisionHistoryLimit: 2
  template:
    metadata:
      labels:
        k8s-app: calico-typha
      annotations:
        # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical
        # add-on, ensuring it gets priority scheduling and that its resources are reserved
        # if it ever gets evicted.
        scheduler.alpha.kubernetes.io/critical-pod: ''
        cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
      # Since Calico can't network a pod until Typha is up, we need to run Typha itself
      # as a host-networked pod.
      serviceAccountName: calico-node
      containers:
      - image: "${_prefix}typha:${CALICO_TAG}"
        name: calico-typha
        ports:
        - containerPort: 5473
          name: calico-typha
          protocol: TCP
        env:
          # Enable "info" logging by default.  Can be set to "debug" to increase verbosity.
          - name: TYPHA_LOGSEVERITYSCREEN
            value: "info"
          # Disable logging to file and syslog since those don't make sense in Kubernetes.
          - name: TYPHA_LOGFILEPATH
            value: "none"
          - name: TYPHA_LOGSEVERITYSYS
            value: "none"
          # Monitor the Kubernetes API to find the number of running instances and rebalance
          # connections.
          - name: TYPHA_CONNECTIONREBALANCINGMODE
            value: "kubernetes"
          - name: TYPHA_DATASTORETYPE
            value: "kubernetes"
          - name: TYPHA_HEALTHENABLED
            value: "true"
          # Uncomment these lines to enable prometheus metrics.  Since Typha is host-networked,
          # this opens a port on the host, which may need to be secured.
          #- name: TYPHA_PROMETHEUSMETRICSENABLED
          #  value: "true"
          #- name: TYPHA_PROMETHEUSMETRICSPORT
          #  value: "9093"
        livenessProbe:
          exec:
            command:
            - calico-typha
            - check
            - liveness
          periodSeconds: 30
          initialDelaySeconds: 30
        readinessProbe:
          exec:
            command:
            - calico-typha
            - check
            - readiness
          periodSeconds: 10

---
# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: calico-typha

---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
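#
# calico-node needs hostNetwork and a privileged security context to program
# routes and iptables on the host; the RoleBinding at the top of this file
# grants its ServiceAccount the magnum:podsecuritypolicy:privileged
# ClusterRole so that its pods are admitted when pod security policies are
# enforced.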
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        # This, along with the CriticalAddonsOnly toleration below,
        # marks the pod as a critical add-on, ensuring it gets
        # priority scheduling and that its resources are reserved
        # if it ever gets evicted.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      containers:
        # Runs calico/node container on each Kubernetes node.  This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: "${_prefix}node:${CALICO_TAG}"
          env:
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Typha support: controlled by the ConfigMap.
            - name: FELIX_TYPHAK8SSERVICENAME
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: typha_service_name
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
            # Set based on the k8s node name.
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            # The default IPv4 pool to create on startup if none exists.  Pod IPs will be
            # chosen from this range.  Changing this value after installation will have
            # no effect.  This should fall within '--cluster-cidr'.
            - name: CALICO_IPV4POOL_CIDR
              value: ${CALICO_IPV4POOL}
            - name: CALICO_IPV4POOL_IPIP
              value: "off"
            - name: CALICO_IPV4POOL_NAT_OUTGOING
              value: "true"
            # Set noderef for node controller.
            - name: CALICO_K8S_NODE_REF
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Disable file logging so 'kubectl logs' works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
              host: localhost
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
              - /bin/calico-node
              - -bird-ready
              - -felix-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
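        # install-cni.sh (from the calico/cni image) fills the
        # __KUBERNETES_NODE_NAME__ and __CNI_MTU__ placeholders in
        # cni_network_config from the environment variables below, points
        # __KUBECONFIG_FILEPATH__ at the kubeconfig it writes itself, and
        # installs the result as CNI_CONF_NAME (10-calico.conflist) into
        # /host/etc/cni/net.d.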
        - name: install-cni
          image: "${_prefix}cni:${CALICO_TAG}"
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system

---
# Create all the CustomResourceDefinitions needed for
# Calico policy and networking mode.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: felixconfigurations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: FelixConfiguration
    plural: felixconfigurations
    singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: bgppeers.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BGPPeer
    plural: bgppeers
    singular: bgppeer
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: bgpconfigurations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BGPConfiguration
    plural: bgpconfigurations
    singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ippools.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPPool
    plural: ippools
    singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: hostendpoints.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: HostEndpoint
    plural: hostendpoints
    singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: clusterinformations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: ClusterInformation
    plural: clusterinformations
    singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworkpolicies.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkPolicy
    plural: globalnetworkpolicies
    singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworksets.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkSet
    plural: globalnetworksets
    singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: networkpolicies.crd.projectcalico.org
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  version: v1
  names:
    kind: NetworkPolicy
    plural: networkpolicies
    singular: networkpolicy
EOF
    }

    # Block until the local kube-apiserver answers on its insecure port
    # before applying the manifest.
    until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
    do
        echo "Waiting for Kubernetes API..."
        sleep 5
    done

    /usr/bin/kubectl apply -f "${CALICO_DEPLOY}" --namespace=kube-system
fi

printf "Finished running %s\n" "${step}"
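
# A quick way to verify the rollout once this step has completed; these are
# illustrative commands, not executed by this script:
#   /usr/bin/kubectl -n kube-system rollout status daemonset/calico-node
#   /usr/bin/kubectl -n kube-system get pods -l k8s-app=calico-node -o wide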