#!/bin/bash
#
# lib/kuryr
# Utilities for kuryr-kubernetes devstack

# ovs_bind_for_kubelet
# Description: Creates an OVS internal port so that baremetal kubelet will be
#              able to make both liveness and readiness http/tcp probes.
# Params:
#   project - Id or name of the project used for kuryr devstack
# Dependencies:
#   (none)
function ovs_bind_for_kubelet() {
    local port_id
    local port_mac
    local port_ips
    local port_subnets
    local prefix
    local project_id
    local security_group
    local ifname

    project_id="$1"
    security_group=$(openstack security group list \
        --project "$project_id" -f value | \
        awk '/default/ {print $1}')
    port_id=$(openstack port create \
        --device-owner compute:kuryr \
        --project "$project_id" \
        --security-group "$security_group" \
        --host "${HOSTNAME}" \
        --network "${KURYR_NEUTRON_DEFAULT_POD_NET}" \
        -f value -c id \
        kubelet-"${HOSTNAME}")
    # Linux interface names are limited to 15 characters, so truncate
    ifname="kubelet${port_id}"
    ifname="${ifname:0:14}"

    port_mac=$(openstack port show "$port_id" -c mac_address -f value)
    port_ips=($(openstack port show "$port_id" -f value -c fixed_ips | \
        awk -F"'" '{print $2}'))
    port_subnets=($(openstack port show "$port_id" -f value -c fixed_ips | \
        awk -F"'" '{print $4}'))

    # Plug the port into br-int as an OVS internal device, mirroring the
    # Neutron port's MAC and id so the agent wires it up
    sudo ovs-vsctl -- --may-exist add-port br-int "$ifname" \
        -- set Interface "$ifname" type=internal \
        -- set Interface "$ifname" external-ids:iface-status=active \
        -- set Interface "$ifname" external-ids:attached-mac="$port_mac" \
        -- set Interface "$ifname" external-ids:iface-id="$port_id"

    sudo ip link set dev "$ifname" address "$port_mac"
    sudo ip link set dev "$ifname" up
    for ((i=0; i < ${#port_ips[@]}; i++)); do
        prefix=$(openstack subnet show "${port_subnets[$i]}" \
            -c cidr -f value | \
            cut -f2 -d/)
        sudo ip addr add "${port_ips[$i]}/${prefix}" dev "$ifname"
    done
}

# get_container
# Description: Pulls a container image from Docker Hub if it is not already
#              present locally
# Params:
#   image_name - the name of the image on Docker Hub
#   version    - The version of the image to pull. Defaults to 'latest'
function get_container {
    local image
    local image_name
    local version
    image_name="$1"
    version="${2:-latest}"

    if [ -z "$image_name" ]; then
        return 0
    fi

    image="${image_name}:${version}"
    if [ -z "$(docker images -q "$image")" ]; then
        docker pull "$image"
    fi
}

# run_container
# Description: Runs a container and attaches devstack's logging to it
# Params:
#   name - Name of the container to run
#   args - arguments to run the container with
function run_container {
    # Runs a detached container and uses devstack's run process to monitor
    # its logs
    local name
    local docker_bin
    local args

    docker_bin=$(which docker)
    name="$1"
    shift
    args=("$@")

    "$docker_bin" create --name "$name" "${args[@]}"

    run_process "$name" \
        "$docker_bin start --attach $name"
}

# stop_container
# Description: stops a container and its devstack logging
# Params:
#   name - Name of the container to stop
function stop_container {
    local name
    name="$1"

    docker kill "$name"
    docker rm "$name"
    stop_process "$name"
}

# prepare_etcd_legacy
# Description: Creates datadir for etcd and fetches its container image
function prepare_etcd_legacy {
    # Make Etcd data directory
    sudo install -d -o "$STACK_USER" "$KURYR_ETCD_DATA_DIR"

    # Get Etcd container
    get_container "$KURYR_ETCD_IMAGE" "$KURYR_ETCD_VERSION"
}
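# Example (illustrative only): pairing the container helpers above to run a
# service under devstack's process supervision. The image name, tag and
# container name below are hypothetical.
#
#   get_container "nginx" "1.25"
#   run_container demo-nginx --net host nginx:1.25
#   ...
#   stop_container demo-nginx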
# run_etcd_legacy
# Description: Deprecated way of running etcd for Kubernetes (based on the
#              CoreOS upstream image)
function run_etcd_legacy {
    run_container etcd \
        --net host \
        --volume="${KURYR_ETCD_DATA_DIR}:/var/etcd:rw" \
        "${KURYR_ETCD_IMAGE}:${KURYR_ETCD_VERSION}" \
        /usr/local/bin/etcd \
        --name devstack \
        --data-dir /var/etcd/data \
        --initial-advertise-peer-urls "$KURYR_ETCD_ADVERTISE_PEER_URL" \
        --listen-peer-urls "$KURYR_ETCD_LISTEN_PEER_URL" \
        --listen-client-urls "$KURYR_ETCD_LISTEN_CLIENT_URL" \
        --advertise-client-urls "$KURYR_ETCD_ADVERTISE_CLIENT_URL" \
        --initial-cluster-token etcd-cluster-1 \
        --initial-cluster "devstack=$KURYR_ETCD_ADVERTISE_PEER_URL" \
        --initial-cluster-state new
}

# _allocation_range
# Description: Writes out tab separated usable ip range for a CIDR
# Params:
#   cidr             - The cidr to get the range for
#   gateway_position - Whether to reserve at 'beginning' or at 'end'
function _allocation_range {
    python - "$@" <<EOF
import sys

from netaddr import IPNetwork

# Reserve the gateway address at the requested end of the CIDR and print
# the remaining usable range, tab separated
n = IPNetwork(sys.argv[1])
if sys.argv[2] == 'beginning':
    print('%s\t%s' % (n[2], n[-2]))
else:
    print('%s\t%s' % (n[1], n[-3]))
EOF
}
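# Example (illustrative only): consuming _allocation_range's tab separated
# output. The CIDR below is hypothetical.
#
#   IFS=$'\t' read -r range_start range_end \
#       <<< "$(_allocation_range 10.0.0.0/24 beginning)"
#   # range_start=10.0.0.2, range_end=10.0.0.254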
# indent
# Description: Indents stdin by four spaces so that config file contents can
#              be nested under a YAML literal block scalar
function indent {
    sed 's/^/    /'
}

# generate_kuryr_configmap
# Description: Generates the kuryr-config ConfigMap holding both the
#              kuryr-controller and the kuryr-cni configuration files
# Params:
#   output_dir           - Directory to write the manifest to
#   controller_conf_path - Path to the kuryr-controller configuration
#   cni_conf_path        - Path to the kuryr-cni configuration. Defaults to
#                          the controller configuration path
function generate_kuryr_configmap() {
    local output_dir
    local controller_conf_path
    local cni_conf_path
    output_dir=$1
    controller_conf_path=${2:-""}
    cni_conf_path=${3:-$controller_conf_path}

    mkdir -p "$output_dir"
    rm -f "${output_dir}/config_map.yml"
    cat >> "${output_dir}/config_map.yml" << EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: kuryr-config
  namespace: kube-system
data:
  kuryr.conf: |
EOF

    cat "$controller_conf_path" | indent >> "${output_dir}/config_map.yml"

    # kuryr-cni config (different token_file location)
    # token_file = /etc/kuryr/token
    # ssl_ca_crt_file = /etc/kuryr/ca.crt
    # ssl_verify_server_crt = true
    cat >> "${output_dir}/config_map.yml" << EOF
  kuryr-cni.conf: |
EOF

    cat "$cni_conf_path" | indent >> "${output_dir}/config_map.yml"
}

# generate_kuryr_service_account
# Description: Generates the kuryr-controller ServiceAccount together with
#              its ClusterRole and ClusterRoleBinding
# Params:
#   output_dir - Directory to write the manifest to
function generate_kuryr_service_account() {
    local output_dir
    output_dir=$1
    mkdir -p "$output_dir"
    rm -f "${output_dir}/service_account.yml"
    cat >> "${output_dir}/service_account.yml" << EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kuryr-controller
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kuryr-controller
rules:
- apiGroups:
  - ""
  verbs: ["*"]
  resources:
  - deployments
  - endpoints
  - ingress
  - pods
  - policies
  - nodes
  - services
  - services/status
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kuryr-controller-global
subjects:
- kind: ServiceAccount
  name: kuryr-controller
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: kuryr-controller
  apiGroup: rbac.authorization.k8s.io
EOF
}

# generate_controller_deployment
# Description: Generates the kuryr-controller Deployment manifest
# Params:
#   output_dir      - Directory to write the manifest to
#   readiness_probe - Whether to add a readiness probe. Defaults to 'False'
function generate_controller_deployment() {
    local output_dir
    local readiness_probe
    output_dir=$1
    readiness_probe=${2:-False}
    mkdir -p "$output_dir"
    rm -f "${output_dir}/controller_deployment.yml"
    cat >> "${output_dir}/controller_deployment.yml" << EOF
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  labels:
    name: kuryr-controller
  name: kuryr-controller
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        name: kuryr-controller
      name: kuryr-controller
    spec:
      serviceAccountName: kuryr-controller
      automountServiceAccountToken: true
      hostNetwork: true
      containers:
      - image: kuryr/controller:latest
        imagePullPolicy: Never
        name: controller
        terminationMessagePath: "/dev/termination-log"
        volumeMounts:
        - name: config-volume
          mountPath: "/etc/kuryr/kuryr.conf"
          subPath: kuryr.conf
EOF

    # Add readiness probe if ports pool functionality is enabled. The
    # rationale behind it is to make the controller not ready until the
    # precreated ports are loaded into the pools.
    if [ "$readiness_probe" != "False" ]; then
        cat >> "${output_dir}/controller_deployment.yml" << EOF
        readinessProbe:
          exec:
            command:
            - cat
            - /tmp/pools_loaded
EOF
    fi

    cat >> "${output_dir}/controller_deployment.yml" << EOF
      volumes:
      - name: config-volume
        configMap:
          name: kuryr-config
      restartPolicy: Always
EOF
}

# generate_cni_daemon_set
# Description: Generates the kuryr-cni DaemonSet manifest
# Params:
#   output_dir   - Directory to write the manifest to
#   cni_bin_dir  - Host directory holding the CNI binaries. Defaults to
#                  /opt/cni/bin
#   cni_conf_dir - Host directory holding the CNI configuration. Defaults to
#                  /etc/cni/net.d
function generate_cni_daemon_set() {
    local output_dir
    local cni_bin_dir
    local cni_conf_dir
    output_dir=$1
    cni_bin_dir=${2:-/opt/cni/bin}
    cni_conf_dir=${3:-/etc/cni/net.d}
    mkdir -p "$output_dir"
    rm -f "${output_dir}/cni_ds.yml"
    cat >> "${output_dir}/cni_ds.yml" << EOF
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kuryr-cni-ds
  namespace: kube-system
  labels:
    tier: node
    app: kuryr
spec:
  template:
    metadata:
      labels:
        tier: node
        app: kuryr
    spec:
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      serviceAccountName: kuryr-controller
      containers:
      - name: kuryr-cni
        image: kuryr/cni:latest
        imagePullPolicy: Never
        command: [ "cni_ds_init" ]
        securityContext:
          privileged: true
        volumeMounts:
        - name: bin
          mountPath: /opt/cni/bin
        - name: net-conf
          mountPath: /etc/cni/net.d
        - name: config-volume
          mountPath: /tmp/kuryr/kuryr.conf
          subPath: kuryr-cni.conf
        - name: etc
          mountPath: /etc
      volumes:
      - name: bin
        hostPath:
          path: ${cni_bin_dir}
      - name: net-conf
        hostPath:
          path: ${cni_conf_dir}
      - name: config-volume
        configMap:
          name: kuryr-config
      - name: etc
        hostPath:
          path: /etc
EOF
}
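# Example (illustrative only): generating all of the above manifests into a
# scratch directory and applying them in one go. The output directory, config
# path and kubectl invocation below are hypothetical; the devstack plugin
# drives these functions with its own locations.
#
#   generate_kuryr_configmap /tmp/kuryr-yml /etc/kuryr/kuryr.conf
#   generate_kuryr_service_account /tmp/kuryr-yml
#   generate_controller_deployment /tmp/kuryr-yml True
#   generate_cni_daemon_set /tmp/kuryr-yml
#   kubectl apply -f /tmp/kuryr-yml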