Add calico-node on k8s master node

By current design, pods under kube-system run on minion nodes only.
Since kubelet is not run on the master node, calico-node is not
running on the k8s master node either, and as a result kubectl proxy
does not work for accessing the dashboard. The Calico team has
confirmed that the calico-node container must be running on the
master node if users want to use kubectl proxy, see [1]. So the
solution is to enable kubelet on the master but keep other pods from
being scheduled there with taints/tolerations, sketched below.
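
In outline (the exact flags and manifest lines appear in the diff
below): the master kubelet registers the node with taints, and the
calico-node DaemonSet tolerates every taint, so it is the only pod
that still lands on the master:

    # Master kubelet: register the node, but repel ordinary pods
    # (flags elided except the taint registration).
    KUBELET_ARGS="... --register-with-taints=CriticalAddonsOnly=True:NoSchedule,dedicated=master:NoSchedule"

    # calico-node DaemonSet: tolerate all taints.
    tolerations:
      # Make sure calico/node gets scheduled on all nodes.
      - effect: NoSchedule
        operator: Exists
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoExecute
        operator: Exists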

Besides, this patch includes another fix for running Calico on
Fedora Atomic. Fedora Atomic uses NetworkManager, which manipulates
the routing table for interfaces in the default network namespace,
where Calico veth pairs are anchored for connections to containers.
This can interfere with the Calico agent's ability to route
correctly; see [2] for more information. The fix is to mark Calico's
cali*/tunl* interfaces as unmanaged, sketched below.
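
The NetworkManager part of the fix is a small keyfile drop-in,
written by the patch below; a quick sanity check is sketched here,
assuming the nmcli client is available on the host:

    # /etc/NetworkManager/conf.d/calico.conf
    [keyfile]
    unmanaged-devices=interface-name:cali*;interface-name:tunl*

    # After restarting NetworkManager, Calico's cali*/tunl* devices
    # should be reported as "unmanaged".
    systemctl restart NetworkManager
    nmcli device status | grep -E 'cali|tunl'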

[1] https://docs.projectcalico.org/v3.0/getting-started/kubernetes/installation/integration#about-the-calico-components
[2] https://docs.projectcalico.org/master/usage/troubleshooting/#configure-networkmanager

Closes-Bug: #1751978
Change-Id: Iacd964806a28b3ca6ba3e037c60060f0957d44aa
Feilong Wang 2018-02-27 16:56:31 +13:00
parent d9e590bdc6
commit 79c002ce7a
6 changed files with 156 additions and 36 deletions

@@ -107,11 +107,17 @@ spec:
         k8s-app: calico-node
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
-        scheduler.alpha.kubernetes.io/tolerations: |
-          [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
-           {"key":"CriticalAddonsOnly", "operator":"Exists"}]
     spec:
       hostNetwork: true
+      tolerations:
+        # Make sure calico/node gets scheduled on all nodes.
+        - effect: NoSchedule
+          operator: Exists
+        # Mark the pod as a critical add-on for rescheduling.
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - effect: NoExecute
+          operator: Exists
       serviceAccountName: calico-node
       # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
       # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.

@@ -5,6 +5,15 @@
 echo "configuring kubernetes (master)"
 
 _prefix=${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/}
+
+# TODO(flwang): We should revisit this part to figure out if it's possible to
+# only run the calico-node container as a systemd service before starting the
+# minion nodes.
+if [ "$NETWORK_DRIVER" = "calico" ]; then
+    mkdir -p /opt/cni
+    _addtl_mounts=',{"type":"bind","source":"/opt/cni","destination":"/opt/cni","options":["bind","rw","slave","mode=777"]}'
+    atomic install --storage ostree --system --set=ADDTL_MOUNTS=${_addtl_mounts} --system-package=no --name=kubelet ${_prefix}kubernetes-kubelet:${KUBE_TAG}
+fi
 atomic install --storage ostree --system --system-package=no --name=kube-apiserver ${_prefix}kubernetes-apiserver:${KUBE_TAG}
 atomic install --storage ostree --system --system-package=no --name=kube-controller-manager ${_prefix}kubernetes-controller-manager:${KUBE_TAG}
 atomic install --storage ostree --system --system-package=no --name=kube-scheduler ${_prefix}kubernetes-scheduler:${KUBE_TAG}
@@ -74,7 +83,7 @@ sed -i '
 sed -i '/^KUBE_SCHEDULER_ARGS=/ s/=.*/="--leader-elect=true"/' /etc/kubernetes/scheduler
 
 HOSTNAME_OVERRIDE=$(hostname --short | sed 's/\.novalocal//')
-KUBELET_ARGS="--register-node=true --register-schedulable=false --pod-manifest-path=/etc/kubernetes/manifests --hostname-override=${HOSTNAME_OVERRIDE}"
+KUBELET_ARGS="--register-node=true --register-schedulable=false --pod-manifest-path=/etc/kubernetes/manifests --cadvisor-port=0 --hostname-override=${HOSTNAME_OVERRIDE}"
 KUBELET_ARGS="${KUBELET_ARGS} --cluster_dns=${DNS_SERVICE_IP} --cluster_domain=${DNS_CLUSTER_DOMAIN}"
 KUBELET_ARGS="${KUBELET_ARGS} ${KUBELET_OPTIONS}"
@@ -84,3 +93,60 @@ sed -i 's/\-\-log\-driver\=journald//g' /etc/sysconfig/docker
 if [ -n "${INSECURE_REGISTRY_URL}" ]; then
     echo "INSECURE_REGISTRY='--insecure-registry ${INSECURE_REGISTRY_URL}'" >> /etc/sysconfig/docker
 fi
+
+if [ "$NETWORK_DRIVER" = "calico" ]; then
+    KUBELET_ARGS="${KUBELET_ARGS} --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --register-with-taints=CriticalAddonsOnly=True:NoSchedule,dedicated=master:NoSchedule"
+
+    KUBELET_KUBECONFIG=/etc/kubernetes/kubelet-config.yaml
+    HOSTNAME_OVERRIDE=$(hostname --short | sed 's/\.novalocal//')
+    cat << EOF >> ${KUBELET_KUBECONFIG}
+apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority: ${CERT_DIR}/ca.crt
+    server: http://127.0.0.1:8080
+  name: kubernetes
+contexts:
+- context:
+    cluster: kubernetes
+    user: system:node:${HOSTNAME_OVERRIDE}
+  name: default
+current-context: default
+kind: Config
+preferences: {}
+users:
+- name: system:node:${HOSTNAME_OVERRIDE}
+  user:
+    as-user-extra: {}
+    client-certificate: ${CERT_DIR}/server.crt
+    client-key: ${CERT_DIR}/server.key
+EOF
+
+    cat > /etc/kubernetes/get_require_kubeconfig.sh <<EOF
+#!/bin/bash
+
+KUBE_VERSION=\$(kubelet --version | awk '{print \$2}')
+min_version=v1.8.0
+if [[ "\${min_version}" != \$(echo -e "\${min_version}\n\${KUBE_VERSION}" | sort -s -t. -k 1,1 -k 2,2n -k 3,3n | head -n1) && "\${KUBE_VERSION}" != "devel" ]]; then
+    echo "--require-kubeconfig"
+fi
+EOF
+    chmod +x /etc/kubernetes/get_require_kubeconfig.sh
+
+    KUBELET_ARGS="${KUBELET_ARGS} --client-ca-file=${CERT_DIR}/ca.crt --tls-cert-file=${CERT_DIR}/kubelet.crt --tls-private-key-file=${CERT_DIR}/kubelet.key --kubeconfig ${KUBELET_KUBECONFIG}"
+
+    # specified cgroup driver
+    KUBELET_ARGS="${KUBELET_ARGS} --cgroup-driver=systemd"
+
+    if [ -z "${KUBE_NODE_IP}" ]; then
+        KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
+    fi
+
+    KUBELET_ARGS="${KUBELET_ARGS} --address=${KUBE_NODE_IP} --port=10250 --read-only-port=0 --anonymous-auth=false --authorization-mode=Webhook --authentication-token-webhook=true"
+
+    sed -i '
+        /^KUBELET_ADDRESS=/ s/=.*/="--address=${KUBE_NODE_IP}"/
+        /^KUBELET_HOSTNAME=/ s/=.*/=""/
+        /^KUBELET_ARGS=/ s|=.*|="'"\$(/etc/kubernetes/get_require_kubeconfig.sh) ${KUBELET_ARGS}"'"|
+    ' /etc/kubernetes/kubelet
+fi

@@ -10,6 +10,19 @@ _addtl_mounts=''
 if [ "$NETWORK_DRIVER" = "calico" ]; then
     mkdir -p /opt/cni
     _addtl_mounts=',{"type":"bind","source":"/opt/cni","destination":"/opt/cni","options":["bind","rw","slave","mode=777"]}'
+
+    if [ "`systemctl status NetworkManager.service | grep -o "Active: active"`" = "Active: active" ]; then
+        CALICO_NM=/etc/NetworkManager/conf.d/calico.conf
+        [ -f ${CALICO_NM} ] || {
+            echo "Writing File: $CALICO_NM"
+            mkdir -p $(dirname ${CALICO_NM})
+            cat << EOF > ${CALICO_NM}
+[keyfile]
+unmanaged-devices=interface-name:cali*;interface-name:tunl*
+EOF
+        }
+        systemctl restart NetworkManager
+    fi
 fi
 
 atomic install --storage ostree --system --system-package=no --set=ADDTL_MOUNTS=${_addtl_mounts} --name=kubelet ${_prefix}kubernetes-kubelet:${KUBE_TAG}

@@ -19,3 +19,9 @@ for service in etcd docker kube-apiserver kube-controller-manager kube-scheduler
     systemctl enable $service
     systemctl --no-block start $service
 done
+
+if [ "$NETWORK_DRIVER" = "calico" ]; then
+    echo "activating service kubelet"
+    systemctl enable kubelet
+    systemctl start kubelet
+fi

@@ -65,14 +65,16 @@ sans="${sans},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,D
 cert_dir=/etc/kubernetes/certs
 mkdir -p "$cert_dir"
 
 CA_CERT=$cert_dir/ca.crt
 
-SERVER_CERT=$cert_dir/server.crt
-SERVER_CSR=$cert_dir/server.csr
-SERVER_KEY=$cert_dir/server.key
-
-#Get a token by user credentials and trust
-auth_json=$(cat << EOF
+function generate_certificates {
+    _CERT=$cert_dir/${1}.crt
+    _CSR=$cert_dir/${1}.csr
+    _KEY=$cert_dir/${1}.key
+    _CONF=$2
+
+    #Get a token by user credentials and trust
+    auth_json=$(cat << EOF
 {
     "auth": {
         "identity": {
@@ -91,16 +93,35 @@ auth_json=$(cat << EOF
 EOF
 )
 
-content_type='Content-Type: application/json'
-url="$AUTH_URL/auth/tokens"
-USER_TOKEN=`curl $VERIFY_CA -s -i -X POST -H "$content_type" -d "$auth_json" $url \
-    | grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'`
-
-# Get CA certificate for this cluster
-curl $VERIFY_CA -X GET \
-    -H "X-Auth-Token: $USER_TOKEN" \
-    -H "OpenStack-API-Version: container-infra latest" \
-    $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CA_CERT}
+    content_type='Content-Type: application/json'
+    url="$AUTH_URL/auth/tokens"
+    USER_TOKEN=`curl $VERIFY_CA -s -i -X POST -H "$content_type" -d "$auth_json" $url \
+        | grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'`
+
+    # Get CA certificate for this cluster
+    curl $VERIFY_CA -X GET \
+        -H "X-Auth-Token: $USER_TOKEN" \
+        -H "OpenStack-API-Version: container-infra latest" \
+        $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CA_CERT}
+
+    # Generate server's private key and csr
+    openssl genrsa -out "${_KEY}" 4096
+    chmod 400 "${_KEY}"
+    openssl req -new -days 1000 \
+        -key "${_KEY}" \
+        -out "${_CSR}" \
+        -reqexts req_ext \
+        -config "${_CONF}"
+
+    # Send csr to Magnum to have it signed
+    csr_req=$(python -c "import json; fp = open('${_CSR}'); print json.dumps({'cluster_uuid': '$CLUSTER_UUID', 'csr': fp.read()}); fp.close()")
+    curl $VERIFY_CA -X POST \
+        -H "X-Auth-Token: $USER_TOKEN" \
+        -H "OpenStack-API-Version: container-infra latest" \
+        -H "Content-Type: application/json" \
+        -d "$csr_req" \
+        $MAGNUM_URL/certificates | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${_CERT}
+}
 
 # Create config for server's csr
 cat > ${cert_dir}/server.conf <<EOF
@@ -115,23 +136,28 @@ subjectAltName = ${sans}
 extendedKeyUsage = clientAuth,serverAuth
 EOF
 
-# Generate server's private key and csr
-openssl genrsa -out "${SERVER_KEY}" 4096
-chmod 400 "${SERVER_KEY}"
-openssl req -new -days 1000 \
-    -key "${SERVER_KEY}" \
-    -out "${SERVER_CSR}" \
-    -reqexts req_ext \
-    -config "${cert_dir}/server.conf"
+#Kubelet Certs
+INSTANCE_NAME=$(hostname --short | sed 's/\.novalocal//')
+cat > ${cert_dir}/kubelet.conf <<EOF
+[req]
+distinguished_name = req_distinguished_name
+req_extensions = req_ext
+prompt = no
+[req_distinguished_name]
+CN = system:node:${INSTANCE_NAME}
+O=system:nodes
+OU=OpenStack/Magnum
+C=US
+ST=TX
+L=Austin
+[req_ext]
+subjectAltName = ${sans}
+keyUsage=critical,digitalSignature,keyEncipherment
+extendedKeyUsage=clientAuth,serverAuth
+EOF
 
-# Send csr to Magnum to have it signed
-csr_req=$(python -c "import json; fp = open('${SERVER_CSR}'); print json.dumps({'cluster_uuid': '$CLUSTER_UUID', 'csr': fp.read()}); fp.close()")
-curl $VERIFY_CA -X POST \
-    -H "X-Auth-Token: $USER_TOKEN" \
-    -H "OpenStack-API-Version: container-infra latest" \
-    -H "Content-Type: application/json" \
-    -d "$csr_req" \
-    $MAGNUM_URL/certificates | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${SERVER_CERT}
+generate_certificates server ${cert_dir}/server.conf
+generate_certificates kubelet ${cert_dir}/kubelet.conf
 
 # Common certs and key are created for both etcd and kubernetes services.
 # Both etcd and kube user should have permission to access the certs and key.
@@ -140,6 +166,6 @@ usermod -a -G kube_etcd etcd
 usermod -a -G kube_etcd kube
 chmod 550 "${cert_dir}"
 chown -R kube:kube_etcd "${cert_dir}"
-chmod 440 $SERVER_KEY
+chmod 440 $cert_dir/server.key
 mkdir -p /etc/etcd/certs
 cp ${cert_dir}/* /etc/etcd/certs

@@ -527,6 +527,9 @@ resources:
         - protocol: tcp
           port_range_min: 6443
           port_range_max: 6443
+        - protocol: tcp
+          port_range_min: 10250
+          port_range_max: 10250
         - protocol: tcp
           port_range_min: 30000
           port_range_max: 32767