k8s: Fix kubelet, add RBAC and pass e2e tests

Because several small, interconnected patches are needed for the
fedora atomic driver, this patch bundles 4 smaller patches.

Patch 1:
k8s: Do not start kubelet and kube-proxy on master

Patch [1] missed removing kubelet and kube-proxy from
enable-services-master.sh; as a result, they are started on the master
if they exist in the image, or the script fails if they do not.

https://review.openstack.org/#/c/533593/
Closes-Bug: #1726482

Patch 2:
k8s: Set require-kubeconfig when needed

From kubernetes 1.8 [1] --require-kubeconfig is deprecated and
in kubernetes 1.9 it is removed.

Add --require-kubeconfig only for k8s <= 1.8.

[1] https://github.com/kubernetes/kubernetes/issues/36745

Closes-Bug: #1718926

https://review.openstack.org/#/c/534309/

Patch 3:
k8s_fedora: Add RBAC configuration

* Make certificates and kubeconfigs compatible
  with NodeAuthorizer [1].
* Add CoreDNS roles and rolebindings.
* Create the system:kube-apiserver-to-kubelet ClusterRole.
* Bind the system:kube-apiserver-to-kubelet ClusterRole to
  the kubernetes user.
* remove creation of kube-system namespaces, it is created
  by default
* update client cert generation in the conductor with
  kubernetes' requirements
* Add --insecure-bind-address=127.0.0.1 to work on
  multi-master too. The controller manager on each
  node needs to contact the apiserver (on the same node)
  on 127.0.0.1:8080

[1] https://kubernetes.io/docs/admin/authorization/node/

Closes-Bug: #1742420
Depends-On: If43c3d0a0d83c42ff1fceffe4bcc333b31dbdaab
https://review.openstack.org/#/c/527103/

Patch 4:
k8s_fedora: Update coredns config to pass e2e

To pass the e2e conformance tests, coredns needs to be configured
with the "pods verified" mode of its kubernetes plugin. Otherwise,
pods won't be resolvable [1].

[1] https://github.com/coredns/coredns/tree/master/plugin/kubernetes

https://review.openstack.org/#/c/528566/
Closes-Bug: #1738633

Change-Id: Ibd5245ca0f5a11e1d67a2514cebb2ffe8aa5e7de
This commit is contained in:
Spyros Trigazis 2018-01-15 11:16:02 +01:00 committed by Spyros Trigazis
parent 4c4d7db96c
commit 2329cb7fb4
19 changed files with 310 additions and 170 deletions

View File

@ -397,7 +397,12 @@ that specifies a few values.::
req_extensions = req_ext
prompt = no
[req_distinguished_name]
CN = Your Name
CN = admin
O = system:masters
OU=OpenStack/Magnum
C=US
ST=TX
L=Austin
[req_ext]
extendedKeyUsage = clientAuth
END

View File

@ -1749,6 +1749,25 @@ Signed Certificate
extendedKeyUsage = clientAuth
END
For RBAC enabled kubernetes clusters you need to use the name admin and
system:masters as Organization (O=)::
$ cat > client.conf << END
[req]
distinguished_name = req_distinguished_name
req_extensions = req_ext
prompt = no
[req_distinguished_name]
CN = admin
O = system:masters
OU=OpenStack/Magnum
C=US
ST=TX
L=Austin
[req_ext]
extendedKeyUsage = clientAuth
END
Once you have client.conf, you can run the openssl 'req' command to
generate the CSR::

View File

@ -46,20 +46,23 @@ def generate_ca_certificate(subject_name, encryption_password=None):
)
def generate_client_certificate(issuer_name, subject_name, ca_key,
def generate_client_certificate(issuer_name, subject_name,
organization_name, ca_key,
encryption_password=None,
ca_key_password=None):
"""Generate Client Certificate
:param issuer_name: issuer name
:param subject_name: subject name of client
:param organization_name: Organization name of client
:param ca_key: private key of CA
:param encryption_password: encryption passsword for private key
:param ca_key_password: private key password for given ca key
:returns: generated private key and certificate pair
"""
return _generate_certificate(issuer_name, subject_name,
_build_client_extentions(), ca_key=ca_key,
_build_client_extentions(),
organization_name, ca_key=ca_key,
encryption_password=encryption_password,
ca_key_password=ca_key_password)
@ -97,11 +100,14 @@ def _generate_self_signed_certificate(subject_name, extensions,
encryption_password=encryption_password)
def _generate_certificate(issuer_name, subject_name, extensions, ca_key=None,
def _generate_certificate(issuer_name, subject_name, extensions,
organization_name=None, ca_key=None,
encryption_password=None, ca_key_password=None):
if not isinstance(subject_name, six.text_type):
subject_name = six.text_type(subject_name.decode('utf-8'))
if organization_name and not isinstance(organization_name, six.text_type):
organization_name = six.text_type(organization_name.decode('utf-8'))
private_key = rsa.generate_private_key(
public_exponent=65537,
@ -111,9 +117,11 @@ def _generate_certificate(issuer_name, subject_name, extensions, ca_key=None,
# subject name is set as common name
csr = x509.CertificateSigningRequestBuilder()
csr = csr.subject_name(x509.Name([
x509.NameAttribute(x509.OID_COMMON_NAME, subject_name),
]))
name_attributes = [x509.NameAttribute(x509.OID_COMMON_NAME, subject_name)]
if organization_name:
name_attributes.append(x509.NameAttribute(x509.OID_ORGANIZATION_NAME,
organization_name))
csr = csr.subject_name(x509.Name(name_attributes))
for extention in extensions:
csr = csr.add_extension(extention.value, critical=extention.critical)

View File

@ -56,9 +56,18 @@ def _generate_client_cert(issuer_name, ca_cert, ca_password, context=None):
:returns: Magnum client cert uuid
"""
client_password = short_id.generate_id()
# TODO(strigazi): set subject name and organization per driver
# For RBAC kubernetes cluster we need the client to have:
# subject_name: admin
# organization_name system:masters
# Non kubernetes drivers are not using the certificates fields
# for authorization
subject_name = 'admin'
organization_name = 'system:masters'
client_cert = x509.generate_client_certificate(
issuer_name,
CONDUCTOR_CLIENT_NAME,
subject_name,
organization_name,
ca_cert['private_key'],
encryption_password=client_password,
ca_key_password=ca_password,

View File

@ -23,15 +23,15 @@ if [ "$TLS_DISABLED" == "True" ]; then
else
KUBE_API_ADDRESS="--bind-address=0.0.0.0 --secure-port=$KUBE_API_PORT"
# insecure port is used internaly
KUBE_API_ADDRESS="$KUBE_API_ADDRESS --insecure-port=8080"
KUBE_API_ARGS="$KUBE_API_ARGS --tls-cert-file=$CERT_DIR/server.crt"
KUBE_API_ADDRESS="$KUBE_API_ADDRESS --insecure-bind-address=127.0.0.1 --insecure-port=8080"
KUBE_API_ARGS="$KUBE_API_ARGS --authorization-mode=Node,RBAC --tls-cert-file=$CERT_DIR/server.crt"
KUBE_API_ARGS="$KUBE_API_ARGS --tls-private-key-file=$CERT_DIR/server.key"
KUBE_API_ARGS="$KUBE_API_ARGS --client-ca-file=$CERT_DIR/ca.crt"
fi
KUBE_ADMISSION_CONTROL=""
if [ -n "${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then
KUBE_ADMISSION_CONTROL="--admission-control=${ADMISSION_CONTROL_LIST}"
KUBE_ADMISSION_CONTROL="--admission-control=NodeRestriction,${ADMISSION_CONTROL_LIST}"
fi
if [ -n "$TRUST_ID" ]; then

View File

@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/sh -x
. /etc/sysconfig/heat-params
@ -11,13 +11,14 @@ atomic install --storage ostree --system --system-package=no --name=kube-proxy $
CERT_DIR=/etc/kubernetes/certs
PROTOCOL=https
FLANNEL_OPTIONS="-etcd-cafile $CERT_DIR/ca.crt \
-etcd-certfile $CERT_DIR/client.crt \
-etcd-keyfile $CERT_DIR/client.key"
-etcd-certfile $CERT_DIR/proxy.crt \
-etcd-keyfile $CERT_DIR/proxy.key"
ETCD_CURL_OPTIONS="--cacert $CERT_DIR/ca.crt \
--cert $CERT_DIR/client.crt --key $CERT_DIR/client.key"
--cert $CERT_DIR/proxy.crt --key $CERT_DIR/proxy.key"
ETCD_SERVER_IP=${ETCD_SERVER_IP:-$KUBE_MASTER_IP}
KUBE_PROTOCOL="https"
KUBECONFIG=/etc/kubernetes/kubeconfig.yaml
KUBELET_KUBECONFIG=/etc/kubernetes/kubelet-config.yaml
PROXY_KUBECONFIG=/etc/kubernetes/proxy-config.yaml
FLANNELD_CONFIG=/etc/sysconfig/flanneld
if [ "$TLS_DISABLED" = "True" ]; then
@ -35,35 +36,61 @@ EOF
KUBE_MASTER_URI="$KUBE_PROTOCOL://$KUBE_MASTER_IP:$KUBE_API_PORT"
cat << EOF >> ${KUBECONFIG}
HOSTNAME_OVERRIDE=$(hostname --short | sed 's/\.novalocal//')
cat << EOF >> ${KUBELET_KUBECONFIG}
apiVersion: v1
kind: Config
users:
- name: kubeclient
user:
client-certificate: ${CERT_DIR}/client.crt
client-key: ${CERT_DIR}/client.key
clusters:
- name: kubernetes
cluster:
server: ${KUBE_MASTER_URI}
- cluster:
certificate-authority: ${CERT_DIR}/ca.crt
server: ${KUBE_MASTER_URI}
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubeclient
name: service-account-context
current-context: service-account-context
user: system:node:${HOSTNAME_OVERRIDE}
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: system:node:${HOSTNAME_OVERRIDE}
user:
as-user-extra: {}
client-certificate: ${CERT_DIR}/kubelet.crt
client-key: ${CERT_DIR}/kubelet.key
EOF
cat << EOF >> ${PROXY_KUBECONFIG}
apiVersion: v1
clusters:
- cluster:
certificate-authority: ${CERT_DIR}/ca.crt
server: ${KUBE_MASTER_URI}
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kube-proxy
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kube-proxy
user:
as-user-extra: {}
client-certificate: ${CERT_DIR}/proxy.crt
client-key: ${CERT_DIR}/proxy.key
EOF
if [ "$TLS_DISABLED" = "True" ]; then
sed -i 's/^.*user:$//' ${KUBECONFIG}
sed -i 's/^.*client-certificate.*$//' ${KUBECONFIG}
sed -i 's/^.*client-key.*$//' ${KUBECONFIG}
sed -i 's/^.*certificate-authority.*$//' ${KUBECONFIG}
sed -i 's/^.*user:$//' ${KUBELET_KUBECONFIG}
sed -i 's/^.*client-certificate.*$//' ${KUBELET_KUBECONFIG}
sed -i 's/^.*client-key.*$//' ${KUBELET_KUBECONFIG}
sed -i 's/^.*certificate-authority.*$//' ${KUBELET_KUBECONFIG}
fi
chmod 0644 ${KUBECONFIG}
chmod 0644 ${KUBELET_KUBECONFIG}
chmod 0644 ${PROXY_KUBECONFIG}
sed -i '
/^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/
@ -77,8 +104,8 @@ sed -i '
# The hostname of the node is set to be the Nova name of the instance, and
# the option --hostname-override for kubelet uses the hostname to register the node.
# Using any other name will break the load balancer and cinder volume features.
HOSTNAME_OVERRIDE=$(hostname --short | sed 's/\.novalocal//')
KUBELET_ARGS="--pod-manifest-path=/etc/kubernetes/manifests --cadvisor-port=4194 --kubeconfig ${KUBECONFIG} --hostname-override=${HOSTNAME_OVERRIDE}"
mkdir -p /etc/kubernetes/manifests
KUBELET_ARGS="--pod-manifest-path=/etc/kubernetes/manifests --cadvisor-port=4194 --kubeconfig ${KUBELET_KUBECONFIG} --hostname-override=${HOSTNAME_OVERRIDE}"
KUBELET_ARGS="${KUBELET_ARGS} --cluster_dns=${DNS_SERVICE_IP} --cluster_domain=${DNS_CLUSTER_DOMAIN}"
if [ -n "$TRUST_ID" ]; then
@ -99,17 +126,28 @@ if [ -n "${INSECURE_REGISTRY_URL}" ]; then
fi
# specified cgroup driver
KUBELET_ARGS="${KUBELET_ARGS} --cgroup-driver=systemd"
KUBELET_ARGS="${KUBELET_ARGS} --client-ca-file=${CERT_DIR}/ca.crt --tls-cert-file=${CERT_DIR}/kubelet.crt --tls-private-key-file=${CERT_DIR}/kubelet.key --cgroup-driver=systemd"
cat > /etc/kubernetes/get_require_kubeconfig.sh <<EOF
#!/bin/bash
KUBE_VERSION=\$(kubelet --version | awk '{print \$2}')
min_version=v1.8.0
if [[ "\${min_version}" != \$(echo -e "\${min_version}\n\${KUBE_VERSION}" | sort -s -t. -k 1,1 -k 2,2n -k 3,3n | head -n1) && "\${KUBE_VERSION}" != "devel" ]]; then
echo "--require-kubeconfig"
fi
EOF
chmod +x /etc/kubernetes/get_require_kubeconfig.sh
sed -i '
/^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/
/^KUBELET_HOSTNAME=/ s/=.*/=""/
s/^KUBELET_API_SERVER=.*$//
/^KUBELET_ARGS=/ s|=.*|="'"${KUBELET_ARGS}"'"|
/^KUBELET_ARGS=/ s|=.*|="'"\$(/etc/kubernetes/get_require_kubeconfig.sh) ${KUBELET_ARGS}"'"|
' /etc/kubernetes/kubelet
sed -i '
/^KUBE_PROXY_ARGS=/ s|=.*|=--kubeconfig='"$KUBECONFIG"'|
/^KUBE_PROXY_ARGS=/ s|=.*|=--kubeconfig='"$PROXY_KUBECONFIG"'|
' /etc/kubernetes/proxy
if [ "$NETWORK_DRIVER" = "flannel" ]; then

View File

@ -9,6 +9,47 @@ CORE_DNS=/etc/kubernetes/manifests/kube-coredns.yaml
mkdir -p $(dirname ${CORE_DNS})
cat << EOF > ${CORE_DNS}
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
@ -19,7 +60,9 @@ data:
errors
log stdout
health
kubernetes ${DNS_CLUSTER_DOMAIN} ${PORTAL_NETWORK_CIDR}
kubernetes ${DNS_CLUSTER_DOMAIN} ${PORTAL_NETWORK_CIDR} {
pods verified
}
proxy . /etc/resolv.conf
cache 30
}
@ -31,7 +74,6 @@ metadata:
namespace: kube-system
labels:
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
@ -42,13 +84,16 @@ spec:
metadata:
labels:
k8s-app: coredns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
serviceAccountName: coredns
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
- name: coredns
image: ${_prefix}coredns:011
image: ${_prefix}coredns:1.0.1
imagePullPolicy: Always
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
@ -61,6 +106,9 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
@ -99,6 +147,9 @@ spec:
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
EOF
}

View File

@ -398,7 +398,7 @@ writeFile $KUBE_MON_BIN "$KUBE_MON_BIN_CONTENT"
# Write the monitoring service
KUBE_MON_SERVICE_CONTENT='''[Unit]
Requires=kubelet.service
Description=Enable Prometheus monitoring stack
[Service]
Type=oneshot

View File

@ -4,7 +4,7 @@
systemctl daemon-reload
echo "starting services"
for service in etcd docker kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy; do
for service in etcd docker kube-apiserver kube-controller-manager kube-scheduler; do
echo "activating service $service"
systemctl enable $service
systemctl --no-block start $service

View File

@ -0,0 +1,47 @@
#!/bin/sh -x
. /etc/sysconfig/heat-params
echo "Waiting for Kubernetes API..."
until curl --silent "http://127.0.0.1:8080/version"
do
sleep 5
done
cat <<EOF | kubectl apply --validate=false -f -
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:kube-apiserver-to-kubelet
rules:
- apiGroups:
- ""
resources:
- nodes/proxy
- nodes/stats
- nodes/log
- nodes/spec
- nodes/metrics
verbs:
- "*"
EOF
cat <<EOF | kubectl apply --validate=false -f -
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: system:kube-apiserver
namespace: ""
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-apiserver-to-kubelet
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubernetes
EOF

View File

@ -120,9 +120,7 @@ KUBE_DASH_SERVICE=/etc/systemd/system/kube-dash.service
mkdir -p $(dirname ${KUBE_DASH_SERVICE})
cat << EOF > ${KUBE_DASH_SERVICE}
[Unit]
After=kube-system-namespace.service
Requires=kubelet.service
Wants=kube-system-namespace.service
Description=Enable kubernetes dashboard
[Service]
Type=oneshot

View File

@ -1,71 +0,0 @@
#!/bin/sh
# this service required because docker will start only after cloud init was finished
# due service dependencies at Fedora Atomic (docker <- docker-storage-setup <- cloud-final)
. /etc/sysconfig/heat-params
KUBE_SYSTEM_JSON=/srv/kubernetes/kube-system-namespace.json
[ -f ${KUBE_SYSTEM_JSON} ] || {
echo "Writing File: $KUBE_SYSTEM_JSON"
mkdir -p $(dirname ${KUBE_SYSTEM_JSON})
cat << EOF > ${KUBE_SYSTEM_JSON}
{
"apiVersion": "v1",
"kind": "Namespace",
"metadata": {
"name": "kube-system"
}
}
EOF
}
KUBE_SYSTEM_BIN=/usr/local/bin/kube-system-namespace
[ -f ${KUBE_SYSTEM_BIN} ] || {
echo "Writing File: $KUBE_SYSTEM_BIN"
mkdir -p $(dirname ${KUBE_SYSTEM_BIN})
cat << EOF > ${KUBE_SYSTEM_BIN}
#!/bin/sh
until curl -sf "http://127.0.0.1:8080/healthz"
do
echo "Waiting for Kubernetes API..."
sleep 5
done
#check for existence of namespace
/usr/bin/kubectl get namespace kube-system
if [ "\$?" != "0" ]; then
/usr/bin/kubectl create -f /srv/kubernetes/kube-system-namespace.json
fi
EOF
}
KUBE_SYSTEM_SERVICE=/etc/systemd/system/kube-system-namespace.service
[ -f ${KUBE_SYSTEM_SERVICE} ] || {
echo "Writing File: $KUBE_SYSTEM_SERVICE"
mkdir -p $(dirname ${KUBE_SYSTEM_SERVICE})
cat << EOF > ${KUBE_SYSTEM_SERVICE}
[Unit]
After=kubelet.service
Requires=kubelet.service
[Service]
Type=oneshot
Environment=HOME=/root
EnvironmentFile=-/etc/kubernetes/config
ExecStart=${KUBE_SYSTEM_BIN}
[Install]
WantedBy=multi-user.target
EOF
}
chown root:root ${KUBE_SYSTEM_BIN}
chmod 0755 ${KUBE_SYSTEM_BIN}
chown root:root ${KUBE_SYSTEM_SERVICE}
chmod 0644 ${KUBE_SYSTEM_SERVICE}
systemctl enable kube-system-namespace
systemctl start --no-block kube-system-namespace

View File

@ -35,12 +35,14 @@ cert_dir=/etc/kubernetes/certs
mkdir -p "$cert_dir"
CA_CERT=$cert_dir/ca.crt
CLIENT_CERT=$cert_dir/client.crt
CLIENT_CSR=$cert_dir/client.csr
CLIENT_KEY=$cert_dir/client.key
#Get a token by user credentials and trust
auth_json=$(cat << EOF
function generate_certificates {
_CERT=$cert_dir/${1}.crt
_CSR=$cert_dir/${1}.csr
_KEY=$cert_dir/${1}.key
_CONF=$2
#Get a token by user credentials and trust
auth_json=$(cat << EOF
{
"auth": {
"identity": {
@ -59,52 +61,76 @@ auth_json=$(cat << EOF
EOF
)
content_type='Content-Type: application/json'
url="$AUTH_URL/auth/tokens"
USER_TOKEN=`curl $VERIFY_CA -s -i -X POST -H "$content_type" -d "$auth_json" $url \
| grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'`
content_type='Content-Type: application/json'
url="$AUTH_URL/auth/tokens"
USER_TOKEN=`curl $VERIFY_CA -s -i -X POST -H "$content_type" -d "$auth_json" $url \
| grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'`
# Get CA certificate for this cluster
curl $VERIFY_CA -X GET \
-H "X-Auth-Token: $USER_TOKEN" \
-H "OpenStack-API-Version: container-infra latest" \
$MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > $CA_CERT
# Get CA certificate for this cluster
curl $VERIFY_CA -X GET \
-H "X-Auth-Token: $USER_TOKEN" \
-H "OpenStack-API-Version: container-infra latest" \
$MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > $CA_CERT
# Create config for client's csr
cat > ${cert_dir}/client.conf <<EOF
# Generate client's private key and csr
openssl genrsa -out "${_KEY}" 4096
chmod 400 "${_KEY}"
openssl req -new -days 1000 \
-key "${_KEY}" \
-out "${_CSR}" \
-reqexts req_ext \
-config "${_CONF}"
# Send csr to Magnum to have it signed
csr_req=$(python -c "import json; fp = open('${_CSR}'); print json.dumps({'cluster_uuid': '$CLUSTER_UUID', 'csr': fp.read()}); fp.close()")
curl $VERIFY_CA -X POST \
-H "X-Auth-Token: $USER_TOKEN" \
-H "OpenStack-API-Version: container-infra latest" \
-H "Content-Type: application/json" \
-d "$csr_req" \
$MAGNUM_URL/certificates | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${_CERT}
}
#Kubelet Certs
INSTANCE_NAME=$(hostname --short | sed 's/\.novalocal//')
cat > ${cert_dir}/kubelet.conf <<EOF
[req]
distinguished_name = req_distinguished_name
req_extensions = req_ext
prompt = no
[req_distinguished_name]
CN = kubernetes.default.svc
CN = system:node:${INSTANCE_NAME}
O=system:nodes
OU=OpenStack/Magnum
C=US
ST=TX
L=Austin
[req_ext]
keyUsage=critical,digitalSignature,keyEncipherment
extendedKeyUsage=clientAuth
subjectAltName=dirName:kubelet,dirName:kubeproxy
[kubelet]
CN=kubelet
[kubeproxy]
CN=kube-proxy
EOF
# Generate client's private key and csr
openssl genrsa -out "${CLIENT_KEY}" 4096
chmod 400 "${CLIENT_KEY}"
openssl req -new -days 1000 \
-key "${CLIENT_KEY}" \
-out "${CLIENT_CSR}" \
-reqexts req_ext \
-config "${cert_dir}/client.conf"
#kube-proxy Certs
cat > ${cert_dir}/proxy.conf <<EOF
[req]
distinguished_name = req_distinguished_name
req_extensions = req_ext
prompt = no
[req_distinguished_name]
CN = system:kube-proxy
O=system:node-proxier
OU=OpenStack/Magnum
C=US
ST=TX
L=Austin
[req_ext]
keyUsage=critical,digitalSignature,keyEncipherment
extendedKeyUsage=clientAuth
EOF
# Send csr to Magnum to have it signed
csr_req=$(python -c "import json; fp = open('${CLIENT_CSR}'); print json.dumps({'cluster_uuid': '$CLUSTER_UUID', 'csr': fp.read()}); fp.close()")
curl $VERIFY_CA -X POST \
-H "X-Auth-Token: $USER_TOKEN" \
-H "OpenStack-API-Version: container-infra latest" \
-H "Content-Type: application/json" \
-d "$csr_req" \
$MAGNUM_URL/certificates | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CLIENT_CERT}
generate_certificates kubelet ${cert_dir}/kubelet.conf
generate_certificates proxy ${cert_dir}/proxy.conf
# Common certs and key are created for both etcd and kubernetes services.
# Both etcd and kube user should have permission to access the certs and key.
@ -113,4 +139,5 @@ usermod -a -G kube_etcd etcd
usermod -a -G kube_etcd kube
chmod 550 "${cert_dir}"
chown -R kube:kube_etcd "${cert_dir}"
chmod 440 $CLIENT_KEY
chmod 440 ${cert_dir}/kubelet.key
chmod 440 ${cert_dir}/proxy.key

View File

@ -109,7 +109,7 @@ distinguished_name = req_distinguished_name
req_extensions = req_ext
prompt = no
[req_distinguished_name]
CN = kubernetes.default.svc
CN = kubernetes
[req_ext]
subjectAltName = ${sans}
extendedKeyUsage = clientAuth,serverAuth

View File

@ -428,11 +428,11 @@ resources:
group: ungrouped
config: {get_file: ../../common/templates/kubernetes/fragments/network-service.sh}
kube_system_namespace_service:
kube_apiserver_to_kubelet_role:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: ../../common/templates/kubernetes/fragments/kube-system-namespace-service.sh}
config: {get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh}
kube_ui_service:
type: OS::Heat::SoftwareConfig
@ -487,7 +487,7 @@ resources:
- config: {get_resource: write_network_config}
- config: {get_resource: network_config_service}
- config: {get_resource: network_service}
- config: {get_resource: kube_system_namespace_service}
- config: {get_resource: kube_apiserver_to_kubelet_role}
- config: {get_resource: core_dns_service}
- config: {get_resource: kube_ui_service}
- config: {get_resource: start_container_agent}

View File

@ -391,11 +391,11 @@ resources:
group: ungrouped
config: {get_file: ../../common/templates/kubernetes/fragments/enable-kube-controller-manager-scheduler.sh}
kube_system_namespace_service:
kube_apiserver_to_kubelet_role:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: ../../common/templates/kubernetes/fragments/kube-system-namespace-service.sh}
config: {get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh}
kube_ui_service:
type: OS::Heat::SoftwareConfig
@ -450,7 +450,7 @@ resources:
- config: {get_resource: write_network_config}
- config: {get_resource: network_config_service}
- config: {get_resource: network_service}
- config: {get_resource: kube_system_namespace_service}
- config: {get_resource: kube_apiserver_to_kubelet_role}
- config: {get_resource: enable_kube_controller_manager_scheduler}
- config: {get_resource: enable_kube_proxy}
- config: {get_resource: kube_ui_service}

View File

@ -272,7 +272,12 @@ distinguished_name = req_distinguished_name
req_extensions = req_ext
prompt = no
[req_distinguished_name]
CN = Your Name
CN = admin
O = system:masters
OU=OpenStack/Magnum
C=US
ST=TX
L=Austin
[req_ext]
extendedKeyUsage = clientAuth
"""

View File

@ -32,6 +32,7 @@ class TestX509(base.BaseTestCase):
super(TestX509, self).setUp()
self.issuer_name = six.u("fake-issuer")
self.subject_name = six.u("fake-subject")
self.organization_name = six.u("fake-organization")
self.ca_encryption_password = six.b("fake-ca-password")
self.encryption_password = six.b("fake-password")
@ -59,6 +60,7 @@ class TestX509(base.BaseTestCase):
keypairs = operations.generate_client_certificate(
self.issuer_name,
self.subject_name,
self.organization_name,
ca['private_key'],
encryption_password=self.encryption_password,
ca_key_password=self.ca_encryption_password,

View File

@ -63,7 +63,8 @@ class CertManagerTestCase(base.BaseTestCase):
@mock.patch('magnum.common.x509.operations.generate_client_certificate')
@mock.patch('magnum.common.short_id.generate_id')
def test_generate_client_cert(self, mock_generate_id, mock_generate_cert):
expected_name = cert_manager.CONDUCTOR_CLIENT_NAME
expected_name = 'admin'
expected_organization_name = 'system:masters'
expected_ca_name = 'ca-name'
expected_password = 'password'
expected_ca_password = 'ca-password'
@ -88,6 +89,7 @@ class CertManagerTestCase(base.BaseTestCase):
mock_generate_cert.assert_called_once_with(
expected_ca_name,
expected_name,
expected_organization_name,
expected_ca_cert['private_key'],
encryption_password=expected_password,
ca_key_password=expected_ca_password,
@ -96,7 +98,7 @@ class CertManagerTestCase(base.BaseTestCase):
certificate=expected_cert['certificate'],
private_key=expected_cert['private_key'],
private_key_passphrase=expected_password,
name=expected_name,
name=cert_manager.CONDUCTOR_CLIENT_NAME,
context=None
)