Allow qinling to connect to k8s API with certificates

Until now, qinling has connected to the Kubernetes API server
insecurely, with kubectl proxy used for testing purposes. However, in
real production deployments it is not a good idea to let qinling
connect to the Kubernetes API server without any authentication or
authorization.

This commit adds support for qinling to connect to the Kubernetes API
server using X509 client certificates for authentication [1]. An
example file is also added so that users can grant qinling specific
access to the Kubernetes API using Kubernetes RBAC authorization [2].
With these, users can control qinling's access to the Kubernetes
API [3] and ensure qinling talks to the Kubernetes API over a secure
connection.
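
At a high level, the intended deployer workflow looks roughly like the
following (an illustrative sketch, not commands mandated by this
commit):

    # 1. Sign a client certificate for the "qinling" user with the
    #    cluster CA (the devstack plugin does this with cfssl).
    # 2. Grant that user access using the example RBAC file added here.
    kubectl create -f example/kubernetes/k8s_qinling_role.yaml
    # 3. Point qinling at the secured API server through the new
    #    [kubernetes] options (use_api_certificate, ssl_ca_cert,
    #    cert_file, key_file).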

The devstack plugin also sets up qinling to connect to the Kubernetes
API server using TLS certificates by default, which brings the devstack
deployment closer to a production-ready environment. For testing
purposes, users can set the QINLING_K8S_APISERVER_TLS variable to False
in devstack's local.conf.
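
For example, a minimal local.conf fragment that falls back to the
insecure kubectl proxy setup (assuming the qinling plugin is already
enabled):

    [[local|localrc]]
    QINLING_K8S_APISERVER_TLS=False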

Note: a HOWTO document will be added in a follow-up commit.

[1] https://kubernetes.io/docs/admin/authentication/#x509-client-certs
[2] https://kubernetes.io/docs/admin/authorization/rbac/
[3] https://kubernetes.io/docs/admin/accessing-the-api/

Change-Id: I532f131abbfc8ed90de398cc135e9b8248d2757a
Hunt Xu 2018-03-31 00:10:01 +08:00
parent 4475e69820
commit 76d01bb325
12 changed files with 165 additions and 49 deletions

@ -58,6 +58,26 @@ function mkdir_chown_stack {
}
function configure_k8s_certificates {
    pushd $QINLING_DIR

    mkdir_chown_stack "$QINLING_CONF_DIR"/pki/kubernetes

    curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /tmp/cfssl
    chmod +x /tmp/cfssl
    curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /tmp/cfssljson
    chmod +x /tmp/cfssljson

    sudo /tmp/cfssl gencert \
        -ca=/etc/kubernetes/pki/ca.crt \
        -ca-key=/etc/kubernetes/pki/ca.key \
        -config=example/kubernetes/cfssl-ca-config.json \
        -profile=client \
        example/kubernetes/cfssl-client-csr.json | /tmp/cfssljson -bare client

    # The command above outputs client-key.pem and client.pem
    mv client-key.pem "$QINLING_CONF_DIR"/pki/kubernetes/qinling.key
    mv client.pem "$QINLING_CONF_DIR"/pki/kubernetes/qinling.crt
    cp /etc/kubernetes/pki/ca.crt "$QINLING_CONF_DIR"/pki/kubernetes/ca.crt

    popd
}
function configure_qinling {
    mkdir_chown_stack "$QINLING_AUTH_CACHE_DIR"
    rm -f "$QINLING_AUTH_CACHE_DIR"/*
@ -89,6 +109,15 @@ function configure_qinling {
    # Configure the database.
    iniset $QINLING_CONF_FILE database connection `database_connection_url qinling`

    # Configure Kubernetes API server certificates for qinling if required.
    if [ "$QINLING_K8S_APISERVER_TLS" == "True" ]; then
        iniset $QINLING_CONF_FILE kubernetes kube_host https://$(hostname -f):6443
        configure_k8s_certificates
        sudo kubectl create -f $QINLING_DIR/example/kubernetes/k8s_qinling_role.yaml
    else
        iniset $QINLING_CONF_FILE kubernetes use_api_certificate False
    fi
}
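
If needed, the secure connection can be checked by hand with the files
the plugin just installed (a sketch using the default paths; not part
of this change):

    # Make an authenticated request to the API server with qinling's
    # client certificate.
    curl --cacert /etc/qinling/pki/kubernetes/ca.crt \
         --cert /etc/qinling/pki/kubernetes/qinling.crt \
         --key /etc/qinling/pki/kubernetes/qinling.key \
         https://$(hostname -f):6443/api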

@ -24,3 +24,5 @@ QINLING_AUTH_CACHE_DIR=${QINLING_AUTH_CACHE_DIR:-/var/cache/qinling}
QINLING_FUNCTION_STORAGE_DIR=${QINLING_FUNCTION_STORAGE_DIR:-/opt/qinling/funtion/packages}
QINLING_PYTHON_RUNTIME_IMAGE=${QINLING_PYTHON_RUNTIME_IMAGE:-openstackqinling/python-runtime}
QINLING_NODEJS_RUNTIME_IMAGE=${QINLING_NODEJS_RUNTIME_IMAGE:-openstackqinling/nodejs-runtime}
QINLING_K8S_APISERVER_TLS=${QINLING_K8S_APISERVER_TLS:-True}

@ -0,0 +1,17 @@
{
    "signing": {
        "default": {
            "expiry": "168h"
        },
        "profiles": {
            "client": {
                "expiry": "8760h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            }
        }
    }
}

@ -0,0 +1,7 @@
{
    "CN": "qinling",
    "key": {
        "algo": "rsa",
        "size": 2048
    }
}
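
The "client" profile above signs certificates that are valid for one
year (8760h), and the CN in this CSR becomes the Kubernetes username
that the RBAC objects below bind to. A quick sanity check on the
certificate produced by cfssljson (illustrative, not part of this
change):

    # The subject CN must match the User named in the role bindings,
    # i.e. "qinling".
    openssl x509 -in client.pem -noout -subject -dates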

@ -0,0 +1,74 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: qinling
rules:
  - apiGroups: [""]
    resources: ["nodes", "namespaces"]
    verbs: ["list"]
  - apiGroups: [""]
    resources: ["namespaces"]
    resourceNames: ["qinling"]
    verbs: ["create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: qinling
subjects:
  - kind: User
    name: qinling
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: qinling
  apiGroup: rbac.authorization.k8s.io
---
# The qinling namespace should be created for the role and rolebinding.
apiVersion: v1
kind: Namespace
metadata:
  name: qinling
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: qinling
  namespace: qinling
rules:
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["list", "get", "create", "delete"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["list", "get", "create", "patch", "delete", "deletecollection"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get"]
  - apiGroups: ["extensions"]
    resources: ["deployments"]
    verbs: ["get", "create", "patch", "deletecollection"]
  - apiGroups: ["extensions"]
    resources: ["deployments/rollback"]
    verbs: ["create"]
  - apiGroups: ["extensions"]
    resources: ["deployments/status"]
    verbs: ["get"]
  - apiGroups: ["extensions"]
    resources: ["replicasets"]
    verbs: ["deletecollection"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: qinling
  namespace: qinling
subjects:
  - kind: User
    name: qinling
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: qinling
  apiGroup: rbac.authorization.k8s.io
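
Once this file is applied (the devstack plugin does so with
"kubectl create -f"), the grants can be verified with kubectl's
impersonation-based checks (illustrative commands, not part of this
change):

    kubectl auth can-i create pods --as qinling --namespace qinling   # expected: yes
    kubectl auth can-i list nodes --as qinling                        # expected: yes
    kubectl auth can-i delete nodes --as qinling                      # expected: no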

@ -146,6 +146,30 @@ kubernetes_opts = [
        help='Kubernetes server address, e.g. you can start a proxy to the '
             'Kubernetes API server by using "kubectl proxy" command.'
    ),
    cfg.BoolOpt(
        'use_api_certificate',
        default=True,
        help='Whether to use client certificates to connect to the '
             'Kubernetes API server.'
    ),
    cfg.StrOpt(
        'ssl_ca_cert',
        default='/etc/qinling/pki/kubernetes/ca.crt',
        help='Path to the CA certificate for qinling to use to connect to '
             'the Kubernetes API server.'
    ),
    cfg.StrOpt(
        'cert_file',
        default='/etc/qinling/pki/kubernetes/qinling.crt',
        help='Path to the client certificate for qinling to use to '
             'connect to the Kubernetes API server.'
    ),
    cfg.StrOpt(
        'key_file',
        default='/etc/qinling/pki/kubernetes/qinling.key',
        help='Path to the client certificate key file for qinling to use to '
             'connect to the Kubernetes API server.'
    ),
    cfg.StrOpt(
        'log_devel',
        default='INFO',
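
For reference, a possible resulting [kubernetes] section, written here
with devstack's iniset helper (the hostname is an example, the
certificate paths are the defaults above, and the config file path
assumes /etc/qinling/qinling.conf):

    iniset /etc/qinling/qinling.conf kubernetes kube_host https://$(hostname -f):6443
    iniset /etc/qinling/qinling.conf kubernetes use_api_certificate True
    iniset /etc/qinling/qinling.conf kubernetes ssl_ca_cert /etc/qinling/pki/kubernetes/ca.crt
    iniset /etc/qinling/qinling.conf kubernetes cert_file /etc/qinling/pki/kubernetes/qinling.crt
    iniset /etc/qinling/qinling.conf kubernetes key_file /etc/qinling/pki/kubernetes/qinling.key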

@ -22,7 +22,12 @@ from kubernetes.client import configuration as k8s_config
def get_k8s_clients(conf):
    config = k8s_config.Configuration()
    config.host = conf.kubernetes.kube_host

    # Authenticate with a client certificate when configured to do so,
    # otherwise fall back to an unverified connection (e.g. kubectl proxy).
    if conf.kubernetes.use_api_certificate:
        config.ssl_ca_cert = conf.kubernetes.ssl_ca_cert
        config.cert_file = conf.kubernetes.cert_file
        config.key_file = conf.kubernetes.key_file
    else:
        config.verify_ssl = False

    client = api_client.ApiClient(configuration=config)
    v1 = core_v1_api.CoreV1Api(client)
    v1extention = extensions_v1beta1_api.ExtensionsV1beta1Api(client)

@ -40,7 +40,4 @@ QinlingGroup = [
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the qinling service."),
    cfg.StrOpt('kube_host',
               default='http://127.0.0.1:8001',
               help="The Kubernetes service address."),
]

@ -43,19 +43,10 @@ class RuntimesTest(base.BaseQinlingTest):
        )

        # Wait for runtime to be available
        # We don't have to check the k8s resource; if the runtime's status
        # has changed to available, the kubernetes deployment is assumed to
        # be ok.
        self.await_runtime_available(runtime_id)

        # Check k8s resource
        deploy = self.k8s_v1extention.read_namespaced_deployment(
            runtime_id,
            namespace=self.namespace
        )
        self.assertEqual(runtime_id, deploy.metadata.name)
        self.assertEqual(
            deploy.status.replicas, deploy.status.available_replicas
        )

        # Delete runtime
        resp = self.admin_client.delete_resource('runtimes', runtime_id)

@ -21,8 +21,6 @@ from tempest.lib.common.utils import data_utils
from tempest import test
import tenacity

from qinling_tempest_plugin.tests import utils

CONF = config.CONF

@ -46,13 +44,6 @@ class BaseQinlingTest(test.BaseTestCase):
        cls.alt_client = cls.os_alt.qinling.QinlingClient()
        cls.admin_client = cls.os_admin.qinling.QinlingClient()

        # Initialize k8s client
        clients = utils.get_k8s_clients(CONF)
        cls.k8s_v1 = clients['v1']
        cls.k8s_v1extention = clients['v1extention']
        # cls.k8s_apps_v1 = clients['apps_v1']

        cls.namespace = 'qinling'

    @classmethod
    def resource_setup(cls):
        super(BaseQinlingTest, cls).resource_setup()

@ -14,30 +14,6 @@
import hashlib

from kubernetes.client import api_client
# from kubernetes.client.apis import apps_v1_api
from kubernetes.client.apis import core_v1_api
from kubernetes.client.apis import extensions_v1beta1_api
from kubernetes.client import configuration as k8s_config


def get_k8s_clients(conf):
    config = k8s_config.Configuration()
    config.host = conf.qinling.kube_host
    config.verify_ssl = False
    client = api_client.ApiClient(configuration=config)
    v1 = core_v1_api.CoreV1Api(client)
    v1extention = extensions_v1beta1_api.ExtensionsV1beta1Api(client)
    # apps_v1 = apps_v1_api.AppsV1Api(client)
    clients = {
        'v1': v1,
        'v1extention': v1extention
        # 'apps_v1': apps_v1
    }
    return clients


def md5(file=None, content=None):
    hash_md5 = hashlib.md5()

@ -40,7 +40,10 @@ while true; do
    [ $now -gt $end ] && echo "Failed to setup kubernetes cluster in time" && exit -1
done

if [ "$QINLING_K8S_APISERVER_TLS" != "True" ]; then
    # Kubernetes proxy is needed if we don't use secure connections.
    create_k8s_screen
fi

#net_hosts_post_kube
#net_resolv_post_kube