Merge "Add option to specify Cgroup driver for Kubelet"

Zuul 2018-06-28 07:49:39 +00:00 committed by Gerrit Code Review
commit 1eb1f35a75
10 changed files with 77 additions and 3 deletions

View File

@@ -356,6 +356,9 @@ the table are linked to more details elsewhere in the user guide.
 | `kubeproxy_options`_                  | extra kubeproxy    | ""            |
 |                                       | args               |               |
 +---------------------------------------+--------------------+---------------+
+| `cgroup_driver`_                      | - systemd          | "systemd"     |
+|                                       | - cgroupfs         |               |
++---------------------------------------+--------------------+---------------+

 Cluster
 -------
@@ -1142,6 +1145,11 @@ _`influx_grafana_dashboard_enabled`
   label is set, an influxdb and grafana instance will be deployed,
   heapster will push data to influx and grafana will project them.
+
+_`cgroup_driver`
+  This label tells kubelet which Cgroup driver to use. Ideally this
+  should be identical to the Cgroup driver that Docker has been
+  started with.

 External load balancer for services
 -----------------------------------

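For illustration, the new label would normally be supplied when a cluster template is created with the standard OpenStack client; the template name, image and external network below are placeholder values, not part of this change:

    # hypothetical example: request the cgroupfs driver instead of the default systemd
    openstack coe cluster template create k8s-cgroupfs-template \
        --image fedora-atomic-latest \
        --coe kubernetes \
        --external-network public \
        --labels cgroup_driver=cgroupfs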
View File

@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/sh -x

 . /etc/sysconfig/heat-params
@@ -140,7 +140,22 @@ EOF
 KUBELET_ARGS="${KUBELET_ARGS} --client-ca-file=${CERT_DIR}/ca.crt --tls-cert-file=${CERT_DIR}/kubelet.crt --tls-private-key-file=${CERT_DIR}/kubelet.key --kubeconfig ${KUBELET_KUBECONFIG}"

 # specified cgroup driver
-KUBELET_ARGS="${KUBELET_ARGS} --cgroup-driver=systemd"
+KUBELET_ARGS="${KUBELET_ARGS} --cgroup-driver=${CGROUP_DRIVER}"
+
+systemctl disable docker
+if cat /usr/lib/systemd/system/docker.service | grep 'native.cgroupdriver'; then
+    cp /usr/lib/systemd/system/docker.service /etc/systemd/system/
+    sed -i "s/\(native.cgroupdriver=\)\w\+/\1$CGROUP_DRIVER/" \
+        /etc/systemd/system/docker.service
+else
+    cat > /etc/systemd/system/docker.service.d/cgroupdriver.conf << EOF
+ExecStart=---exec-opt native.cgroupdriver=$CGROUP_DRIVER
+EOF
+fi
+systemctl daemon-reload
+systemctl enable docker

 if [ -z "${KUBE_NODE_IP}" ]; then
     KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)

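As a standalone sketch of what the sed expression in this fragment does (using a throwaway sample file instead of the real docker.service, and assuming GNU sed):

    # hypothetical sample line, similar to what a docker.service ExecStart may contain
    echo 'ExecStart=/usr/bin/dockerd --exec-opt native.cgroupdriver=systemd' > /tmp/docker.service.sample
    CGROUP_DRIVER=cgroupfs
    # same substitution as above: replace whatever follows "native.cgroupdriver="
    # with the requested driver
    sed -i "s/\(native.cgroupdriver=\)\w\+/\1$CGROUP_DRIVER/" /tmp/docker.service.sample
    cat /tmp/docker.service.sample
    # prints: ExecStart=/usr/bin/dockerd --exec-opt native.cgroupdriver=cgroupfs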
View File

@@ -136,8 +136,25 @@ if [ -n "${INSECURE_REGISTRY_URL}" ]; then
     echo "INSECURE_REGISTRY='--insecure-registry ${INSECURE_REGISTRY_URL}'" >> /etc/sysconfig/docker
 fi

+KUBELET_ARGS="${KUBELET_ARGS} --client-ca-file=${CERT_DIR}/ca.crt --tls-cert-file=${CERT_DIR}/kubelet.crt --tls-private-key-file=${CERT_DIR}/kubelet.key"
+
 # specified cgroup driver
-KUBELET_ARGS="${KUBELET_ARGS} --client-ca-file=${CERT_DIR}/ca.crt --tls-cert-file=${CERT_DIR}/kubelet.crt --tls-private-key-file=${CERT_DIR}/kubelet.key --cgroup-driver=systemd"
+KUBELET_ARGS="${KUBELET_ARGS} --cgroup-driver=${CGROUP_DRIVER}"
+
+systemctl disable docker
+if cat /usr/lib/systemd/system/docker.service | grep 'native.cgroupdriver'; then
+    cp /usr/lib/systemd/system/docker.service /etc/systemd/system/
+    sed -i "s/\(native.cgroupdriver=\)\w\+/\1$CGROUP_DRIVER/" \
+        /etc/systemd/system/docker.service
+else
+    cat > /etc/systemd/system/docker.service.d/cgroupdriver.conf << EOF
+ExecStart=---exec-opt native.cgroupdriver=$CGROUP_DRIVER
+EOF
+fi
+systemctl daemon-reload
+systemctl enable docker

 cat > /etc/kubernetes/get_require_kubeconfig.sh <<EOF
 #!/bin/bash

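If the docker.service shipped with the image does not mention native.cgroupdriver, the fallback branch above writes a systemd drop-in instead. A minimal way to inspect the result on a node, assuming a systemd-based host:

    # show the unit file together with any drop-ins that extend it; the
    # cgroupdriver.conf written above should appear as an override
    systemctl cat docker.service

    # list only units that are extended or overridden by files under /etc
    systemd-delta --type=extended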
View File

@@ -19,6 +19,7 @@ write_files:
             DOCKER_VOLUME="$DOCKER_VOLUME"
             DOCKER_VOLUME_SIZE="$DOCKER_VOLUME_SIZE"
             DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER"
+            CGROUP_DRIVER="$CGROUP_DRIVER"
             NETWORK_DRIVER="$NETWORK_DRIVER"
             FLANNEL_NETWORK_CIDR="$FLANNEL_NETWORK_CIDR"
             FLANNEL_NETWORK_SUBNETLEN="$FLANNEL_NETWORK_SUBNETLEN"

View File

@@ -16,6 +16,7 @@ write_files:
             DOCKER_VOLUME="$DOCKER_VOLUME"
             DOCKER_VOLUME_SIZE="$DOCKER_VOLUME_SIZE"
             DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER"
+            CGROUP_DRIVER="$CGROUP_DRIVER"
             NETWORK_DRIVER="$NETWORK_DRIVER"
             REGISTRY_ENABLED="$REGISTRY_ENABLED"
             REGISTRY_PORT="$REGISTRY_PORT"

View File

@@ -93,6 +93,7 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
         label_list = ['kube_tag', 'container_infra_prefix',
                       'availability_zone',
+                      'cgroup_driver',
                       'calico_tag', 'calico_cni_tag',
                       'calico_kube_controllers_tag', 'calico_ipv4pool',
                       'etcd_tag', 'flannel_tag']

View File

@@ -154,6 +154,13 @@ parameters:
     description: docker storage driver name
     default: "devicemapper"

+  cgroup_driver:
+    type: string
+    description: >
+      cgroup driver name that kubelet should use, ideally the same as
+      the docker cgroup driver.
+    default: "systemd"
+
   wait_condition_timeout:
     type: number
     description: >
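Since the parameter description recommends matching Docker's own cgroup driver, one quick way to see which driver the image's Docker daemon is actually using (assuming Docker is already running on the host) is:

    # prints "systemd" or "cgroupfs" depending on how dockerd was started
    docker info --format '{{ .CgroupDriver }}'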
@@ -627,6 +634,7 @@ resources:
           docker_volume_size: {get_param: docker_volume_size}
           docker_volume_type: {get_param: docker_volume_type}
           docker_storage_driver: {get_param: docker_storage_driver}
+          cgroup_driver: {get_param: cgroup_driver}
           wait_condition_timeout: {get_param: wait_condition_timeout}
           network_driver: {get_param: network_driver}
           flannel_network_cidr: {get_param: flannel_network_cidr}
@@ -723,6 +731,7 @@ resources:
           docker_volume_size: {get_param: docker_volume_size}
           docker_volume_type: {get_param: docker_volume_type}
           docker_storage_driver: {get_param: docker_storage_driver}
+          cgroup_driver: {get_param: cgroup_driver}
           wait_condition_timeout: {get_param: wait_condition_timeout}
           registry_enabled: {get_param: registry_enabled}
           registry_port: {get_param: registry_port}

View File

@@ -61,6 +61,13 @@ parameters:
     description: docker storage driver name
     default: "devicemapper"

+  cgroup_driver:
+    type: string
+    description: >
+      cgroup driver name that kubelet should use, ideally the same as
+      the docker cgroup driver.
+    default: "systemd"
+
   volume_driver:
     type: string
     description: volume driver to use for container storage
@@ -415,6 +422,7 @@ resources:
            "$DOCKER_VOLUME": {get_resource: docker_volume}
            "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size}
            "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver}
+           "$CGROUP_DRIVER": {get_param: cgroup_driver}
            "$NETWORK_DRIVER": {get_param: network_driver}
            "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr}
            "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen}

View File

@@ -51,6 +51,13 @@ parameters:
     description: docker storage driver name
     default: "devicemapper"

+  cgroup_driver:
+    type: string
+    description: >
+      cgroup driver name that kubelet should use, ideally the same as
+      the docker cgroup driver.
+    default: "systemd"
+
   tls_disabled:
     type: boolean
     description: whether or not to enable TLS
@@ -301,6 +308,7 @@ resources:
            $DOCKER_VOLUME: {get_resource: docker_volume}
            $DOCKER_VOLUME_SIZE: {get_param: docker_volume_size}
            $DOCKER_STORAGE_DRIVER: {get_param: docker_storage_driver}
+           $CGROUP_DRIVER: {get_param: cgroup_driver}
            $NETWORK_DRIVER: {get_param: network_driver}
            $REGISTRY_ENABLED: {get_param: registry_enabled}
            $REGISTRY_PORT: {get_param: registry_port}

View File

@@ -331,6 +331,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
             pods_network_cidr = flannel_cidr
         elif mock_cluster_template.network_driver == 'calico':
             pods_network_cidr = calico_ipv4pool
+        cgroup_driver = mock_cluster.labels.get(
+            'cgroup_driver')
         ingress_controller = mock_cluster.labels.get(
             'ingress_controller')
         ingress_controller_role = mock_cluster.labels.get(
@@ -386,6 +388,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
             'calico_cni_tag': calico_cni_tag,
             'calico_kube_controllers_tag': calico_kube_controllers_tag,
             'calico_ipv4pool': calico_ipv4pool,
+            'cgroup_driver': cgroup_driver,
             'pods_network_cidr': pods_network_cidr,
             'ingress_controller': ingress_controller,
             'ingress_controller_role': ingress_controller_role,
@@ -472,6 +475,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
             pods_network_cidr = flannel_cidr
         elif mock_cluster_template.network_driver == 'calico':
             pods_network_cidr = calico_ipv4pool
+        cgroup_driver = mock_cluster.labels.get(
+            'cgroup_driver')
         ingress_controller = mock_cluster.labels.get(
             'ingress_controller')
         ingress_controller_role = mock_cluster.labels.get(
@@ -529,6 +534,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
             'calico_cni_tag': calico_cni_tag,
             'calico_kube_controllers_tag': calico_kube_controllers_tag,
             'calico_ipv4pool': calico_ipv4pool,
+            'cgroup_driver': cgroup_driver,
             'pods_network_cidr': pods_network_cidr,
             'ingress_controller': ingress_controller,
             'ingress_controller_role': ingress_controller_role,