Browse Source

Merge "Add option to specify Cgroup driver for Kubelet"

changes/00/571200/7
Zuul 3 years ago
committed by Gerrit Code Review
parent
commit
1eb1f35a75
  1. 8
      doc/source/user/index.rst
  2. 19
      magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh
  3. 19
      magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh
  4. 1
      magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.yaml
  5. 1
      magnum/drivers/common/templates/kubernetes/fragments/write-heat-params.yaml
  6. 1
      magnum/drivers/heat/k8s_fedora_template_def.py
  7. 9
      magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml
  8. 8
      magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml
  9. 8
      magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml
  10. 6
      magnum/tests/unit/drivers/test_template_definition.py

8
doc/source/user/index.rst

@@ -356,6 +356,9 @@ the table are linked to more details elsewhere in the user guide.
| `kubeproxy_options`_ | extra kubeproxy | "" |
| | args | |
+---------------------------------------+--------------------+---------------+
| `cgroup_driver`_ | - systemd | "systemd" |
| | - cgroupfs | |
+---------------------------------------+--------------------+---------------+
Cluster
-------
@@ -1142,6 +1145,11 @@ _`influx_grafana_dashboard_enabled`
label is set, an influxdb and grafana instance will be deployed,
heapster will push data to influx and grafana will project them.
_`cgroup_driver`
This label tells kubelet which Cgroup driver to use. Ideally this
should be identical to the Cgroup driver that Docker has been
started with.
External load balancer for services
-----------------------------------

19
magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh

@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/sh -x
. /etc/sysconfig/heat-params
@@ -140,7 +140,22 @@ EOF
KUBELET_ARGS="${KUBELET_ARGS} --client-ca-file=${CERT_DIR}/ca.crt --tls-cert-file=${CERT_DIR}/kubelet.crt --tls-private-key-file=${CERT_DIR}/kubelet.key --kubeconfig ${KUBELET_KUBECONFIG}"
# specified cgroup driver
KUBELET_ARGS="${KUBELET_ARGS} --cgroup-driver=systemd"
KUBELET_ARGS="${KUBELET_ARGS} --cgroup-driver=${CGROUP_DRIVER}"
systemctl disable docker
if cat /usr/lib/systemd/system/docker.service | grep 'native.cgroupdriver'; then
cp /usr/lib/systemd/system/docker.service /etc/systemd/system/
sed -i "s/\(native.cgroupdriver=\)\w\+/\1$CGROUP_DRIVER/" \
/etc/systemd/system/docker.service
else
cat > /etc/systemd/system/docker.service.d/cgroupdriver.conf << EOF
ExecStart=---exec-opt native.cgroupdriver=$CGROUP_DRIVER
EOF
fi
systemctl daemon-reload
systemctl enable docker
if [ -z "${KUBE_NODE_IP}" ]; then
KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)

19
magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh

@@ -136,8 +136,25 @@ if [ -n "${INSECURE_REGISTRY_URL}" ]; then
echo "INSECURE_REGISTRY='--insecure-registry ${INSECURE_REGISTRY_URL}'" >> /etc/sysconfig/docker
fi
KUBELET_ARGS="${KUBELET_ARGS} --client-ca-file=${CERT_DIR}/ca.crt --tls-cert-file=${CERT_DIR}/kubelet.crt --tls-private-key-file=${CERT_DIR}/kubelet.key"
# specified cgroup driver
KUBELET_ARGS="${KUBELET_ARGS} --client-ca-file=${CERT_DIR}/ca.crt --tls-cert-file=${CERT_DIR}/kubelet.crt --tls-private-key-file=${CERT_DIR}/kubelet.key --cgroup-driver=systemd"
KUBELET_ARGS="${KUBELET_ARGS} --cgroup-driver=${CGROUP_DRIVER}"
systemctl disable docker
if cat /usr/lib/systemd/system/docker.service | grep 'native.cgroupdriver'; then
cp /usr/lib/systemd/system/docker.service /etc/systemd/system/
sed -i "s/\(native.cgroupdriver=\)\w\+/\1$CGROUP_DRIVER/" \
/etc/systemd/system/docker.service
else
cat > /etc/systemd/system/docker.service.d/cgroupdriver.conf << EOF
ExecStart=---exec-opt native.cgroupdriver=$CGROUP_DRIVER
EOF
fi
systemctl daemon-reload
systemctl enable docker
cat > /etc/kubernetes/get_require_kubeconfig.sh <<EOF
#!/bin/bash

1
magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.yaml

@@ -19,6 +19,7 @@ write_files:
DOCKER_VOLUME="$DOCKER_VOLUME"
DOCKER_VOLUME_SIZE="$DOCKER_VOLUME_SIZE"
DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER"
CGROUP_DRIVER="$CGROUP_DRIVER"
NETWORK_DRIVER="$NETWORK_DRIVER"
FLANNEL_NETWORK_CIDR="$FLANNEL_NETWORK_CIDR"
FLANNEL_NETWORK_SUBNETLEN="$FLANNEL_NETWORK_SUBNETLEN"

1
magnum/drivers/common/templates/kubernetes/fragments/write-heat-params.yaml

@@ -16,6 +16,7 @@ write_files:
DOCKER_VOLUME="$DOCKER_VOLUME"
DOCKER_VOLUME_SIZE="$DOCKER_VOLUME_SIZE"
DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER"
CGROUP_DRIVER="$CGROUP_DRIVER"
NETWORK_DRIVER="$NETWORK_DRIVER"
REGISTRY_ENABLED="$REGISTRY_ENABLED"
REGISTRY_PORT="$REGISTRY_PORT"

1
magnum/drivers/heat/k8s_fedora_template_def.py

@@ -93,6 +93,7 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
label_list = ['kube_tag', 'container_infra_prefix',
'availability_zone',
'cgroup_driver',
'calico_tag', 'calico_cni_tag',
'calico_kube_controllers_tag', 'calico_ipv4pool',
'etcd_tag', 'flannel_tag']

9
magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml

@@ -154,6 +154,13 @@ parameters:
description: docker storage driver name
default: "devicemapper"
cgroup_driver:
type: string
description: >
cgroup driver name that kubelet should use, ideally the same as
the docker cgroup driver.
default: "systemd"
wait_condition_timeout:
type: number
description: >
@@ -627,6 +634,7 @@ resources:
docker_volume_size: {get_param: docker_volume_size}
docker_volume_type: {get_param: docker_volume_type}
docker_storage_driver: {get_param: docker_storage_driver}
cgroup_driver: {get_param: cgroup_driver}
wait_condition_timeout: {get_param: wait_condition_timeout}
network_driver: {get_param: network_driver}
flannel_network_cidr: {get_param: flannel_network_cidr}
@@ -723,6 +731,7 @@ resources:
docker_volume_size: {get_param: docker_volume_size}
docker_volume_type: {get_param: docker_volume_type}
docker_storage_driver: {get_param: docker_storage_driver}
cgroup_driver: {get_param: cgroup_driver}
wait_condition_timeout: {get_param: wait_condition_timeout}
registry_enabled: {get_param: registry_enabled}
registry_port: {get_param: registry_port}

8
magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml

@@ -61,6 +61,13 @@ parameters:
description: docker storage driver name
default: "devicemapper"
cgroup_driver:
type: string
description: >
cgroup driver name that kubelet should use, ideally the same as
the docker cgroup driver.
default: "systemd"
volume_driver:
type: string
description: volume driver to use for container storage
@@ -415,6 +422,7 @@ resources:
"$DOCKER_VOLUME": {get_resource: docker_volume}
"$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size}
"$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver}
"$CGROUP_DRIVER": {get_param: cgroup_driver}
"$NETWORK_DRIVER": {get_param: network_driver}
"$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr}
"$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen}

8
magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml

@@ -51,6 +51,13 @@ parameters:
description: docker storage driver name
default: "devicemapper"
cgroup_driver:
type: string
description: >
cgroup driver name that kubelet should use, ideally the same as
the docker cgroup driver.
default: "systemd"
tls_disabled:
type: boolean
description: whether or not to enable TLS
@@ -301,6 +308,7 @@ resources:
$DOCKER_VOLUME: {get_resource: docker_volume}
$DOCKER_VOLUME_SIZE: {get_param: docker_volume_size}
$DOCKER_STORAGE_DRIVER: {get_param: docker_storage_driver}
$CGROUP_DRIVER: {get_param: cgroup_driver}
$NETWORK_DRIVER: {get_param: network_driver}
$REGISTRY_ENABLED: {get_param: registry_enabled}
$REGISTRY_PORT: {get_param: registry_port}

6
magnum/tests/unit/drivers/test_template_definition.py

@@ -331,6 +331,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
pods_network_cidr = flannel_cidr
elif mock_cluster_template.network_driver == 'calico':
pods_network_cidr = calico_ipv4pool
cgroup_driver = mock_cluster.labels.get(
'cgroup_driver')
ingress_controller = mock_cluster.labels.get(
'ingress_controller')
ingress_controller_role = mock_cluster.labels.get(
@@ -386,6 +388,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
'calico_cni_tag': calico_cni_tag,
'calico_kube_controllers_tag': calico_kube_controllers_tag,
'calico_ipv4pool': calico_ipv4pool,
'cgroup_driver': cgroup_driver,
'pods_network_cidr': pods_network_cidr,
'ingress_controller': ingress_controller,
'ingress_controller_role': ingress_controller_role,
@@ -472,6 +475,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
pods_network_cidr = flannel_cidr
elif mock_cluster_template.network_driver == 'calico':
pods_network_cidr = calico_ipv4pool
cgroup_driver = mock_cluster.labels.get(
'cgroup_driver')
ingress_controller = mock_cluster.labels.get(
'ingress_controller')
ingress_controller_role = mock_cluster.labels.get(
@@ -529,6 +534,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
'calico_cni_tag': calico_cni_tag,
'calico_kube_controllers_tag': calico_kube_controllers_tag,
'calico_ipv4pool': calico_ipv4pool,
'cgroup_driver': cgroup_driver,
'pods_network_cidr': pods_network_cidr,
'ingress_controller': ingress_controller,
'ingress_controller_role': ingress_controller_role,

Loading…
Cancel
Save