Merge "[k8s] Cluster creation speedup"

Zuul 2018-12-18 02:22:10 +00:00 committed by Gerrit Code Review
commit 40980e7bdb
13 changed files with 276 additions and 408 deletions

View File

@@ -1,11 +1,11 @@
#!/bin/sh
step="calico-service"
printf "Starting to run ${step}\n"
. /etc/sysconfig/heat-params
if [ "$NETWORK_DRIVER" != "calico" ]; then
exit 0
fi
if [ "$NETWORK_DRIVER" = "calico" ]; then
_prefix=${CONTAINER_INFRA_PREFIX:-quay.io/calico/}
ETCD_SERVER_IP=${ETCD_LB_VIP:-$KUBE_NODE_IP}
CERT_DIR=/etc/kubernetes/certs
@@ -447,15 +447,6 @@ subjects:
EOF
}
# NOTE(flwang): Let's keep the same addons yaml file on all masters,
# but if it's not the primary/bootstrapping master, don't try to
# create those resources to avoid race condition issue until the
# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
# fixed.
if [ "$MASTER_INDEX" != "0" ]; then
exit 0
fi
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
echo "Waiting for Kubernetes API..."
@@ -463,3 +454,6 @@ do
done
/usr/bin/kubectl apply -f ${CALICO_DEPLOY} --namespace=kube-system
fi
printf "Finished running ${step}\n"

View File

@@ -2,6 +2,8 @@
. /etc/sysconfig/heat-params
set -x
if [ -n "$ETCD_VOLUME_SIZE" ] && [ "$ETCD_VOLUME_SIZE" -gt 0 ]; then
attempts=60

View File

@@ -1,5 +1,8 @@
#!/bin/sh
step="core-dns-service"
printf "Starting to run ${step}\n"
. /etc/sysconfig/heat-params
_dns_prefix=${CONTAINER_INFRA_PREFIX:-docker.io/coredns/}
@@ -245,15 +248,6 @@ spec:
EOF
}
# NOTE(flwang): Let's keep the same addons yaml file on all masters,
# but if it's not the primary/bootstrapping master, don't try to
# create those resources to avoid race condition issue until the
# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
# fixed.
if [ "$MASTER_INDEX" != "0" ]; then
exit 0
fi
echo "Waiting for Kubernetes API..."
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
@@ -261,3 +255,5 @@ do
done
kubectl apply --validate=false -f $CORE_DNS
printf "Finished running ${step}\n"

View File

@@ -1,15 +1,17 @@
#!/bin/bash
#!/bin/sh
step="enable-cert-api-manager"
printf "Starting to run ${step}\n"
. /etc/sysconfig/heat-params
if [ "$(echo $CERT_MANAGER_API | tr '[:upper:]' '[:lower:]')" = "false" ]; then
exit 0
fi
if [ "$(echo $CERT_MANAGER_API | tr '[:upper:]' '[:lower:]')" != "false" ]; then
cert_dir=/etc/kubernetes/certs
echo -e "$CA_KEY" > ${cert_dir}/ca.key
chown kube.kube ${cert_dir}/ca.key
chmod 400 ${cert_dir}/ca.key
fi
printf "Finished running ${step}\n"

View File

@@ -1,4 +1,7 @@
#!/bin/bash
#!/bin/sh
step="enable-ingress-controller"
printf "Starting to run ${step}\n"
# Enables the specified ingress controller.
#
@@ -21,3 +24,5 @@ EOF
if [ "$(echo $INGRESS_CONTROLLER | tr '[:upper:]' '[:lower:]')" = "traefik" ]; then
$enable-ingress-traefik
fi
printf "Finished running ${step}\n"

View File

@@ -110,15 +110,6 @@ writeFile $INGRESS_TRAEFIK_MANIFEST "$INGRESS_TRAEFIK_MANIFEST_CONTENT"
INGRESS_TRAEFIK_BIN="/srv/magnum/kubernetes/bin/ingress-traefik"
INGRESS_TRAEFIK_SERVICE="/etc/systemd/system/ingress-traefik.service"
# NOTE(flwang): Let's keep the same addons yaml file on all masters,
# but if it's not the primary/bootstrapping master, don't try to
# create those resources to avoid race condition issue until the
# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
# fixed.
if [ "$MASTER_INDEX" != "0" ]; then
exit 0
fi
# Binary for ingress traefik
INGRESS_TRAEFIK_BIN_CONTENT='''#!/bin/sh
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]

View File

@@ -1,4 +1,7 @@
#!/bin/bash
#!/bin/sh
step="enable-prometheus-monitoring"
printf "Starting to run ${step}\n"
. /etc/sysconfig/heat-params
@@ -361,19 +364,8 @@ writeFile $grafanaService_file "$grafanaService_content"
. /etc/sysconfig/heat-params
# NOTE(flwang): Let's keep the same addons yaml file on all masters,
# but if it's not the primary/bootstrapping master, don't try to
# create those resources to avoid race condition issue until the
# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
# fixed.
if [ "$MASTER_INDEX" != "0" ]; then
exit 0
fi
if [ "$(echo $PROMETHEUS_MONITORING | tr '[:upper:]' '[:lower:]')" = "false" ]; then
exit 0
fi
if [ "$(echo $PROMETHEUS_MONITORING | tr '[:upper:]' '[:lower:]')" = "true" ]; then
PROMETHEUS_MON_BASE_DIR="/srv/magnum/kubernetes/monitoring"
KUBE_MON_BIN=${PROMETHEUS_MON_BASE_DIR}"/bin/kube-enable-monitoring"
KUBE_MON_SERVICE="/etc/systemd/system/kube-enable-monitoring.service"
@@ -502,3 +494,6 @@ set -x
systemctl daemon-reload
systemctl enable kube-enable-monitoring.service
systemctl start --no-block kube-enable-monitoring.service
fi
printf "Finished running ${step}\n"

View File

@@ -1,16 +1,10 @@
#!/bin/sh -x
#!/bin/sh
step="kube-apiserver-to-kubelet-role"
printf "Starting to run ${step}\n"
. /etc/sysconfig/heat-params
# NOTE(flwang): Let's keep the same addons yaml file on all masters,
# but if it's not the primary/bootstrapping master, don't try to
# create those resources to avoid race condition issue until the
# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
# fixed.
if [ "$MASTER_INDEX" != "0" ]; then
exit 0
fi
echo "Waiting for Kubernetes API..."
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
@@ -84,3 +78,5 @@ EOF
}
kubectl apply --validate=false -f ${ADMIN_RBAC}
printf "Finished running ${step}\n"

View File

@@ -1,11 +1,11 @@
#!/bin/bash -x
#!/bin/sh
step="kube-dashboard-service"
printf "Starting to run ${step}\n"
. /etc/sysconfig/heat-params
if [ "$(echo $KUBE_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "false" ]; then
exit 0
fi
if [ "$(echo $KUBE_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "true" ]; then
KUBE_DASH_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}kubernetes-dashboard-amd64:${KUBE_DASHBOARD_VERSION}"
HEAPSTER_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-amd64:v1.4.2"
@@ -335,12 +335,6 @@ spec:
EOF
}
if [ "$MASTER_INDEX" != "0" ]; then
exit 0
fi
echo "Waiting for Kubernetes API..."
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
@@ -454,16 +448,6 @@ subjects:
EOF
}
# NOTE(flwang): Let's keep the same addons yaml file on all masters,
# but if it's not the primary/bootstrapping master, don't try to
# create those resources to avoid race condition issue until the
# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
# fixed.
if [ "$MASTER_INDEX" != "0" ]; then
exit 0
fi
echo "Waiting for Kubernetes API..."
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
@@ -472,3 +456,6 @@ done
kubectl apply --validate=false -f $KUBE_DASH_DEPLOY
kubectl apply --validate=false -f $HEAPSTER_DEPLOY
fi
printf "Finished running ${step}\n"

View File

@@ -5,7 +5,6 @@ write_files:
owner: "root:root"
permissions: "0600"
content: |
MASTER_INDEX="$MASTER_INDEX"
PROMETHEUS_MONITORING="$PROMETHEUS_MONITORING"
KUBE_API_PUBLIC_ADDRESS="$KUBE_API_PUBLIC_ADDRESS"
KUBE_API_PRIVATE_ADDRESS="$KUBE_API_PRIVATE_ADDRESS"
@@ -42,7 +41,6 @@ write_files:
HTTP_PROXY="$HTTP_PROXY"
HTTPS_PROXY="$HTTPS_PROXY"
NO_PROXY="$NO_PROXY"
WAIT_CURL="$WAIT_CURL"
KUBE_TAG="$KUBE_TAG"
ETCD_TAG="$ETCD_TAG"
FLANNEL_TAG="$FLANNEL_TAG"

View File

@@ -655,7 +655,6 @@ resources:
list_join:
- '-'
- [{ get_param: 'OS::stack_name' }, 'master', '%index%']
master_index: '%index%'
prometheus_monitoring: {get_param: prometheus_monitoring}
grafana_admin_passwd: {get_param: grafana_admin_passwd}
api_public_address: {get_attr: [api_lb, floating_address]}
@@ -670,7 +669,6 @@ resources:
docker_volume_type: {get_param: docker_volume_type}
docker_storage_driver: {get_param: docker_storage_driver}
cgroup_driver: {get_param: cgroup_driver}
wait_condition_timeout: {get_param: wait_condition_timeout}
network_driver: {get_param: network_driver}
flannel_network_cidr: {get_param: flannel_network_cidr}
flannel_network_subnetlen: {get_param: flannel_network_subnetlen}
@@ -738,6 +736,41 @@ resources:
grafana_tag: {get_param: grafana_tag}
heat_container_agent_tag: {get_param: heat_container_agent_tag}
kube_cluster_config:
type: OS::Heat::SoftwareConfig
properties:
group: script
config:
list_join:
- "\n"
-
- get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh
- str_replace:
template: {get_file: ../../common/templates/kubernetes/fragments/enable-cert-api-manager.sh}
params:
"$CA_KEY": {get_param: ca_key}
- get_file: ../../common/templates/kubernetes/fragments/core-dns-service.sh
- str_replace:
template: {get_file: ../../common/templates/kubernetes/fragments/enable-prometheus-monitoring.sh}
params:
"$ADMIN_PASSWD": {get_param: grafana_admin_passwd}
- get_file: ../../common/templates/kubernetes/fragments/calico-service.sh
- str_replace:
params:
$enable-ingress-traefik: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-traefik.sh}
template: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-controller.sh}
- get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh
kube_cluster_deploy:
type: OS::Heat::SoftwareDeployment
properties:
actions: ['CREATE']
signal_transport: HEAT_SIGNAL
config:
get_resource: kube_cluster_config
server:
get_attr: [kube_masters, resource.0]
######################################################################
#

View File

@@ -178,11 +178,6 @@ parameters:
type: string
description: network driver to use for instantiating container networks
wait_condition_timeout:
type: number
description : >
timeout for the Wait Conditions
secgroup_kube_master_id:
type: string
description: ID of the security group for kubernetes master.
@@ -367,12 +362,6 @@ parameters:
whether or not to use Octavia for LoadBalancer type service.
default: False
master_index:
type: string
description: >
the index of master node, index 0 means the master node is the primary,
bootstrapping node.
kube_service_account_key:
type: string
hidden: true
@@ -404,17 +393,6 @@ parameters:
description: tag of the heat_container_agent system container
resources:
master_wait_handle:
type: OS::Heat::WaitConditionHandle
master_wait_condition:
type: OS::Heat::WaitCondition
depends_on: kube-master
properties:
handle: {get_resource: master_wait_handle}
timeout: {get_param: wait_condition_timeout}
######################################################################
#
# resource that exposes the IPs of either the kube master or the API
@@ -443,7 +421,6 @@ resources:
str_replace:
template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params-master.yaml}
params:
"$MASTER_INDEX": {get_param: master_index}
"$PROMETHEUS_MONITORING": {get_param: prometheus_monitoring}
"$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]}
"$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]}
@@ -487,7 +464,6 @@ resources:
"$FLANNEL_TAG": {get_param: flannel_tag}
"$KUBE_VERSION": {get_param: kube_version}
"$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version}
"$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]}
"$TRUSTEE_USER_ID": {get_param: trustee_user_id}
"$TRUSTEE_PASSWORD": {get_param: trustee_password}
"$TRUST_ID": {get_param: trust_id}
@@ -585,18 +561,6 @@ resources:
group: ungrouped
config: {get_file: ../../common/templates/kubernetes/fragments/enable-services-master.sh}
kube_apiserver_to_kubelet_role:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh}
master_wc_notify:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: ../../common/templates/kubernetes/fragments/wc-notify-master.sh}
disable_selinux:
type: OS::Heat::SoftwareConfig
properties:
@@ -633,109 +597,6 @@ resources:
- config: {get_resource: write_flannel_config}
- config: {get_resource: flannel_config_service}
- config: {get_resource: flannel_service}
- config: {get_resource: kube_apiserver_to_kubelet_role}
- config: {get_resource: master_wc_notify}
enable_cert_manager_api:
type: OS::Heat::SoftwareConfig
properties:
group: script
config:
str_replace:
template: {get_file: ../../common/templates/kubernetes/fragments/enable-cert-api-manager.sh}
params:
"$CA_KEY": {get_param: ca_key}
enable_cert_manager_api_deployment:
type: OS::Heat::SoftwareDeployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: enable_cert_manager_api}
server: {get_resource: kube-master}
actions: ['CREATE']
core_dns_service:
type: OS::Heat::SoftwareConfig
properties:
group: script
config: {get_file: ../../common/templates/kubernetes/fragments/core-dns-service.sh}
core_dns_service_deployment:
type: OS::Heat::SoftwareDeployment
depends_on: enable_cert_manager_api_deployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: core_dns_service}
server: {get_resource: kube-master}
actions: ['CREATE']
enable_prometheus_monitoring:
type: OS::Heat::SoftwareConfig
properties:
group: script
config:
str_replace:
template: {get_file: ../../common/templates/kubernetes/fragments/enable-prometheus-monitoring.sh}
params:
"$ADMIN_PASSWD": {get_param: grafana_admin_passwd}
enable_prometheus_monitoring_deployment:
type: OS::Heat::SoftwareDeployment
depends_on: core_dns_service_deployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: enable_prometheus_monitoring}
server: {get_resource: kube-master}
actions: ['CREATE']
calico_service:
type: OS::Heat::SoftwareConfig
properties:
group: script
config: {get_file: ../../common/templates/kubernetes/fragments/calico-service.sh}
calico_service_deployment:
type: OS::Heat::SoftwareDeployment
depends_on: enable_prometheus_monitoring_deployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: calico_service}
server: {get_resource: kube-master}
actions: ['CREATE']
enable_ingress_controller:
type: OS::Heat::SoftwareConfig
properties:
group: script
config:
str_replace:
params:
$enable-ingress-traefik: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-traefik.sh}
template: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-controller.sh}
enable_ingress_controller_deployment:
type: OS::Heat::SoftwareDeployment
depends_on: calico_service_deployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: enable_ingress_controller}
server: {get_resource: kube-master}
actions: ['CREATE']
kubernetes_dashboard:
type: OS::Heat::SoftwareConfig
properties:
group: script
config: {get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh}
kubernetes_dashboard_deployment:
type: OS::Heat::SoftwareDeployment
depends_on: enable_ingress_controller_deployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: kubernetes_dashboard}
server: {get_resource: kube-master}
actions: ['CREATE']
######################################################################
#
@@ -833,6 +694,9 @@ resources:
outputs:
OS::stack_id:
value: { get_resource: kube-master }
kube_master_ip:
value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]}
description: >

View File

@@ -0,0 +1,5 @@
features:
  - |
    Start the Kubernetes worker installation right after the master instances
    are created, rather than waiting for all the services inside the masters;
    this can decrease Kubernetes cluster launch time significantly.
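For readers skimming the diff, the mechanism behind this speedup is concentrated in the kubecluster.yaml and kubemaster.yaml hunks above: the per-addon SoftwareConfig/SoftwareDeployment pairs and the master wait condition they used to feed are removed, and all addon fragments are concatenated into a single script that is deployed once, after creation, to the first master only. Below is a condensed sketch of that pattern, reusing the resource names from the diff (kube_cluster_config, kube_cluster_deploy, kube_masters) but trimmed to two fragments for brevity; it is not the full list of fragments the commit actually wires in.

  kube_cluster_config:
    type: OS::Heat::SoftwareConfig
    properties:
      group: script
      config:
        # Concatenate the addon fragments into one script; each fragment now
        # guards itself (e.g. on NETWORK_DRIVER or CERT_MANAGER_API) instead
        # of exiting early when MASTER_INDEX is non-zero.
        list_join:
          - "\n"
          -
            - get_file: ../../common/templates/kubernetes/fragments/core-dns-service.sh
            - get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh

  kube_cluster_deploy:
    type: OS::Heat::SoftwareDeployment
    properties:
      actions: ['CREATE']
      signal_transport: HEAT_SIGNAL
      config: {get_resource: kube_cluster_config}
      # Run only on the first (bootstrapping) master of the ResourceGroup.
      server: {get_attr: [kube_masters, resource.0]}

Because the addon scripts no longer feed a wait condition on the master (master_wait_handle and master_wait_condition are deleted above), the master resources complete as soon as the instances come up, which is what allows worker installation to start earlier, as the release note describes.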