Merge "[k8s] Cluster creation speedup" into stable/queens

Zuul, 2019-03-18 04:24:22 +00:00, committed by Gerrit Code Review
commit dd3201e093
12 changed files with 265 additions and 381 deletions

View File

@@ -1,24 +1,24 @@
#!/bin/sh
step="calico-service"
printf "Starting to run ${step}\n"
. /etc/sysconfig/heat-params
if [ "$NETWORK_DRIVER" != "calico" ]; then
exit 0
fi
if [ "$NETWORK_DRIVER" = "calico" ]; then
_prefix=${CONTAINER_INFRA_PREFIX:-quay.io/calico/}
ETCD_SERVER_IP=${ETCD_LB_VIP:-$KUBE_NODE_IP}
CERT_DIR=/etc/kubernetes/certs
ETCD_CA=`cat ${CERT_DIR}/ca.crt | base64 | tr -d '\n'`
ETCD_CERT=`cat ${CERT_DIR}/server.crt | base64 | tr -d '\n'`
ETCD_KEY=`cat ${CERT_DIR}/server.key | base64 | tr -d '\n'`
_prefix=${CONTAINER_INFRA_PREFIX:-quay.io/calico/}
ETCD_SERVER_IP=${ETCD_LB_VIP:-$KUBE_NODE_IP}
CERT_DIR=/etc/kubernetes/certs
ETCD_CA=`cat ${CERT_DIR}/ca.crt | base64 | tr -d '\n'`
ETCD_CERT=`cat ${CERT_DIR}/server.crt | base64 | tr -d '\n'`
ETCD_KEY=`cat ${CERT_DIR}/server.key | base64 | tr -d '\n'`
CALICO_DEPLOY=/srv/magnum/kubernetes/manifests/calico-deploy.yaml
CALICO_DEPLOY=/srv/magnum/kubernetes/manifests/calico-deploy.yaml
[ -f ${CALICO_DEPLOY} ] || {
echo "Writing File: $CALICO_DEPLOY"
mkdir -p $(dirname ${CALICO_DEPLOY})
cat << EOF > ${CALICO_DEPLOY}
[ -f ${CALICO_DEPLOY} ] || {
echo "Writing File: $CALICO_DEPLOY"
mkdir -p $(dirname ${CALICO_DEPLOY})
cat << EOF > ${CALICO_DEPLOY}
# Calico Version v2.6.7
# https://docs.projectcalico.org/v2.6/releases#v2.6.7
# This manifest includes the following component versions:
@@ -445,21 +445,15 @@ subjects:
name: calico-node
namespace: kube-system
EOF
}
}
# NOTE(flwang): Let's keep the same addons yaml file on all masters,
# but if it's not the primary/bootstrapping master, don't try to
# create those resources to avoid race condition issue until the
# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
# fixed.
if [ "$MASTER_INDEX" != "0" ]; then
exit 0
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
echo "Waiting for Kubernetes API..."
sleep 5
done
/usr/bin/kubectl apply -f ${CALICO_DEPLOY} --namespace=kube-system
fi
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
echo "Waiting for Kubernetes API..."
sleep 5
done
/usr/bin/kubectl apply -f ${CALICO_DEPLOY} --namespace=kube-system
printf "Finished running ${step}\n"

View File

@@ -2,6 +2,8 @@
. /etc/sysconfig/heat-params
set -x
if [ -n "$ETCD_VOLUME_SIZE" ] && [ "$ETCD_VOLUME_SIZE" -gt 0 ]; then
attempts=60
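
Only the opening lines of this hunk are visible, but the new attempts=60 counter indicates the etcd-volume setup gained a bounded retry. A hypothetical sketch of a wait loop of that shape — the device path is an assumption for illustration, not taken from this diff:

    #!/bin/sh
    # Hypothetical: give the attached etcd volume up to 60 one-second attempts
    # to appear as a block device before failing the fragment.
    device=/dev/vdb   # assumed path, illustration only
    attempts=60
    until [ -b "$device" ] || [ "$attempts" -eq 0 ]; do
        attempts=$((attempts - 1))
        sleep 1
    done
    [ -b "$device" ] || { echo "etcd volume never appeared"; exit 1; }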

View File

@@ -1,5 +1,8 @@
#!/bin/sh
step="core-dns-service"
printf "Starting to run ${step}\n"
. /etc/sysconfig/heat-params
_dns_prefix=${CONTAINER_INFRA_PREFIX:-docker.io/coredns/}
@@ -245,19 +248,12 @@ spec:
EOF
}
# NOTE(flwang): Let's keep the same addons yaml file on all masters,
# but if it's not the primary/bootstrapping master, don't try to
# create those resources to avoid race condition issue until the
# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
# fixed.
if [ "$MASTER_INDEX" != "0" ]; then
exit 0
fi
echo "Waiting for Kubernetes API..."
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
sleep 5
done
kubectl create --validate=false -f $CORE_DNS
kubectl apply --validate=false -f $CORE_DNS
printf "Finished running ${step}\n"

View File

@@ -1,4 +1,7 @@
#!/bin/bash
#!/bin/sh
step="enable-ingress-controller"
printf "Starting to run ${step}\n"
# Enables the specified ingress controller.
#
@@ -21,3 +24,5 @@ EOF
if [ "$(echo $INGRESS_CONTROLLER | tr '[:upper:]' '[:lower:]')" = "traefik" ]; then
$enable-ingress-traefik
fi
printf "Finished running ${step}\n"

View File

@@ -102,15 +102,6 @@ writeFile $INGRESS_TRAEFIK_MANIFEST "$INGRESS_TRAEFIK_MANIFEST_CONTENT"
INGRESS_TRAEFIK_BIN="/srv/magnum/kubernetes/bin/ingress-traefik"
INGRESS_TRAEFIK_SERVICE="/etc/systemd/system/ingress-traefik.service"
# NOTE(flwang): Let's keep the same addons yaml file on all masters,
# but if it's not the primary/bootstrapping master, don't try to
# create those resources to avoid race condition issue until the
# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
# fixed.
if [ "$MASTER_INDEX" != "0" ]; then
exit 0
fi
# Binary for ingress traefik
INGRESS_TRAEFIK_BIN_CONTENT='''#!/bin/sh
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]

View File

@@ -1,4 +1,7 @@
#!/bin/bash
#!/bin/sh
step="enable-prometheus-monitoring"
printf "Starting to run ${step}\n"
. /etc/sysconfig/heat-params
@@ -348,137 +351,135 @@ writeFile $grafanaService_file "$grafanaService_content"
. /etc/sysconfig/heat-params
# NOTE(flwang): Let's keep the same addons yaml file on all masters,
# but if it's not the primary/bootstrapping master, don't try to
# create those resources to avoid race condition issue until the
# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
# fixed.
if [ "$MASTER_INDEX" != "0" ]; then
exit 0
fi
if [ "$(echo $PROMETHEUS_MONITORING | tr '[:upper:]' '[:lower:]')" = "true" ]; then
PROMETHEUS_MON_BASE_DIR="/srv/magnum/kubernetes/monitoring"
KUBE_MON_BIN=${PROMETHEUS_MON_BASE_DIR}"/bin/kube-enable-monitoring"
KUBE_MON_SERVICE="/etc/systemd/system/kube-enable-monitoring.service"
GRAFANA_DEF_DASHBOARDS=${PROMETHEUS_MON_BASE_DIR}"/dashboards"
GRAFANA_DEF_DASHBOARD_FILE=$GRAFANA_DEF_DASHBOARDS"/default.json"
if [ "$(echo $PROMETHEUS_MONITORING | tr '[:upper:]' '[:lower:]')" = "false" ]; then
exit 0
fi
# Write the binary for enable-monitoring
KUBE_MON_BIN_CONTENT='''#!/bin/sh
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
echo "Waiting for Kubernetes API..."
sleep 5
done
PROMETHEUS_MON_BASE_DIR="/srv/magnum/kubernetes/monitoring"
KUBE_MON_BIN=${PROMETHEUS_MON_BASE_DIR}"/bin/kube-enable-monitoring"
KUBE_MON_SERVICE="/etc/systemd/system/kube-enable-monitoring.service"
GRAFANA_DEF_DASHBOARDS=${PROMETHEUS_MON_BASE_DIR}"/dashboards"
GRAFANA_DEF_DASHBOARD_FILE=$GRAFANA_DEF_DASHBOARDS"/default.json"
# Write the binary for enable-monitoring
KUBE_MON_BIN_CONTENT='''#!/bin/sh
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
echo "Waiting for Kubernetes API..."
sleep 5
done
# Check if all resources exist already before creating them
# Check if configmap Prometheus exists
kubectl get configmap prometheus -n prometheus-monitoring
if [ "$?" != "0" ] && \
[ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/prometheusConfigMap.yaml" ]; then
kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/prometheusConfigMap.yaml
fi
# Check if deployment and service Prometheus exist
kubectl get service prometheus -n prometheus-monitoring | kubectl get deployment prometheus -n prometheus-monitoring
if [ "${PIPESTATUS[0]}" != "0" ] && [ "${PIPESTATUS[1]}" != "0" ] && \
[ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/prometheusService.yaml" ]; then
kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/prometheusService.yaml
fi
# Check if configmap graf-dash exists
kubectl get configmap graf-dash -n prometheus-monitoring
if [ "$?" != "0" ] && \
[ -f '''$GRAFANA_DEF_DASHBOARD_FILE''' ]; then
kubectl create configmap graf-dash --from-file='''$GRAFANA_DEF_DASHBOARD_FILE''' -n prometheus-monitoring
fi
# Check if deployment and service Grafana exist
kubectl get service grafana -n prometheus-monitoring | kubectl get deployment grafana -n prometheus-monitoring
if [ "${PIPESTATUS[0]}" != "0" ] && [ "${PIPESTATUS[1]}" != "0" ] && \
[ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/grafanaService.yaml" ]; then
kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/grafanaService.yaml
fi
# Wait for Grafana pod and then inject data source
while true
do
echo "Waiting for Grafana pod to be up and Running"
if [ "$(kubectl get po -n prometheus-monitoring -l name=grafana -o jsonpath={..phase})" = "Running" ]; then
break
# Check if prometheus-monitoring namespace exist already before creating the namespace
kubectl get namespace prometheus-monitoring
if [ "$?" != "0" ] && \
[ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/prometheusNamespace.yaml" ]; then
kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/prometheusNamespace.yaml
fi
sleep 2
done
# Which node is running Grafana
NODE_IP=`kubectl get po -n prometheus-monitoring -o jsonpath={.items[0].status.hostIP} -l name=grafana`
PROM_SERVICE_IP=`kubectl get svc prometheus --namespace prometheus-monitoring -o jsonpath={..clusterIP}`
# Check if all resources exist already before creating them
# Check if configmap Prometheus exists
kubectl get configmap prometheus -n prometheus-monitoring
if [ "$?" != "0" ] && \
[ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/prometheusConfigMap.yaml" ]; then
kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/prometheusConfigMap.yaml
fi
# The Grafana pod might be running but the app might still be initiating
echo "Check if Grafana is ready..."
curl --user admin:$ADMIN_PASSWD -X GET http://$NODE_IP:3000/api/datasources/1
until [ $? -eq 0 ]
do
sleep 2
# Check if deployment and service Prometheus exist
kubectl get service prometheus -n prometheus-monitoring | kubectl get deployment prometheus -n prometheus-monitoring
if [ "${PIPESTATUS[0]}" != "0" ] && [ "${PIPESTATUS[1]}" != "0" ] && \
[ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/prometheusService.yaml" ]; then
kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/prometheusService.yaml
fi
# Check if configmap graf-dash exists
kubectl get configmap graf-dash -n prometheus-monitoring
if [ "$?" != "0" ] && \
[ -f '''$GRAFANA_DEF_DASHBOARD_FILE''' ]; then
kubectl create configmap graf-dash --from-file='''$GRAFANA_DEF_DASHBOARD_FILE''' -n prometheus-monitoring
fi
# Check if deployment and service Grafana exist
kubectl get service grafana -n prometheus-monitoring | kubectl get deployment grafana -n prometheus-monitoring
if [ "${PIPESTATUS[0]}" != "0" ] && [ "${PIPESTATUS[1]}" != "0" ] && \
[ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/grafanaService.yaml" ]; then
kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/grafanaService.yaml
fi
# Wait for Grafana pod and then inject data source
while true
do
echo "Waiting for Grafana pod to be up and Running"
if [ "$(kubectl get po -n prometheus-monitoring -l name=grafana -o jsonpath={..phase})" = "Running" ]; then
break
fi
sleep 2
done
# Which node is running Grafana
NODE_IP=`kubectl get po -n prometheus-monitoring -o jsonpath={.items[0].status.hostIP} -l name=grafana`
PROM_SERVICE_IP=`kubectl get svc prometheus --namespace prometheus-monitoring -o jsonpath={..clusterIP}`
# The Grafana pod might be running but the app might still be initiating
echo "Check if Grafana is ready..."
curl --user admin:$ADMIN_PASSWD -X GET http://$NODE_IP:3000/api/datasources/1
done
until [ $? -eq 0 ]
do
sleep 2
curl --user admin:$ADMIN_PASSWD -X GET http://$NODE_IP:3000/api/datasources/1
done
# Inject Prometheus datasource into Grafana
while true
do
INJECT=`curl --user admin:$ADMIN_PASSWD -X POST \
-H "Content-Type: application/json;charset=UTF-8" \
--data-binary '''"'"'''{"name":"k8sPrometheus","isDefault":true,
"type":"prometheus","url":"http://'''"'"'''$PROM_SERVICE_IP'''"'"''':9090","access":"proxy"}'''"'"'''\
"http://$NODE_IP:3000/api/datasources/"`
# Inject Prometheus datasource into Grafana
while true
do
INJECT=`curl --user admin:$ADMIN_PASSWD -X POST \
-H "Content-Type: application/json;charset=UTF-8" \
--data-binary '''"'"'''{"name":"k8sPrometheus","isDefault":true,
"type":"prometheus","url":"http://'''"'"'''$PROM_SERVICE_IP'''"'"''':9090","access":"proxy"}'''"'"'''\
"http://$NODE_IP:3000/api/datasources/"`
if [[ "$INJECT" = *"Datasource added"* ]]; then
echo "Prometheus datasource injected into Grafana"
break
if [[ "$INJECT" = *"Datasource added"* ]]; then
echo "Prometheus datasource injected into Grafana"
break
fi
echo "Trying to inject Prometheus datasource into Grafana - "$INJECT
done
'''
writeFile $KUBE_MON_BIN "$KUBE_MON_BIN_CONTENT"
# Write the monitoring service
KUBE_MON_SERVICE_CONTENT='''[Unit]
Description=Enable Prometheus monitoring stack
[Service]
Type=oneshot
Environment=HOME=/root
EnvironmentFile=-/etc/kubernetes/config
ExecStart='''${KUBE_MON_BIN}'''
[Install]
WantedBy=multi-user.target
'''
writeFile $KUBE_MON_SERVICE "$KUBE_MON_SERVICE_CONTENT"
chown root:root ${KUBE_MON_BIN}
chmod 0755 ${KUBE_MON_BIN}
chown root:root ${KUBE_MON_SERVICE}
chmod 0644 ${KUBE_MON_SERVICE}
# Download the default JSON Grafana dashboard
# Not a crucial step, so allow it to fail
# TODO: this JSON should be passed into the minions as gzip in cloud-init
GRAFANA_DASHB_URL="https://grafana.net/api/dashboards/1621/revisions/1/download"
mkdir -p $GRAFANA_DEF_DASHBOARDS
curl $GRAFANA_DASHB_URL -o $GRAFANA_DEF_DASHBOARD_FILE || echo "Failed to fetch default Grafana dashboard"
if [ -f $GRAFANA_DEF_DASHBOARD_FILE ]; then
sed -i -- 's|${DS_PROMETHEUS}|k8sPrometheus|g' $GRAFANA_DEF_DASHBOARD_FILE
fi
echo "Trying to inject Prometheus datasource into Grafana - "$INJECT
done
'''
writeFile $KUBE_MON_BIN "$KUBE_MON_BIN_CONTENT"
# Write the monitoring service
KUBE_MON_SERVICE_CONTENT='''[Unit]
Description=Enable Prometheus monitoring stack
[Service]
Type=oneshot
Environment=HOME=/root
EnvironmentFile=-/etc/kubernetes/config
ExecStart='''${KUBE_MON_BIN}'''
[Install]
WantedBy=multi-user.target
'''
writeFile $KUBE_MON_SERVICE "$KUBE_MON_SERVICE_CONTENT"
chown root:root ${KUBE_MON_BIN}
chmod 0755 ${KUBE_MON_BIN}
chown root:root ${KUBE_MON_SERVICE}
chmod 0644 ${KUBE_MON_SERVICE}
# Download the default JSON Grafana dashboard
# Not a crucial step, so allow it to fail
# TODO: this JSON should be passed into the minions as gzip in cloud-init
GRAFANA_DASHB_URL="https://grafana.net/api/dashboards/1621/revisions/1/download"
mkdir -p $GRAFANA_DEF_DASHBOARDS
curl $GRAFANA_DASHB_URL -o $GRAFANA_DEF_DASHBOARD_FILE || echo "Failed to fetch default Grafana dashboard"
if [ -f $GRAFANA_DEF_DASHBOARD_FILE ]; then
sed -i -- 's|${DS_PROMETHEUS}|k8sPrometheus|g' $GRAFANA_DEF_DASHBOARD_FILE
# Launch the monitoring service
set -x
systemctl daemon-reload
systemctl enable kube-enable-monitoring.service
systemctl start --no-block kube-enable-monitoring.service
fi
# Launch the monitoring service
set -x
systemctl daemon-reload
systemctl enable kube-enable-monitoring.service
systemctl start --no-block kube-enable-monitoring.service
printf "Finished running ${step}\n"

View File

@@ -1,16 +1,10 @@
#!/bin/sh -x
#!/bin/sh
step="kube-apiserver-to-kubelet-role"
printf "Starting to run ${step}\n"
. /etc/sysconfig/heat-params
# NOTE(flwang): Let's keep the same addons yaml file on all masters,
# but if it's not the primary/bootstrapping master, don't try to
# create those resources to avoid race condition issue until the
# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
# fixed.
if [ "$MASTER_INDEX" != "0" ]; then
exit 0
fi
echo "Waiting for Kubernetes API..."
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
@@ -84,3 +78,5 @@ EOF
}
kubectl apply --validate=false -f ${ADMIN_RBAC}
printf "Finished running ${step}\n"

View File

@@ -1,20 +1,20 @@
#!/bin/bash -x
#!/bin/sh
step="kube-dashboard-service"
printf "Starting to run ${step}\n"
. /etc/sysconfig/heat-params
if [ "$(echo $KUBE_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "false" ]; then
exit 0
fi
if [ "$(echo $KUBE_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "true" ]; then
KUBE_DASH_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}kubernetes-dashboard-amd64:${KUBE_DASHBOARD_VERSION}"
HEAPSTER_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-amd64:v1.4.2"
KUBE_DASH_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}kubernetes-dashboard-amd64:${KUBE_DASHBOARD_VERSION}"
HEAPSTER_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-amd64:v1.4.2"
KUBE_DASH_DEPLOY=/srv/magnum/kubernetes/kubernetes-dashboard.yaml
KUBE_DASH_DEPLOY=/srv/magnum/kubernetes/kubernetes-dashboard.yaml
[ -f ${KUBE_DASH_DEPLOY} ] || {
echo "Writing File: $KUBE_DASH_DEPLOY"
mkdir -p $(dirname ${KUBE_DASH_DEPLOY})
cat << EOF > ${KUBE_DASH_DEPLOY}
[ -f ${KUBE_DASH_DEPLOY} ] || {
echo "Writing File: $KUBE_DASH_DEPLOY"
mkdir -p $(dirname ${KUBE_DASH_DEPLOY})
cat << EOF > ${KUBE_DASH_DEPLOY}
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -197,22 +197,22 @@ spec:
selector:
k8s-app: kubernetes-dashboard
EOF
}
}
INFLUX_SINK=""
# Deploy INFLUX AND GRAFANA
if [ "$(echo $INFLUX_GRAFANA_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "true" ]; then
INFLUX_SINK=" - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086"
INFLUX_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-influxdb-amd64:v1.3.3"
GRAFANA_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-grafana-amd64:v4.4.3"
INFLUX_SINK=""
# Deploy INFLUX AND GRAFANA
if [ "$(echo $INFLUX_GRAFANA_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "true" ]; then
INFLUX_SINK=" - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086"
INFLUX_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-influxdb-amd64:v1.3.3"
GRAFANA_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-grafana-amd64:v4.4.3"
INFLUX_DEPLOY=/srv/magnum/kubernetes/influxdb.yaml
GRAFANA_DEPLOY=/srv/magnum/kubernetes/grafana.yaml
INFLUX_DEPLOY=/srv/magnum/kubernetes/influxdb.yaml
GRAFANA_DEPLOY=/srv/magnum/kubernetes/grafana.yaml
[ -f ${INFLUX_DEPLOY} ] || {
echo "Writing File: $INFLUX_DEPLOY"
mkdir -p $(dirname ${INFLUX_DEPLOY})
cat << EOF > ${INFLUX_DEPLOY}
[ -f ${INFLUX_DEPLOY} ] || {
echo "Writing File: $INFLUX_DEPLOY"
mkdir -p $(dirname ${INFLUX_DEPLOY})
cat << EOF > ${INFLUX_DEPLOY}
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
@@ -254,12 +254,12 @@ spec:
selector:
k8s-app: influxdb
EOF
}
}
[ -f ${GRAFANA_DEPLOY} ] || {
echo "Writing File: $GRAFANA_DEPLOY"
mkdir -p $(dirname ${GRAFANA_DEPLOY})
cat << EOF > ${GRAFANA_DEPLOY}
[ -f ${GRAFANA_DEPLOY} ] || {
echo "Writing File: $GRAFANA_DEPLOY"
mkdir -p $(dirname ${GRAFANA_DEPLOY})
cat << EOF > ${GRAFANA_DEPLOY}
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
@@ -333,31 +333,25 @@ spec:
selector:
k8s-app: grafana
EOF
}
}
if [ "$MASTER_INDEX" != "0" ]; then
exit 0
echo "Waiting for Kubernetes API..."
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
sleep 5
done
kubectl apply --validate=false -f $INFLUX_DEPLOY
kubectl apply --validate=false -f $GRAFANA_DEPLOY
fi
# Deploy Heapster
HEAPSTER_DEPLOY=/srv/magnum/kubernetes/heapster-controller.yaml
echo "Waiting for Kubernetes API..."
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
sleep 5
done
kubectl apply --validate=false -f $INFLUX_DEPLOY
kubectl apply --validate=false -f $GRAFANA_DEPLOY
fi
# Deploy Heapster
HEAPSTER_DEPLOY=/srv/magnum/kubernetes/heapster-controller.yaml
[ -f ${HEAPSTER_DEPLOY} ] || {
echo "Writing File: $HEAPSTER_DEPLOY"
mkdir -p $(dirname ${HEAPSTER_DEPLOY})
cat << EOF > ${HEAPSTER_DEPLOY}
[ -f ${HEAPSTER_DEPLOY} ] || {
echo "Writing File: $HEAPSTER_DEPLOY"
mkdir -p $(dirname ${HEAPSTER_DEPLOY})
cat << EOF > ${HEAPSTER_DEPLOY}
apiVersion: v1
kind: ServiceAccount
metadata:
@@ -452,23 +446,16 @@ subjects:
name: heapster
namespace: kube-system
EOF
}
}
# NOTE(flwang): Let's keep the same addons yaml file on all masters,
# but if it's not the primary/bootstrapping master, don't try to
# create those resources to avoid race condition issue until the
# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
# fixed.
echo "Waiting for Kubernetes API..."
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
sleep 5
done
if [ "$MASTER_INDEX" != "0" ]; then
exit 0
kubectl apply --validate=false -f $KUBE_DASH_DEPLOY
kubectl apply --validate=false -f $HEAPSTER_DEPLOY
fi
echo "Waiting for Kubernetes API..."
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
sleep 5
done
kubectl apply --validate=false -f $KUBE_DASH_DEPLOY
kubectl apply --validate=false -f $HEAPSTER_DEPLOY
printf "Finished running ${step}\n"

View File

@@ -5,7 +5,6 @@ write_files:
owner: "root:root"
permissions: "0600"
content: |
MASTER_INDEX="$MASTER_INDEX"
PROMETHEUS_MONITORING="$PROMETHEUS_MONITORING"
KUBE_API_PUBLIC_ADDRESS="$KUBE_API_PUBLIC_ADDRESS"
KUBE_API_PRIVATE_ADDRESS="$KUBE_API_PRIVATE_ADDRESS"
@@ -42,7 +41,6 @@ write_files:
HTTP_PROXY="$HTTP_PROXY"
HTTPS_PROXY="$HTTPS_PROXY"
NO_PROXY="$NO_PROXY"
WAIT_CURL="$WAIT_CURL"
KUBE_TAG="$KUBE_TAG"
ETCD_TAG="$ETCD_TAG"
FLANNEL_TAG="$FLANNEL_TAG"
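
MASTER_INDEX and WAIT_CURL disappear from the params file because no fragment consults them any more. For context, the mechanics of this file: Heat's str_replace fills in the $PLACEHOLDERS, cloud-init's write_files lands the result at /etc/sysconfig/heat-params, and every fragment sources it. A minimal sketch with illustrative values:

    # Sketch of the rendered file and how fragments consume it;
    # the values below are illustrative, not from this change.
    cat > /etc/sysconfig/heat-params << 'EOF'
    PROMETHEUS_MONITORING="false"
    KUBE_API_PUBLIC_ADDRESS="172.24.4.10"
    EOF
    . /etc/sysconfig/heat-params
    echo "monitoring=${PROMETHEUS_MONITORING}"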

View File

@@ -622,7 +622,6 @@ resources:
list_join:
- '-'
- [{ get_param: 'OS::stack_name' }, 'master', '%index%']
master_index: '%index%'
prometheus_monitoring: {get_param: prometheus_monitoring}
grafana_admin_passwd: {get_param: grafana_admin_passwd}
api_public_address: {get_attr: [api_lb, floating_address]}
@@ -637,7 +636,6 @@ resources:
docker_volume_type: {get_param: docker_volume_type}
docker_storage_driver: {get_param: docker_storage_driver}
cgroup_driver: {get_param: cgroup_driver}
wait_condition_timeout: {get_param: wait_condition_timeout}
network_driver: {get_param: network_driver}
flannel_network_cidr: {get_param: flannel_network_cidr}
flannel_network_subnetlen: {get_param: flannel_network_subnetlen}
@@ -700,6 +698,41 @@ resources:
kubescheduler_options: {get_param: kubescheduler_options}
octavia_enabled: {get_param: octavia_enabled}
kube_cluster_config:
type: OS::Heat::SoftwareConfig
properties:
group: script
config:
list_join:
- "\n"
-
- get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh
- str_replace:
template: {get_file: ../../common/templates/kubernetes/fragments/enable-cert-api-manager.sh}
params:
"$CA_KEY": {get_param: ca_key}
- get_file: ../../common/templates/kubernetes/fragments/core-dns-service.sh
- str_replace:
template: {get_file: ../../common/templates/kubernetes/fragments/enable-prometheus-monitoring.sh}
params:
"$ADMIN_PASSWD": {get_param: grafana_admin_passwd}
- get_file: ../../common/templates/kubernetes/fragments/calico-service.sh
- str_replace:
params:
$enable-ingress-traefik: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-traefik.sh}
template: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-controller.sh}
- get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh
kube_cluster_deploy:
type: OS::Heat::SoftwareDeployment
properties:
actions: ['CREATE']
signal_transport: HEAT_SIGNAL
config:
get_resource: kube_cluster_config
server:
get_attr: [kube_masters, resource.0]
######################################################################
#
# kubernetes minions. This is an resource group that will initially
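
This is the heart of the speedup: the per-addon SoftwareDeployments are replaced by a single kube_cluster_config, whose script is the listed fragments joined with newlines (str_replace fills in $CA_KEY, $ADMIN_PASSWD and the traefik body first), and a single kube_cluster_deploy that runs it — signalling via HEAT_SIGNAL — on resource.0 of kube_masters only. Worker creation no longer waits on any of it. The join can be approximated locally from the fragments directory the template references:

    # Approximate what list_join produces (placeholders still unsubstituted):
    cd common/templates/kubernetes/fragments
    cat kube-apiserver-to-kubelet-role.sh \
        enable-cert-api-manager.sh \
        core-dns-service.sh \
        enable-prometheus-monitoring.sh \
        calico-service.sh \
        enable-ingress-controller.sh \
        kube-dashboard-service.sh > /tmp/kube-cluster-config.sh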

View File

@@ -178,11 +178,6 @@ parameters:
type: string
description: network driver to use for instantiating container networks
wait_condition_timeout:
type: number
description : >
timeout for the Wait Conditions
secgroup_kube_master_id:
type: string
description: ID of the security group for kubernetes master.
@@ -367,28 +362,11 @@ parameters:
whether or not to use Octavia for LoadBalancer type service.
default: False
master_index:
type: string
description: >
the index of master node, index 0 means the master node is the primary,
bootstrapping node.
cloud_provider_enabled:
type: boolean
description: Enable or disable the openstack kubernetes cloud provider
resources:
master_wait_handle:
type: OS::Heat::WaitConditionHandle
master_wait_condition:
type: OS::Heat::WaitCondition
depends_on: kube-master
properties:
handle: {get_resource: master_wait_handle}
timeout: {get_param: wait_condition_timeout}
######################################################################
#
# resource that exposes the IPs of either the kube master or the API
@@ -417,7 +395,6 @@ resources:
str_replace:
template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params-master.yaml}
params:
"$MASTER_INDEX": {get_param: master_index}
"$PROMETHEUS_MONITORING": {get_param: prometheus_monitoring}
"$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]}
"$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]}
@@ -461,7 +438,6 @@ resources:
"$FLANNEL_TAG": {get_param: flannel_tag}
"$KUBE_VERSION": {get_param: kube_version}
"$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version}
"$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]}
"$TRUSTEE_USER_ID": {get_param: trustee_user_id}
"$TRUSTEE_PASSWORD": {get_param: trustee_password}
"$TRUST_ID": {get_param: trust_id}
@@ -548,24 +524,6 @@ resources:
group: ungrouped
config: {get_file: ../../common/templates/kubernetes/fragments/enable-services-master.sh}
kube_apiserver_to_kubelet_role:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh}
core_dns_service:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: ../../common/templates/kubernetes/fragments/core-dns-service.sh}
master_wc_notify:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: ../../common/templates/kubernetes/fragments/wc-notify-master.sh}
disable_selinux:
type: OS::Heat::SoftwareConfig
properties:
@@ -601,91 +559,6 @@ resources:
- config: {get_resource: enable_services}
- config: {get_resource: write_flannel_config}
- config: {get_resource: flannel_config_service}
- config: {get_resource: kube_apiserver_to_kubelet_role}
- config: {get_resource: core_dns_service}
- config: {get_resource: master_wc_notify}
enable_prometheus_monitoring:
type: OS::Heat::SoftwareConfig
properties:
group: script
config:
str_replace:
template: {get_file: ../../common/templates/kubernetes/fragments/enable-prometheus-monitoring.sh}
params:
"$ADMIN_PASSWD": {get_param: grafana_admin_passwd}
enable_prometheus_monitoring_deployment:
type: OS::Heat::SoftwareDeployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: enable_prometheus_monitoring}
server: {get_resource: kube-master}
actions: ['CREATE']
enable_cert_manager_api:
type: OS::Heat::SoftwareConfig
properties:
group: script
config:
str_replace:
template: {get_file: ../../common/templates/kubernetes/fragments/enable-cert-api-manager.sh}
params:
"$CA_KEY": {get_param: ca_key}
enable_cert_manager_api_deployment:
type: OS::Heat::SoftwareDeployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: enable_cert_manager_api}
server: {get_resource: kube-master}
actions: ['CREATE']
calico_service:
type: OS::Heat::SoftwareConfig
properties:
group: script
config: {get_file: ../../common/templates/kubernetes/fragments/calico-service.sh}
calico_service_deployment:
type: OS::Heat::SoftwareDeployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: calico_service}
server: {get_resource: kube-master}
actions: ['CREATE']
enable_ingress_controller:
type: OS::Heat::SoftwareConfig
properties:
group: script
config:
str_replace:
params:
$enable-ingress-traefik: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-traefik.sh}
template: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-controller.sh}
enable_ingress_controller_deployment:
type: OS::Heat::SoftwareDeployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: enable_ingress_controller}
server: {get_resource: kube-master}
actions: ['CREATE']
kubernetes_dashboard:
type: OS::Heat::SoftwareConfig
properties:
group: script
config: {get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh}
kubernetes_dashboard_deployment:
type: OS::Heat::SoftwareDeployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: kubernetes_dashboard}
server: {get_resource: kube-master}
actions: ['CREATE']
######################################################################
#
@@ -783,6 +656,9 @@ resources:
outputs:
OS::stack_id:
value: { get_resource: kube-master }
kube_master_ip:
value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]}
description: >
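
The deleted master_wait_handle/master_wait_condition pair was the old synchronization point: each master ran the curl command Heat generated for the handle (exposed to the instance as $WAIT_CURL via the wc-notify fragment), and the stack — and therefore the minions — blocked until every master had signalled. With this change, the only CREATE-time signal left is the HEAT_SIGNAL of kube_cluster_deploy in kubecluster.yaml. What the removed signalling amounted to:

    # wc-notify-master.sh, in essence: tell Heat this master finished booting.
    # $WAIT_CURL expands to the wait handle's pre-authenticated curl command.
    $WAIT_CURL --data-binary '{"status": "SUCCESS"}'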

View File

@@ -0,0 +1,5 @@
features:
- |
Kubernetes worker installation now starts right after the master instances
are created, rather than waiting for all of the services on the masters to
come up, which can decrease Kubernetes cluster launch time significantly.