[k8s] Cluster creation speedup

- Start workers as soon as the master VM is created, rather than
  waiting for all the services on the master to be ready.
- Move all the SoftwareDeployment resources out of the kubemaster stack.
- Tweak the scripts in the SoftwareDeployments so that they can be
  combined into a single script.

Story: 2004573
Task: 28347
Change-Id: Ie48861253615c8f60b34a2c1e9ad6b91d3ae685e
Co-Authored-By: Lingxian Kong <anlin.kong@gmail.com>
(cherry picked from commit cae7fa21b6)
Lingxian Kong 2018-12-13 14:06:45 +13:00 committed by Bharat Kunwar
parent 013dbdbced
commit 9e973c12e9
12 changed files with 265 additions and 381 deletions
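A note on the third bullet above: once the fragment scripts are concatenated
into one SoftwareConfig, a fragment can no longer skip itself with an early
"exit 0" without also aborting every fragment queued after it, which is why
the hunks below replace the exit-0 guards with if/fi wrapping and add the
"Starting/Finished running" markers around each step. A minimal sketch of the
combined-config pattern (fragment paths are shortened here for illustration;
the full list appears in the cluster template hunk further down):

    kube_cluster_config:
      type: OS::Heat::SoftwareConfig
      properties:
        group: script
        config:
          list_join:
            - "\n"
            -
              - get_file: fragments/kube-apiserver-to-kubelet-role.sh
              - get_file: fragments/core-dns-service.sh
              - get_file: fragments/calico-service.sh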

View File

@@ -1,24 +1,24 @@
 #!/bin/sh
+step="calico-service"
+printf "Starting to run ${step}\n"
+
 . /etc/sysconfig/heat-params

-if [ "$NETWORK_DRIVER" != "calico" ]; then
-    exit 0
-fi
-
-_prefix=${CONTAINER_INFRA_PREFIX:-quay.io/calico/}
-ETCD_SERVER_IP=${ETCD_LB_VIP:-$KUBE_NODE_IP}
-CERT_DIR=/etc/kubernetes/certs
-ETCD_CA=`cat ${CERT_DIR}/ca.crt | base64 | tr -d '\n'`
-ETCD_CERT=`cat ${CERT_DIR}/server.crt | base64 | tr -d '\n'`
-ETCD_KEY=`cat ${CERT_DIR}/server.key | base64 | tr -d '\n'`
-CALICO_DEPLOY=/srv/magnum/kubernetes/manifests/calico-deploy.yaml
-
-[ -f ${CALICO_DEPLOY} ] || {
-    echo "Writing File: $CALICO_DEPLOY"
-    mkdir -p $(dirname ${CALICO_DEPLOY})
-    cat << EOF > ${CALICO_DEPLOY}
+if [ "$NETWORK_DRIVER" = "calico" ]; then
+    _prefix=${CONTAINER_INFRA_PREFIX:-quay.io/calico/}
+    ETCD_SERVER_IP=${ETCD_LB_VIP:-$KUBE_NODE_IP}
+    CERT_DIR=/etc/kubernetes/certs
+    ETCD_CA=`cat ${CERT_DIR}/ca.crt | base64 | tr -d '\n'`
+    ETCD_CERT=`cat ${CERT_DIR}/server.crt | base64 | tr -d '\n'`
+    ETCD_KEY=`cat ${CERT_DIR}/server.key | base64 | tr -d '\n'`
+    CALICO_DEPLOY=/srv/magnum/kubernetes/manifests/calico-deploy.yaml
+
+    [ -f ${CALICO_DEPLOY} ] || {
+        echo "Writing File: $CALICO_DEPLOY"
+        mkdir -p $(dirname ${CALICO_DEPLOY})
+        cat << EOF > ${CALICO_DEPLOY}
 # Calico Version v2.6.7
 # https://docs.projectcalico.org/v2.6/releases#v2.6.7
 # This manifest includes the following component versions:

@@ -445,21 +445,15 @@ subjects:
     name: calico-node
     namespace: kube-system
 EOF
 }
-# NOTE(flwang): Let's keep the same addons yaml file on all masters,
-# but if it's not the primary/bootstrapping master, don't try to
-# create those resources to avoid race condition issue until the
-# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
-# fixed.
-if [ "$MASTER_INDEX" != "0" ]; then
-    exit 0
-fi
-
-until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
-do
-    echo "Waiting for Kubernetes API..."
-    sleep 5
-done
-/usr/bin/kubectl apply -f ${CALICO_DEPLOY} --namespace=kube-system
+    until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
+    do
+        echo "Waiting for Kubernetes API..."
+        sleep 5
+    done
+    /usr/bin/kubectl apply -f ${CALICO_DEPLOY} --namespace=kube-system
+fi
+
+printf "Finished running ${step}\n"

View File

@@ -2,6 +2,8 @@
 . /etc/sysconfig/heat-params

+set -x
+
 if [ -n "$ETCD_VOLUME_SIZE" ] && [ "$ETCD_VOLUME_SIZE" -gt 0 ]; then
     attempts=60

View File

@@ -1,5 +1,8 @@
 #!/bin/sh
+step="core-dns-service"
+printf "Starting to run ${step}\n"
+
 . /etc/sysconfig/heat-params

 _dns_prefix=${CONTAINER_INFRA_PREFIX:-docker.io/coredns/}

@@ -245,19 +248,12 @@ spec:
 EOF
 }
-# NOTE(flwang): Let's keep the same addons yaml file on all masters,
-# but if it's not the primary/bootstrapping master, don't try to
-# create those resources to avoid race condition issue until the
-# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
-# fixed.
-if [ "$MASTER_INDEX" != "0" ]; then
-    exit 0
-fi
 echo "Waiting for Kubernetes API..."
 until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
 do
     sleep 5
 done
-kubectl create --validate=false -f $CORE_DNS
+kubectl apply --validate=false -f $CORE_DNS
+
+printf "Finished running ${step}\n"

View File

@@ -1,4 +1,7 @@
-#!/bin/bash
+#!/bin/sh
+step="enable-ingress-controller"
+printf "Starting to run ${step}\n"

 # Enables the specified ingress controller.
 #

@@ -21,3 +24,5 @@ EOF
 if [ "$(echo $INGRESS_CONTROLLER | tr '[:upper:]' '[:lower:]')" = "traefik" ]; then
     $enable-ingress-traefik
 fi
+
+printf "Finished running ${step}\n"

View File

@@ -102,15 +102,6 @@ writeFile $INGRESS_TRAEFIK_MANIFEST "$INGRESS_TRAEFIK_MANIFEST_CONTENT"
 INGRESS_TRAEFIK_BIN="/srv/magnum/kubernetes/bin/ingress-traefik"
 INGRESS_TRAEFIK_SERVICE="/etc/systemd/system/ingress-traefik.service"

-# NOTE(flwang): Let's keep the same addons yaml file on all masters,
-# but if it's not the primary/bootstrapping master, don't try to
-# create those resources to avoid race condition issue until the
-# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
-# fixed.
-if [ "$MASTER_INDEX" != "0" ]; then
-    exit 0
-fi
-
 # Binary for ingress traefik
 INGRESS_TRAEFIK_BIN_CONTENT='''#!/bin/sh
 until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]

View File

@@ -1,4 +1,7 @@
-#!/bin/bash
+#!/bin/sh
+step="enable-prometheus-monitoring"
+printf "Starting to run ${step}\n"

 . /etc/sysconfig/heat-params

@@ -348,88 +351,83 @@ writeFile $grafanaService_file "$grafanaService_content"
 . /etc/sysconfig/heat-params

-# NOTE(flwang): Let's keep the same addons yaml file on all masters,
-# but if it's not the primary/bootstrapping master, don't try to
-# create those resources to avoid race condition issue until the
-# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
-# fixed.
-if [ "$MASTER_INDEX" != "0" ]; then
-    exit 0
-fi
-
-if [ "$(echo $PROMETHEUS_MONITORING | tr '[:upper:]' '[:lower:]')" = "false" ]; then
-    exit 0
-fi
-
-PROMETHEUS_MON_BASE_DIR="/srv/magnum/kubernetes/monitoring"
-KUBE_MON_BIN=${PROMETHEUS_MON_BASE_DIR}"/bin/kube-enable-monitoring"
-KUBE_MON_SERVICE="/etc/systemd/system/kube-enable-monitoring.service"
-GRAFANA_DEF_DASHBOARDS=${PROMETHEUS_MON_BASE_DIR}"/dashboards"
-GRAFANA_DEF_DASHBOARD_FILE=$GRAFANA_DEF_DASHBOARDS"/default.json"
-
-# Write the binary for enable-monitoring
-KUBE_MON_BIN_CONTENT='''#!/bin/sh
+if [ "$(echo $PROMETHEUS_MONITORING | tr '[:upper:]' '[:lower:]')" = "true" ]; then
+    PROMETHEUS_MON_BASE_DIR="/srv/magnum/kubernetes/monitoring"
+    KUBE_MON_BIN=${PROMETHEUS_MON_BASE_DIR}"/bin/kube-enable-monitoring"
+    KUBE_MON_SERVICE="/etc/systemd/system/kube-enable-monitoring.service"
+    GRAFANA_DEF_DASHBOARDS=${PROMETHEUS_MON_BASE_DIR}"/dashboards"
+    GRAFANA_DEF_DASHBOARD_FILE=$GRAFANA_DEF_DASHBOARDS"/default.json"
+
+    # Write the binary for enable-monitoring
+    KUBE_MON_BIN_CONTENT='''#!/bin/sh
 until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
 do
     echo "Waiting for Kubernetes API..."
     sleep 5
 done

+# Check if prometheus-monitoring namespace exist already before creating the namespace
+kubectl get namespace prometheus-monitoring
+if [ "$?" != "0" ] && \
+    [ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/prometheusNamespace.yaml" ]; then
+    kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/prometheusNamespace.yaml
+fi
+
 # Check if all resources exist already before creating them
 # Check if configmap Prometheus exists
 kubectl get configmap prometheus -n prometheus-monitoring
 if [ "$?" != "0" ] && \
     [ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/prometheusConfigMap.yaml" ]; then
     kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/prometheusConfigMap.yaml
 fi

 # Check if deployment and service Prometheus exist
 kubectl get service prometheus -n prometheus-monitoring | kubectl get deployment prometheus -n prometheus-monitoring
 if [ "${PIPESTATUS[0]}" != "0" ] && [ "${PIPESTATUS[1]}" != "0" ] && \
     [ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/prometheusService.yaml" ]; then
     kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/prometheusService.yaml
 fi

 # Check if configmap graf-dash exists
 kubectl get configmap graf-dash -n prometheus-monitoring
 if [ "$?" != "0" ] && \
     [ -f '''$GRAFANA_DEF_DASHBOARD_FILE''' ]; then
     kubectl create configmap graf-dash --from-file='''$GRAFANA_DEF_DASHBOARD_FILE''' -n prometheus-monitoring
 fi

 # Check if deployment and service Grafana exist
 kubectl get service grafana -n prometheus-monitoring | kubectl get deployment grafana -n prometheus-monitoring
 if [ "${PIPESTATUS[0]}" != "0" ] && [ "${PIPESTATUS[1]}" != "0" ] && \
     [ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/grafanaService.yaml" ]; then
     kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/grafanaService.yaml
 fi

 # Wait for Grafana pod and then inject data source
 while true
 do
     echo "Waiting for Grafana pod to be up and Running"
     if [ "$(kubectl get po -n prometheus-monitoring -l name=grafana -o jsonpath={..phase})" = "Running" ]; then
         break
     fi
     sleep 2
 done

 # Which node is running Grafana
 NODE_IP=`kubectl get po -n prometheus-monitoring -o jsonpath={.items[0].status.hostIP} -l name=grafana`
 PROM_SERVICE_IP=`kubectl get svc prometheus --namespace prometheus-monitoring -o jsonpath={..clusterIP}`

 # The Grafana pod might be running but the app might still be initiating
 echo "Check if Grafana is ready..."
 curl --user admin:$ADMIN_PASSWD -X GET http://$NODE_IP:3000/api/datasources/1
 until [ $? -eq 0 ]
 do
     sleep 2
     curl --user admin:$ADMIN_PASSWD -X GET http://$NODE_IP:3000/api/datasources/1
 done

 # Inject Prometheus datasource into Grafana
 while true
 do
     INJECT=`curl --user admin:$ADMIN_PASSWD -X POST \
         -H "Content-Type: application/json;charset=UTF-8" \
         --data-binary '''"'"'''{"name":"k8sPrometheus","isDefault":true,

@@ -441,44 +439,47 @@ do
         break
     fi
     echo "Trying to inject Prometheus datasource into Grafana - "$INJECT
 done
 '''

 writeFile $KUBE_MON_BIN "$KUBE_MON_BIN_CONTENT"

 # Write the monitoring service
 KUBE_MON_SERVICE_CONTENT='''[Unit]
 Description=Enable Prometheus monitoring stack

 [Service]
 Type=oneshot
 Environment=HOME=/root
 EnvironmentFile=-/etc/kubernetes/config
 ExecStart='''${KUBE_MON_BIN}'''

 [Install]
 WantedBy=multi-user.target
 '''

 writeFile $KUBE_MON_SERVICE "$KUBE_MON_SERVICE_CONTENT"

 chown root:root ${KUBE_MON_BIN}
 chmod 0755 ${KUBE_MON_BIN}

 chown root:root ${KUBE_MON_SERVICE}
 chmod 0644 ${KUBE_MON_SERVICE}

 # Download the default JSON Grafana dashboard
 # Not a crucial step, so allow it to fail
 # TODO: this JSON should be passed into the minions as gzip in cloud-init
 GRAFANA_DASHB_URL="https://grafana.net/api/dashboards/1621/revisions/1/download"
 mkdir -p $GRAFANA_DEF_DASHBOARDS
 curl $GRAFANA_DASHB_URL -o $GRAFANA_DEF_DASHBOARD_FILE || echo "Failed to fetch default Grafana dashboard"
 if [ -f $GRAFANA_DEF_DASHBOARD_FILE ]; then
     sed -i -- 's|${DS_PROMETHEUS}|k8sPrometheus|g' $GRAFANA_DEF_DASHBOARD_FILE
 fi

-# Launch the monitoring service
-set -x
-systemctl daemon-reload
-systemctl enable kube-enable-monitoring.service
-systemctl start --no-block kube-enable-monitoring.service
+    # Launch the monitoring service
+    set -x
+    systemctl daemon-reload
+    systemctl enable kube-enable-monitoring.service
+    systemctl start --no-block kube-enable-monitoring.service
+fi
+
+printf "Finished running ${step}\n"

View File

@@ -1,16 +1,10 @@
-#!/bin/sh -x
+#!/bin/sh
+step="kube-apiserver-to-kubelet-role"
+printf "Starting to run ${step}\n"

 . /etc/sysconfig/heat-params

-# NOTE(flwang): Let's keep the same addons yaml file on all masters,
-# but if it's not the primary/bootstrapping master, don't try to
-# create those resources to avoid race condition issue until the
-# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
-# fixed.
-if [ "$MASTER_INDEX" != "0" ]; then
-    exit 0
-fi
-
 echo "Waiting for Kubernetes API..."
 until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
 do

@@ -84,3 +78,5 @@ EOF
 }

 kubectl apply --validate=false -f ${ADMIN_RBAC}
+
+printf "Finished running ${step}\n"

View File

@@ -1,17 +1,17 @@
-#!/bin/bash -x
+#!/bin/sh
+step="kube-dashboard-service"
+printf "Starting to run ${step}\n"

 . /etc/sysconfig/heat-params

-if [ "$(echo $KUBE_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "false" ]; then
-    exit 0
-fi
-
-KUBE_DASH_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}kubernetes-dashboard-amd64:${KUBE_DASHBOARD_VERSION}"
-HEAPSTER_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-amd64:v1.4.2"
-KUBE_DASH_DEPLOY=/srv/magnum/kubernetes/kubernetes-dashboard.yaml
-
-[ -f ${KUBE_DASH_DEPLOY} ] || {
+if [ "$(echo $KUBE_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "true" ]; then
+    KUBE_DASH_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}kubernetes-dashboard-amd64:${KUBE_DASHBOARD_VERSION}"
+    HEAPSTER_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-amd64:v1.4.2"
+    KUBE_DASH_DEPLOY=/srv/magnum/kubernetes/kubernetes-dashboard.yaml
+
+    [ -f ${KUBE_DASH_DEPLOY} ] || {
         echo "Writing File: $KUBE_DASH_DEPLOY"
         mkdir -p $(dirname ${KUBE_DASH_DEPLOY})
         cat << EOF > ${KUBE_DASH_DEPLOY}

@@ -197,11 +197,11 @@ spec:
   selector:
     k8s-app: kubernetes-dashboard
 EOF
 }

     INFLUX_SINK=""
     # Deploy INFLUX AND GRAFANA
     if [ "$(echo $INFLUX_GRAFANA_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "true" ]; then
         INFLUX_SINK=" - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086"
         INFLUX_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-influxdb-amd64:v1.3.3"
         GRAFANA_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-grafana-amd64:v4.4.3"

@@ -335,12 +335,6 @@ spec:
 EOF
 }

-if [ "$MASTER_INDEX" != "0" ]; then
-    exit 0
-fi
-
         echo "Waiting for Kubernetes API..."
         until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
         do

@@ -349,12 +343,12 @@ EOF
         kubectl apply --validate=false -f $INFLUX_DEPLOY
         kubectl apply --validate=false -f $GRAFANA_DEPLOY
     fi

     # Deploy Heapster
     HEAPSTER_DEPLOY=/srv/magnum/kubernetes/heapster-controller.yaml
     [ -f ${HEAPSTER_DEPLOY} ] || {
         echo "Writing File: $HEAPSTER_DEPLOY"
         mkdir -p $(dirname ${HEAPSTER_DEPLOY})
         cat << EOF > ${HEAPSTER_DEPLOY}

@@ -452,23 +446,16 @@ subjects:
     name: heapster
     namespace: kube-system
 EOF
 }

-# NOTE(flwang): Let's keep the same addons yaml file on all masters,
-# but if it's not the primary/bootstrapping master, don't try to
-# create those resources to avoid race condition issue until the
-# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
-# fixed.
-if [ "$MASTER_INDEX" != "0" ]; then
-    exit 0
-fi
-
-echo "Waiting for Kubernetes API..."
-until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
-do
-    sleep 5
-done
-kubectl apply --validate=false -f $KUBE_DASH_DEPLOY
-kubectl apply --validate=false -f $HEAPSTER_DEPLOY
+    echo "Waiting for Kubernetes API..."
+    until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
+    do
+        sleep 5
+    done
+    kubectl apply --validate=false -f $KUBE_DASH_DEPLOY
+    kubectl apply --validate=false -f $HEAPSTER_DEPLOY
+fi
+
+printf "Finished running ${step}\n"

View File

@@ -5,7 +5,6 @@ write_files:
     owner: "root:root"
     permissions: "0600"
     content: |
-      MASTER_INDEX="$MASTER_INDEX"
       PROMETHEUS_MONITORING="$PROMETHEUS_MONITORING"
       KUBE_API_PUBLIC_ADDRESS="$KUBE_API_PUBLIC_ADDRESS"
       KUBE_API_PRIVATE_ADDRESS="$KUBE_API_PRIVATE_ADDRESS"

@@ -42,7 +41,6 @@ write_files:
       HTTP_PROXY="$HTTP_PROXY"
      HTTPS_PROXY="$HTTPS_PROXY"
       NO_PROXY="$NO_PROXY"
-      WAIT_CURL="$WAIT_CURL"
       KUBE_TAG="$KUBE_TAG"
       ETCD_TAG="$ETCD_TAG"
       FLANNEL_TAG="$FLANNEL_TAG"

View File

@@ -622,7 +622,6 @@ resources:
           list_join:
             - '-'
             - [{ get_param: 'OS::stack_name' }, 'master', '%index%']
-          master_index: '%index%'
           prometheus_monitoring: {get_param: prometheus_monitoring}
           grafana_admin_passwd: {get_param: grafana_admin_passwd}
           api_public_address: {get_attr: [api_lb, floating_address]}

@@ -637,7 +636,6 @@ resources:
           docker_volume_type: {get_param: docker_volume_type}
           docker_storage_driver: {get_param: docker_storage_driver}
           cgroup_driver: {get_param: cgroup_driver}
-          wait_condition_timeout: {get_param: wait_condition_timeout}
           network_driver: {get_param: network_driver}
           flannel_network_cidr: {get_param: flannel_network_cidr}
           flannel_network_subnetlen: {get_param: flannel_network_subnetlen}

@@ -700,6 +698,41 @@ resources:
           kubescheduler_options: {get_param: kubescheduler_options}
           octavia_enabled: {get_param: octavia_enabled}

+  kube_cluster_config:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config:
+        list_join:
+          - "\n"
+          -
+            - get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh
+            - str_replace:
+                template: {get_file: ../../common/templates/kubernetes/fragments/enable-cert-api-manager.sh}
+                params:
+                  "$CA_KEY": {get_param: ca_key}
+            - get_file: ../../common/templates/kubernetes/fragments/core-dns-service.sh
+            - str_replace:
+                template: {get_file: ../../common/templates/kubernetes/fragments/enable-prometheus-monitoring.sh}
+                params:
+                  "$ADMIN_PASSWD": {get_param: grafana_admin_passwd}
+            - get_file: ../../common/templates/kubernetes/fragments/calico-service.sh
+            - str_replace:
+                params:
+                  $enable-ingress-traefik: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-traefik.sh}
+                template: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-controller.sh}
+            - get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh
+
+  kube_cluster_deploy:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      actions: ['CREATE']
+      signal_transport: HEAT_SIGNAL
+      config:
+        get_resource: kube_cluster_config
+      server:
+        get_attr: [kube_masters, resource.0]
+
   ######################################################################
   #
   # kubernetes minions. This is an resource group that will initially

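A note on the deployment target above: kube_masters is a ResourceGroup of
nested kubemaster stacks, so "get_attr: [kube_masters, resource.0]" refers to
a nested stack rather than a Nova server. The OS::stack_id output added in
the kubemaster template hunk below is what makes that reference resolve to
the first master's server (Heat substitutes a nested stack's OS::stack_id
output for its resource ID), so the single kube_cluster_deploy runs on master
0 only. A sketch of the pairing, using the names from this diff:

    # Nested kubemaster template: resolve this stack to its server.
    outputs:
      OS::stack_id:
        value: { get_resource: kube-master }

    # Cluster template: run the combined script on the first master only.
    kube_cluster_deploy:
      type: OS::Heat::SoftwareDeployment
      properties:
        actions: ['CREATE']
        signal_transport: HEAT_SIGNAL
        config: {get_resource: kube_cluster_config}
        server: {get_attr: [kube_masters, resource.0]}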
View File

@@ -178,11 +178,6 @@ parameters:
     type: string
     description: network driver to use for instantiating container networks

-  wait_condition_timeout:
-    type: number
-    description : >
-      timeout for the Wait Conditions
-
   secgroup_kube_master_id:
     type: string
     description: ID of the security group for kubernetes master.

@@ -367,28 +362,11 @@ parameters:
       whether or not to use Octavia for LoadBalancer type service.
     default: False

-  master_index:
-    type: string
-    description: >
-      the index of master node, index 0 means the master node is the primary,
-      bootstrapping node.
-
   cloud_provider_enabled:
     type: boolean
     description: Enable or disable the openstack kubernetes cloud provider

 resources:

-  master_wait_handle:
-    type: OS::Heat::WaitConditionHandle
-
-  master_wait_condition:
-    type: OS::Heat::WaitCondition
-    depends_on: kube-master
-    properties:
-      handle: {get_resource: master_wait_handle}
-      timeout: {get_param: wait_condition_timeout}
-
   ######################################################################
   #
   # resource that exposes the IPs of either the kube master or the API

@@ -417,7 +395,6 @@ resources:
         str_replace:
           template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params-master.yaml}
           params:
-            "$MASTER_INDEX": {get_param: master_index}
             "$PROMETHEUS_MONITORING": {get_param: prometheus_monitoring}
             "$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]}
             "$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]}

@@ -461,7 +438,6 @@ resources:
             "$FLANNEL_TAG": {get_param: flannel_tag}
             "$KUBE_VERSION": {get_param: kube_version}
             "$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version}
-            "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]}
             "$TRUSTEE_USER_ID": {get_param: trustee_user_id}
             "$TRUSTEE_PASSWORD": {get_param: trustee_password}
             "$TRUST_ID": {get_param: trust_id}

@@ -548,24 +524,6 @@ resources:
       group: ungrouped
       config: {get_file: ../../common/templates/kubernetes/fragments/enable-services-master.sh}

-  kube_apiserver_to_kubelet_role:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config: {get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh}
-
-  core_dns_service:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config: {get_file: ../../common/templates/kubernetes/fragments/core-dns-service.sh}
-
-  master_wc_notify:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config: {get_file: ../../common/templates/kubernetes/fragments/wc-notify-master.sh}
-
   disable_selinux:
     type: OS::Heat::SoftwareConfig
     properties:

@@ -601,91 +559,6 @@ resources:
         - config: {get_resource: enable_services}
         - config: {get_resource: write_flannel_config}
         - config: {get_resource: flannel_config_service}
-        - config: {get_resource: kube_apiserver_to_kubelet_role}
-        - config: {get_resource: core_dns_service}
-        - config: {get_resource: master_wc_notify}
-
-  enable_prometheus_monitoring:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config:
-        str_replace:
-          template: {get_file: ../../common/templates/kubernetes/fragments/enable-prometheus-monitoring.sh}
-          params:
-            "$ADMIN_PASSWD": {get_param: grafana_admin_passwd}
-
-  enable_prometheus_monitoring_deployment:
-    type: OS::Heat::SoftwareDeployment
-    properties:
-      signal_transport: HEAT_SIGNAL
-      config: {get_resource: enable_prometheus_monitoring}
-      server: {get_resource: kube-master}
-      actions: ['CREATE']
-
-  enable_cert_manager_api:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config:
-        str_replace:
-          template: {get_file: ../../common/templates/kubernetes/fragments/enable-cert-api-manager.sh}
-          params:
-            "$CA_KEY": {get_param: ca_key}
-
-  enable_cert_manager_api_deployment:
-    type: OS::Heat::SoftwareDeployment
-    properties:
-      signal_transport: HEAT_SIGNAL
-      config: {get_resource: enable_cert_manager_api}
-      server: {get_resource: kube-master}
-      actions: ['CREATE']
-
-  calico_service:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config: {get_file: ../../common/templates/kubernetes/fragments/calico-service.sh}
-
-  calico_service_deployment:
-    type: OS::Heat::SoftwareDeployment
-    properties:
-      signal_transport: HEAT_SIGNAL
-      config: {get_resource: calico_service}
-      server: {get_resource: kube-master}
-      actions: ['CREATE']
-
-  enable_ingress_controller:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config:
-        str_replace:
-          params:
-            $enable-ingress-traefik: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-traefik.sh}
-          template: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-controller.sh}
-
-  enable_ingress_controller_deployment:
-    type: OS::Heat::SoftwareDeployment
-    properties:
-      signal_transport: HEAT_SIGNAL
-      config: {get_resource: enable_ingress_controller}
-      server: {get_resource: kube-master}
-      actions: ['CREATE']
-
-  kubernetes_dashboard:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config: {get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh}
-
-  kubernetes_dashboard_deployment:
-    type: OS::Heat::SoftwareDeployment
-    properties:
-      signal_transport: HEAT_SIGNAL
-      config: {get_resource: kubernetes_dashboard}
-      server: {get_resource: kube-master}
-      actions: ['CREATE']

   ######################################################################
   #

@@ -783,6 +656,9 @@ resources:
 outputs:

+  OS::stack_id:
+    value: { get_resource: kube-master }
+
   kube_master_ip:
     value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]}
     description: >
description: > description: >

View File

@@ -0,0 +1,5 @@
+features:
+  - |
+    Start Kubernetes workers installation right after the master instances are
+    created rather than waiting for all the services inside masters, which
+    could decrease the Kubernetes cluster launch time significantly.