[k8s] Cluster creation speedup
- Start workers as soon as the master VM is created, rather than waiting
  for all the services to be ready.
- Move all the SoftwareDeployment resources outside of the kubemaster stack.
- Tweak the scripts in SoftwareDeployment so that they can be combined
  into a single script.

Story: 2004573
Task: 28347

Change-Id: Ie48861253615c8f60b34a2c1e9ad6b91d3ae685e
Co-Authored-By: Lingxian Kong <anlin.kong@gmail.com>
parent 844e4db2a9
commit cae7fa21b6
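Note on the script changes below: every fragment drops its early "exit 0" guard because the fragments are now concatenated into one deployment script, and a top-level exit 0 in any fragment would abort every fragment joined after it. A minimal sketch of the recurring pattern (the body is elided):

    #!/bin/sh
    # Before: correct standalone, but fatal when concatenated --
    # exit 0 would skip all the fragments that follow.
    #   if [ "$NETWORK_DRIVER" != "calico" ]; then
    #       exit 0
    #   fi
    #   ... body ...
    # After: invert the test and wrap the body instead, so control
    # always falls through to the next concatenated fragment.
    if [ "$NETWORK_DRIVER" = "calico" ]; then
        : # ... body ...
    fi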
@@ -1,24 +1,24 @@
 #!/bin/sh
 
+step="calico-service"
+printf "Starting to run ${step}\n"
+
 . /etc/sysconfig/heat-params
 
-if [ "$NETWORK_DRIVER" != "calico" ]; then
-    exit 0
-fi
-
-_prefix=${CONTAINER_INFRA_PREFIX:-quay.io/calico/}
-ETCD_SERVER_IP=${ETCD_LB_VIP:-$KUBE_NODE_IP}
-CERT_DIR=/etc/kubernetes/certs
-ETCD_CA=`cat ${CERT_DIR}/ca.crt | base64 | tr -d '\n'`
-ETCD_CERT=`cat ${CERT_DIR}/server.crt | base64 | tr -d '\n'`
-ETCD_KEY=`cat ${CERT_DIR}/server.key | base64 | tr -d '\n'`
-
-CALICO_DEPLOY=/srv/magnum/kubernetes/manifests/calico-deploy.yaml
-
-[ -f ${CALICO_DEPLOY} ] || {
-    echo "Writing File: $CALICO_DEPLOY"
-    mkdir -p $(dirname ${CALICO_DEPLOY})
-    cat << EOF > ${CALICO_DEPLOY}
+if [ "$NETWORK_DRIVER" = "calico" ]; then
+    _prefix=${CONTAINER_INFRA_PREFIX:-quay.io/calico/}
+    ETCD_SERVER_IP=${ETCD_LB_VIP:-$KUBE_NODE_IP}
+    CERT_DIR=/etc/kubernetes/certs
+    ETCD_CA=`cat ${CERT_DIR}/ca.crt | base64 | tr -d '\n'`
+    ETCD_CERT=`cat ${CERT_DIR}/server.crt | base64 | tr -d '\n'`
+    ETCD_KEY=`cat ${CERT_DIR}/server.key | base64 | tr -d '\n'`
+
+    CALICO_DEPLOY=/srv/magnum/kubernetes/manifests/calico-deploy.yaml
+
+    [ -f ${CALICO_DEPLOY} ] || {
+        echo "Writing File: $CALICO_DEPLOY"
+        mkdir -p $(dirname ${CALICO_DEPLOY})
+        cat << EOF > ${CALICO_DEPLOY}
 # Calico Version v2.6.7
 # https://docs.projectcalico.org/v2.6/releases#v2.6.7
 # This manifest includes the following component versions:
@@ -445,21 +445,15 @@ subjects:
     name: calico-node
     namespace: kube-system
 EOF
-}
-
-# NOTE(flwang): Let's keep the same addons yaml file on all masters,
-# but if it's not the primary/bootstrapping master, don't try to
-# create those resources to avoid race condition issue until the
-# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
-# fixed.
-if [ "$MASTER_INDEX" != "0" ]; then
-    exit 0
-fi
-
-until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
-do
-    echo "Waiting for Kubernetes API..."
-    sleep 5
-done
-
-/usr/bin/kubectl apply -f ${CALICO_DEPLOY} --namespace=kube-system
+    }
+
+    until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
+    do
+        echo "Waiting for Kubernetes API..."
+        sleep 5
+    done
+
+    /usr/bin/kubectl apply -f ${CALICO_DEPLOY} --namespace=kube-system
+fi
+
+printf "Finished running ${step}\n"
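The MASTER_INDEX guard deleted above recurs in several fragments below and is obsolete under the new layout: the combined deployment introduced later in this change runs only on the primary master, so master selection moves from each script into Heat. A sketch of the old script-level gating versus the new orchestration-level targeting:

    # Old: each addon script self-gated on the bootstrapping master,
    # and the exit 0 would also abort any fragment concatenated after it:
    #   if [ "$MASTER_INDEX" != "0" ]; then
    #       exit 0
    #   fi
    # New: the scripts carry no index check at all; the Heat deployment
    # simply targets the first master:
    #   server: {get_attr: [kube_masters, resource.0]}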
@@ -2,6 +2,8 @@
 
 . /etc/sysconfig/heat-params
 
+set -x
+
 if [ -n "$ETCD_VOLUME_SIZE" ] && [ "$ETCD_VOLUME_SIZE" -gt 0 ]; then
 
     attempts=60
@@ -1,5 +1,8 @@
 #!/bin/sh
 
+step="core-dns-service"
+printf "Starting to run ${step}\n"
+
 . /etc/sysconfig/heat-params
 
 _dns_prefix=${CONTAINER_INFRA_PREFIX:-docker.io/coredns/}
@@ -245,15 +248,6 @@ spec:
 EOF
 }
 
-# NOTE(flwang): Let's keep the same addons yaml file on all masters,
-# but if it's not the primary/bootstrapping master, don't try to
-# create those resources to avoid race condition issue until the
-# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
-# fixed.
-if [ "$MASTER_INDEX" != "0" ]; then
-    exit 0
-fi
-
 echo "Waiting for Kubernetes API..."
 until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
 do
@@ -261,3 +255,5 @@ do
 done
 
 kubectl apply --validate=false -f $CORE_DNS
+
+printf "Finished running ${step}\n"
@@ -1,15 +1,17 @@
-#!/bin/bash
+#!/bin/sh
+
+step="enable-cert-api-manager"
+printf "Starting to run ${step}\n"
 
 . /etc/sysconfig/heat-params
 
-if [ "$(echo $CERT_MANAGER_API | tr '[:upper:]' '[:lower:]')" = "false" ]; then
-    exit 0
-fi
-
-cert_dir=/etc/kubernetes/certs
-
-echo -e "$CA_KEY" > ${cert_dir}/ca.key
-
-chown kube.kube ${cert_dir}/ca.key
-chmod 400 ${cert_dir}/ca.key
+if [ "$(echo $CERT_MANAGER_API | tr '[:upper:]' '[:lower:]')" != "false" ]; then
+    cert_dir=/etc/kubernetes/certs
+
+    echo -e "$CA_KEY" > ${cert_dir}/ca.key
+
+    chown kube.kube ${cert_dir}/ca.key
+    chmod 400 ${cert_dir}/ca.key
+fi
+
+printf "Finished running ${step}\n"
@@ -1,4 +1,7 @@
-#!/bin/bash
+#!/bin/sh
+
+step="enable-ingress-controller"
+printf "Starting to run ${step}\n"
 
 # Enables the specified ingress controller.
 #
@@ -21,3 +24,5 @@ EOF
 if [ "$(echo $INGRESS_CONTROLLER | tr '[:upper:]' '[:lower:]')" = "traefik" ]; then
     $enable-ingress-traefik
 fi
+
+printf "Finished running ${step}\n"
@@ -110,15 +110,6 @@ writeFile $INGRESS_TRAEFIK_MANIFEST "$INGRESS_TRAEFIK_MANIFEST_CONTENT"
 INGRESS_TRAEFIK_BIN="/srv/magnum/kubernetes/bin/ingress-traefik"
 INGRESS_TRAEFIK_SERVICE="/etc/systemd/system/ingress-traefik.service"
 
-# NOTE(flwang): Let's keep the same addons yaml file on all masters,
-# but if it's not the primary/bootstrapping master, don't try to
-# create those resources to avoid race condition issue until the
-# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
-# fixed.
-if [ "$MASTER_INDEX" != "0" ]; then
-    exit 0
-fi
-
 # Binary for ingress traefik
 INGRESS_TRAEFIK_BIN_CONTENT='''#!/bin/sh
 until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
@@ -1,4 +1,7 @@
-#!/bin/bash
+#!/bin/sh
+
+step="enable-prometheus-monitoring"
+printf "Starting to run ${step}\n"
 
 . /etc/sysconfig/heat-params
 
@@ -361,144 +364,136 @@ writeFile $grafanaService_file "$grafanaService_content"
 
 . /etc/sysconfig/heat-params
 
-# NOTE(flwang): Let's keep the same addons yaml file on all masters,
-# but if it's not the primary/bootstrapping master, don't try to
-# create those resources to avoid race condition issue until the
-# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
-# fixed.
-if [ "$MASTER_INDEX" != "0" ]; then
-    exit 0
-fi
-
-if [ "$(echo $PROMETHEUS_MONITORING | tr '[:upper:]' '[:lower:]')" = "false" ]; then
-    exit 0
-fi
-
-PROMETHEUS_MON_BASE_DIR="/srv/magnum/kubernetes/monitoring"
-KUBE_MON_BIN=${PROMETHEUS_MON_BASE_DIR}"/bin/kube-enable-monitoring"
-KUBE_MON_SERVICE="/etc/systemd/system/kube-enable-monitoring.service"
-GRAFANA_DEF_DASHBOARDS=${PROMETHEUS_MON_BASE_DIR}"/dashboards"
-GRAFANA_DEF_DASHBOARD_FILE=$GRAFANA_DEF_DASHBOARDS"/default.json"
-
-# Write the binary for enable-monitoring
-KUBE_MON_BIN_CONTENT='''#!/bin/sh
-until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
-do
-    echo "Waiting for Kubernetes API..."
-    sleep 5
-done
-
-# Check if prometheus-monitoring namespace exist already before creating the namespace
-kubectl get namespace prometheus-monitoring
-if [ "$?" != "0" ] && \
-        [ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/prometheusNamespace.yaml" ]; then
-    kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/prometheusNamespace.yaml
-fi
-
-# Check if all resources exist already before creating them
-# Check if configmap Prometheus exists
-kubectl get configmap prometheus -n prometheus-monitoring
-if [ "$?" != "0" ] && \
-        [ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/prometheusConfigMap.yaml" ]; then
-    kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/prometheusConfigMap.yaml
-fi
-
-# Check if deployment and service Prometheus exist
-kubectl get service prometheus -n prometheus-monitoring | kubectl get deployment prometheus -n prometheus-monitoring
-if [ "${PIPESTATUS[0]}" != "0" ] && [ "${PIPESTATUS[1]}" != "0" ] && \
-        [ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/prometheusService.yaml" ]; then
-    kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/prometheusService.yaml
-fi
-
-# Check if configmap graf-dash exists
-kubectl get configmap graf-dash -n prometheus-monitoring
-if [ "$?" != "0" ] && \
-        [ -f '''$GRAFANA_DEF_DASHBOARD_FILE''' ]; then
-    kubectl create configmap graf-dash --from-file='''$GRAFANA_DEF_DASHBOARD_FILE''' -n prometheus-monitoring
-fi
-
-# Check if deployment and service Grafana exist
-kubectl get service grafana -n prometheus-monitoring | kubectl get deployment grafana -n prometheus-monitoring
-if [ "${PIPESTATUS[0]}" != "0" ] && [ "${PIPESTATUS[1]}" != "0" ] && \
-        [ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/grafanaService.yaml" ]; then
-    kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/grafanaService.yaml
-fi
-
-# Wait for Grafana pod and then inject data source
-while true
-do
-    echo "Waiting for Grafana pod to be up and Running"
-    if [ "$(kubectl get po -n prometheus-monitoring -l name=grafana -o jsonpath={..phase})" = "Running" ]; then
-        break
-    fi
-    sleep 2
-done
-
-# Which node is running Grafana
-NODE_IP=`kubectl get po -n prometheus-monitoring -o jsonpath={.items[0].status.hostIP} -l name=grafana`
-PROM_SERVICE_IP=`kubectl get svc prometheus --namespace prometheus-monitoring -o jsonpath={..clusterIP}`
-
-# The Grafana pod might be running but the app might still be initiating
-echo "Check if Grafana is ready..."
-curl --user admin:$ADMIN_PASSWD -X GET http://$NODE_IP:3000/api/datasources/1
-until [ $? -eq 0 ]
-do
-    sleep 2
-    curl --user admin:$ADMIN_PASSWD -X GET http://$NODE_IP:3000/api/datasources/1
-done
-
-# Inject Prometheus datasource into Grafana
-while true
-do
-    INJECT=`curl --user admin:$ADMIN_PASSWD -X POST \
-        -H "Content-Type: application/json;charset=UTF-8" \
-        --data-binary '''"'"'''{"name":"k8sPrometheus","isDefault":true,
-        "type":"prometheus","url":"http://'''"'"'''$PROM_SERVICE_IP'''"'"''':9090","access":"proxy"}'''"'"'''\
-        "http://$NODE_IP:3000/api/datasources/"`
-
-    if [[ "$INJECT" = *"Datasource added"* ]]; then
-        echo "Prometheus datasource injected into Grafana"
-        break
-    fi
-    echo "Trying to inject Prometheus datasource into Grafana - "$INJECT
-done
-'''
-writeFile $KUBE_MON_BIN "$KUBE_MON_BIN_CONTENT"
-
-
-# Write the monitoring service
-KUBE_MON_SERVICE_CONTENT='''[Unit]
-Description=Enable Prometheus monitoring stack
-
-[Service]
-Type=oneshot
-Environment=HOME=/root
-EnvironmentFile=-/etc/kubernetes/config
-ExecStart='''${KUBE_MON_BIN}'''
-
-[Install]
-WantedBy=multi-user.target
-'''
-writeFile $KUBE_MON_SERVICE "$KUBE_MON_SERVICE_CONTENT"
-
-chown root:root ${KUBE_MON_BIN}
-chmod 0755 ${KUBE_MON_BIN}
-
-chown root:root ${KUBE_MON_SERVICE}
-chmod 0644 ${KUBE_MON_SERVICE}
-
-# Download the default JSON Grafana dashboard
-# Not a crucial step, so allow it to fail
-# TODO: this JSON should be passed into the minions as gzip in cloud-init
-GRAFANA_DASHB_URL="https://grafana.net/api/dashboards/1621/revisions/1/download"
-mkdir -p $GRAFANA_DEF_DASHBOARDS
-curl $GRAFANA_DASHB_URL -o $GRAFANA_DEF_DASHBOARD_FILE || echo "Failed to fetch default Grafana dashboard"
-if [ -f $GRAFANA_DEF_DASHBOARD_FILE ]; then
-    sed -i -- 's|${DS_PROMETHEUS}|k8sPrometheus|g' $GRAFANA_DEF_DASHBOARD_FILE
-fi
-
-# Launch the monitoring service
-set -x
-systemctl daemon-reload
-systemctl enable kube-enable-monitoring.service
-systemctl start --no-block kube-enable-monitoring.service
+if [ "$(echo $PROMETHEUS_MONITORING | tr '[:upper:]' '[:lower:]')" = "true" ]; then
+    PROMETHEUS_MON_BASE_DIR="/srv/magnum/kubernetes/monitoring"
+    KUBE_MON_BIN=${PROMETHEUS_MON_BASE_DIR}"/bin/kube-enable-monitoring"
+    KUBE_MON_SERVICE="/etc/systemd/system/kube-enable-monitoring.service"
+    GRAFANA_DEF_DASHBOARDS=${PROMETHEUS_MON_BASE_DIR}"/dashboards"
+    GRAFANA_DEF_DASHBOARD_FILE=$GRAFANA_DEF_DASHBOARDS"/default.json"
+
+    # Write the binary for enable-monitoring
+    KUBE_MON_BIN_CONTENT='''#!/bin/sh
+    until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
+    do
+        echo "Waiting for Kubernetes API..."
+        sleep 5
+    done
+
+    # Check if prometheus-monitoring namespace exist already before creating the namespace
+    kubectl get namespace prometheus-monitoring
+    if [ "$?" != "0" ] && \
+            [ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/prometheusNamespace.yaml" ]; then
+        kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/prometheusNamespace.yaml
+    fi
+
+    # Check if all resources exist already before creating them
+    # Check if configmap Prometheus exists
+    kubectl get configmap prometheus -n prometheus-monitoring
+    if [ "$?" != "0" ] && \
+            [ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/prometheusConfigMap.yaml" ]; then
+        kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/prometheusConfigMap.yaml
+    fi
+
+    # Check if deployment and service Prometheus exist
+    kubectl get service prometheus -n prometheus-monitoring | kubectl get deployment prometheus -n prometheus-monitoring
+    if [ "${PIPESTATUS[0]}" != "0" ] && [ "${PIPESTATUS[1]}" != "0" ] && \
+            [ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/prometheusService.yaml" ]; then
+        kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/prometheusService.yaml
+    fi
+
+    # Check if configmap graf-dash exists
+    kubectl get configmap graf-dash -n prometheus-monitoring
+    if [ "$?" != "0" ] && \
+            [ -f '''$GRAFANA_DEF_DASHBOARD_FILE''' ]; then
+        kubectl create configmap graf-dash --from-file='''$GRAFANA_DEF_DASHBOARD_FILE''' -n prometheus-monitoring
+    fi
+
+    # Check if deployment and service Grafana exist
+    kubectl get service grafana -n prometheus-monitoring | kubectl get deployment grafana -n prometheus-monitoring
+    if [ "${PIPESTATUS[0]}" != "0" ] && [ "${PIPESTATUS[1]}" != "0" ] && \
+            [ -f "'''${PROMETHEUS_MON_BASE_DIR}'''/grafanaService.yaml" ]; then
+        kubectl create -f '''${PROMETHEUS_MON_BASE_DIR}'''/grafanaService.yaml
+    fi
+
+    # Wait for Grafana pod and then inject data source
+    while true
+    do
+        echo "Waiting for Grafana pod to be up and Running"
+        if [ "$(kubectl get po -n prometheus-monitoring -l name=grafana -o jsonpath={..phase})" = "Running" ]; then
+            break
+        fi
+        sleep 2
+    done
+
+    # Which node is running Grafana
+    NODE_IP=`kubectl get po -n prometheus-monitoring -o jsonpath={.items[0].status.hostIP} -l name=grafana`
+    PROM_SERVICE_IP=`kubectl get svc prometheus --namespace prometheus-monitoring -o jsonpath={..clusterIP}`
+
+    # The Grafana pod might be running but the app might still be initiating
+    echo "Check if Grafana is ready..."
+    curl --user admin:$ADMIN_PASSWD -X GET http://$NODE_IP:3000/api/datasources/1
+    until [ $? -eq 0 ]
+    do
+        sleep 2
+        curl --user admin:$ADMIN_PASSWD -X GET http://$NODE_IP:3000/api/datasources/1
+    done
+
+    # Inject Prometheus datasource into Grafana
+    while true
+    do
+        INJECT=`curl --user admin:$ADMIN_PASSWD -X POST \
+            -H "Content-Type: application/json;charset=UTF-8" \
+            --data-binary '''"'"'''{"name":"k8sPrometheus","isDefault":true,
+            "type":"prometheus","url":"http://'''"'"'''$PROM_SERVICE_IP'''"'"''':9090","access":"proxy"}'''"'"'''\
+            "http://$NODE_IP:3000/api/datasources/"`
+
+        if [[ "$INJECT" = *"Datasource added"* ]]; then
+            echo "Prometheus datasource injected into Grafana"
+            break
+        fi
+        echo "Trying to inject Prometheus datasource into Grafana - "$INJECT
+    done
+    '''
+    writeFile $KUBE_MON_BIN "$KUBE_MON_BIN_CONTENT"
+
+
+    # Write the monitoring service
+    KUBE_MON_SERVICE_CONTENT='''[Unit]
+    Description=Enable Prometheus monitoring stack
+
+    [Service]
+    Type=oneshot
+    Environment=HOME=/root
+    EnvironmentFile=-/etc/kubernetes/config
+    ExecStart='''${KUBE_MON_BIN}'''
+
+    [Install]
+    WantedBy=multi-user.target
+    '''
+    writeFile $KUBE_MON_SERVICE "$KUBE_MON_SERVICE_CONTENT"
+
+    chown root:root ${KUBE_MON_BIN}
+    chmod 0755 ${KUBE_MON_BIN}
+
+    chown root:root ${KUBE_MON_SERVICE}
+    chmod 0644 ${KUBE_MON_SERVICE}
+
+    # Download the default JSON Grafana dashboard
+    # Not a crucial step, so allow it to fail
+    # TODO: this JSON should be passed into the minions as gzip in cloud-init
+    GRAFANA_DASHB_URL="https://grafana.net/api/dashboards/1621/revisions/1/download"
+    mkdir -p $GRAFANA_DEF_DASHBOARDS
+    curl $GRAFANA_DASHB_URL -o $GRAFANA_DEF_DASHBOARD_FILE || echo "Failed to fetch default Grafana dashboard"
+    if [ -f $GRAFANA_DEF_DASHBOARD_FILE ]; then
+        sed -i -- 's|${DS_PROMETHEUS}|k8sPrometheus|g' $GRAFANA_DEF_DASHBOARD_FILE
+    fi
+
+    # Launch the monitoring service
+    set -x
+    systemctl daemon-reload
+    systemctl enable kube-enable-monitoring.service
+    systemctl start --no-block kube-enable-monitoring.service
+fi
+
+printf "Finished running ${step}\n"
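With every fragment now writing to a single combined log, the step variable added at the top of each script exists purely to delimit output. Each refactored fragment follows the same shape, sketched here with a hypothetical name:

    #!/bin/sh
    step="example-addon"                 # hypothetical fragment name
    printf "Starting to run ${step}\n"

    # ... fragment body, with conditions wrapping the body rather
    # than calling exit 0 ...

    printf "Finished running ${step}\n"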
@@ -1,16 +1,10 @@
-#!/bin/sh -x
+#!/bin/sh
+
+step="kube-apiserver-to-kubelet-role"
+printf "Starting to run ${step}\n"
 
 . /etc/sysconfig/heat-params
 
-# NOTE(flwang): Let's keep the same addons yaml file on all masters,
-# but if it's not the primary/bootstrapping master, don't try to
-# create those resources to avoid race condition issue until the
-# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
-# fixed.
-if [ "$MASTER_INDEX" != "0" ]; then
-    exit 0
-fi
-
 echo "Waiting for Kubernetes API..."
 until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
 do
@@ -84,3 +78,5 @@ EOF
 }
 
 kubectl apply --validate=false -f ${ADMIN_RBAC}
+
+printf "Finished running ${step}\n"
@@ -1,20 +1,20 @@
-#!/bin/bash -x
+#!/bin/sh
+
+step="kube-dashboard-service"
+printf "Starting to run ${step}\n"
 
 . /etc/sysconfig/heat-params
 
-if [ "$(echo $KUBE_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "false" ]; then
-    exit 0
-fi
-
-KUBE_DASH_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}kubernetes-dashboard-amd64:${KUBE_DASHBOARD_VERSION}"
-HEAPSTER_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-amd64:v1.4.2"
-
-KUBE_DASH_DEPLOY=/srv/magnum/kubernetes/kubernetes-dashboard.yaml
-
-[ -f ${KUBE_DASH_DEPLOY} ] || {
-    echo "Writing File: $KUBE_DASH_DEPLOY"
-    mkdir -p $(dirname ${KUBE_DASH_DEPLOY})
-    cat << EOF > ${KUBE_DASH_DEPLOY}
+if [ "$(echo $KUBE_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "true" ]; then
+    KUBE_DASH_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}kubernetes-dashboard-amd64:${KUBE_DASHBOARD_VERSION}"
+    HEAPSTER_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-amd64:v1.4.2"
+
+    KUBE_DASH_DEPLOY=/srv/magnum/kubernetes/kubernetes-dashboard.yaml
+
+    [ -f ${KUBE_DASH_DEPLOY} ] || {
+        echo "Writing File: $KUBE_DASH_DEPLOY"
+        mkdir -p $(dirname ${KUBE_DASH_DEPLOY})
+        cat << EOF > ${KUBE_DASH_DEPLOY}
 # Copyright 2017 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -197,22 +197,22 @@ spec:
   selector:
     k8s-app: kubernetes-dashboard
 EOF
-}
-
-INFLUX_SINK=""
-# Deploy INFLUX AND GRAFANA
-if [ "$(echo $INFLUX_GRAFANA_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "true" ]; then
-    INFLUX_SINK=" - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086"
-    INFLUX_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-influxdb-amd64:v1.3.3"
-    GRAFANA_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-grafana-amd64:v4.4.3"
-
-    INFLUX_DEPLOY=/srv/magnum/kubernetes/influxdb.yaml
-    GRAFANA_DEPLOY=/srv/magnum/kubernetes/grafana.yaml
-
-    [ -f ${INFLUX_DEPLOY} ] || {
-        echo "Writing File: $INFLUX_DEPLOY"
-        mkdir -p $(dirname ${INFLUX_DEPLOY})
-        cat << EOF > ${INFLUX_DEPLOY}
+    }
+
+    INFLUX_SINK=""
+    # Deploy INFLUX AND GRAFANA
+    if [ "$(echo $INFLUX_GRAFANA_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "true" ]; then
+        INFLUX_SINK=" - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086"
+        INFLUX_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-influxdb-amd64:v1.3.3"
+        GRAFANA_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-grafana-amd64:v4.4.3"
+
+        INFLUX_DEPLOY=/srv/magnum/kubernetes/influxdb.yaml
+        GRAFANA_DEPLOY=/srv/magnum/kubernetes/grafana.yaml
+
+        [ -f ${INFLUX_DEPLOY} ] || {
+            echo "Writing File: $INFLUX_DEPLOY"
+            mkdir -p $(dirname ${INFLUX_DEPLOY})
+            cat << EOF > ${INFLUX_DEPLOY}
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
@@ -254,12 +254,12 @@ spec:
   selector:
     k8s-app: influxdb
 EOF
-}
-
-[ -f ${GRAFANA_DEPLOY} ] || {
-    echo "Writing File: $GRAFANA_DEPLOY"
-    mkdir -p $(dirname ${GRAFANA_DEPLOY})
-    cat << EOF > ${GRAFANA_DEPLOY}
+        }
+
+        [ -f ${GRAFANA_DEPLOY} ] || {
+            echo "Writing File: $GRAFANA_DEPLOY"
+            mkdir -p $(dirname ${GRAFANA_DEPLOY})
+            cat << EOF > ${GRAFANA_DEPLOY}
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
@@ -333,31 +333,25 @@ spec:
   selector:
     k8s-app: grafana
 EOF
-}
-
-if [ "$MASTER_INDEX" != "0" ]; then
-    exit 0
-fi
-
-echo "Waiting for Kubernetes API..."
-until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
-do
-    sleep 5
-done
-
-kubectl apply --validate=false -f $INFLUX_DEPLOY
-kubectl apply --validate=false -f $GRAFANA_DEPLOY
-fi
-
-# Deploy Heapster
-HEAPSTER_DEPLOY=/srv/magnum/kubernetes/heapster-controller.yaml
-
-[ -f ${HEAPSTER_DEPLOY} ] || {
-    echo "Writing File: $HEAPSTER_DEPLOY"
-    mkdir -p $(dirname ${HEAPSTER_DEPLOY})
-    cat << EOF > ${HEAPSTER_DEPLOY}
+        }
+
+        echo "Waiting for Kubernetes API..."
+        until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
+        do
+            sleep 5
+        done
+
+        kubectl apply --validate=false -f $INFLUX_DEPLOY
+        kubectl apply --validate=false -f $GRAFANA_DEPLOY
+    fi
+
+    # Deploy Heapster
+    HEAPSTER_DEPLOY=/srv/magnum/kubernetes/heapster-controller.yaml
+
+    [ -f ${HEAPSTER_DEPLOY} ] || {
+        echo "Writing File: $HEAPSTER_DEPLOY"
+        mkdir -p $(dirname ${HEAPSTER_DEPLOY})
+        cat << EOF > ${HEAPSTER_DEPLOY}
 apiVersion: v1
 kind: ServiceAccount
 metadata:
@@ -452,23 +446,16 @@ subjects:
     name: heapster
     namespace: kube-system
 EOF
-}
-
-# NOTE(flwang): Let's keep the same addons yaml file on all masters,
-# but if it's not the primary/bootstrapping master, don't try to
-# create those resources to avoid race condition issue until the
-# kubectl issue https://github.com/kubernetes/kubernetes/issues/44165
-# fixed.
-if [ "$MASTER_INDEX" != "0" ]; then
-    exit 0
-fi
-
-echo "Waiting for Kubernetes API..."
-until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
-do
-    sleep 5
-done
-
-kubectl apply --validate=false -f $KUBE_DASH_DEPLOY
-kubectl apply --validate=false -f $HEAPSTER_DEPLOY
+    }
+
+    echo "Waiting for Kubernetes API..."
+    until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
+    do
+        sleep 5
+    done
+
+    kubectl apply --validate=false -f $KUBE_DASH_DEPLOY
+    kubectl apply --validate=false -f $HEAPSTER_DEPLOY
+fi
+
+printf "Finished running ${step}\n"
@@ -5,7 +5,6 @@ write_files:
     owner: "root:root"
     permissions: "0600"
     content: |
-      MASTER_INDEX="$MASTER_INDEX"
       PROMETHEUS_MONITORING="$PROMETHEUS_MONITORING"
       KUBE_API_PUBLIC_ADDRESS="$KUBE_API_PUBLIC_ADDRESS"
       KUBE_API_PRIVATE_ADDRESS="$KUBE_API_PRIVATE_ADDRESS"
@@ -42,7 +41,6 @@ write_files:
       HTTP_PROXY="$HTTP_PROXY"
       HTTPS_PROXY="$HTTPS_PROXY"
       NO_PROXY="$NO_PROXY"
-      WAIT_CURL="$WAIT_CURL"
       KUBE_TAG="$KUBE_TAG"
       ETCD_TAG="$ETCD_TAG"
       FLANNEL_TAG="$FLANNEL_TAG"
|
@ -655,7 +655,6 @@ resources:
|
||||
list_join:
|
||||
- '-'
|
||||
- [{ get_param: 'OS::stack_name' }, 'master', '%index%']
|
||||
master_index: '%index%'
|
||||
prometheus_monitoring: {get_param: prometheus_monitoring}
|
||||
grafana_admin_passwd: {get_param: grafana_admin_passwd}
|
||||
api_public_address: {get_attr: [api_lb, floating_address]}
|
||||
@@ -670,7 +669,6 @@ resources:
           docker_volume_type: {get_param: docker_volume_type}
           docker_storage_driver: {get_param: docker_storage_driver}
           cgroup_driver: {get_param: cgroup_driver}
-          wait_condition_timeout: {get_param: wait_condition_timeout}
           network_driver: {get_param: network_driver}
           flannel_network_cidr: {get_param: flannel_network_cidr}
           flannel_network_subnetlen: {get_param: flannel_network_subnetlen}
@@ -738,6 +736,41 @@ resources:
           grafana_tag: {get_param: grafana_tag}
           heat_container_agent_tag: {get_param: heat_container_agent_tag}
 
+  kube_cluster_config:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config:
+        list_join:
+          - "\n"
+          -
+            - get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh
+            - str_replace:
+                template: {get_file: ../../common/templates/kubernetes/fragments/enable-cert-api-manager.sh}
+                params:
+                  "$CA_KEY": {get_param: ca_key}
+            - get_file: ../../common/templates/kubernetes/fragments/core-dns-service.sh
+            - str_replace:
+                template: {get_file: ../../common/templates/kubernetes/fragments/enable-prometheus-monitoring.sh}
+                params:
+                  "$ADMIN_PASSWD": {get_param: grafana_admin_passwd}
+            - get_file: ../../common/templates/kubernetes/fragments/calico-service.sh
+            - str_replace:
+                params:
+                  $enable-ingress-traefik: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-traefik.sh}
+                template: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-controller.sh}
+            - get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh
+
+  kube_cluster_deploy:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      actions: ['CREATE']
+      signal_transport: HEAT_SIGNAL
+      config:
+        get_resource: kube_cluster_config
+      server:
+        get_attr: [kube_masters, resource.0]
+
 
 ######################################################################
 #
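kube_cluster_config above is where the combination happens: list_join with "\n" splices the rendered fragments into one script, which is why the fragments had to drop their exit 0 guards. A rough local equivalent of what Heat produces (paths illustrative, and the str_replace parameters are assumed already substituted):

    # Emulate list_join("\n", fragments) by concatenating the fragment
    # files with a newline separator into a single deployment script.
    for f in kube-apiserver-to-kubelet-role.sh \
             enable-cert-api-manager.sh \
             core-dns-service.sh \
             enable-prometheus-monitoring.sh \
             calico-service.sh \
             enable-ingress-controller.sh \
             kube-dashboard-service.sh; do
        cat "$f"
        printf "\n"     # the "\n" separator list_join inserts
    done > kube-cluster-config.sh

The single kube_cluster_deploy with HEAT_SIGNAL replaces the old chain of depends_on-linked deployments inside the kubemaster stack, and because it runs against resource.0 of kube_masters after the masters are created, the worker nodes no longer have to wait for these services.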
@@ -178,11 +178,6 @@ parameters:
     type: string
     description: network driver to use for instantiating container networks
 
-  wait_condition_timeout:
-    type: number
-    description : >
-      timeout for the Wait Conditions
-
   secgroup_kube_master_id:
     type: string
     description: ID of the security group for kubernetes master.
@@ -367,12 +362,6 @@ parameters:
       whether or not to use Octavia for LoadBalancer type service.
     default: False
 
-  master_index:
-    type: string
-    description: >
-      the index of master node, index 0 means the master node is the primary,
-      bootstrapping node.
-
   kube_service_account_key:
     type: string
     hidden: true
@@ -404,17 +393,6 @@ parameters:
     description: tag of the heat_container_agent system container
 
 resources:
-
-  master_wait_handle:
-    type: OS::Heat::WaitConditionHandle
-
-  master_wait_condition:
-    type: OS::Heat::WaitCondition
-    depends_on: kube-master
-    properties:
-      handle: {get_resource: master_wait_handle}
-      timeout: {get_param: wait_condition_timeout}
-
 ######################################################################
 #
 # resource that exposes the IPs of either the kube master or the API
@@ -443,7 +421,6 @@ resources:
       str_replace:
         template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params-master.yaml}
         params:
-          "$MASTER_INDEX": {get_param: master_index}
           "$PROMETHEUS_MONITORING": {get_param: prometheus_monitoring}
           "$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]}
           "$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]}
@@ -487,7 +464,6 @@ resources:
           "$FLANNEL_TAG": {get_param: flannel_tag}
           "$KUBE_VERSION": {get_param: kube_version}
           "$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version}
-          "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]}
           "$TRUSTEE_USER_ID": {get_param: trustee_user_id}
           "$TRUSTEE_PASSWORD": {get_param: trustee_password}
           "$TRUST_ID": {get_param: trust_id}
@@ -585,18 +561,6 @@ resources:
       group: ungrouped
       config: {get_file: ../../common/templates/kubernetes/fragments/enable-services-master.sh}
 
-  kube_apiserver_to_kubelet_role:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config: {get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh}
-
-  master_wc_notify:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config: {get_file: ../../common/templates/kubernetes/fragments/wc-notify-master.sh}
-
   disable_selinux:
     type: OS::Heat::SoftwareConfig
     properties:
@@ -633,109 +597,6 @@ resources:
         - config: {get_resource: write_flannel_config}
         - config: {get_resource: flannel_config_service}
        - config: {get_resource: flannel_service}
-        - config: {get_resource: kube_apiserver_to_kubelet_role}
-        - config: {get_resource: master_wc_notify}
-
-  enable_cert_manager_api:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config:
-        str_replace:
-          template: {get_file: ../../common/templates/kubernetes/fragments/enable-cert-api-manager.sh}
-          params:
-            "$CA_KEY": {get_param: ca_key}
-
-  enable_cert_manager_api_deployment:
-    type: OS::Heat::SoftwareDeployment
-    properties:
-      signal_transport: HEAT_SIGNAL
-      config: {get_resource: enable_cert_manager_api}
-      server: {get_resource: kube-master}
-      actions: ['CREATE']
-
-  core_dns_service:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config: {get_file: ../../common/templates/kubernetes/fragments/core-dns-service.sh}
-
-  core_dns_service_deployment:
-    type: OS::Heat::SoftwareDeployment
-    depends_on: enable_cert_manager_api_deployment
-    properties:
-      signal_transport: HEAT_SIGNAL
-      config: {get_resource: core_dns_service}
-      server: {get_resource: kube-master}
-      actions: ['CREATE']
-
-  enable_prometheus_monitoring:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config:
-        str_replace:
-          template: {get_file: ../../common/templates/kubernetes/fragments/enable-prometheus-monitoring.sh}
-          params:
-            "$ADMIN_PASSWD": {get_param: grafana_admin_passwd}
-
-  enable_prometheus_monitoring_deployment:
-    type: OS::Heat::SoftwareDeployment
-    depends_on: core_dns_service_deployment
-    properties:
-      signal_transport: HEAT_SIGNAL
-      config: {get_resource: enable_prometheus_monitoring}
-      server: {get_resource: kube-master}
-      actions: ['CREATE']
-
-  calico_service:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config: {get_file: ../../common/templates/kubernetes/fragments/calico-service.sh}
-
-  calico_service_deployment:
-    type: OS::Heat::SoftwareDeployment
-    depends_on: enable_prometheus_monitoring_deployment
-    properties:
-      signal_transport: HEAT_SIGNAL
-      config: {get_resource: calico_service}
-      server: {get_resource: kube-master}
-      actions: ['CREATE']
-
-  enable_ingress_controller:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config:
-        str_replace:
-          params:
-            $enable-ingress-traefik: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-traefik.sh}
-          template: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-controller.sh}
-
-  enable_ingress_controller_deployment:
-    type: OS::Heat::SoftwareDeployment
-    depends_on: calico_service_deployment
-    properties:
-      signal_transport: HEAT_SIGNAL
-      config: {get_resource: enable_ingress_controller}
-      server: {get_resource: kube-master}
-      actions: ['CREATE']
-
-  kubernetes_dashboard:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config: {get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh}
-
-  kubernetes_dashboard_deployment:
-    type: OS::Heat::SoftwareDeployment
-    depends_on: enable_ingress_controller_deployment
-    properties:
-      signal_transport: HEAT_SIGNAL
-      config: {get_resource: kubernetes_dashboard}
-      server: {get_resource: kube-master}
-      actions: ['CREATE']
-
 ######################################################################
 #
@@ -833,6 +694,9 @@ resources:
 
 outputs:
 
+  OS::stack_id:
+    value: { get_resource: kube-master }
+
   kube_master_ip:
     value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]}
     description: >
@@ -0,0 +1,5 @@
+features:
+  - |
+    Start Kubernetes workers installation right after the master instances are
+    created rather than waiting for all the services inside masters, which
+    could decrease the Kubernetes cluster launch time significantly.