Fix CoreOS cluster creation and heat notify

Also fix the label issue introduced with
https://review.openstack.org/#/c/426291/

Closes-Bug: #1679663

Change-Id: Ic1ca4ebef96a796f22acb06722f209477b7db934
This commit is contained in:
ArchiFleKs 2017-03-15 13:52:41 +01:00 committed by Kevin Lefevre
parent 1f2f002c52
commit ff18982505
7 changed files with 52 additions and 2 deletions

View File

@@ -57,7 +57,7 @@ write_files:
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--network-plugin=cni \
--register-node=true \
--container-runtime=${CONTAINER_RUNTIME}
--container-runtime=${CONTAINER_RUNTIME} \
--register-schedulable=false \
--allow-privileged=true \
--pod-manifest-path=/etc/kubernetes/manifests \

View File

@@ -66,6 +66,8 @@ write_files:
[Unit]
Requires=flanneld.service
After=flanneld.service
[Service]
EnvironmentFile=/etc/kubernetes/cni/docker_opts_cni.env
EOF
CNI=/etc/kubernetes/cni/net.d/10-flannel.conf
@@ -80,5 +82,12 @@ write_files:
}
EOF
DOCKER_FLANNEL_CONF=/etc/kubernetes/cni/docker_opts_cni.env
mkdir -p $(dirname $DOCKER_FLANNEL_CONF)
cat > $DOCKER_FLANNEL_CONF <<EOF
DOCKER_OPT_BIP=""
DOCKER_OPT_IPMASQ=""
EOF
systemctl enable flanneld
systemctl --no-block start flanneld

View File

@@ -66,6 +66,8 @@ write_files:
[Unit]
Requires=flanneld.service
After=flanneld.service
[Service]
EnvironmentFile=/etc/kubernetes/cni/docker_opts_cni.env
EOF
CNI=/etc/kubernetes/cni/net.d/10-flannel.conf
@@ -80,5 +82,12 @@ write_files:
}
EOF
DOCKER_FLANNEL_CONF=/etc/kubernetes/cni/docker_opts_cni.env
mkdir -p $(dirname $DOCKER_FLANNEL_CONF)
cat > $DOCKER_FLANNEL_CONF <<EOF
DOCKER_OPT_BIP=""
DOCKER_OPT_IPMASQ=""
EOF
systemctl enable flanneld
systemctl --no-block start flanneld

View File

@@ -20,5 +20,5 @@ write_files:
permissions: "0755"
content: |
#!/bin/bash -v
command="$WAIT_CURL --data-binary '{\"status\": \"SUCCESS\"}'"
command="$WAIT_CURL --insecure --data-binary '{\"status\": \"SUCCESS\"}'"
eval $(echo "$command")

View File

@@ -41,6 +41,19 @@ parameters:
default: m1.small
description: flavor to use when booting the servers for minions
prometheus_monitoring:
type: boolean
default: false
description: >
whether or not to have the grafana-prometheus-cadvisor monitoring setup
grafana_admin_passwd:
type: string
default: admin
hidden: true
description: >
admin user password for the Grafana monitoring interface
discovery_url:
type: string
description: >
@@ -408,6 +421,8 @@ resources:
hyperkube_image: {get_param: hyperkube_image}
insecure_registry_url: {get_param: insecure_registry_url}
container_runtime: {get_param: container_runtime}
prometheus_monitoring: {get_param: prometheus_monitoring}
grafana_admin_passwd: {get_param: grafana_admin_passwd}
######################################################################
#
@@ -454,6 +469,7 @@ resources:
hyperkube_image: {get_param: hyperkube_image}
insecure_registry_url: {get_param: insecure_registry_url}
container_runtime: {get_param: container_runtime}
prometheus_monitoring: {get_param: prometheus_monitoring}
outputs:

View File

@@ -127,6 +127,17 @@ parameters:
type: string
description: identifier for the cluster this template is generating
prometheus_monitoring:
type: boolean
description: >
whether or not to have prometheus and grafana deployed
grafana_admin_passwd:
type: string
hidden: true
description: >
admin user password for the Grafana monitoring interface
magnum_url:
type: string
description: endpoint to retrieve TLS certs from

View File

@@ -133,6 +133,11 @@ parameters:
description: >
Container runtime to use with Kubernetes.
prometheus_monitoring:
type: boolean
description: >
whether or not to have the node-exporter running on the node
resources:
minion_wait_handle: