Merge "k8s_coreos_driver: cleanup file naming"

Jenkins 2016-08-02 05:45:31 +00:00 committed by Gerrit Code Review
commit 007b8125f5
43 changed files with 26 additions and 1299 deletions


@@ -122,10 +122,12 @@ class CoreOSK8sTemplateDefinition(K8sTemplateDefinition):
"""Kubernetes template for CoreOS VM."""
provides = [
{'server_type': 'vm', 'os': 'coreos', 'coe': 'kubernetes'},
{'server_type': 'vm',
'os': 'coreos',
'coe': 'kubernetes'},
]
@property
def template_path(self):
return os.path.join(os.path.dirname(os.path.realpath(__file__)),
'templates/kubecluster-coreos.yaml')
'templates/kubecluster.yaml')


@@ -1,40 +0,0 @@
#!/bin/sh
. /etc/sysconfig/heat-params
DOCKER_PROXY_CONF=/etc/systemd/system/docker.service.d/proxy.conf
BASH_RC=/etc/bashrc
mkdir -p /etc/systemd/system/docker.service.d
if [ -n "$HTTP_PROXY" ]; then
cat <<EOF | sed "s/^ *//" > $DOCKER_PROXY_CONF
[Service]
Environment=HTTP_PROXY=$HTTP_PROXY
EOF
systemctl daemon-reload
systemctl --no-block restart docker.service
if [ -f "$BASH_RC" ]; then
echo "declare -x http_proxy=$HTTP_PROXY" >> $BASH_RC
else
echo "File $BASH_RC does not exist, not setting http_proxy"
fi
fi
if [ -n "$HTTPS_PROXY" ]; then
if [ -f "$BASH_RC" ]; then
echo "declare -x https_proxy=$HTTPS_PROXY" >> $BASH_RC
else
echo "File $BASH_RC does not exist, not setting https_proxy"
fi
fi
if [ -n "$NO_PROXY" ]; then
if [ -f "$BASH_RC" ]; then
echo "declare -x no_proxy=$NO_PROXY" >> $BASH_RC
else
echo "File $BASH_RC does not exist, not setting no_proxy"
fi
fi
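
A quick way to confirm the proxy drop-in took effect is to ask systemd directly (illustrative commands, not part of the fragment):

# Should list HTTP_PROXY=... once the drop-in is loaded
systemctl show docker --property=Environment
# Shows which drop-ins extend the unit
systemd-delta --type=extended docker.service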


@@ -1,37 +0,0 @@
#!/bin/sh
. /etc/sysconfig/heat-params
attempts=60
while [ ${attempts} -gt 0 ]; do
device_name=$(ls /dev/disk/by-id | grep ${DOCKER_VOLUME:0:20}$)
if [ -n "${device_name}" ]; then
break
fi
echo "waiting for disk device"
sleep 0.5
udevadm trigger
let attempts--
done
if [ -z "${device_name}" ]; then
echo "ERROR: disk device does not exist" >&2
exit 1
fi
device_path=/dev/disk/by-id/${device_name}
$configure_docker_storage_driver
if [ "$DOCKER_STORAGE_DRIVER" = "overlay" ]; then
if [ $(echo -e "$(uname -r)\n3.18" | sort -V | head -1) \
= $(uname -r) ]; then
ERROR_MESSAGE="OverlayFS requires at least Linux kernel 3.18. Bay node kernel version: $(uname -r)"
echo "ERROR: ${ERROR_MESSAGE}" >&2
sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"${ERROR_MESSAGE}\"}'"
else
configure_overlay
fi
else
configure_devicemapper
fi
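
The OverlayFS gate above compares kernel versions by printing both values through sort -V and checking which one sorts first. The same trick as a standalone sketch (the helper name is illustrative):

# Succeeds when the running kernel is at least the given version.
kernel_at_least() {
    min="$1"
    [ "$(printf '%s\n%s\n' "$min" "$(uname -r)" | sort -V | head -n1)" = "$min" ]
}
kernel_at_least 3.18 && echo "OverlayFS kernel requirement met"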


@@ -1,20 +0,0 @@
#!/bin/sh
. /etc/sysconfig/heat-params
myip="$KUBE_NODE_IP"
cat > /etc/etcd/etcd.conf <<EOF
ETCD_NAME="$myip"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_LISTEN_PEER_URLS="http://$myip:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://$myip:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://$myip:2380"
ETCD_DISCOVERY="$ETCD_DISCOVERY_URL"
EOF
if [ -n "$HTTP_PROXY" ]; then
echo "ETCD_DISCOVERY_PROXY=$HTTP_PROXY" >> /etc/etcd/etcd.conf
fi
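
Once etcd starts with this file, the v2 API answers on the client URL; an illustrative smoke test:

# etcd2 exposes /health and a member listing over the client port
curl -sf http://127.0.0.1:2379/health
curl -sf http://127.0.0.1:2379/v2/members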


@@ -1,30 +0,0 @@
#!/bin/sh
. /etc/sysconfig/heat-params
. /etc/sysconfig/flanneld
FLANNEL_JSON=/etc/sysconfig/flannel-network.json
# Generate a flannel configuration that we will
# store into etcd using curl.
cat > $FLANNEL_JSON <<EOF
{
"Network": "$FLANNEL_NETWORK_CIDR",
"Subnetlen": $FLANNEL_NETWORK_SUBNETLEN,
"Backend": {
"Type": "$FLANNEL_BACKEND"
}
}
EOF
# wait for etcd to become active (we will need it to push the flanneld config)
while ! curl -sf -o /dev/null $FLANNEL_ETCD/v2/keys/; do
echo "waiting for etcd"
sleep 1
done
# put the flannel config in etcd
echo "creating flanneld config in etcd"
curl -sf -L $FLANNEL_ETCD/v2/keys/coreos.com/network/config \
-X PUT \
--data-urlencode value@/etc/sysconfig/flannel-network.json
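
The PUT stores the JSON under a well-known key, so reading it back makes a handy sanity check (illustrative, using the same Python 2 one-liner style as the other fragments):

# Read the stored network config back out of etcd
curl -sf "$FLANNEL_ETCD/v2/keys/coreos.com/network/config" \
    | python -c 'import sys, json; print json.load(sys.stdin)["node"]["value"]'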


@@ -1,56 +0,0 @@
#!/bin/sh
. /etc/sysconfig/heat-params
echo "configuring kubernetes (master)"
sed -i '
/^ETCD_LISTEN_CLIENT_URLS=/ s/=.*/="http:\/\/0.0.0.0:2379"/
' /etc/etcd/etcd.conf
sed -i '
/^KUBE_ALLOW_PRIV=/ s/=.*/="--allow_privileged='"$KUBE_ALLOW_PRIV"'"/
' /etc/kubernetes/config
KUBE_API_ARGS="--runtime_config=api/all=true"
if [ "$TLS_DISABLED" == "True" ]; then
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0 --insecure-port=$KUBE_API_PORT"
else
KUBE_API_ADDRESS="--bind_address=0.0.0.0 --secure-port=$KUBE_API_PORT"
# the insecure port is used internally
KUBE_API_ADDRESS="$KUBE_API_ADDRESS --insecure-port=8080"
KUBE_API_ARGS="$KUBE_API_ARGS --tls_cert_file=/srv/kubernetes/server.crt"
KUBE_API_ARGS="$KUBE_API_ARGS --tls_private_key_file=/srv/kubernetes/server.key"
KUBE_API_ARGS="$KUBE_API_ARGS --client_ca_file=/srv/kubernetes/ca.crt"
fi
sed -i '
/^KUBE_API_ADDRESS=/ s/=.*/='"${KUBE_API_ADDRESS}"'/
/^KUBE_SERVICE_ADDRESSES=/ s|=.*|="--service-cluster-ip-range='"$PORTAL_NETWORK_CIDR"'"|
/^KUBE_API_ARGS=/ s/KUBE_API_ARGS.//
/^KUBE_ETCD_SERVERS=/ s/=.*/="--etcd_servers=http:\/\/127.0.0.1:2379"/
/^KUBE_ADMISSION_CONTROL=/ s/=.*/=""/
' /etc/kubernetes/apiserver
cat << _EOC_ >> /etc/kubernetes/apiserver
#Uncomment the following line to disable Load Balancer feature
KUBE_API_ARGS="$KUBE_API_ARGS"
#Uncomment the following line to enable Load Balancer feature
#KUBE_API_ARGS="$KUBE_API_ARGS --cloud_config=/etc/sysconfig/kube_openstack_config --cloud_provider=openstack"
_EOC_
sed -i '
/^KUBELET_ADDRESSES=/ s/=.*/="--machines='""'"/
/^KUBE_CONTROLLER_MANAGER_ARGS=/ s/KUBE_CONTROLLER_MANAGER_ARGS.*/#Uncomment the following line to enable Kubernetes Load Balancer feature \n#KUBE_CONTROLLER_MANAGER_ARGS="--cloud_config=\/etc\/sysconfig\/kube_openstack_config --cloud_provider=openstack"/
' /etc/kubernetes/controller-manager
KUBELET_ARGS="--register-node=true --register-schedulable=false --config=/etc/kubernetes/manifests --hostname-override=$KUBE_NODE_IP"
if [ -n "${INSECURE_REGISTRY_URL}" ]; then
KUBELET_ARGS="${KUBELET_ARGS} --pod-infra-container-image=${INSECURE_REGISTRY_URL}/google_containers/pause\:0.8.0"
echo "INSECURE_REGISTRY='--insecure-registry ${INSECURE_REGISTRY_URL}'" >> /etc/sysconfig/docker
fi
sed -i '
/^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/
/^KUBELET_HOSTNAME=/ s/=.*/=""/
/^KUBELET_ARGS=/ s|=.*|='"$KUBELET_ARGS"'|
' /etc/kubernetes/kubelet
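
Most of this fragment is one sed idiom: anchor on ^KEY= and rewrite everything after the =. Distilled into a hypothetical helper:

# set_kv FILE KEY VALUE -- rewrite a KEY=... line in a sysconfig-style file
# (sketch; breaks if VALUE contains the | delimiter)
set_kv() {
    sed -i '/^'"$2"'=/ s|=.*|='"$3"'|' "$1"
}
set_kv /etc/kubernetes/kubelet KUBELET_HOSTNAME '""'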


@@ -1,85 +0,0 @@
#!/bin/sh
. /etc/sysconfig/heat-params
echo "configuring kubernetes (minion)"
ETCD_SERVER_IP=${ETCD_SERVER_IP:-$KUBE_MASTER_IP}
KUBE_PROTOCOL="https"
KUBE_CONFIG=""
if [ "$TLS_DISABLED" = "True" ]; then
KUBE_PROTOCOL="http"
else
KUBE_CONFIG="--kubeconfig=/srv/kubernetes/kubeconfig.yaml"
fi
KUBE_MASTER_URI="$KUBE_PROTOCOL://$KUBE_MASTER_IP:$KUBE_API_PORT"
sed -i '
/^KUBE_ALLOW_PRIV=/ s/=.*/="--allow_privileged='"$KUBE_ALLOW_PRIV"'"/
/^KUBE_ETCD_SERVERS=/ s|=.*|="--etcd_servers=http://'"$ETCD_SERVER_IP"':2379"|
/^KUBE_MASTER=/ s|=.*|="--master='"$KUBE_MASTER_URI"'"|
' /etc/kubernetes/config
KUBELET_ARGS="--config=/etc/kubernetes/manifests --cadvisor-port=4194 --hostname-override=$KUBE_NODE_IP ${KUBE_CONFIG}"
if [ -n "${INSECURE_REGISTRY_URL}" ]; then
KUBELET_ARGS="${KUBELET_ARGS} --pod-infra-container-image=${INSECURE_REGISTRY_URL}/google_containers/pause\:0.8.0"
echo "INSECURE_REGISTRY='--insecure-registry ${INSECURE_REGISTRY_URL}'" >> /etc/sysconfig/docker
fi
sed -i '
/^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/
/^KUBELET_HOSTNAME=/ s/=.*/=""/
/^KUBELET_API_SERVER=/ s|=.*|="--api_servers='"$KUBE_MASTER_URI"'"|
/^KUBELET_ARGS=/ s|=.*|='"${KUBELET_ARGS}"'|
' /etc/kubernetes/kubelet
sed -i '
/^KUBE_PROXY_ARGS=/ s|=.*|='"$KUBE_CONFIG"'|
' /etc/kubernetes/proxy
if [ "$NETWORK_DRIVER" = "flannel" ]; then
sed -i '
/^FLANNEL_ETCD=/ s|=.*|="http://'"$ETCD_SERVER_IP"':2379"|
' /etc/sysconfig/flanneld
# Make sure etcd has a flannel configuration
. /etc/sysconfig/flanneld
until curl -sf "$FLANNEL_ETCD/v2/keys${FLANNEL_ETCD_KEY}/config?quorum=false&recursive=false&sorted=false"
do
echo "Waiting for flannel configuration in etcd..."
sleep 5
done
fi
if [ "$VOLUME_DRIVER" = "cinder" ]; then
CLOUD_CONFIG=/etc/kubernetes/kube_openstack_config
KUBERNETES=/etc/kubernetes
if [ ! -d ${KUBERNETES} -o ! -f ${CLOUD_CONFIG} ]; then
sudo mkdir -p $KUBERNETES
fi
AUTH_URL=$(echo "$AUTH_URL" | sed 's/v3/v2/')
cat > $CLOUD_CONFIG <<EOF
[Global]
auth-url=$AUTH_URL
username=$USERNAME
password=$PASSWORD
region=$REGION_NAME
tenant-name=$TENANT_NAME
EOF
cat << _EOC_ >> /etc/kubernetes/kubelet
#KUBELET_ARGS="$KUBELET_ARGS --cloud-provider=openstack --cloud-config=/etc/kubernetes/kube_openstack_config"
_EOC_
if [ ! -f /usr/bin/udevadm ]; then
sudo ln -s /sbin/udevadm /usr/bin/udevadm
fi
fi
cat >> /etc/environment <<EOF
KUBERNETES_MASTER=$KUBE_MASTER_URI
EOF
hostname `hostname | sed 's/.novalocal//'`
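
One caveat in the fragment above: the unanchored 's/v3/v2/' rewrites the first "v3" anywhere in AUTH_URL, even one embedded in the hostname. A sketch that only touches a trailing version segment (assuming the URL ends in /v3):

AUTH_URL=$(echo "$AUTH_URL" | sed 's|/v3/*$|/v2|')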


@@ -1,8 +0,0 @@
#cloud-boothook
#!/bin/sh
setenforce 0
sed -i '
/^SELINUX=/ s/=.*/=permissive/
' /etc/selinux/config


@@ -1,12 +0,0 @@
#!/bin/sh
. /etc/sysconfig/heat-params
if [ "$REGISTRY_ENABLED" = "False" ]; then
exit 0
fi
echo "starting docker registry ..."
systemctl daemon-reload
systemctl enable registry
systemctl --no-block start registry


@@ -1,5 +0,0 @@
#!/bin/sh
echo "starting etcd"
systemctl enable etcd
systemctl --no-block start etcd


@@ -1,150 +0,0 @@
#!/bin/sh
. /etc/sysconfig/heat-params
if [ -n "${INSECURE_REGISTRY_URL}" ]; then
PODMASTER_IMAGE="${INSECURE_REGISTRY_URL}/google_containers/podmaster:1.1"
HYPERKUBE_IMAGE="${INSECURE_REGISTRY_URL}/google_containers/hyperkube:${KUBE_VERSION}"
else
PODMASTER_IMAGE="gcr.io/google_containers/podmaster:1.1"
HYPERKUBE_IMAGE="gcr.io/google_containers/hyperkube:${KUBE_VERSION}"
fi
init_templates () {
local TEMPLATE=/etc/kubernetes/manifests/kube-podmaster.yaml
[ -f ${TEMPLATE} ] || {
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname ${TEMPLATE})
cat << EOF > ${TEMPLATE}
apiVersion: v1
kind: Pod
metadata:
name: kube-podmaster
namespace: kube-system
spec:
hostNetwork: true
containers:
- name: scheduler-elector
image: ${PODMASTER_IMAGE}
command:
- /podmaster
- --etcd-servers=http://127.0.0.1:2379
- --key=scheduler
- --source-file=/src/manifests/kube-scheduler.yaml
- --dest-file=/dst/manifests/kube-scheduler.yaml
volumeMounts:
- mountPath: /src/manifests
name: manifest-src
readOnly: true
- mountPath: /dst/manifests
name: manifest-dst
- name: controller-manager-elector
image: ${PODMASTER_IMAGE}
command:
- /podmaster
- --etcd-servers=http://127.0.0.1:2379
- --key=controller
- --source-file=/src/manifests/kube-controller-manager.yaml
- --dest-file=/dst/manifests/kube-controller-manager.yaml
terminationMessagePath: /dev/termination-log
volumeMounts:
- mountPath: /src/manifests
name: manifest-src
readOnly: true
- mountPath: /dst/manifests
name: manifest-dst
volumes:
- hostPath:
path: /srv/kubernetes/manifests
name: manifest-src
- hostPath:
path: /etc/kubernetes/manifests
name: manifest-dst
EOF
}
local SERVICE_ACCOUNT_PRIVATE_KEY_FILE=/etc/kubernetes/ssl/server.key
local ROOT_CA_FILE=/etc/kubernetes/ssl/ca.crt
if [ "${TLS_DISABLED}" = "True" ]; then
SERVICE_ACCOUNT_PRIVATE_KEY_FILE=
ROOT_CA_FILE=
fi
local TEMPLATE=/srv/kubernetes/manifests/kube-controller-manager.yaml
[ -f ${TEMPLATE} ] || {
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname ${TEMPLATE})
cat << EOF > ${TEMPLATE}
apiVersion: v1
kind: Pod
metadata:
name: kube-controller-manager
namespace: kube-system
spec:
containers:
- name: kube-controller-manager
image: ${HYPERKUBE_IMAGE}
command:
- /hyperkube
- controller-manager
- --master=http://127.0.0.1:8080
- --service-account-private-key-file=${SERVICE_ACCOUNT_PRIVATE_KEY_FILE}
- --root-ca-file=${ROOT_CA_FILE}
livenessProbe:
httpGet:
host: 127.0.0.1
path: /healthz
port: 10252
initialDelaySeconds: 15
timeoutSeconds: 1
volumeMounts:
- mountPath: /etc/kubernetes/ssl
name: ssl-certs-kubernetes
readOnly: true
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
hostNetwork: true
volumes:
- hostPath:
path: /srv/kubernetes
name: ssl-certs-kubernetes
- hostPath:
path: /etc/ssl/certs
name: ssl-certs-host
EOF
}
local TEMPLATE=/srv/kubernetes/manifests/kube-scheduler.yaml
[ -f ${TEMPLATE} ] || {
echo "TEMPLATE: $TEMPLATE"
mkdir -p $(dirname ${TEMPLATE})
cat << EOF > ${TEMPLATE}
apiVersion: v1
kind: Pod
metadata:
name: kube-scheduler
namespace: kube-system
spec:
hostNetwork: true
containers:
- name: kube-scheduler
image: ${HYPERKUBE_IMAGE}
command:
- /hyperkube
- scheduler
- --master=http://127.0.0.1:8080
livenessProbe:
httpGet:
host: 127.0.0.1
path: /healthz
port: 10251
initialDelaySeconds: 15
timeoutSeconds: 1
EOF
}
}
init_templates


@@ -1,11 +0,0 @@
#!/bin/sh
# make sure we pick up any modified unit files
systemctl daemon-reload
echo "starting services"
for service in etcd docker kube-apiserver kubelet; do
echo "activating service $service"
systemctl enable $service
systemctl --no-block start $service
done


@@ -1,17 +0,0 @@
#!/bin/sh
# docker is already enabled and possibly running on centos atomic host
# so we need to stop it first and delete the docker0 bridge (which will
# be re-created using the flannel-provided subnet).
echo "stopping docker"
systemctl stop docker
ip link del docker0
# make sure we pick up any modified unit files
systemctl daemon-reload
for service in docker kubelet; do
echo "activating service $service"
systemctl enable $service
systemctl --no-block start $service
done
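
Deleting docker0 here is safe because the flannel drop-in written by a later fragment relaunches docker with --bip set to the flannel-provided subnet. An illustrative post-boot check:

# The docker0 address should now fall inside FLANNEL_SUBNET
ip -4 addr show docker0
cat /run/flannel/docker   # DOCKER_NETWORK_OPTIONS="--bip=... --mtu=..."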


@@ -1,117 +0,0 @@
#cloud-config
merge_how: dict(recurse_array)+list(append)
write_files:
- path: /etc/kubernetes/examples/replication-controller.yaml
owner: "root:root"
permissions: "0644"
content: |
apiVersion: v1
kind: ReplicationController
metadata:
name: nginx-controller
spec:
replicas: 2
# selector identifies the set of pods that this
# replication controller is responsible for managing
selector:
name: nginx
# template defines the 'cookie cutter' used for creating
# new pods when necessary
template:
metadata:
labels:
# Important: these labels need to match the selector above
# The api server enforces this constraint.
name: nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80
- path: /etc/kubernetes/examples/pod-nginx-with-label.yaml
owner: "root:root"
permissions: "0644"
content: |
apiVersion: v1
kind: Pod
metadata:
name: nginx
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80
- path: /etc/kubernetes/examples/service.yaml
owner: "root:root"
permissions: "0644"
content: |
apiVersion: v1
kind: Service
metadata:
name: nginx-service
spec:
ports:
- port: 8000 # the port that this service should serve on
# the container on each pod to connect to, can be a name
# (e.g. 'www') or a number (e.g. 80)
targetPort: 80
protocol: TCP
# just like the selector in the replication controller,
# but this time it identifies the set of pods to load balance
# traffic to.
selector:
app: nginx
- path: /etc/kubernetes/examples/README.md
owner: "root:root"
permissions: "0644"
content: |
Kubernetes 101 (http://kubernetes.io/v1.0/docs/user-guide/walkthrough/README.html)
==================================================================================
List all nodes:
kubectl get nodes
Replication Controllers:
kubectl create -f /etc/kubernetes/examples/replication-controller.yaml
kubectl get rc
kubectl delete rc nginx-controller
Pods:
kubectl create -f /etc/kubernetes/examples/pod-nginx-with-label.yaml
kubectl get pods
curl http://$(kubectl get pod nginx -o=template -t={{.status.podIP}})
Services:
kubectl create -f /etc/kubernetes/examples/service.yaml
kubectl get services
export SERVICE_IP=$(kubectl get service nginx-service -o=template -t={{.spec.clusterIP}})
export SERVICE_PORT=$(kubectl get service nginx-service -o=template '-t={{(index .spec.ports 0).port}}')
curl http://${SERVICE_IP}:${SERVICE_PORT}
kubectl delete service nginx-service
Troubleshooting:
kubectl get events
kubectl describe rc nginx-controller
kubectl describe pod nginx
kubectl describe service nginx-service
kubectl exec nginx env
kubectl exec -ti nginx -- bash


@@ -1,66 +0,0 @@
#!/bin/sh
# this service is required because docker will start only after cloud init has finished
# due to the service dependencies in Fedora Atomic (docker <- docker-storage-setup <- cloud-final)
. /etc/sysconfig/heat-params
KUBE_SYSTEM_JSON=/srv/kubernetes/kube-system-namespace.json
[ -f ${KUBE_SYSTEM_JSON} ] || {
echo "Writing File: $KUBE_SYSTEM_JSON"
mkdir -p $(dirname ${KUBE_SYSTEM_JSON})
cat << EOF > ${KUBE_SYSTEM_JSON}
{
"apiVersion": "v1",
"kind": "Namespace",
"metadata": {
"name": "kube-system"
}
}
EOF
}
KUBE_SYSTEM_BIN=/usr/local/bin/kube-system-namespace
[ -f ${KUBE_SYSTEM_BIN} ] || {
echo "Writing File: $KUBE_SYSTEM_BIN"
mkdir -p $(dirname ${KUBE_SYSTEM_BIN})
cat << EOF > ${KUBE_SYSTEM_BIN}
#!/bin/sh
until curl -sf "http://127.0.0.1:8080/healthz"
do
echo "Waiting for Kubernetes API..."
sleep 5
done
/usr/bin/kubectl create -f /srv/kubernetes/kube-system-namespace.json
EOF
}
KUBE_SYSTEM_SERVICE=/etc/systemd/system/kube-system-namespace.service
[ -f ${KUBE_SYSTEM_SERVICE} ] || {
echo "Writing File: $KUBE_SYSTEM_SERVICE"
mkdir -p $(dirname ${KUBE_SYSTEM_SERVICE})
cat << EOF > ${KUBE_SYSTEM_SERVICE}
[Unit]
After=kubelet.service
Requires=kubelet.service
[Service]
Type=oneshot
Environment=HOME=/root
EnvironmentFile=-/etc/kubernetes/config
ExecStart=${KUBE_SYSTEM_BIN}
[Install]
WantedBy=multi-user.target
EOF
}
chown root:root ${KUBE_SYSTEM_BIN}
chmod 0755 ${KUBE_SYSTEM_BIN}
chown root:root ${KUBE_SYSTEM_SERVICE}
chmod 0644 ${KUBE_SYSTEM_SERVICE}
systemctl enable kube-system-namespace
systemctl start --no-block kube-system-namespace
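
The generated script uses the same poll-until-healthy loop that recurs throughout these fragments; as a reusable sketch (helper name illustrative):

# Poll a URL until it answers successfully
wait_for_url() {
    until curl -sf "$1" >/dev/null; do
        echo "Waiting for $1..."
        sleep 5
    done
}
wait_for_url "http://127.0.0.1:8080/healthz"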


@@ -1,121 +0,0 @@
#!/bin/sh
# this service is required because docker will start only after cloud init was finished
# due to the service dependencies in Fedora Atomic (docker <- docker-storage-setup <- cloud-final)
. /etc/sysconfig/heat-params
if [ -n "${INSECURE_REGISTRY_URL}" ]; then
KUBEUI_IMAGE="${INSECURE_REGISTRY_URL}/google_containers/kube-ui:v4"
else
KUBEUI_IMAGE="gcr.io/google_containers/kube-ui:v4"
fi
KUBE_UI_RC=/srv/kubernetes/manifests/kube-ui-rc.yaml
[ -f ${KUBE_UI_RC} ] || {
echo "Writing File: $KUBE_UI_RC"
mkdir -p $(dirname ${KUBE_UI_RC})
cat << EOF > ${KUBE_UI_RC}
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-ui-v4
namespace: kube-system
labels:
k8s-app: kube-ui
version: v4
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-ui
version: v4
template:
metadata:
labels:
k8s-app: kube-ui
version: v4
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: kube-ui
image: ${KUBEUI_IMAGE}
resources:
limits:
cpu: 100m
memory: 50Mi
ports:
- containerPort: 8080
EOF
}
KUBE_UI_SVC=/srv/kubernetes/manifests/kube-ui-svc.yaml
[ -f ${KUBE_UI_SVC} ] || {
echo "Writing File: $KUBE_UI_SVC"
mkdir -p $(dirname ${KUBE_UI_SVC})
cat << EOF > ${KUBE_UI_SVC}
apiVersion: v1
kind: Service
metadata:
name: kube-ui
namespace: kube-system
labels:
k8s-app: kube-ui
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeUI"
spec:
selector:
k8s-app: kube-ui
ports:
- port: 80
targetPort: 8080
EOF
}
KUBE_UI_BIN=/usr/local/bin/kube-ui
[ -f ${KUBE_UI_BIN} ] || {
echo "Writing File: $KUBE_UI_BIN"
mkdir -p $(dirname ${KUBE_UI_BIN})
cat << EOF > ${KUBE_UI_BIN}
#!/bin/sh
until curl -sf "http://127.0.0.1:8080/healthz"
do
echo "Waiting for Kubernetes API..."
sleep 5
done
/usr/bin/kubectl create -f /srv/kubernetes/manifests/kube-ui-rc.yaml --namespace=kube-system
/usr/bin/kubectl create -f /srv/kubernetes/manifests/kube-ui-svc.yaml --namespace=kube-system
EOF
}
KUBE_UI_SERVICE=/etc/systemd/system/kube-ui.service
[ -f ${KUBE_UI_SERVICE} ] || {
echo "Writing File: $KUBE_UI_SERVICE"
mkdir -p $(dirname ${KUBE_UI_SERVICE})
cat << EOF > ${KUBE_UI_SERVICE}
[Unit]
After=kube-system-namespace
Requires=kubelet.service
Wants=kube-system-namespace.service
[Service]
Type=oneshot
EnvironmentFile=-/etc/kubernetes/config
ExecStart=${KUBE_UI_BIN}
[Install]
WantedBy=multi-user.target
EOF
}
chown root:root ${KUBE_UI_BIN}
chmod 0755 ${KUBE_UI_BIN}
chown root:root ${KUBE_UI_SERVICE}
chmod 0644 ${KUBE_UI_SERVICE}
systemctl enable kube-ui
systemctl start --no-block kube-ui


@@ -1,9 +0,0 @@
#cloud-config
system_info:
default_user:
name: minion
lock_passwd: true
gecos: Kubernetes Interactive User
groups: [wheel, adm, systemd-journal]
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/bash


@@ -1,117 +0,0 @@
#!/bin/sh
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
. /etc/sysconfig/heat-params
set -o errexit
set -o nounset
set -o pipefail
if [ "$TLS_DISABLED" == "True" ]; then
exit 0
fi
cert_dir=/srv/kubernetes
cert_conf_dir=${cert_dir}/conf
mkdir -p "$cert_dir"
mkdir -p "$cert_conf_dir"
CA_CERT=$cert_dir/ca.crt
CLIENT_CERT=$cert_dir/client.crt
CLIENT_CSR=$cert_dir/client.csr
CLIENT_KEY=$cert_dir/client.key
# Get a token using the user credentials and trust
auth_json=$(cat << EOF
{
"auth": {
"identity": {
"methods": [
"password"
],
"password": {
"user": {
"id": "$TRUSTEE_USER_ID",
"password": "$TRUSTEE_PASSWORD"
}
}
},
"scope": {
"OS-TRUST:trust": {
"id": "$TRUST_ID"
}
}
}
}
EOF
)
# trusts were introduced in Keystone v3
AUTH_URL=${AUTH_URL/v2.0/v3}
content_type='Content-Type: application/json'
url="$AUTH_URL/auth/tokens"
USER_TOKEN=`curl -s -i -X POST -H "$content_type" -d "$auth_json" $url \
| grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'`
# Get CA certificate for this bay
curl -X GET \
-H "X-Auth-Token: $USER_TOKEN" \
$MAGNUM_URL/certificates/$BAY_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > $CA_CERT
# Create config for client's csr
cat > ${cert_conf_dir}/client.conf <<EOF
[req]
distinguished_name = req_distinguished_name
req_extensions = req_ext
prompt = no
[req_distinguished_name]
CN = kubernetes.invalid
[req_ext]
keyUsage=critical,digitalSignature,keyEncipherment
extendedKeyUsage=clientAuth
subjectAltName=dirName:kubelet,dirName:kubeproxy
[kubelet]
CN=kubelet
[kubeproxy]
CN=kube-proxy
EOF
# Generate client's private key and csr
openssl genrsa -out "${CLIENT_KEY}" 4096
chmod 400 "${CLIENT_KEY}"
openssl req -new -days 1000 \
-key "${CLIENT_KEY}" \
-out "${CLIENT_CSR}" \
-reqexts req_ext \
-config "${cert_conf_dir}/client.conf"
# Send csr to Magnum to have it signed
csr_req=$(python -c "import json; fp = open('${CLIENT_CSR}'); print json.dumps({'bay_uuid': '$BAY_UUID', 'csr': fp.read()}); fp.close()")
curl -X POST \
-H "X-Auth-Token: $USER_TOKEN" \
-H "Content-Type: application/json" \
-d "$csr_req" \
$MAGNUM_URL/certificates | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CLIENT_CERT}
chmod 500 "${cert_dir}"
chown -R kube:kube "${cert_dir}"
sed -i '
s|CA_CERT|'"$CA_CERT"'|
s|CLIENT_CERT|'"$CLIENT_CERT"'|
s|CLIENT_KEY|'"$CLIENT_KEY"'|
' /srv/kubernetes/kubeconfig.yaml
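
Illustrative follow-up checks that the signed client certificate chains to the bay CA and carries the expected subject:

openssl verify -CAfile /srv/kubernetes/ca.crt /srv/kubernetes/client.crt
openssl x509 -in /srv/kubernetes/client.crt -noout -subject -dates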


@@ -1,126 +0,0 @@
#!/bin/sh
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
. /etc/sysconfig/heat-params
set -o errexit
set -o nounset
set -o pipefail
if [ "$TLS_DISABLED" == "True" ]; then
exit 0
fi
if [[ -z "${KUBE_NODE_PUBLIC_IP}" ]]; then
KUBE_NODE_PUBLIC_IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
fi
if [[ -z "${KUBE_NODE_IP}" ]]; then
KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
fi
sans="IP:${KUBE_NODE_PUBLIC_IP},IP:${KUBE_NODE_IP}"
if [[ "${KUBE_NODE_PUBLIC_IP}" != "${KUBE_API_PUBLIC_ADDRESS}" ]]; then
sans="${sans},IP:${KUBE_API_PUBLIC_ADDRESS}"
fi
if [[ "${KUBE_NODE_IP}" != "${KUBE_API_PRIVATE_ADDRESS}" ]]; then
sans="${sans},IP:${KUBE_API_PRIVATE_ADDRESS}"
fi
MASTER_HOSTNAME=${MASTER_HOSTNAME:-}
if [[ -n "${MASTER_HOSTNAME}" ]]; then
sans="${sans},DNS:${MASTER_HOSTNAME}"
fi
sans="${sans},IP:127.0.0.1"
cert_dir=/srv/kubernetes
cert_conf_dir=${cert_dir}/conf
mkdir -p "$cert_dir"
mkdir -p "$cert_conf_dir"
CA_CERT=$cert_dir/ca.crt
SERVER_CERT=$cert_dir/server.crt
SERVER_CSR=$cert_dir/server.csr
SERVER_KEY=$cert_dir/server.key
# Get a token using the user credentials and trust
auth_json=$(cat << EOF
{
"auth": {
"identity": {
"methods": [
"password"
],
"password": {
"user": {
"id": "$TRUSTEE_USER_ID",
"password": "$TRUSTEE_PASSWORD"
}
}
},
"scope": {
"OS-TRUST:trust": {
"id": "$TRUST_ID"
}
}
}
}
EOF
)
# trusts were introduced in Keystone v3
AUTH_URL=${AUTH_URL/v2.0/v3}
content_type='Content-Type: application/json'
url="$AUTH_URL/auth/tokens"
USER_TOKEN=`curl -s -i -X POST -H "$content_type" -d "$auth_json" $url \
| grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'`
# Get CA certificate for this bay
curl -X GET \
-H "X-Auth-Token: $USER_TOKEN" \
$MAGNUM_URL/certificates/$BAY_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CA_CERT}
# Create config for server's csr
cat > ${cert_conf_dir}/server.conf <<EOF
[req]
distinguished_name = req_distinguished_name
req_extensions = req_ext
prompt = no
[req_distinguished_name]
CN = kubernetes.invalid
[req_ext]
subjectAltName = ${sans}
extendedKeyUsage = clientAuth,serverAuth
EOF
# Generate server's private key and csr
openssl genrsa -out "${SERVER_KEY}" 4096
chmod 400 "${SERVER_KEY}"
openssl req -new -days 1000 \
-key "${SERVER_KEY}" \
-out "${SERVER_CSR}" \
-reqexts req_ext \
-config "${cert_conf_dir}/server.conf"
# Send csr to Magnum to have it signed
csr_req=$(python -c "import json; fp = open('${SERVER_CSR}'); print json.dumps({'bay_uuid': '$BAY_UUID', 'csr': fp.read()}); fp.close()")
curl -X POST \
-H "X-Auth-Token: $USER_TOKEN" \
-H "Content-Type: application/json" \
-d "$csr_req" \
$MAGNUM_URL/certificates | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${SERVER_CERT}
chmod 500 "${cert_dir}"
chown -R kube:kube "${cert_dir}"
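
An illustrative check that the requested SANs made it into the issued server certificate:

openssl x509 -in /srv/kubernetes/server.crt -noout -text \
    | grep -A1 'Subject Alternative Name'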


@@ -1,58 +0,0 @@
#!/bin/sh
. /etc/sysconfig/heat-params
if [ "$NETWORK_DRIVER" != "flannel" ]; then
exit 0
fi
. /etc/sysconfig/flanneld
FLANNEL_CONFIG_BIN=/usr/local/bin/flannel-config
FLANNEL_CONFIG_SERVICE=/etc/systemd/system/flannel-config.service
FLANNEL_JSON=/etc/sysconfig/flannel-network.json
echo "creating $FLANNEL_CONFIG_BIN"
cat > $FLANNEL_CONFIG_BIN <<EOF
#!/bin/sh
if ! [ -f "$FLANNEL_JSON" ]; then
echo "ERROR: missing network configuration file" >&2
exit 1
fi
if ! [ "$FLANNEL_ETCD" ] && [ "$FLANNEL_ETCD_KEY" ]; then
echo "ERROR: missing required configuration" >&2
exit 1
fi
echo "creating flanneld config in etcd"
while ! curl -sf -L $FLANNEL_ETCD/v2/keys${FLANNEL_ETCD_KEY}/config \
-X PUT --data-urlencode value@${FLANNEL_JSON}; do
echo "waiting for etcd"
sleep 1
done
EOF
cat > $FLANNEL_CONFIG_SERVICE <<EOF
[Unit]
After=etcd.service
Requires=etcd.service
[Service]
Type=oneshot
EnvironmentFile=/etc/sysconfig/flanneld
ExecStart=$FLANNEL_CONFIG_BIN
[Install]
WantedBy=multi-user.target
EOF
chown root:root $FLANNEL_CONFIG_BIN
chmod 0755 $FLANNEL_CONFIG_BIN
chown root:root $FLANNEL_CONFIG_SERVICE
chmod 0644 $FLANNEL_CONFIG_SERVICE
systemctl enable flannel-config
systemctl start --no-block flannel-config
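
Reading the key back confirms the oneshot ran (illustrative, using the etcd2-era etcdctl syntax):

. /etc/sysconfig/flanneld
etcdctl --peers "$FLANNEL_ETCD" get "${FLANNEL_ETCD_KEY}/config"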


@@ -1,78 +0,0 @@
#!/bin/sh
. /etc/sysconfig/heat-params
if [ "$NETWORK_DRIVER" != "flannel" ]; then
exit 0
fi
FLANNEL_DOCKER_BRIDGE_BIN=/usr/local/bin/flannel-docker-bridge
FLANNEL_DOCKER_BRIDGE_SERVICE=/etc/systemd/system/flannel-docker-bridge.service
DOCKER_FLANNEL_CONF=/etc/systemd/system/docker.service.d/flannel.conf
FLANNEL_DOCKER_BRIDGE_CONF=/etc/systemd/system/flanneld.service.d/flannel-docker-bridge.conf
mkdir -p /etc/systemd/system/docker.service.d
mkdir -p /etc/systemd/system/flanneld.service.d
cat >> $FLANNEL_DOCKER_BRIDGE_BIN <<EOF1
#!/bin/sh
if ! [ "\$FLANNEL_SUBNET" ] && [ "\$FLANNEL_MTU" ] ; then
echo "ERROR: missing required environment variables." >&2
exit 1
fi
mkdir -p /run/flannel/
cat > /run/flannel/docker <<EOF2
DOCKER_NETWORK_OPTIONS="--bip=\$FLANNEL_SUBNET --mtu=\$FLANNEL_MTU"
EOF2
EOF1
chown root:root $FLANNEL_DOCKER_BRIDGE_BIN
chmod 0755 $FLANNEL_DOCKER_BRIDGE_BIN
cat >> $FLANNEL_DOCKER_BRIDGE_SERVICE <<EOF
[Unit]
After=flanneld.service
Before=docker.service
Requires=flanneld.service
[Service]
Type=oneshot
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/local/bin/flannel-docker-bridge
[Install]
WantedBy=docker.service
EOF
chown root:root $FLANNEL_DOCKER_BRIDGE_SERVICE
chmod 0644 $FLANNEL_DOCKER_BRIDGE_SERVICE
cat >> $DOCKER_FLANNEL_CONF <<EOF
[Unit]
Requires=flannel-docker-bridge.service
After=flannel-docker-bridge.service
[Service]
EnvironmentFile=/run/flannel/docker
EOF
chown root:root $DOCKER_FLANNEL_CONF
chmod 0644 $DOCKER_FLANNEL_CONF
cat >> $FLANNEL_DOCKER_BRIDGE_CONF <<EOF
[Unit]
Requires=flannel-docker-bridge.service
Before=flannel-docker-bridge.service
[Install]
Also=flannel-docker-bridge.service
EOF
chown root:root $FLANNEL_DOCKER_BRIDGE_CONF
chmod 0644 $FLANNEL_DOCKER_BRIDGE_CONF
echo "activating service flanneld"
systemctl enable flanneld
systemctl --no-block start flanneld
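
The EOF1/EOF2 nesting above performs two rounds of expansion: the outer heredoc is unquoted, so plain variables expand while this fragment runs, and escaped \$ variables survive into the generated script to expand only when flanneld supplies them. A minimal self-contained sketch of the trick:

# $NOW expands at generation time; \$USER survives into /tmp/demo.sh
# and expands only when the generated script runs.
NOW=$(date)
cat > /tmp/demo.sh <<EOF1
#!/bin/sh
echo "generated at: $NOW"
cat <<EOF2
runtime user: \$USER
EOF2
EOF1
sh /tmp/demo.sh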


@@ -1,36 +0,0 @@
#!/bin/sh
. /etc/sysconfig/heat-params
WC_NOTIFY_BIN=/usr/local/bin/wc-notify
WC_NOTIFY_SERVICE=/etc/systemd/system/wc-notify.service
cat > $WC_NOTIFY_BIN <<EOF
#!/bin/bash -v
until curl -sf "http://127.0.0.1:8080/healthz"; do
echo "Waiting for Kubernetes API..."
sleep 5
done
$WAIT_CURL --data-binary '{"status": "SUCCESS"}'
EOF
cat > $WC_NOTIFY_SERVICE <<EOF
[Unit]
Description=Notify Heat
After=docker.service etcd.service
Requires=docker.service etcd.service
[Service]
Type=oneshot
ExecStart=$WC_NOTIFY_BIN
[Install]
WantedBy=multi-user.target
EOF
chown root:root $WC_NOTIFY_BIN
chmod 0755 $WC_NOTIFY_BIN
chown root:root $WC_NOTIFY_SERVICE
chmod 0644 $WC_NOTIFY_SERVICE
systemctl enable wc-notify
systemctl start --no-block wc-notify
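
WAIT_CURL is the Heat-supplied wait-condition handle; a failure would be signalled the same way (hypothetical, shape taken from the storage-driver fragment earlier in this diff):

$WAIT_CURL --data-binary '{"status": "FAILURE", "reason": "Kubernetes API never became healthy"}'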


@@ -1,24 +0,0 @@
#!/bin/sh
. /etc/sysconfig/heat-params
KUBE_OS_CLOUD_CONFIG=/etc/sysconfig/kube_openstack_config
# the kubernetes backend only supports keystone v2 at this point
AUTH_URL=$(echo "$AUTH_URL" | sed 's/v3/v2/')
# Generate the configuration for Kubernetes services
# to talk to OpenStack Neutron
cat > $KUBE_OS_CLOUD_CONFIG <<EOF
[Global]
auth-url=$AUTH_URL
Username=$USERNAME
Password=$PASSWORD
tenant-name=$TENANT_NAME
[LoadBalancer]
subnet-id=$CLUSTER_SUBNET
create-monitor=yes
monitor-delay=1m
monitor-timeout=30s
monitor-max-retries=3
EOF
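
This file is what the commented-out cloud-provider flags elsewhere in these fragments point at; enabling the integration would look like this (hypothetical, flag shape copied from the apiserver fragment above):

KUBE_API_ARGS="$KUBE_API_ARGS --cloud_config=/etc/sysconfig/kube_openstack_config --cloud_provider=openstack"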


@@ -1,24 +0,0 @@
#cloud-config
merge_how: dict(recurse_array)+list(append)
write_files:
- path: /etc/kubernetes/config/worker-kubeconfig.yaml
owner: "root:root"
permissions: "0644"
content: |
apiVersion: v1
kind: Config
clusters:
- name: local
cluster:
certificate-authority: /etc/kubernetes/ssl/ca.pem
users:
- name: kubelet
user:
client-certificate: /etc/kubernetes/ssl/worker.pem
client-key: /etc/kubernetes/ssl/worker-key.pem
contexts:
- context:
cluster: local
user: kubelet
name: kubelet-context
current-context: kubelet-context


@@ -1,28 +0,0 @@
#!/bin/sh
. /etc/sysconfig/heat-params
if [ "$NETWORK_DRIVER" != "flannel" ]; then
exit 0
fi
. /etc/sysconfig/flanneld
FLANNEL_JSON=/etc/sysconfig/flannel-network.json
FLANNELD_CONFIG=/etc/sysconfig/flanneld
sed -i '
/^FLANNEL_ETCD=/ s/=.*/="http:\/\/127.0.0.1:2379"/
' /etc/sysconfig/flanneld
# Generate a flannel configuration that we will
# store into etcd using curl.
cat > $FLANNEL_JSON <<EOF
{
"Network": "$FLANNEL_NETWORK_CIDR",
"Subnetlen": $FLANNEL_NETWORK_SUBNETLEN,
"Backend": {
"Type": "$FLANNEL_BACKEND"
}
}
EOF


@@ -232,7 +232,7 @@ resources:
     properties:
       count: 1
       resource_def:
-        type: kubemaster-coreos.yaml
+        type: kubemaster.yaml
         properties:
           ssh_key_name: {get_param: ssh_key_name}
           server_image: {get_param: server_image}
@@ -273,7 +273,7 @@
       count: {get_param: number_of_minions}
       removal_policies: [{resource_list: {get_param: minions_to_remove}}]
       resource_def:
-        type: kubeminion-coreos.yaml
+        type: kubeminion.yaml
         properties:
           ssh_key_name: {get_param: ssh_key_name}
           server_image: {get_param: server_image}


@@ -3,7 +3,7 @@ heat_template_version: 2014-10-16
 description: >
   This is a nested stack that defines a Kubernetes master. This stack is
   included by a ResourceGroup resource in the parent template
-  (kubecluster-coreos.yaml).
+  (kubecluster.yaml).

 parameters:
@@ -202,73 +202,73 @@
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/configure-etcd-coreos.yaml}
+      config: {get_file: fragments/configure-etcd.yaml}

   make_cert:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/make-cert-coreos.yaml}
+      config: {get_file: fragments/make-cert.yaml}

   write_network_config:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/write-network-config-coreos.yaml}
+      config: {get_file: fragments/write-network-config.yaml}

   enable_network_service:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/enable-network-service-coreos.yaml}
+      config: {get_file: fragments/enable-network-service.yaml}

   enable_kubelet:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/enable-kubelet-master-coreos.yaml}
+      config: {get_file: fragments/enable-kubelet-master.yaml}

   enable_kube_apiserver:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/enable-kube-apiserver-coreos.yaml}
+      config: {get_file: fragments/enable-kube-apiserver.yaml}

   create_kube_namespace:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/create-kube-namespace-coreos.yaml}
+      config: {get_file: fragments/create-kube-namespace.yaml}

   enable_kube_proxy:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/enable-kube-proxy-master-coreos.yaml}
+      config: {get_file: fragments/enable-kube-proxy-master.yaml}

   enable_kube_controller_manager:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/enable-kube-controller-manager-coreos.yaml}
+      config: {get_file: fragments/enable-kube-controller-manager.yaml}

   enable_kube_scheduler:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/enable-kube-scheduler-coreos.yaml}
+      config: {get_file: fragments/enable-kube-scheduler.yaml}

   enable_kube_podmaster:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/enable-kube-podmaster-coreos.yaml}
+      config: {get_file: fragments/enable-kube-podmaster.yaml}

   wc_notify:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/wc-notify-coreos.yaml}
+      config: {get_file: fragments/wc-notify.yaml}

   kube_master_init:
     type: OS::Heat::SoftwareConfig


@@ -3,7 +3,7 @@ heat_template_version: 2014-10-16
 description: >
   This is a nested stack that defines a single Kubernetes minion,
   based on a CoreOS cloud image. This stack is included by a ResourceGroup
-  resource in the parent template (kubecluster-coreos.yaml).
+  resource in the parent template (kubecluster.yaml).

 parameters:
@@ -164,37 +164,37 @@
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/write-kubeconfig-coreos.yaml}
+      config: {get_file: fragments/write-kubeconfig.yaml}

   make_cert:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/make-cert-client-coreos.yaml}
+      config: {get_file: fragments/make-cert-client.yaml}

   enable_network_service:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/enable-network-service-coreos.yaml}
+      config: {get_file: fragments/enable-network-service.yaml}

   enable_kubelet:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/enable-kubelet-minion-coreos.yaml}
+      config: {get_file: fragments/enable-kubelet-minion.yaml}

   enable_kube_proxy:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/enable-kube-proxy-minion-coreos.yaml}
+      config: {get_file: fragments/enable-kube-proxy-minion.yaml}

   wc_notify:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: fragments/wc-notify-coreos.yaml}
+      config: {get_file: fragments/wc-notify.yaml}

   kube_minion_init:
     type: OS::Heat::SoftwareConfig