Kubernetes salt formula, first version.
commit acdae7e366
README.rst (Normal file, 184 lines)
@@ -0,0 +1,184 @@
==================
Kubernetes Formula
==================

Based on the official Kubernetes Salt states:
https://github.com/kubernetes/kubernetes/tree/master/cluster/saltbase

Extended with the OpenContrail integration contributed by Juniper:
https://github.com/Juniper/kubernetes/blob/opencontrail-integration/docs/getting-started-guides/opencontrail.md

Sample pillars
==============

.. code-block:: yaml

    kubernetes:
      master:

    kubernetes:
      pool:

Kubernetes with OpenContrail
----------------------------

On Master:

.. code-block:: yaml

    kubernetes:
      master:
        network:
          engine: opencontrail
          host: 10.0.170.70
          port: 8082
          default_domain: default-domain
          default_project: default-domain:default-project
          public_network: default-domain:default-project:Public
          public_ip_range: 185.22.97.128/26
          private_ip_range: 10.150.0.0/16
          service_cluster_ip_range: 10.254.0.0/16
          network_label: name
          service_label: uses
          cluster_service: kube-system/default
          network_manager:
            image: pupapaik/opencontrail-kube-network-manager
            tag: release-1.1-jpa-final-1

On pools:

.. code-block:: yaml

    kubernetes:
      pool:
        network:
          engine: opencontrail

Kubernetes with Flannel
-----------------------

On Master:

.. code-block:: yaml

    kubernetes:
      master:
        network:
          engine: flannel
    common:
      network:
        engine: flannel

On pools:

.. code-block:: yaml

    kubernetes:
      pool:
        network:
          engine: flannel
    common:
      network:
        engine: flannel

Kubernetes with Calico
----------------------

On Master:

.. code-block:: yaml

    kubernetes:
      master:
        network:
          engine: calico

On pools:

.. code-block:: yaml

    kubernetes:
      pool:
        network:
          engine: calico

Kubernetes Service Definitions
------------------------------

.. code-block:: yaml

    salt:
      control:
        enabled: True
        hostNetwork: True
        service:
          memcached:
            privileged: True
            service: memcached
            role: server
            type: LoadBalancer
            replicas: 3
            kind: Deployment
            apiVersion: extensions/v1beta1
            ports:
            - port: 8774
              name: nova-api
            - port: 8775
              name: nova-metadata
            volume:
              volume_name:
                type: hostPath
                mount: /certs
                path: /etc/certs
            container:
              memcached:
                image: memcached
                tag: 2
                ports:
                - port: 8774
                  name: nova-api
                - port: 8775
                  name: nova-metadata
                variables:
                - name: HTTP_TLS_CERTIFICATE
                  value: /certs/domain.crt
                - name: HTTP_TLS_KEY
                  value: /certs/domain.key
                volumes:
                - name: /etc/certs
                  type: hostPath
                  mount: /certs
                  path: /etc/certs
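
The ``kubernetes.control`` state renders each such definition into plain
Kubernetes manifests under ``/srv/kubernetes``. As a hedged sketch only (the
exact output depends on the ``svc.yml`` template, so the fields shown are
illustrative), the ``memcached`` entry above could produce a Service roughly
like:

.. code-block:: yaml

    apiVersion: v1
    kind: Service
    metadata:
      name: memcached
      labels:
        service: memcached
        role: server
    spec:
      type: LoadBalancer
      ports:
      - port: 8774
        name: nova-api
      - port: 8775
        name: nova-metadata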

Volumes
-------

hostPath
========

.. code-block:: yaml

    container:
      memcached:
        ...
        volumes:
          - name: /etc/certs
            mount: /certs
            type: hostPath
            path: /etc/certs

emptyDir
========

.. code-block:: yaml

    container:
      memcached:
        ...
        volumes:
          - name: /etc/certs
            mount: /certs
            type: emptyDir
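
Both variants map onto standard Kubernetes pod volumes. A minimal sketch of
the resulting pod spec, assuming a generated volume name ``certs`` (an
illustrative name, not necessarily what the templates emit):

.. code-block:: yaml

    spec:
      containers:
      - name: memcached
        volumeMounts:
        - name: certs
          mountPath: /certs    # the "mount" key above
      volumes:
      - name: certs
        hostPath:
          path: /etc/certs     # shares a node directory into the pod
      # the emptyDir variant instead creates scratch space tied to the pod:
      # - name: certs
      #   emptyDir: {}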

kubernetes/_common.sls (Normal file, 31 lines)
@@ -0,0 +1,31 @@
{% from "kubernetes/map.jinja" import common with context %}

kubernetes_pkgs:
  pkg.installed:
    - names: {{ common.pkgs }}

kubernetes_binaries:
  cmd.run:
    - name: 'wget -r --no-parent --reject "index.html*" http://apt.tcpcloud.eu/kubernetes/bin/{{ common.binaries_version }}/ && chmod +x -R /root/apt.tcpcloud.eu/kubernetes/bin/{{ common.binaries_version }}/*'
    - pwd: /root
    - unless: test -d /root/apt.tcpcloud.eu/kubernetes/bin/

etcdctl_binaries:
  cmd.run:
    - name: "curl -L https://github.com/coreos/etcd/releases/download/v2.2.1/etcd-v2.2.1-linux-amd64.tar.gz -o etcd-v2.2.1-linux-amd64.tar.gz; tar -zxvf etcd-v2.2.1-linux-amd64.tar.gz"
    - pwd: /root
    - unless: test -f /root/etcd-v2.2.1-linux-amd64.tar.gz

{%- if common.network.get('engine', 'none') == 'flannel' %}
flannel-tar:
  archive:
    - extracted
    - user: root
    - name: /usr/local/src
    - makedirs: True
    - source: https://storage.googleapis.com/kubernetes-release/flannel/flannel-0.5.5-linux-amd64.tar.gz
    - tar_options: v
    - source_hash: md5=972c717254775bef528f040af804f2cc
    - archive_format: tar
    - if_missing: /usr/local/src/flannel/flannel-0.5.5/
{%- endif %}
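
This state dereferences ``common.pkgs``, ``common.binaries_version`` and
``common.network.engine`` from ``kubernetes/map.jinja``. A minimal pillar
sketch of that shape (the values are illustrative assumptions, not formula
defaults):

.. code-block:: yaml

    kubernetes:
      common:
        pkgs:
        - curl
        binaries_version: v1.2.4
        network:
          engine: flannel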

kubernetes/control/cluster.sls (Normal file, 38 lines)
@@ -0,0 +1,38 @@
{% from "kubernetes/map.jinja" import control with context %}
{%- if control.enabled %}

/srv/kubernetes:
  file.directory:
    - makedirs: true

{%- for service_name, service in control.service.iteritems() %}

{%- if service.enabled == true %}
/srv/kubernetes/services/{{ service_name }}-svc.yml:
  file.managed:
    - source: salt://kubernetes/files/svc.yml
    - user: root
    - group: root
    - template: jinja
    - makedirs: true
    - require:
      - file: /srv/kubernetes
    - defaults:
        service: {{ service|yaml }}
{%- endif %}

/srv/kubernetes/{{ service.cluster }}/{{ service_name }}-{{ service.kind }}.yml:
  file.managed:
    - source: salt://kubernetes/files/rc.yml
    - user: root
    - group: root
    - template: jinja
    - makedirs: true
    - require:
      - file: /srv/kubernetes
    - defaults:
        service: {{ service|yaml }}

{%- endfor %}

{%- endif %}
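
Note which keys each service entry must carry for this state to render: it is
gated on ``service.enabled``, and the controller file path is built from
``service.cluster`` and ``service.kind``. The README sample sets ``kind`` but
omits ``cluster`` and ``enabled``, which this state also reads. A minimal
sketch (values illustrative):

.. code-block:: yaml

    salt:
      control:
        enabled: True
        service:
          memcached:
            enabled: True
            cluster: kube-system
            kind: Deployment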

kubernetes/control/init.sls (Normal file, 3 lines)
@@ -0,0 +1,3 @@
include:
  - kubernetes.control.cluster

kubernetes/files/basic_auth.csv (Normal file, 2 lines)
@@ -0,0 +1,2 @@
{%- from "kubernetes/map.jinja" import master with context %}
{{ master.admin.username }},{{ master.admin.password }},admin
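
This template reads admin credentials from the master pillar; the expected
shape (placeholder values):

.. code-block:: yaml

    kubernetes:
      master:
        admin:
          username: admin
          password: changeme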

kubernetes/files/calico/calico-node.service (Normal file, 16 lines)
@@ -0,0 +1,16 @@
[Unit]
Description=Calico per-node agent
Documentation=https://github.com/projectcalico/calico-docker
Requires=docker.service
After=docker.service

[Service]
User=root
EnvironmentFile=/etc/calico/network-environment
PermissionsStartOnly=true
ExecStart=/usr/bin/calicoctl node --ip=${DEFAULT_IPV4} --detach=false
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target

kubernetes/files/calico/calico.conf (Normal file, 10 lines)
@@ -0,0 +1,10 @@
{%- from "kubernetes/map.jinja" import pool with context %}
{
    "name": "calico-k8s-network",
    "type": "calico",
    "etcd_authority": "{{ pool.master.host }}:6666",
    "log_level": "info",
    "ipam": {
        "type": "calico-ipam"
    }
}

kubernetes/files/calico/network-environment.master (Normal file, 7 lines)
@@ -0,0 +1,7 @@
{%- from "kubernetes/map.jinja" import master with context %}
# This host's IPv4 address (the source IP address used to reach other nodes
# in the Kubernetes cluster).
DEFAULT_IPV4={{ master.apiserver.address }}

# IP and port of etcd instance used by Calico
ETCD_AUTHORITY={{ master.apiserver.address }}:6666

kubernetes/files/calico/network-environment.pool (Normal file, 10 lines)
@@ -0,0 +1,10 @@
{%- from "kubernetes/map.jinja" import pool with context %}
# This host's IPv4 address (the source IP address used to reach other nodes
# in the Kubernetes cluster).
DEFAULT_IPV4={{ pool.address }}

# The Kubernetes master IP
KUBERNETES_MASTER={{ pool.master.host }}

# IP and port of etcd instance used by Calico
ETCD_AUTHORITY={{ pool.master.host }}:6666
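
The Calico templates resolve everything from the ``pool`` map; a sketch of
the matching node pillar (addresses are placeholders):

.. code-block:: yaml

    kubernetes:
      pool:
        address: 10.0.170.101
        master:
          host: 10.0.170.70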

kubernetes/files/flannel/default.master (Normal file, 3 lines)
@@ -0,0 +1,3 @@
{%- from "kubernetes/map.jinja" import master with context %}

DAEMON_ARGS="--etcd-endpoints={% for member in master.etcd.members %}http://{{ member.host }}:4001{% if not loop.last %},{% endif %}{% endfor %} --ip-masq --etcd-prefix=/kubernetes.io/network"
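
The Jinja loop joins ``master.etcd.members`` into a comma-separated endpoint
list. With a two-member pillar such as the sketch below (hosts are
placeholders), the rendered line would be
``DAEMON_ARGS="--etcd-endpoints=http://10.0.170.70:4001,http://10.0.170.71:4001 --ip-masq --etcd-prefix=/kubernetes.io/network"``:

.. code-block:: yaml

    kubernetes:
      master:
        etcd:
          members:
          - host: 10.0.170.70
          - host: 10.0.170.71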

kubernetes/files/flannel/default.pool (Normal file, 3 lines)
@@ -0,0 +1,3 @@
{%- from "kubernetes/map.jinja" import pool with context %}

DAEMON_ARGS="--etcd-endpoints={% for member in pool.master.etcd.members %}http://{{ member.host }}:4001{% if not loop.last %},{% endif %}{% endfor %} --ip-masq --etcd-prefix=/kubernetes.io/network"

kubernetes/files/flannel/initd (Normal file, 126 lines)
@@ -0,0 +1,126 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides:          flanneld
# Required-Start:    $local_fs $network $syslog
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Flannel daemon
# Description:
#   Flannel daemon.
### END INIT INFO


# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="Flannel overlay network daemon"
NAME=flannel
DAEMON=/usr/local/bin/flanneld
DAEMON_ARGS="--ip-masq"
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

#
# Function that starts the daemon/service
#
do_start()
{
    # Avoid a potential race at boot time when both monit and init.d start
    # the same service
    PIDS=$(pidof $DAEMON)
    for PID in ${PIDS}; do
        kill -9 $PID
    done

    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
        || return 1
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER -- \
        $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
        || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    # Many daemons don't delete their pidfiles when they exit.
    rm -f $PIDFILE
    return "$RETVAL"
}


case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
        0|1) log_end_msg 0 || exit 0 ;;
        2)   log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
        0|1) log_end_msg 0 ;;
        2)   exit 1 ;;
    esac
    ;;
  status)
    status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
    ;;

  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1)
        do_start
        case "$?" in
            0) log_end_msg 0 ;;
            1) log_end_msg 1 ;; # Old process is still running
            *) log_end_msg 1 ;; # Failed to start
        esac
        ;;
      *)
        # Failed to stop
        log_end_msg 1
        ;;
    esac
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac

kubernetes/files/flannel/network.json (Normal file, 9 lines)
@@ -0,0 +1,9 @@
{%- from "kubernetes/map.jinja" import master with context %}
{
    "Network": "{{ master.network.private_ip_range }}",
    "SubnetLen": 24,
    "Backend": {
        "Type": "vxlan",
        "VNI": 1
    }
}

kubernetes/files/known_tokens.csv (Normal file, 9 lines)
@@ -0,0 +1,9 @@
{%- from "kubernetes/map.jinja" import master with context %}
{{ master.token.admin }},admin,admin
{{ master.token.kubelet }},kubelet,kubelet
{{ master.token.kube_proxy }},kube_proxy,kube_proxy
{{ master.token.scheduler }},system:scheduler,system:scheduler
{{ master.token.controller_manager }},system:controller_manager,system:controller_manager
{{ master.token.logging }},system:logging,system:logging
{{ master.token.monitoring }},system:monitoring,system:monitoring
{{ master.token.dns }},system:dns,system:dns
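
Each row is ``token,user,uid``, so the master pillar needs one token per
component. A sketch of the expected keys (token values are placeholders):

.. code-block:: yaml

    kubernetes:
      master:
        token:
          admin: 9tQkjLtaAiYNPA7L
          kubelet: 9tQkjLtaAiYNPA7L
          kube_proxy: 9tQkjLtaAiYNPA7L
          scheduler: 9tQkjLtaAiYNPA7L
          controller_manager: 9tQkjLtaAiYNPA7L
          logging: 9tQkjLtaAiYNPA7L
          monitoring: 9tQkjLtaAiYNPA7L
          dns: 9tQkjLtaAiYNPA7L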

@@ -0,0 +1,17 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard-address
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: kubernetes-dashboard
  deprecatedPublicIPs: ["{{ master.addons.ui.public_ip }}"]
  type: LoadBalancer
  ports:
  - port: 80
    targetPort: 9090

@@ -0,0 +1,39 @@
apiVersion: v1
kind: ReplicationController
metadata:
  # Keep the name in sync with image version and
  # gce/coreos/kube-manifests/addons/dashboard counterparts
  name: kubernetes-dashboard-v1.1.0-beta2
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: kubernetes-dashboard
        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.1.0-beta2
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 50Mi
          requests:
            cpu: 100m
            memory: 50Mi
        ports:
        - containerPort: 9090
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30

@@ -0,0 +1,16 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: Endpoints
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
subsets:
- addresses:
  - ip: {{ master.addons.ui.public_ip }}

  ports:
  - port: 9090
    protocol: TCP

@@ -0,0 +1,18 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
{%- if master.network.engine != 'opencontrail' %}
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort
{%- endif %}
  ports:
  - port: 80
    targetPort: 9090

kubernetes/files/kube-addons/dns/skydns-rc.yaml (Normal file, 100 lines)
@@ -0,0 +1,100 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-dns-v9
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    version: v9
    kubernetes.io/cluster-service: "true"
spec:
  replicas: {{ master.addons.dns.replicas }}
  selector:
    k8s-app: kube-dns
    version: v9
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        version: v9
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: etcd
        image: gcr.io/google_containers/etcd:2.0.9
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
        command:
        - /usr/local/bin/etcd
        - -data-dir
        - /var/etcd/data
        - -listen-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -advertise-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -initial-cluster-token
        - skydns-etcd
        volumeMounts:
        - name: etcd-storage
          mountPath: /var/etcd/data
      - name: kube2sky
        image: gcr.io/google_containers/kube2sky:1.11
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
        args:
        # command = "/kube2sky"
        - -domain={{ master.addons.dns.domain }}
      - name: skydns
        image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
        args:
        # command = "/skydns"
        - -machines=http://127.0.0.1:4001
        - -addr=0.0.0.0:53
        - -ns-rotate=false
        - -domain={{ master.addons.dns.domain }}.
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 1
          timeoutSeconds: 5
      - name: healthz
        image: gcr.io/google_containers/exechealthz:1.0
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
        args:
        - -cmd=nslookup kubernetes.default.svc.{{ master.addons.dns.domain }} localhost >/dev/null
        - -port=8080
        ports:
        - containerPort: 8080
          protocol: TCP
      volumes:
      - name: etcd-storage
        emptyDir: {}
      dnsPolicy: Default  # Don't use cluster DNS.

kubernetes/files/kube-addons/dns/skydns-svc.yaml (Normal file, 21 lines)
@@ -0,0 +1,21 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ master.addons.dns.server }}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
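
The addon manifests in this commit pull their tunables from ``master.addons``.
A consolidated pillar sketch of the keys referenced across the DNS, dashboard,
kube-ui and Heapster templates (addresses and domain are placeholders):

.. code-block:: yaml

    kubernetes:
      master:
        addons:
          dns:
            replicas: 1
            domain: cluster.local
            server: 10.254.0.10
          ui:
            public_ip: 185.22.97.131
          heapster_influxdb:
            public_ip: 185.22.97.132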

@@ -0,0 +1,18 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: 'Heapster'
  name: heapster-address
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster
  deprecatedPublicIPs: ['{{ master.addons.heapster_influxdb.public_ip }}']
  type: LoadBalancer

@@ -0,0 +1,30 @@
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    k8s-app: heapster
    version: v6
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  selector:
    k8s-app: heapster
    version: v6
  template:
    metadata:
      labels:
        # name: heapster
        uses: monitoring-influxdb
        k8s-app: heapster
        version: v6
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: heapster
        image: kubernetes/heapster:canary
        imagePullPolicy: Always
        command:
        - /heapster
        - --source=kubernetes:https://kubernetes.default
        - --sink=influxdb:http://monitoring-influxdb:8086

@@ -0,0 +1,17 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: Endpoints
metadata:
  name: heapster
  namespace: kube-system
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "Heapster"
subsets:
- addresses:
  - ip: {{ master.addons.heapster_influxdb.public_ip }}

  ports:
  - port: 8082
    protocol: TCP

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: heapster
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: 'Heapster'
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082

@@ -0,0 +1,25 @@
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    name: influxGrafana
  name: influxdb-grafana
  namespace: kube-system
spec:
  replicas: 1
  selector:
    name: influxGrafana
  template:
    metadata:
      labels:
        name: influxGrafana
    spec:
      containers:
      - name: influxdb
        image: kubernetes/heapster_influxdb:v0.6
        volumeMounts:
        - mountPath: /data
          name: influxdb-storage
      volumes:
      - name: influxdb-storage
        emptyDir: {}

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    name: monitoring-influxdb
  name: monitoring-influxdb
  namespace: kube-system
spec:
  ports:
  - name: http
    port: 8083
    targetPort: 8083
  - name: api
    port: 8086
    targetPort: 8086
  selector:
    name: influxGrafana

kubernetes/files/kube-addons/initd (Normal file, 120 lines)
@@ -0,0 +1,120 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides:          kube-addons
# Required-Start:    $local_fs $network $syslog kube-apiserver
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Kubernetes Addon Object Manager
# Description:
#   Enforces installation of Kubernetes Addon Objects
### END INIT INFO


# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="Kubernetes Addon Object Manager"
NAME=kube-addons
DAEMON_LOG_FILE=/var/log/${NAME}.log
PIDFILE=/var/run/${NAME}.pid
SCRIPTNAME=/etc/init.d/${NAME}
KUBE_ADDONS_SH=/etc/kubernetes/kube-addons.sh

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions


#
# Function that starts the daemon/service
#
do_start()
{
    # use setsid to make sure the new daemon has its own group (I suppose
    # start-stop-daemon does create a process group, but let's stay on the
    # safe side).
    setsid start-stop-daemon --start --verbose --background --no-close --make-pidfile --pidfile "${PIDFILE}" --startas "${KUBE_ADDONS_SH}" </dev/null >> ${DAEMON_LOG_FILE} 2>&1
}

#
# Function that stops the daemon/service
#
do_stop()
{
    # start-stop-daemon is not used because we have to stop all children
    # limitations:
    # - stop does not work if the pid file is missing
    # - stop does not work if the daemon process is missing (children will not
    #   be killed)
    # This is sufficient - remaining processes will end after a while.

    local pid
    pid=$(cat "${PIDFILE}" 2> /dev/null)
    if [[ $? != 0 ]]; then
        return 1
    fi
    local pgrp
    # find the process group for the service and kill entire group
    # o - output format: pgrp - process group
    pgrp=$(ps --no-headers --pid "${pid}" -o pgrp 2>/dev/null)
    if [[ $? != 0 ]] || [[ "${pgrp}" == "" ]]; then
        return 1
    fi
    pgrp=$(echo -e ${pgrp})  # strip whitespaces (that's why there are no quotes around pgrp)
    # negative pid is for killing entire group
    kill -- -${pgrp} 2> /dev/null
    if [[ $? != 0 ]]; then
        return 2
    fi
    rm -f "${PIDFILE}"
    return
}

case "$1" in
  start)
    log_daemon_msg "Starting ${DESC}" "${NAME}"
    do_start
    case "$?" in
        0|1) log_end_msg 0 || exit 0 ;;
        2)   log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping ${DESC}" "${NAME}"
    do_stop
    case "$?" in
        0|1) log_end_msg 0 || exit 0 ;;
        2)   log_end_msg 1 || exit 1 ;;
    esac
    ;;
  status)
    status_of_proc -p "${PIDFILE}" "${KUBE_ADDONS_SH}" "${NAME}"
    ;;

  restart|force-reload)
    log_daemon_msg "Restarting ${DESC}" "${NAME}"
    do_stop
    case "$?" in
      0|1)
        do_start
        case "$?" in
            0) log_end_msg 0 ;;
            1) log_end_msg 1 ;; # Old process is still running
            *) log_end_msg 1 ;; # Failed to start
        esac
        ;;
      *)
        # Failed to stop
        log_end_msg 1
        ;;
    esac
    ;;
  *)
    echo "Usage: ${SCRIPTNAME} {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac

kubernetes/files/kube-addons/kube-addon-update.sh (Normal file, 507 lines)
@@ -0,0 +1,507 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The business logic for whether a given object should be created
# was already enforced by salt, and /etc/kubernetes/addons is the
# managed result of that. Start everything below that directory.

# Parameters
# $1 path to add-ons


# LIMITATIONS
# 1. controllers are not updated unless their name is changed
# 3. Services will not be updated unless their name is changed,
#    but for services we actually want updates without name change.
# 4. Json files are not handled at all. Currently addons must be
#    in yaml files
# 5. exit code is probably not always correct (I haven't checked
#    carefully if it works in 100% cases)
# 6. There are no unittests
# 8. Will not work if the total length of paths to addons is greater than
#    bash can handle. Probably it is not a problem: ARG_MAX=2097152 on GCE.
# 9. Performance issue: yaml files are read many times in a single execution.

# cosmetic improvements to be done
# 1. improve the log function; add timestamp, file name, etc.
# 2. logging doesn't work from files that print things out.
# 3. kubectl prints the output to stderr (the output should be captured and then
#    logged)



# global config
KUBECTL=${TEST_KUBECTL:-}   # substitute for tests
KUBECTL=${KUBECTL:-${KUBECTL_BIN:-}}
KUBECTL=${KUBECTL:-/usr/local/bin/kubectl}
if [[ ! -x ${KUBECTL} ]]; then
  echo "ERROR: kubectl command (${KUBECTL}) not found or is not executable" 1>&2
  exit 1
fi

# If an add-on definition is incorrect, or a definition has just disappeared
# from the local directory, the script will still keep on retrying.
# The script does not end until all retries are done, so
# one invalid manifest may block updates of other add-ons.
# Be careful how you set these parameters
NUM_TRIES=1    # will be updated based on input parameters
DELAY_AFTER_ERROR_SEC=${TEST_DELAY_AFTER_ERROR_SEC:=10}


# remember that you can't log from functions that print some output (because
# logs are also printed on stdout)
# $1 level
# $2 message
function log() {
  # manage log levels manually here

  # add the timestamp if you find it useful
  case $1 in
    DB3 )
#        echo "$1: $2"
      ;;
    DB2 )
#        echo "$1: $2"
      ;;
    DBG )
#        echo "$1: $2"
      ;;
    INFO )
      echo "$1: $2"
      ;;
    WRN )
      echo "$1: $2"
      ;;
    ERR )
      echo "$1: $2"
      ;;
    * )
      echo "INVALID_LOG_LEVEL $1: $2"
      ;;
  esac
}

#$1 yaml file path
function get-object-kind-from-file() {
  # prints to stdout, so log cannot be used
  #WARNING: only yaml is supported
  cat $1 | python -c '''
try:
    import pipes,sys,yaml
    y = yaml.load(sys.stdin)
    labels = y["metadata"]["labels"]
    if ("kubernetes.io/cluster-service", "true") not in labels.iteritems():
        # all add-ons must have the label "kubernetes.io/cluster-service".
        # Otherwise we are ignoring them (the update will not work anyway)
        print "ERROR"
    else:
        print y["kind"]
except Exception, ex:
    print "ERROR"
'''
}

# $1 yaml file path
# returns a string of the form <namespace>/<name> (we call it nsnames)
function get-object-nsname-from-file() {
  # prints to stdout, so log cannot be used
  #WARNING: only yaml is supported
  #addons that do not specify a namespace are assumed to be in "default".
  cat $1 | python -c '''
try:
    import pipes,sys,yaml
    y = yaml.load(sys.stdin)
    labels = y["metadata"]["labels"]
    if ("kubernetes.io/cluster-service", "true") not in labels.iteritems():
        # all add-ons must have the label "kubernetes.io/cluster-service".
        # Otherwise we are ignoring them (the update will not work anyway)
        print "ERROR"
    else:
        try:
            print "%s/%s" % (y["metadata"]["namespace"], y["metadata"]["name"])
        except Exception, ex:
            print "default/%s" % y["metadata"]["name"]
except Exception, ex:
    print "ERROR"
'''
}

# $1 addon directory path
# $2 addon type (e.g. ReplicationController)
# echoes the string with paths to files containing addon for the given type
# works only for yaml files (!) (ignores json files)
function get-addon-paths-from-disk() {
  # prints to stdout, so log cannot be used
  local -r addon_dir=$1
  local -r obj_type=$2
  local kind
  local file_path
  for file_path in $(find ${addon_dir} -name \*.yaml); do
    kind=$(get-object-kind-from-file ${file_path})
    # WARNING: assumption that the topmost indentation is zero (I'm not sure yaml allows for topmost indentation)
    if [[ "${kind}" == "${obj_type}" ]]; then
      echo ${file_path}
    fi
  done
}

# waits for all subprocesses
# returns 0 if all of them were successful and 1 otherwise
function wait-for-jobs() {
  local rv=0
  local pid
  for pid in $(jobs -p); do
    wait ${pid}
    if [[ $? -ne 0 ]]; then
      rv=1;
      log ERR "error in pid ${pid}"
    fi
    log DB2 "pid ${pid} completed, current error code: ${rv}"
  done
  return ${rv}
}


function run-until-success() {
  local -r command=$1
  local tries=$2
  local -r delay=$3
  local -r command_name=$1
  while [ ${tries} -gt 0 ]; do
    log DBG "executing: '$command'"
    # let's give the command as an argument to bash -c, so that we can use
    # && and || inside the command itself
    /bin/bash -c "${command}" && \
      log DB3 "== Successfully executed ${command_name} at $(date -Is) ==" && \
      return 0
    let tries=tries-1
    log INFO "== Failed to execute ${command_name} at $(date -Is). ${tries} tries remaining. =="
    sleep ${delay}
  done
  return 1
}

# $1 object type
# returns a list of <namespace>/<name> pairs (nsnames)
function get-addon-nsnames-from-server() {
  local -r obj_type=$1
  "${KUBECTL}" get "${obj_type}" --all-namespaces -o go-template="{{range.items}}{{.metadata.namespace}}/{{.metadata.name}} {{end}}" --api-version=v1 -l kubernetes.io/cluster-service=true
}

# returns the characters after the last separator (including)
# If the separator is empty or if it doesn't appear in the string,
# an empty string is printed
# $1 input string
# $2 separator (must be single character, or empty)
function get-suffix() {
  # prints to stdout, so log cannot be used
  local -r input_string=$1
  local -r separator=$2
  local suffix

  if [[ "${separator}" == "" ]]; then
    echo ""
    return
  fi

  if [[ "${input_string}" == *"${separator}"* ]]; then
    suffix=$(echo "${input_string}" | rev | cut -d "${separator}" -f1 | rev)
    echo "${separator}${suffix}"
  else
    echo ""
  fi
}

# returns the characters up to the last '-' (without it)
# $1 input string
# $2 separator
function get-basename() {
  # prints to stdout, so log cannot be used
  local -r input_string=$1
  local -r separator=$2
  local suffix
  suffix="$(get-suffix ${input_string} ${separator})"
  # this will strip the suffix (if matches)
  echo ${input_string%$suffix}
}

function stop-object() {
  local -r obj_type=$1
  local -r namespace=$2
  local -r obj_name=$3
  log INFO "Stopping ${obj_type} ${namespace}/${obj_name}"

  run-until-success "${KUBECTL} stop --namespace=${namespace} ${obj_type} ${obj_name}" ${NUM_TRIES} ${DELAY_AFTER_ERROR_SEC}
}

function create-object() {
  local -r obj_type=$1
  local -r file_path=$2

  local nsname_from_file
  nsname_from_file=$(get-object-nsname-from-file ${file_path})
  if [[ "${nsname_from_file}" == "ERROR" ]]; then
    log INFO "Cannot read object name from ${file_path}. Ignoring"
    return 1
  fi
  IFS='/' read namespace obj_name <<< "${nsname_from_file}"

  log INFO "Creating new ${obj_type} from file ${file_path} in namespace ${namespace}, name: ${obj_name}"
  # this will keep on failing if the ${file_path} disappeared in the meantime.
  # Do not use too many retries.
  run-until-success "${KUBECTL} create --namespace=${namespace} -f ${file_path}" ${NUM_TRIES} ${DELAY_AFTER_ERROR_SEC}
}

function update-object() {
  local -r obj_type=$1
  local -r namespace=$2
  local -r obj_name=$3
  local -r file_path=$4
  log INFO "updating the ${obj_type} ${namespace}/${obj_name} with the new definition ${file_path}"
  stop-object ${obj_type} ${namespace} ${obj_name}
  create-object ${obj_type} ${file_path}
}

# deletes the objects from the server
# $1 object type
# $2 a list of object nsnames
function stop-objects() {
  local -r obj_type=$1
  local -r obj_nsnames=$2
  local namespace
  local obj_name
  for nsname in ${obj_nsnames}; do
    IFS='/' read namespace obj_name <<< "${nsname}"
    stop-object ${obj_type} ${namespace} ${obj_name} &
  done
}

# creates objects from the given files
# $1 object type
# $2 a list of paths to definition files
function create-objects() {
  local -r obj_type=$1
  local -r file_paths=$2
  local file_path
  for file_path in ${file_paths}; do
    # Remember that the file may have disappeared by now
    # But we don't want to check it here because
    # such race condition may always happen after
    # we check it. Let's have the race
    # condition happen a bit more often so that
    # we see that our tests pass anyway.
    create-object ${obj_type} ${file_path} &
  done
}

# updates objects
# $1 object type
# $2 a list of update specifications
# each update specification is a ';' separated pair: <nsname>;<file path>
function update-objects() {
  local -r obj_type=$1      # ignored
  local -r update_spec=$2
  local objdesc
  local nsname
  local obj_name
  local namespace

  for objdesc in ${update_spec}; do
    IFS=';' read nsname file_path <<< "${objdesc}"
    IFS='/' read namespace obj_name <<< "${nsname}"

    update-object ${obj_type} ${namespace} ${obj_name} ${file_path} &
  done
}

# Global variables set by function match-objects.
nsnames_for_delete=""   # a list of object nsnames to be deleted
for_update=""           # a list of pairs <nsname>;<filePath> for objects that should be updated
nsnames_for_ignore=""   # a list of object nsnames that will be ignored
new_files=""            # a list of file paths that weren't matched by any existing objects (these objects must be created now)


# $1 path to files with objects
# $2 object type in the API (ReplicationController or Service)
# $3 name separator (single character or empty)
function match-objects() {
  local -r addon_dir=$1
  local -r obj_type=$2
  local -r separator=$3

  # output variables (globals)
  nsnames_for_delete=""
  for_update=""
  nsnames_for_ignore=""
  new_files=""

  addon_nsnames_on_server=$(get-addon-nsnames-from-server "${obj_type}")
  addon_paths_in_files=$(get-addon-paths-from-disk "${addon_dir}" "${obj_type}")

  log DB2 "addon_nsnames_on_server=${addon_nsnames_on_server}"
  log DB2 "addon_paths_in_files=${addon_paths_in_files}"

  local matched_files=""

  local basensname_on_server=""
  local nsname_on_server=""
  local suffix_on_server=""
  local nsname_from_file=""
  local suffix_from_file=""
  local found=0
  local addon_path=""

  # objects that were moved between namespaces will have different nsname
  # because the namespace is included. So they will be treated
  # like different objects and not updated but deleted and created again
  # (in the current version update is also delete+create, so it does not matter)
  for nsname_on_server in ${addon_nsnames_on_server}; do
    basensname_on_server=$(get-basename ${nsname_on_server} ${separator})
    suffix_on_server="$(get-suffix ${nsname_on_server} ${separator})"

    log DB3 "Found existing addon ${nsname_on_server}, basename=${basensname_on_server}"

    # check if the addon is present in the directory and decide
    # what to do with it
    # this is not optimal because we're reading the files over and over
    # again. But for small number of addons it doesn't matter so much.
    found=0
    for addon_path in ${addon_paths_in_files}; do
      nsname_from_file=$(get-object-nsname-from-file ${addon_path})
      if [[ "${nsname_from_file}" == "ERROR" ]]; then
        log INFO "Cannot read object name from ${addon_path}. Ignoring"
        continue
      else
        log DB2 "Found object name '${nsname_from_file}' in file ${addon_path}"
      fi
      suffix_from_file="$(get-suffix ${nsname_from_file} ${separator})"

      log DB3 "matching: ${basensname_on_server}${suffix_from_file} == ${nsname_from_file}"
      if [[ "${basensname_on_server}${suffix_from_file}" == "${nsname_from_file}" ]]; then
        log DB3 "matched existing ${obj_type} ${nsname_on_server} to file ${addon_path}; suffix_on_server=${suffix_on_server}, suffix_from_file=${suffix_from_file}"
        found=1
        matched_files="${matched_files} ${addon_path}"
        if [[ "${suffix_on_server}" == "${suffix_from_file}" ]]; then
          nsnames_for_ignore="${nsnames_for_ignore} ${nsname_from_file}"
        else
          for_update="${for_update} ${nsname_on_server};${addon_path}"
        fi
        break
      fi
    done
    if [[ ${found} -eq 0 ]]; then
      log DB2 "No definition file found for replication controller ${nsname_on_server}. Scheduling for deletion"
      nsnames_for_delete="${nsnames_for_delete} ${nsname_on_server}"
    fi
  done

  log DB3 "matched_files=${matched_files}"


  # note that if the addon file is invalid (or got removed after listing files
  # but before we managed to match it) it will not be matched to any
  # of the existing objects. So we will treat it as a new file
  # and try to create its object.
  for addon_path in ${addon_paths_in_files}; do
    echo ${matched_files} | grep "${addon_path}" >/dev/null
    if [[ $? -ne 0 ]]; then
      new_files="${new_files} ${addon_path}"
    fi
  done
}



function reconcile-objects() {
  local -r addon_path=$1
  local -r obj_type=$2
  local -r separator=$3    # name separator
  match-objects ${addon_path} ${obj_type} ${separator}

  log DBG "${obj_type}: nsnames_for_delete=${nsnames_for_delete}"
  log DBG "${obj_type}: for_update=${for_update}"
  log DBG "${obj_type}: nsnames_for_ignore=${nsnames_for_ignore}"
  log DBG "${obj_type}: new_files=${new_files}"

  stop-objects "${obj_type}" "${nsnames_for_delete}"
  # wait-for-jobs below is a protection against changing the basename
  # of a replication controller without changing the selector.
  # If we don't wait, the new rc may be created before the old one is deleted
  # In such case the old one will wait for all its pods to be gone, but the pods
  # are created by the new replication controller.
  # passing --cascade=false could solve the problem, but we want
  # all orphan pods to be deleted.
  wait-for-jobs
  stopResult=$?

  create-objects "${obj_type}" "${new_files}"
  update-objects "${obj_type}" "${for_update}"

  local nsname
  for nsname in ${nsnames_for_ignore}; do
    log DB2 "The ${obj_type} ${nsname} is already up to date"
  done

  wait-for-jobs
  createUpdateResult=$?

  if [[ ${stopResult} -eq 0 ]] && [[ ${createUpdateResult} -eq 0 ]]; then
    return 0
  else
    return 1
  fi
}

function update-addons() {
  local -r addon_path=$1
  # be careful, reconcile-objects uses global variables
  reconcile-objects ${addon_path} ReplicationController "-" &

  # We don't expect names to be versioned for the following kinds, so
  # we match the entire name, ignoring version suffix.
  # That's why we pass an empty string as the version separator.
  # If the description differs on disk, the object should be recreated.
  # This is not implemented in this version.
  reconcile-objects ${addon_path} Service "" &
  reconcile-objects ${addon_path} PersistentVolume "" &
  reconcile-objects ${addon_path} PersistentVolumeClaim "" &

  wait-for-jobs
  if [[ $? -eq 0 ]]; then
    log INFO "== Kubernetes addon update completed successfully at $(date -Is) =="
  else
    log WRN "== Kubernetes addon update completed with errors at $(date -Is) =="
  fi
}

# input parameters:
# $1 input directory
# $2 retry period in seconds - the script will retry api-server errors for approximately
#    this amount of time (it is not very precise), at interval equal $DELAY_AFTER_ERROR_SEC.
#

if [[ $# -ne 2 ]]; then
  echo "Illegal number of parameters. Usage $0 addon-dir [retry-period]" 1>&2
  exit 1
fi

NUM_TRIES=$(($2 / ${DELAY_AFTER_ERROR_SEC}))
if [[ ${NUM_TRIES} -le 0 ]]; then
  NUM_TRIES=1
fi

addon_path=$1
update-addons ${addon_path}

kubernetes/files/kube-addons/kube-addons.service (Normal file, 9 lines)
@@ -0,0 +1,9 @@
[Unit]
Description=Kubernetes Addon Object Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/etc/kubernetes/kube-addons.sh

[Install]
WantedBy=multi-user.target

kubernetes/files/kube-addons/kube-addons.sh (Normal file, 226 lines)
@@ -0,0 +1,226 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The business logic for whether a given object should be created
# was already enforced by salt, and /etc/kubernetes/addons is the
# managed result of that. Start everything below that directory.
KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl}

ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-600}

SYSTEM_NAMESPACE=kube-system
token_dir=${TOKEN_DIR:-/srv/kubernetes}

function create-kubeconfig-secret() {
  local -r token=$1
  local -r username=$2
  local -r server=$3
  local -r safe_username=$(tr -s ':_' '--' <<< "${username}")

  # Make a kubeconfig file with the token.
  if [[ ! -z "${CA_CERT:-}" ]]; then
    # If the CA cert is available, put it into the secret rather than using
    # insecure-skip-tls-verify.
    read -r -d '' kubeconfig <<EOF
apiVersion: v1
kind: Config
users:
- name: ${username}
  user:
    token: ${token}
clusters:
- name: local
  cluster:
    server: ${server}
    certificate-authority-data: ${CA_CERT}
contexts:
- context:
    cluster: local
    user: ${username}
    namespace: ${SYSTEM_NAMESPACE}
  name: service-account-context
current-context: service-account-context
EOF
  else
    read -r -d '' kubeconfig <<EOF
apiVersion: v1
kind: Config
users:
- name: ${username}
  user:
    token: ${token}
clusters:
- name: local
  cluster:
    server: ${server}
    insecure-skip-tls-verify: true
contexts:
- context:
    cluster: local
    user: ${username}
    namespace: ${SYSTEM_NAMESPACE}
  name: service-account-context
current-context: service-account-context
EOF
  fi

  local -r kubeconfig_base64=$(echo "${kubeconfig}" | base64 -w0)
  read -r -d '' secretyaml <<EOF
apiVersion: v1
data:
  kubeconfig: ${kubeconfig_base64}
kind: Secret
metadata:
  name: token-${safe_username}
type: Opaque
EOF
  create-resource-from-string "${secretyaml}" 100 10 "Secret-for-token-for-user-${username}" "${SYSTEM_NAMESPACE}" &
}

# $1 filename of addon to start.
# $2 count of tries to start the addon.
# $3 delay in seconds between two consecutive tries
# $4 namespace
function start_addon() {
  local -r addon_filename=$1;
  local -r tries=$2;
  local -r delay=$3;
  local -r namespace=$4

  create-resource-from-string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}" "${namespace}"
}

# $1 string with json or yaml.
# $2 count of tries to start the addon.
# $3 delay in seconds between two consecutive tries
# $4 name of this object to use when logging about it.
# $5 namespace for this object
function create-resource-from-string() {
  local -r config_string=$1;
  local tries=$2;
  local -r delay=$3;
  local -r config_name=$4;
  local -r namespace=$5;
  while [ ${tries} -gt 0 ]; do
    echo "${config_string}" | ${KUBECTL} --namespace="${namespace}" create -f - && \
      echo "== Successfully started ${config_name} in namespace ${namespace} at $(date -Is)" && \
      return 0;
    let tries=tries-1;
    echo "== Failed to start ${config_name} in namespace ${namespace} at $(date -Is). ${tries} tries remaining. =="
    sleep ${delay};
  done
  return 1;
}

# $1 is the directory containing all of the docker images
function load-docker-images() {
  local success
  local restart_docker
  while true; do
    success=true
    restart_docker=false
    for image in "$1/"*; do
      timeout 30 docker load -i "${image}" &>/dev/null
      rc=$?
      if [[ "$rc" == 124 ]]; then
        restart_docker=true
      elif [[ "$rc" != 0 ]]; then
        success=false
      fi
    done
    if [[ "$success" == "true" ]]; then break; fi
    if [[ "$restart_docker" == "true" ]]; then service docker restart; fi
    sleep 15
  done
}

# The business logic for whether a given object should be created
# was already enforced by salt, and /etc/kubernetes/addons is the
# managed result of that. Start everything below that directory.
echo "== Kubernetes addon manager started at $(date -Is) with ADDON_CHECK_INTERVAL_SEC=${ADDON_CHECK_INTERVAL_SEC} =="

# Load the kube-env, which has all the environment variables we care
# about, in a flat yaml format.
kube_env_yaml="/var/cache/kubernetes-install/kube_env.yaml"
if [ ! -e "${kubelet_kubeconfig_file}" ]; then
  eval $(python -c '''
import pipes,sys,yaml

for k,v in yaml.load(sys.stdin).iteritems():
  print "readonly {var}={value}".format(var = k, value = pipes.quote(str(v)))
''' < "${kube_env_yaml}")
fi

# Load any images that we may need
load-docker-images /srv/salt/kube-addons-images

# Create the namespace that will be used to host the cluster-level add-ons.
start_addon /etc/kubernetes/addons/namespace.yaml 100 10 "" &

# Wait for the default service account to be created in the kube-system namespace.
token_found=""
while [ -z "${token_found}" ]; do
  sleep .5
  token_found=$(${KUBECTL} get --namespace="${SYSTEM_NAMESPACE}" serviceaccount default -o go-template="{{with index .secrets 0}}{{.name}}{{end}}" || true)
done

echo "== default service account in the ${SYSTEM_NAMESPACE} namespace has token ${token_found} =="

# Generate secrets for "internal service accounts".
# TODO(etune): move to a completely yaml/object based
# workflow so that service accounts can be created
# at the same time as the services that use them.
# NOTE: needs to run as root to read this file.
# Read each line in the csv file of tokens.
# Expect errors when the script is started again.
# NOTE: secrets are created asynchronously, in background.
while read line; do
  # Split each line into the token and username.
  IFS=',' read -a parts <<< "${line}"
  token=${parts[0]}
  username=${parts[1]}
  # DNS is special, since it's necessary for cluster bootstrapping.
  if [[ "${username}" == "system:dns" ]] && [[ ! -z "${KUBERNETES_MASTER_NAME:-}" ]]; then
    create-kubeconfig-secret "${token}" "${username}" "https://${KUBERNETES_MASTER_NAME}"
  else
    # Set the server to https://kubernetes. Pods/components that
    # do not have DNS available will have to override the server.
    create-kubeconfig-secret "${token}" "${username}" "https://kubernetes.default"
  fi
done < "${token_dir}/known_tokens.csv"

# Create admission_control objects if defined before any other addon services. If the limits
# are defined in a namespace other than default, we should still create the limits for the
# default namespace.
for obj in $(find /etc/kubernetes/admission-controls \( -name \*.yaml -o -name \*.json \)); do
  start_addon "${obj}" 100 10 default &
  echo "++ obj ${obj} is created ++"
done

# Check if the configuration has changed recently - in case the user
# created/updated/deleted the files on the master.
while true; do
  start_sec=$(date +"%s")
  # kube-addon-update.sh must be deployed in the same directory as this file
  `dirname $0`/kube-addon-update.sh /etc/kubernetes/addons ${ADDON_CHECK_INTERVAL_SEC}
  end_sec=$(date +"%s")
  len_sec=$((${end_sec}-${start_sec}))
  # subtract the time passed from the sleep time
  if [[ ${len_sec} -lt ${ADDON_CHECK_INTERVAL_SEC} ]]; then
    sleep_time=$((${ADDON_CHECK_INTERVAL_SEC}-${len_sec}))
    sleep ${sleep_time}
  fi
done

kubernetes/files/kube-addons/kube-ui/kube-ui-address.yaml (Normal file, 18 lines)
@@ -0,0 +1,18 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: Service
metadata:
  name: kube-ui-address
  namespace: kube-system
  labels:
    k8s-app: kube-ui
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeUI"
spec:
  selector:
    k8s-app: kube-ui
  deprecatedPublicIPs: ["{{ master.addons.ui.public_ip }}"]
  type: LoadBalancer
  ports:
  - port: 80
    targetPort: 8080
17 kubernetes/files/kube-addons/kube-ui/kube-ui-endpoint.yaml Normal file
@@ -0,0 +1,17 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: Endpoints
metadata:
  name: kube-ui
  namespace: kube-system
  labels:
    k8s-app: kube-ui
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeUI"
subsets:
- addresses:
  - ip: {{ master.addons.ui.public_ip }}

  ports:
  - port: 8080
    protocol: TCP
36 kubernetes/files/kube-addons/kube-ui/kube-ui-rc.yaml Normal file
@@ -0,0 +1,36 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-ui-v4
  namespace: kube-system
  labels:
    k8s-app: kube-ui
    version: v4
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-ui
    version: v4
  template:
    metadata:
      labels:
        k8s-app: kube-ui
        version: v4
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: kube-ui
        image: gcr.io/google_containers/kube-ui:v4
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
        ports:
        - containerPort: 8080
        livenessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 30
          timeoutSeconds: 5
13 kubernetes/files/kube-addons/kube-ui/kube-ui-svc.yaml Normal file
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: kube-ui
  namespace: kube-system
  labels:
    k8s-app: kube-ui
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeUI"
spec:
  ports:
  - port: 80
    targetPort: 8080
4 kubernetes/files/kube-addons/namespace.yml Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: kube-system
31 kubernetes/files/kube-gen-token.sh Normal file
@@ -0,0 +1,31 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

token_dir=${TOKEN_DIR:-/var/srv/kubernetes}
token_file="${token_dir}/known_tokens.csv"

create_accounts=($@)

touch "${token_file}"
for account in "${create_accounts[@]}"; do
  if grep ",${account}," "${token_file}" ; then
    continue
  fi
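  # Generate a random token: read 128 bytes from /dev/urandom, base64-encode them,
  # strip the URL-unsafe "=+/" characters, and keep the first 32 characters.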
  token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  echo "${token},${account},${account}" >> "${token_file}"
  echo "${token}" > "${token_dir}/${account}.token"
  echo "Added ${account}"
done
5 kubernetes/files/kube-proxy/default Normal file
@@ -0,0 +1,5 @@
{%- from "kubernetes/map.jinja" import pool with context %}

# test_args has to be kept at the end, so they'll overwrite any prior configuration
DAEMON_ARGS="--logtostderr=true --v=2 --kubeconfig=/var/lib/kube-proxy/kubeconfig --master=https://{{ pool.master.host }}{% if pool.network.engine == 'calico' %} --proxy-mode=iptables{% endif %}"
#--kubeconfig=/etc/kubernetes/proxy.kubeconfig"
130 kubernetes/files/kube-proxy/initd Normal file
@@ -0,0 +1,130 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides:          kube-proxy
# Required-Start:    $local_fs $network $syslog
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: The Kubernetes network proxy
# Description:
#   The Kubernetes network proxy enables network redirection and
#   loadbalancing for dynamically placed containers.
### END INIT INFO


# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="The Kubernetes network proxy"
NAME=kube-proxy
DAEMON=/usr/local/bin/kube-proxy
DAEMON_ARGS=""
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

#
# Function that starts the daemon/service
#
do_start()
{
    # Avoid a potential race at boot time when both monit and init.d start
    # the same service
    PIDS=$(pidof $DAEMON)
    for PID in ${PIDS}; do
        kill -9 $PID
    done

    # Raise the file descriptor limit - we expect to open a lot of sockets!
    ulimit -n 65536

    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
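    # The first start-stop-daemon call runs with --test to verify the daemon is
    # not already running; only the second call actually starts it.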
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
        || return 1
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER -- \
        $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
        || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    # Many daemons don't delete their pidfiles when they exit.
    rm -f $PIDFILE
    return "$RETVAL"
}


case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) log_end_msg 0 || exit 0 ;;
      2) log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1) log_end_msg 0 ;;
      2) exit 1 ;;
    esac
    ;;
  status)
    status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
    ;;

  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1)
        do_start
        case "$?" in
          0) log_end_msg 0 ;;
          1) log_end_msg 1 ;; # Old process is still running
          *) log_end_msg 1 ;; # Failed to start
        esac
        ;;
      *)
        # Failed to stop
        log_end_msg 1
        ;;
    esac
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac
12 kubernetes/files/kube-proxy/kube-proxy.service Normal file
@@ -0,0 +1,12 @@
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=/etc/sysconfig/kube-proxy
ExecStart=/usr/local/bin/kube-proxy $DAEMON_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
20 kubernetes/files/kube-proxy/proxy.kubeconfig Normal file
@@ -0,0 +1,20 @@
{%- from "kubernetes/map.jinja" import pool with context %}

apiVersion: v1
kind: Config
current-context: proxy-to-cluster.local
preferences: {}
contexts:
- context:
    cluster: cluster.local
    user: kube_proxy
  name: proxy-to-cluster.local
clusters:
- cluster:
    certificate-authority: /etc/ssl/certs/ca-{{ pool.ca }}.crt
#    server: https://{{ pool.master.host }}:443
  name: cluster.local
users:
- name: kube_proxy
  user:
    token: {{ pool.token.kube_proxy }}
4 kubernetes/files/kubelet/default.master Normal file
@@ -0,0 +1,4 @@
{%- from "kubernetes/map.jinja" import master with context %}

# test_args has to be kept at the end, so they'll overwrite any prior configuration
DAEMON_ARGS="--config=/etc/kubernetes/manifests --allow-privileged={{ master.kubelet.allow_privileged }} --cluster_dns={{ master.addons.dns.server }} --register-node=false --cluster_domain={{ master.addons.dns.domain }} --v=2"
4 kubernetes/files/kubelet/default.pool Normal file
@@ -0,0 +1,4 @@
{%- from "kubernetes/map.jinja" import pool with context %}

# test_args has to be kept at the end, so they'll overwrite any prior configuration
DAEMON_ARGS="--api-servers={% for member in pool.master.apiserver.members %}https://{{ member.host }}{% if not loop.last %},{% endif %}{% endfor %} --kubeconfig=/etc/kubernetes/kubelet.kubeconfig --config=/etc/kubernetes/manifests --allow-privileged={{ pool.kubelet.allow_privileged }} --cluster_dns={{ pool.cluster_dns }} --cluster_domain={{ pool.cluster_domain }} --v=2 {% if pool.network.engine == 'opencontrail' %}--network-plugin={{ pool.network.engine }}{% endif %} {% if pool.network.engine == 'calico' %}--network-plugin=cni --network-plugin-dir=/etc/cni/net.d{% endif %} --file-check-frequency={{ pool.kubelet.frequency }}"
126 kubernetes/files/kubelet/initd Normal file
@@ -0,0 +1,126 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides:          kubelet
# Required-Start:    $local_fs $network $syslog
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: The Kubernetes node container manager
# Description:
#   The Kubernetes container manager maintains docker state against a state file.
### END INIT INFO


# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="The Kubernetes container manager"
NAME=kubelet
DAEMON=/usr/local/bin/kubelet
DAEMON_ARGS=""
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

#
# Function that starts the daemon/service
#
do_start()
{
    # Avoid a potential race at boot time when both monit and init.d start
    # the same service
    PIDS=$(pidof $DAEMON)
    for PID in ${PIDS}; do
        kill -9 $PID
    done

    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
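    # As in the kube-proxy init script, the first call only --test-checks for a
    # running instance; the second call performs the actual start.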
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
        || return 1
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER -- \
        $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
        || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    # Many daemons don't delete their pidfiles when they exit.
    rm -f $PIDFILE
    return "$RETVAL"
}


case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) log_end_msg 0 || exit 0 ;;
      2) log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1) log_end_msg 0 ;;
      2) exit 1 ;;
    esac
    ;;
  status)
    status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
    ;;

  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1)
        do_start
        case "$?" in
          0) log_end_msg 0 ;;
          1) log_end_msg 1 ;; # Old process is still running
          *) log_end_msg 1 ;; # Failed to start
        esac
        ;;
      *)
        # Failed to stop
        log_end_msg 1
        ;;
    esac
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac
20 kubernetes/files/kubelet/kubelet.kubeconfig Normal file
@@ -0,0 +1,20 @@
{%- from "kubernetes/map.jinja" import pool with context %}

apiVersion: v1
kind: Config
current-context: kubelet-to-cluster.local
preferences: {}
clusters:
- cluster:
    certificate-authority: /etc/ssl/certs/ca-{{ pool.ca }}.crt
#    server: https://{{ pool.master.host }}:443
  name: cluster.local
contexts:
- context:
    cluster: cluster.local
    user: kubelet
  name: kubelet-to-cluster.local
users:
- name: kubelet
  user:
    token: {{ pool.token.kubelet }}
11 kubernetes/files/kubelet/kubelet.service Normal file
@@ -0,0 +1,11 @@
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=/etc/sysconfig/kubelet
ExecStart=/usr/local/bin/kubelet $DAEMON_ARGS
Restart=always

[Install]
WantedBy=multi-user.target
39 kubernetes/files/manifest/cadvisor.manifest Normal file
@@ -0,0 +1,39 @@
apiVersion: v1
kind: Pod
metadata:
  name: cadvisor
  namespace: kube-system
spec:
  containers:
  - name: cadvisor
    image: google/cadvisor:latest
    ports:
    - name: http
      containerPort: 8080
      hostPort: 8080
    volumeMounts:
    - name: root
      mountPath: /rootfs
      readOnly: true
    - name: varrun
      mountPath: /var/run
      readOnly: false
    - name: varlibdocker
      mountPath: /var/lib/docker
      readOnly: true
    - name: sys
      mountPath: /sys
      readOnly: true
  volumes:
  - name: root
    hostPath:
      path: /
  - name: varrun
    hostPath:
      path: /var/run
  - name: varlibdocker
    hostPath:
      path: /var/lib/docker
  - name: sys
    hostPath:
      path: /sys
31 kubernetes/files/manifest/calico-etcd.manifest Normal file
@@ -0,0 +1,31 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: Pod
metadata:
  name: calico-etcd
  namespace: calico-system
spec:
  hostNetwork: true
  containers:
  - name: calico-etcd-container
    image: gcr.io/google_containers/etcd:2.2.1
    command:
    - "/usr/local/bin/etcd"
    - "--name=calico-etcd"
    - "--data-dir=/var/etcd/calico-data"
    - "--advertise-client-urls=http://{{ master.apiserver.address }}:6666"
    - "--listen-client-urls=http://0.0.0.0:6666"
    - "--listen-peer-urls=http://0.0.0.0:6660"
    securityContext:
      privileged: true
    ports:
    - name: clientport
      containerPort: 6666
      hostPort: 6666
    volumeMounts:
    - mountPath: /var/etcd
      name: varetcd
  volumes:
  - name: "varetcd"
    hostPath:
      path: "/mnt/master-pd/var/etcd"
47 kubernetes/files/manifest/contrail-vrouter-agent.manifest Normal file
@@ -0,0 +1,47 @@
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "contrail-vrouter-agent",
    "namespace": "kube-system"
  },
  "spec": {
    "hostNetwork": true,
    "containers": [
      {
        "name": "vrouter-agent",
        "image": "opencontrail/vrouter-agent:2.20",
        "securityContext": {
          "privileged": true
        },
        "resources": {
          "limits": {
            "cpu": "250m"
          }
        },
        "command": [
          "/usr/bin/contrail-vrouter-agent"
        ],
        "volumeMounts": [
          {"name": "contrail-configs",
           "mountPath": "/etc/contrail",
           "readOnly": false
          },
          {"name": "contrail-logs",
           "mountPath": "/var/log/contrail",
           "readOnly": false
          }
        ]
      }
    ],
    "volumes": [
      {"name": "contrail-configs",
       "hostPath": {
         "path": "/etc/contrail"}
      },
      {"name": "contrail-logs",
       "hostPath": {
         "path": "/var/log/contrail"}
      }
    ]
  }
}
65 kubernetes/files/manifest/etcd-events.manifest Normal file
@@ -0,0 +1,65 @@
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "etcd-server-events",
    "namespace": "kube-system"
  },
  "spec": {
    "hostNetwork": true,
    "containers": [
      {
        "name": "etcd-container",
        "image": "gcr.io/google_containers/etcd:2.2.1",
        "resources": {
          "requests": {
            "cpu": "100m"
          }
        },
        "command": [
          "/bin/sh",
          "-c",
          "/usr/local/bin/etcd --listen-peer-urls http://127.0.0.1:2381 --addr 127.0.0.1:4002 --bind-addr 127.0.0.1:4002 --data-dir /var/etcd/data-events 1>>/var/log/etcd-events.log 2>&1"
        ],
        "livenessProbe": {
          "httpGet": {
            "host": "127.0.0.1",
            "port": 4002,
            "path": "/health"
          },
          "initialDelaySeconds": 15,
          "timeoutSeconds": 15
        },
        "ports": [
          {"name": "serverport",
           "containerPort": 2381,
           "hostPort": 2381
          },{
           "name": "clientport",
           "containerPort": 4002,
           "hostPort": 4002
          }
        ],
        "volumeMounts": [
          {"name": "varetcd",
           "mountPath": "/var/etcd",
           "readOnly": false
          },
          {"name": "varlogetcd",
           "mountPath": "/var/log/etcd-events.log",
           "readOnly": false
          }
        ]
      }
    ],
    "volumes": [
      {"name": "varetcd",
       "hostPath": {
         "path": "/mnt/master-pd/var/etcd"}
      },
      {"name": "varlogetcd",
       "hostPath": {
         "path": "/var/log/etcd-events.log"}
      }
    ]
  }
}
65 kubernetes/files/manifest/etcd.manifest Normal file
@@ -0,0 +1,65 @@
{%- from "kubernetes/map.jinja" import master with context %}
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "etcd-server",
    "namespace": "kube-system"
  },
  "spec": {
    "hostNetwork": true,
    "containers": [
      {
        "name": "etcd-container",
        "image": "gcr.io/google_containers/etcd:2.2.1",
        "resources": {
          "requests": {
            "cpu": "200m"
          }
        },
        "command": [
          "/bin/sh",
          "-c",
          "/usr/local/bin/etcd --name {{ master.etcd.name }} --initial-cluster-state new --initial-advertise-peer-urls http://{{ master.etcd.host }}:2380 --listen-peer-urls http://{{ master.etcd.host }}:2380 --advertise-client-urls http://{{ master.etcd.host }}:4001 --listen-client-urls {%- if master.etcd.host == '127.0.0.1' %}{% for member in master.etcd.members %} http://{{ member.host }}:4001{% endfor %}{% else %} http://{{ master.etcd.host }}:4001{% endif %},http://127.0.0.1:4001 --initial-cluster {% for member in master.etcd.members %}{{ member.name }}={%- if master.etcd.host == '127.0.0.1' %}http://127.0.0.1:2380{% else %}http://{{ member.host }}:2380{% if not loop.last %},{% endif %}{% endif %}{% endfor %} --initial-cluster-token {{ master.etcd.token }} --data-dir /var/etcd/data 1>>/var/log/etcd.log 2>&1"
        ],
        "livenessProbe": {
          "httpGet": {
            "host": "127.0.0.1",
            "port": 4001,
            "path": "/health"
          },
          "initialDelaySeconds": 15,
          "timeoutSeconds": 15
        },
        "ports": [
          {"name": "serverport",
           "containerPort": 2380,
           "hostPort": 2380
          },{
           "name": "clientport",
           "containerPort": 4001,
           "hostPort": 4001
          }
        ],
        "volumeMounts": [
          {"name": "varetcd",
           "mountPath": "/var/etcd",
           "readOnly": false
          },
          {"name": "varlogetcd",
           "mountPath": "/var/log/etcd.log",
           "readOnly": false
          }
        ]
      }
    ],
    "volumes": [
      {"name": "varetcd",
       "hostPath": {
         "path": "/mnt/master-pd/var/etcd"}
      },
      {"name": "varlogetcd",
       "hostPath": {
         "path": "/var/log/etcd.log"}
      }
    ]
  }
}
78 kubernetes/files/manifest/flannel-server.manifest Normal file
@@ -0,0 +1,78 @@
{%- from "kubernetes/map.jinja" import master with context %}
{
  "kind": "Pod",
  "apiVersion": "v1",
  "metadata": {
    "name": "flannel-server",
    "namespace": "kube-system",
    "labels": {
      "app": "flannel-server",
      "version": "v0.1"
    }
  },
  "spec": {
    "volumes": [
      {
        "name": "varlog",
        "hostPath": {
          "path": "/var/log"
        }
      },
      {
        "name": "etcdstorage",
        "emptyDir": {}
      },
      {
        "name": "networkconfig",
        "hostPath": {
          "path": "/etc/kubernetes/network.json"
        }
      }
    ],
    "containers": [
      {
        "name": "flannel-server-helper",
        "image": "gcr.io/google_containers/flannel-server-helper:0.1",
        "args": [
          "--network-config=/etc/kubernetes/network.json",
          "--etcd-prefix=/kubernetes.io/network",
          "--etcd-server=http://127.0.0.1:4001"
        ],
        "volumeMounts": [
          {
            "name": "networkconfig",
            "mountPath": "/etc/kubernetes/network.json"
          }
        ],
        "imagePullPolicy": "Always"
      },
      {
        "name": "flannel-container",
        "image": "quay.io/coreos/flannel:0.5.5",
        "command": [
          "/bin/sh",
          "-c",
          "/opt/bin/flanneld -listen 0.0.0.0:10253 -etcd-endpoints {% for member in master.etcd.members %}http://{{ member.host }}:4001{% if not loop.last %},{% endif %}{% endfor %} -etcd-prefix /kubernetes.io/network 2>&1 | tee -a /var/log/flannel_server.log"
        ],
        "ports": [
          {
            "hostPort": 10253,
            "containerPort": 10253
          }
        ],
        "resources": {
          "requests": {
            "cpu": "100m"
          }
        },
        "volumeMounts": [
          {
            "name": "varlog",
            "mountPath": "/var/log"
          }
        ]
      }
    ],
    "hostNetwork": true
  }
}
12 kubernetes/files/manifest/glusterfs-endpoints.manifest Normal file
@@ -0,0 +1,12 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-cluster
subsets:
{%- for member in master.storage.members %}
- addresses:
  - ip: {{ member.host }}
  ports:
  - port: {{ member.port }}
{%- endfor %}
8 kubernetes/files/manifest/glusterfs-svc.manifest Normal file
@@ -0,0 +1,8 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: Service
metadata:
  name: glusterfs-cluster
spec:
  ports:
  - port: {{ master.storage.port }}
83 kubernetes/files/manifest/kube-apiserver.manifest Normal file
@@ -0,0 +1,83 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  dnsPolicy: ClusterFirst
  hostNetwork: true
  restartPolicy: Always
  terminationGracePeriodSeconds: 30
  containers:
  - name: kube-apiserver
    image: {{ master.registry }}/kube-apiserver:{{ master.version }}
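    # The binary is wrapped in a shell so its output can be redirected into the
    # host-mounted log file declared further below.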
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/kube-apiserver
      --address=127.0.0.1
      --etcd-servers={% for member in master.etcd.members %}http://{{ member.host }}:4001{% if not loop.last %},{% endif %}{% endfor %}
      --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
      --service-cluster-ip-range={{ master.service_addresses }}
      --client-ca-file=/etc/ssl/certs/ca-{{ master.ca }}.crt
      --basic-auth-file=/srv/kubernetes/basic_auth.csv
      --tls-cert-file=/etc/ssl/certs/kubernetes-server.crt
      --tls-private-key-file=/etc/ssl/private/kubernetes-server.key
      --secure-port=443
      --bind-address={{ master.apiserver.address }}
      --token-auth-file=/srv/kubernetes/known_tokens.csv
      --v=2
      --allow-privileged=True
      1>>/var/log/kube-apiserver.log 2>&1
    imagePullPolicy: IfNotPresent
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 8080
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    ports:
    - containerPort: 443
      hostPort: 443
      name: https
      protocol: TCP
    - containerPort: 8080
      hostPort: 8080
      name: local
      protocol: TCP
    resources:
      requests:
        cpu: 250m
    volumeMounts:
    - mountPath: /srv/kubernetes
      name: srvkube
      readOnly: true
    - mountPath: /var/log/kube-apiserver.log
      name: logfile
    - mountPath: /etc/ssl
      name: etcssl
      readOnly: true
    - mountPath: /usr/share/ca-certificates
      name: usrsharecacerts
      readOnly: true
    - mountPath: /srv/sshproxy
      name: srvsshproxy
  volumes:
  - hostPath:
      path: /srv/kubernetes
    name: srvkube
  - hostPath:
      path: /var/log/kube-apiserver.log
    name: logfile
  - hostPath:
      path: /etc/ssl
    name: etcssl
  - hostPath:
      path: /usr/share/ca-certificates
    name: usrsharecacerts
  - hostPath:
      path: /srv/sshproxy
    name: srvsshproxy
64 kubernetes/files/manifest/kube-controller-manager.manifest Normal file
@@ -0,0 +1,64 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  dnsPolicy: ClusterFirst
  hostNetwork: true
  restartPolicy: Always
  terminationGracePeriodSeconds: 30
  containers:
  - name: kube-controller-manager
    image: {{ master.registry }}/kube-controller-manager:{{ master.version }}
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/kube-controller-manager
      --master=127.0.0.1:8080
      --cluster-name=kubernetes
      --service-account-private-key-file=/etc/ssl/private/kubernetes-server.key
      --v=2
      --root-ca-file=/etc/ssl/certs/ca-{{ master.ca }}.crt
      --leader-elect=true
      1>>/var/log/kube-controller-manager.log 2>&1
    imagePullPolicy: IfNotPresent
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10252
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    resources:
      limits:
        cpu: 200m
      requests:
        cpu: 200m
    volumeMounts:
    - mountPath: /srv/kubernetes
      name: srvkube
      readOnly: true
    - mountPath: /var/log/kube-controller-manager.log
      name: logfile
    - mountPath: /etc/ssl
      name: etcssl
      readOnly: true
    - mountPath: /usr/share/ca-certificates
      name: usrsharecacerts
      readOnly: true
  volumes:
  - hostPath:
      path: /srv/kubernetes
    name: srvkube
  - hostPath:
      path: /var/log/kube-controller-manager.log
    name: logfile
  - hostPath:
      path: /etc/ssl
    name: etcssl
  - hostPath:
      path: /usr/share/ca-certificates
    name: usrsharecacerts
24 kubernetes/files/manifest/kube-network-manager.manifest Normal file
@@ -0,0 +1,24 @@
{%- from "kubernetes/map.jinja" import master with context %}
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "namespace": "opencontrail",
    "name": "kube-network-manager"
  },
  "spec": {
    "hostNetwork": true,
    "containers": [{
      "name": "kube-network-manager",
      "image": "{{ master.network.network_manager.image }}:{{ master.network.network_manager.tag }}",
      "volumeMounts": [{
        "name": "config",
        "mountPath": "/etc/kubernetes"
      }]
    }],
    "volumes": [{
      "name": "config",
      "hostPath": {"path": "/etc/kubernetes"}
    }]
  }
}
46 kubernetes/files/manifest/kube-proxy.manifest Normal file
@@ -0,0 +1,46 @@
{%- from "kubernetes/map.jinja" import pool with context %}
apiVersion: v1
kind: Pod
metadata:
  name: kube-proxy
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-proxy
    image: {{ pool.registry }}/kube-proxy:{{ pool.version }}
    resources:
      requests:
        cpu: 200m
    command:
    - /bin/sh
    - -c
    - kube-proxy
      --logtostderr=true
      --v=2
      --kubeconfig=/etc/kubernetes/proxy.kubeconfig
      --master=https://{{ pool.master.host }}
      {% if pool.network.engine == 'calico' %}--proxy-mode=iptables{% endif %}
      1>>/var/log/kube-proxy.log 2>&1
    securityContext:
      privileged: true
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
    - mountPath: /var/log
      name: varlog
      readOnly: false
    - mountPath: /var/lib/kube-proxy/kubeconfig
      name: kubeconfig
      readOnly: false
  volumes:
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host
  - hostPath:
      path: /var/lib/kube-proxy/kubeconfig
    name: kubeconfig
  - hostPath:
      path: /var/log
    name: varlog
42 kubernetes/files/manifest/kube-scheduler.manifest Normal file
@@ -0,0 +1,42 @@
{%- from "kubernetes/map.jinja" import master with context %}
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  dnsPolicy: ClusterFirst
  hostNetwork: true
  nodeName: kubernetes-master
  restartPolicy: Always
  terminationGracePeriodSeconds: 30
  containers:
  - name: kube-scheduler
    image: {{ master.registry }}/kube-scheduler:{{ master.version }}
    imagePullPolicy: IfNotPresent
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/kube-scheduler
      --master=127.0.0.1:8080
      --v=2
      --leader-elect=true
      1>>/var/log/kube-scheduler.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    resources:
      requests:
        cpu: 100m
    volumeMounts:
    - mountPath: /var/log/kube-scheduler.log
      name: logfile
  volumes:
  - hostPath:
      path: /var/log/kube-scheduler.log
    name: logfile
37 kubernetes/files/nginx.conf Normal file
@@ -0,0 +1,37 @@
server {
    listen 443;
    server_name localhost;

    root html;
    index index.html index.htm;

    ssl on;
    ssl_certificate /etc/pki/cert/{{ master.ca }}/{{ master.apiserver.address }}.crt;
    ssl_certificate_key /etc/pki/cert/{{ master.ca }}/{{ master.apiserver.address }}.key;

    ssl_session_timeout 5m;

    # don't use SSLv3 because of POODLE
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS;
    ssl_prefer_server_ciphers on;

    location / {

        # Proxy settings
        # disable buffering so that watch works
        proxy_buffering off;
        proxy_pass http://127.0.0.1:8080/;
        proxy_connect_timeout 159s;
        proxy_send_timeout 600s;
        proxy_read_timeout 600s;

        # Disable retry
        proxy_next_upstream off;

        # Support web sockets
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
4 kubernetes/files/opencontrail/namespace-opencontrail.yml Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: opencontrail
15 kubernetes/files/opencontrail/network.conf Normal file
@@ -0,0 +1,15 @@
{%- from "kubernetes/map.jinja" import master with context %}
[DEFAULT]
service-cluster-ip-range = {{ master.network.service_cluster_ip_range }}

[opencontrail]
default-domain = {{ master.network.default_domain }}
public-ip-range = {{ master.network.public_ip_range }}
cluster-service = {{ master.network.cluster_service }}
api-server = {{ master.network.host }}
api-port = {{ master.network.port }}
default-project = {{ master.network.default_project }}
public-network = {{ master.network.public_network }}
private-ip-range = {{ master.network.private_ip_range }}
network-label = {{ master.network.network_label }}
service-label = {{ master.network.service_label }}
117 kubernetes/files/rc.yml Normal file
@@ -0,0 +1,117 @@
{% from "kubernetes/map.jinja" import control with context %}
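{#- Note: "service" is not defined in this template itself; it is expected to be
    supplied via the template context by the state that renders this file. -#}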
apiVersion: {{ service.apiVersion }}
kind: {{ service.kind }}
metadata:
  name: {{ service.service }}-{{ service.role }}
  namespace: {{ service.namespace }}
  labels:
    app: {{ service.service }}-{{ service.role }}
spec:
  replicas: {{ service.replicas }}
  template:
    metadata:
      labels:
        app: {{ service.service }}-{{ service.role }}
    spec:
      {%- if service.hostNetwork is defined %}
      hostNetwork: True
      {%- endif %}
      containers:
      {%- for container_name, container in service.container.iteritems() %}
      - name: {{ container_name }}
        image: {% if container.registry is defined %}{{ container.registry }}/{%- endif %}{{ container.image }}{%- if container.tag is defined %}:{{ container.tag }}{%- endif %}
        imagePullPolicy: IfNotPresent
        {%- if container.privileged is defined %}
        securityContext:
          privileged: True
        {%- endif %}
        {%- if container.variables is defined %}
        env:
        {%- for variable in container.variables %}
        - name: {{ variable.name }}
          {%- if variable.fieldPath is defined %}
          valueFrom:
            fieldRef:
              fieldPath: {{ variable.fieldPath }}
          {%- else %}
          value: {{ variable.value }}
          {%- endif %}
        {%- endfor %}
        {%- endif %}
        {%- if container.ports is defined %}
        ports:
        {%- for port in container.ports %}
        - containerPort: {{ port.port }}
          name: {{ port.name }}
        {%- endfor %}
        {%- endif %}
        {%- if container.command is defined %}
        command:
        {%- for command in container.command %}
        - {{ command }}
        {%- endfor %}
        {%- endif %}
        {%- if container.volumes is defined %}
        volumeMounts:
        {%- for volume in container.volumes %}
        - name: {{ volume.name }}
          mountPath: {{ volume.mount }}
          readOnly: {{ volume.read_only }}
        {%- endfor %}
        {%- endif %}
        {%- if container.liveness_probe is defined %}
        livenessProbe:
          {%- if container.liveness_probe.type == 'http' %}
          httpGet:
            path: {{ container.liveness_probe.path }}
            port: {{ container.liveness_probe.port }}
          {%- elif container.liveness_probe.type == 'exec' %}
          exec:
            command:
            {%- for command in container.liveness_probe.command %}
            - {{ command }}
            {%- endfor %}
          {%- endif %}
          initialDelaySeconds: {{ container.liveness_probe.initial_delay }}
          timeoutSeconds: {{ container.liveness_probe.timeout }}
        {%- endif %}
        {%- if container.readiness_probe is defined %}
        readinessProbe:
          {%- if container.readiness_probe.type == 'http' %}
          httpGet:
            path: {{ container.readiness_probe.path }}
            port: {{ container.readiness_probe.port }}
          {%- elif container.readiness_probe.type == 'exec' %}
          exec:
            command:
            {%- for command in container.readiness_probe.command %}
            - {{ command }}
            {%- endfor %}
          {%- endif %}
          initialDelaySeconds: {{ container.readiness_probe.initial_delay }}
          timeoutSeconds: {{ container.readiness_probe.timeout }}
        {%- endif %}
      {%- endfor %}
      {%- if service.volume is defined %}
      volumes:
      {%- for volume_name, volume in service.volume.iteritems() %}
      - name: {{ volume_name }}
        {%- if volume.type == 'emptyDir' %}
        emptyDir: {}
        {%- elif volume.type == 'hostPath' %}
        hostPath:
          path: {{ volume.path }}
        {%- elif volume.type == 'glusterfs' %}
        glusterfs:
          endpoints: {{ volume.endpoints }}
          path: {{ volume.path }}
          readOnly: {{ volume.read_only }}
        {%- endif %}
      {%- endfor %}
      {%- endif %}
      {%- if service.nodeSelector is defined %}
      nodeSelector:
      {%- for selector in service.nodeSelector %}
        {{ selector.key }}: {{ selector.value }}
      {%- endfor %}
      {%- endif %}
kubernetes/files/svc.yml
Normal file
17
kubernetes/files/svc.yml
Normal file
@ -0,0 +1,17 @@
|
{% from "kubernetes/map.jinja" import control with context %}
apiVersion: v1
kind: Service
metadata:
  labels:
    name: {{ service.service }}-{{ service.role }}
    app: {{ service.service }}-{{ service.role }}
  name: {{ service.service }}-{{ service.role }}
spec:
  ports:
  {%- for port in service.ports %}
  - port: {{ port.port }}
    name: {{ port.name }}
  {%- endfor %}
  type: {{ service.type }}
  selector:
    app: {{ service.service }}-{{ service.role }}
13 kubernetes/init.sls Normal file
@@ -0,0 +1,13 @@
{%- if pillar.kubernetes is defined %}
include:
{%- if pillar.kubernetes.master is defined %}
- kubernetes.master
{%- endif %}
{%- if pillar.kubernetes.pool is defined %}
- kubernetes.pool
{%- endif %}
{%- if pillar.kubernetes.control is defined %}
- kubernetes.control
{%- endif %}
{%- endif %}
42 kubernetes/map.jinja Normal file
@@ -0,0 +1,42 @@
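{#- Each block below defines OS-family defaults that grains.filter_by merges
    with the matching kubernetes:* pillar section. -#}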
{% set common = salt['grains.filter_by']({
    'Debian': {
        'pkgs': ['curl', 'git', 'apt-transport-https', 'python-apt', 'nfs-common', 'socat', 'netcat-traditional', 'openssl'],
        'services': [],
    },
    'RedHat': {
        'pkgs': ['curl', 'git', 'apt-transport-https', 'python-apt', 'nfs-common', 'socat', 'netcat-traditional', 'python'],
        'services': [],
    },
}, merge=salt['pillar.get']('kubernetes:common')) %}

{% set master = salt['grains.filter_by']({
    'Debian': {
        'pkgs': [],
        'services': [],
    },
    'RedHat': {
        'pkgs': [],
        'services': [],
    },
}, merge=salt['pillar.get']('kubernetes:master')) %}

{% set pool = salt['grains.filter_by']({
    'Debian': {
        'pkgs': [],
        'services': [],
    },
    'RedHat': {
        'pkgs': [],
        'services': [],
    },
}, merge=salt['pillar.get']('kubernetes:pool')) %}

{% set control = salt['grains.filter_by']({
    'Debian': {
        'service': {},
    },
    'RedHat': {
        'service': {},
    },
}, merge=salt['pillar.get']('kubernetes:control')) %}
38 kubernetes/master/api.sls Normal file
@@ -0,0 +1,38 @@
{%- from "kubernetes/map.jinja" import master with context %}
{%- if master.enabled %}

/srv/kubernetes/known_tokens.csv:
  file.managed:
  - source: salt://kubernetes/files/known_tokens.csv
  - template: jinja
  - user: root
  - group: root
  - mode: 644
  - makedirs: true

/srv/kubernetes/basic_auth.csv:
  file.managed:
  - source: salt://kubernetes/files/basic_auth.csv
  - template: jinja
  - user: root
  - group: root
  - mode: 644
  - makedirs: true

/var/log/kube-apiserver.log:
  file.managed:
  - user: root
  - group: root
  - mode: 644

/etc/kubernetes/manifests/kube-apiserver.manifest:
  file.managed:
  - source: salt://kubernetes/files/manifest/kube-apiserver.manifest
  - template: jinja
  - user: root
  - group: root
  - mode: 644
  - makedirs: true
  - dir_mode: 755

{%- endif %}
52 kubernetes/master/calico.sls Normal file
@@ -0,0 +1,52 @@
{%- from "kubernetes/map.jinja" import master with context %}
{%- if master.enabled %}
/etc/kubernetes/manifests/calico-etcd.manifest:
  file.managed:
  - source: salt://kubernetes/files/manifest/calico-etcd.manifest
  - user: root
  - group: root
  - mode: 644
  - makedirs: true
  - dir_mode: 755
  - template: jinja

/usr/bin/calicoctl:
  file.managed:
  - source: https://github.com/projectcalico/calico-containers/releases/download/{{ master.network.version }}/calicoctl
  - source_hash: md5={{ master.network.hash }}
  - mode: 751
  - user: root
  - group: root

{% if pillar.get('is_systemd') %}

/etc/calico/network-environment:
  file.managed:
  - source: salt://kubernetes/files/calico/network-environment.master
  - user: root
  - group: root
  - mode: 644
  - makedirs: true
  - dir_mode: 755
  - template: jinja

/etc/systemd/calico-node.service:
  file.managed:
  - source: salt://kubernetes/files/calico/calico-node.service
  - user: root
  - group: root

{% endif %}

calico_node:
  service.running:
  - enable: True
  - watch:
    - file: /usr/bin/calicoctl
{% if pillar.get('is_systemd') %}
    - file: /etc/systemd/calico-node.service
{% else %}
    - file: /etc/init/docker-calico-node.conf
{% endif %}

{%- endif %}
20 kubernetes/master/controller-manager.sls Normal file
@@ -0,0 +1,20 @@
{%- from "kubernetes/map.jinja" import master with context %}
{%- if master.enabled %}

/etc/kubernetes/manifests/kube-controller-manager.manifest:
  file.managed:
  - source: salt://kubernetes/files/manifest/kube-controller-manager.manifest
  - template: jinja
  - user: root
  - group: root
  - mode: 644
  - makedirs: true
  - dir_mode: 755

/var/log/kube-controller-manager.log:
  file.managed:
  - user: root
  - group: root
  - mode: 644

{%- endif %}
46 kubernetes/master/etcd.sls Normal file
@@ -0,0 +1,46 @@
{%- from "kubernetes/map.jinja" import master with context %}
{%- if master.enabled %}

/var/log/etcd-events.log:
  file.managed:
  - user: root
  - group: root
  - mode: 644

/var/log/etcd.log:
  file.managed:
  - user: root
  - group: root
  - mode: 644

/var/etcd:
  file.directory:
  - user: root
  - group: root
  - dir_mode: 700
  - recurse:
    - user
    - group
    - mode

/etc/kubernetes/manifests/etcd.manifest:
  file.managed:
  - source: salt://kubernetes/files/manifest/etcd.manifest
  - template: jinja
  - user: root
  - group: root
  - mode: 644
  - makedirs: true
  - dir_mode: 755

/etc/kubernetes/manifests/etcd-events.manifest:
  file.managed:
  - source: salt://kubernetes/files/manifest/etcd-events.manifest
  - template: jinja
  - user: root
  - group: root
  - mode: 644
  - makedirs: true
  - dir_mode: 755

{%- endif %}
76 kubernetes/master/flannel.sls Normal file
@@ -0,0 +1,76 @@
{%- from "kubernetes/map.jinja" import master with context %}
{%- if master.enabled %}
flannel-tar:
  archive:
  - extracted
  - user: root
  - name: /usr/local/src
  - source: https://storage.googleapis.com/kubernetes-release/flannel/flannel-0.5.5-linux-amd64.tar.gz
  - tar_options: v
  - source_hash: md5=972c717254775bef528f040af804f2cc
  - archive_format: tar
  - if_missing: /usr/local/src/flannel/flannel-0.5.5/

flannel-symlink:
  file.symlink:
  - name: /usr/local/bin/flanneld
  - target: /usr/local/src/flannel-0.5.5/flanneld
  - force: true
  - watch:
    - archive: flannel-tar

/var/log/etcd_flannel.log:
  file.managed:
  - user: root
  - group: root
  - mode: 755

/var/log/flannel.log:
  file.managed:
  - user: root
  - group: root
  - mode: 755

/etc/kubernetes/network.json:
  file.managed:
  - source: salt://kubernetes/files/flannel/network.json
  - makedirs: True
  - user: root
  - group: root
  - mode: 755
  - template: jinja

/etc/kubernetes/manifests/flannel-server.manifest:
  file.managed:
  - source: salt://kubernetes/files/manifest/flannel-server.manifest
  - user: root
  - group: root
  - mode: 644
  - makedirs: true
  - dir_mode: 755
  - template: jinja

/etc/default/flannel:
  file.managed:
  - source: salt://kubernetes/files/flannel/default.master
  - template: jinja
  - user: root
  - group: root
  - mode: 644

/etc/init.d/flannel:
  file.managed:
  - source: salt://kubernetes/files/flannel/initd
  - user: root
  - group: root
  - mode: 755

flannel:
  service.running:
  - enable: True
  - watch:
    - file: /usr/local/bin/flanneld
    - file: /etc/init.d/flannel
    - file: /etc/default/flannel

{%- endif %}
22 kubernetes/master/glusterfs.sls Normal file
@@ -0,0 +1,22 @@
{%- from "kubernetes/map.jinja" import master with context %}
{%- if master.enabled %}

/etc/kubernetes/glusterfs/glusterfs-endpoints.manifest:
  file.managed:
  - source: salt://kubernetes/files/manifest/glusterfs-endpoints.manifest
  - makedirs: True
  - user: root
  - group: root
  - mode: 644
  - template: jinja

/etc/kubernetes/glusterfs/glusterfs-svc.manifest:
  file.managed:
  - source: salt://kubernetes/files/manifest/glusterfs-svc.manifest
  - makedirs: True
  - user: root
  - group: root
  - mode: 644
  - template: jinja

{%- endif %}
21 kubernetes/master/init.sls Normal file
@@ -0,0 +1,21 @@
{%- from "kubernetes/map.jinja" import master with context %}

include:
- kubernetes.master.service
- kubernetes.master.etcd
- kubernetes.master.api
- kubernetes.master.controller-manager
- kubernetes.master.scheduler
- kubernetes.master.kube-addons
{%- if master.network.engine == "opencontrail" %}
- kubernetes.master.opencontrail-network-manager
{%- endif %}
{%- if master.network.engine == "flannel" %}
- kubernetes.master.flannel
{%- endif %}
{%- if master.network.engine == "calico" %}
- kubernetes.master.calico
{%- endif %}
{%- if master.storage.get('engine', 'none') == 'glusterfs' %}
- kubernetes.master.glusterfs
{%- endif %}
- kubernetes.master.kubelet
179 kubernetes/master/kube-addons.sls Normal file
@@ -0,0 +1,179 @@
|
||||
{%- from "kubernetes/map.jinja" import master with context %}
|
||||
{%- if master.enabled %}
|
||||
|
||||
addon-dir-create:
|
||||
file.directory:
|
||||
- name: /etc/kubernetes/addons
|
||||
- user: root
|
||||
- group: root
|
||||
- mode: 0755
|
||||
|
||||
/etc/kubernetes/addons/namespace.yml:
|
||||
file.managed:
|
||||
- source: salt://kubernetes/files/kube-addons/namespace.yml
|
||||
- user: root
|
||||
- group: root
|
||||
- file_mode: 644
|
||||
|
||||
{%- if master.addons.dns.enabled %}
|
||||
|
||||
/etc/kubernetes/addons/dns/skydns-svc.yaml:
|
||||
file.managed:
|
||||
- source: salt://kubernetes/files/kube-addons/dns/skydns-svc.yaml
|
||||
- template: jinja
|
||||
- group: root
|
||||
- dir_mode: 755
|
||||
- makedirs: True
|
||||
|
||||
/etc/kubernetes/addons/dns/skydns-rc.yaml:
|
||||
file.managed:
|
||||
- source: salt://kubernetes/files/kube-addons/dns/skydns-rc.yaml
|
||||
- template: jinja
|
||||
- group: root
|
||||
- dir_mode: 755
|
||||
- makedirs: True
|
||||
|
||||
{% endif %}
|
||||
|
||||
{%- if master.addons.ui.enabled %}
|
||||
|
||||
{%- if master.version == "v1.1.1" %}
|
||||
|
||||
/etc/kubernetes/addons/kube-ui/kube-ui-svc.yaml:
|
||||
file.managed:
|
||||
    - source: salt://kubernetes/files/kube-addons/kube-ui/kube-ui-svc.yaml
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True

/etc/kubernetes/addons/kube-ui/kube-ui-rc.yaml:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/kube-ui/kube-ui-rc.yaml
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True

/etc/kubernetes/addons/kube-ui/kube-ui-address.yaml:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/kube-ui/kube-ui-address.yaml
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True

/etc/kubernetes/addons/kube-ui/kube-ui-endpoint.yaml:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/kube-ui/kube-ui-endpoint.yaml
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True

{% endif %}

/etc/kubernetes/addons/dashboard/dashboard-service.yaml:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/dashboard/dashboard-service.yaml
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True

/etc/kubernetes/addons/dashboard/dashboard-controller.yaml:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/dashboard/dashboard-controller.yaml
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True

{%- if master.network.engine == "opencontrail" %}

/etc/kubernetes/addons/dashboard/dashboard-address.yaml:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/dashboard/dashboard-address.yaml
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True

/etc/kubernetes/addons/dashboard/dashboard-endpoint.yaml:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/dashboard/dashboard-endpoint.yaml
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True
{% endif %}

{% endif %}

{%- if master.addons.heapster_influxdb.enabled %}

/etc/kubernetes/addons/heapster-influxdb/heapster-address.yaml:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/heapster-influxdb/heapster-address.yaml
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True

/etc/kubernetes/addons/heapster-influxdb/heapster-controller.yaml:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/heapster-influxdb/heapster-controller.yaml
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True

/etc/kubernetes/addons/heapster-influxdb/heapster-endpoint.yaml:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/heapster-influxdb/heapster-endpoint.yaml
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True

/etc/kubernetes/addons/heapster-influxdb/heapster-service.yaml:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/heapster-influxdb/heapster-service.yaml
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True

/etc/kubernetes/addons/heapster-influxdb/influxdb-controller.yaml:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/heapster-influxdb/influxdb-controller.yaml
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True

/etc/kubernetes/addons/heapster-influxdb/influxdb-service.yaml:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/heapster-influxdb/influxdb-service.yaml
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True

{% endif %}

/etc/kubernetes/kube-addons.sh:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/kube-addons.sh
    - user: root
    - group: root
    - mode: 755

/etc/kubernetes/kube-addon-update.sh:
  file.managed:
    - source: salt://kubernetes/files/kube-addons/kube-addon-update.sh
    - user: root
    - group: root
    - mode: 755

{% endif %}
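
Note: the addon states above render only when the matching flags are set in the master pillar. A minimal sketch, consistent with the metadata defaults further below:

.. code-block:: yaml

    kubernetes:
      master:
        addons:
          ui:
            enabled: true
          heapster_influxdb:
            enabled: true
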
67
kubernetes/master/kubelet.sls
Normal file
@ -0,0 +1,67 @@
{%- from "kubernetes/map.jinja" import master with context %}
{%- if master.enabled %}

{% if pillar.get('is_systemd') %}
{% set environment_file = '/etc/sysconfig/kubelet' %}
{% else %}
{% set environment_file = '/etc/default/kubelet' %}
{% endif %}

{{ environment_file }}:
  file.managed:
    - source: salt://kubernetes/files/kubelet/default.master
    - template: jinja
    - user: root
    - group: root
    - mode: 644

/usr/local/bin/kubelet:
  file.managed:
    - user: root
    - group: root
    - mode: 755

{% if pillar.get('is_systemd') %}

{{ pillar.get('systemd_system_path') }}/kubelet.service:
  file.managed:
    - source: salt://kubernetes/files/kubelet/kubelet.service
    - user: root
    - group: root

fix-service-kubelet:
  cmd.wait:
    - name: /opt/kubernetes/helpers/services bounce kubelet
    - watch:
      - file: /usr/local/bin/kubelet
      - file: {{ pillar.get('systemd_system_path') }}/kubelet.service
      - file: {{ environment_file }}
      - file: /var/lib/kubelet/kubeconfig

{% else %}

/etc/init.d/kubelet:
  file.managed:
    - source: salt://kubernetes/files/kubelet/initd
    - user: root
    - group: root
    - mode: 755

{% endif %}

kubelet:
  service.running:
    - enable: True
    - watch:
      - file: /usr/local/bin/kubelet
{% if pillar.get('is_systemd') %}
      - file: {{ pillar.get('systemd_system_path') }}/kubelet.service
{% else %}
      - file: /etc/init.d/kubelet
{% endif %}
{% if grains['os_family'] == 'RedHat' %}
      - file: /usr/lib/systemd/system/kubelet.service
{% endif %}
      - file: {{ environment_file }}

{%- endif %}
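
The state above picks the kubelet environment file per init system from the `is_systemd` pillar and writes the systemd unit under `systemd_system_path`. A sketch of the pillar keys it assumes (the path value is illustrative):

.. code-block:: yaml

    is_systemd: true
    systemd_system_path: /usr/lib/systemd/system
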
30
kubernetes/master/opencontrail-network-manager.sls
Normal file
@ -0,0 +1,30 @@
{%- from "kubernetes/map.jinja" import master with context %}
{%- if master.enabled %}

/etc/kubernetes/manifests/kube-network-manager.manifest:
  file.managed:
    - source: salt://kubernetes/files/manifest/kube-network-manager.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755

/etc/kubernetes/network.conf:
  file.managed:
    - source: salt://kubernetes/files/opencontrail/network.conf
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true

/etc/kubernetes/namespace-opencontrail.yml:
  file.managed:
    - source: salt://kubernetes/files/opencontrail/namespace-opencontrail.yml
    - user: root
    - group: root
    - mode: 644

{%- endif %}
20
kubernetes/master/scheduler.sls
Normal file
@ -0,0 +1,20 @@
{%- from "kubernetes/map.jinja" import master with context %}
{%- if master.enabled %}

/etc/kubernetes/manifests/kube-scheduler.manifest:
  file.managed:
    - source: salt://kubernetes/files/manifest/kube-scheduler.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755

/var/log/kube-scheduler.log:
  file.managed:
    - user: root
    - group: root
    - mode: 644

{%- endif %}
33
kubernetes/master/service.sls
Normal file
@ -0,0 +1,33 @@
{%- from "kubernetes/map.jinja" import master with context %}
{%- from "kubernetes/map.jinja" import common with context %}
{%- from "linux/map.jinja" import system with context %}
{%- if master.enabled %}

include:
  - kubernetes._common

kubernetes_master_binaries:
  cmd.run:
    - names:
      - "cp /root/apt.tcpcloud.eu/kubernetes/bin/{{ common.binaries_version }}/kubectl /usr/bin/"
      - "cp /root/apt.tcpcloud.eu/kubernetes/bin/{{ common.binaries_version }}/kubelet /usr/local/bin/"
      - "cp /root/etcd-v2.2.1-linux-amd64/etcdctl /usr/bin/"
    - unless: test -f /usr/local/bin/kubelet && test -f /usr/bin/kubectl && test -f /usr/bin/etcdctl
    - require:
      - cmd: kubernetes_binaries

kubernetes_master_cert_group:
  group.present:
    - name: kube-cert
    - system: True

kubernetes_master_cert_dir:
  file.directory:
    - name: /srv/kubernetes/
    - mode: 750
    - group: kube-cert
    - makedirs: True
    - require:
      - group: kubernetes_master_cert_group

{%- endif %}
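
The copy commands run only while the `unless` test fails, and `common.binaries_version` is supplied by the common service metadata (see metadata/service/common.yml below); overriding it in the pillar selects a different binary set:

.. code-block:: yaml

    kubernetes:
      common:
        binaries_version: v1.1.1
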
15
kubernetes/meta/sphinx.yml
Normal file
@ -0,0 +1,15 @@
doc:
  name: Kubernetes
  description: Manage a cluster of Linux containers as a single system to accelerate Dev and simplify Ops.
  role:
{%- if pillar.kubernetes.pool is defined %}
{%- from "kubernetes/map.jinja" import client with context %}
    pool:
      name: pool
      param: {}
{%- endif %}
{%- if pillar.kubernetes.master is defined %}
    master:
      name: master
      param: {}
{%- endif %}
71
kubernetes/pool/calico.sls
Normal file
@ -0,0 +1,71 @@
{%- from "kubernetes/map.jinja" import pool with context %}
{%- if pool.enabled %}

/usr/bin/calicoctl:
  file.managed:
    - source: https://github.com/projectcalico/calico-containers/releases/download/{{ pool.network.version }}/calicoctl
    - source_hash: md5={{ pool.network.hash }}
    - mode: 751
    - user: root
    - group: root

/opt/cni/bin/calico:
  file.managed:
    - source: https://github.com/projectcalico/calico-cni/releases/download/v1.0.0/calico
    - source_hash: md5=c829450f7e9d7abe81b3a8b37fc787a4
    - mode: 751
    - makedirs: true
    - user: root
    - group: root

/opt/cni/bin/calico-ipam:
  file.managed:
    - source: https://github.com/projectcalico/calico-cni/releases/download/v1.0.0/calico-ipam
    - source_hash: md5=a40d4db5b3acbb6dc93330b84d25d936
    - mode: 751
    - makedirs: true
    - user: root
    - group: root

/etc/cni/net.d/10-calico.conf:
  file.managed:
    - source: salt://kubernetes/files/calico/calico.conf
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - template: jinja

{% if pillar.get('is_systemd') %}

/etc/calico/network-environment:
  file.managed:
    - source: salt://kubernetes/files/calico/network-environment.pool
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - template: jinja

/etc/systemd/calico-node.service:
  file.managed:
    - source: salt://kubernetes/files/calico/calico-node.service
    - user: root
    - group: root

{% endif %}

calico_node:
  service.running:
    - enable: True
    - watch:
      - file: /usr/bin/calicoctl
{% if pillar.get('is_systemd') %}
      - file: /etc/systemd/calico-node.service
{% else %}
      - file: /etc/init/docker-calico-node.conf
{% endif %}

{%- endif %}
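
calicoctl is downloaded from the release matching `pool.network.version` and verified against `pool.network.hash`, so both keys must be present in the pillar when the calico engine is selected. A sketch with placeholder values (the version and hash below are illustrative, not tested defaults):

.. code-block:: yaml

    kubernetes:
      pool:
        network:
          engine: calico
          version: v0.13.0
          hash: 0123456789abcdef0123456789abcdef  # placeholder md5
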
46
kubernetes/pool/flannel.sls
Normal file
@ -0,0 +1,46 @@
{%- from "kubernetes/map.jinja" import pool with context %}
{%- if pool.enabled %}

flannel-tar:
  archive:
    - extracted
    - user: root
    - name: /usr/local/src
    - source: https://storage.googleapis.com/kubernetes-release/flannel/flannel-0.5.5-linux-amd64.tar.gz
    - tar_options: v
    - source_hash: md5=972c717254775bef528f040af804f2cc
    - archive_format: tar
    - if_missing: /usr/local/src/flannel-0.5.5/

flannel-symlink:
  file.symlink:
    - name: /usr/local/bin/flanneld
    - target: /usr/local/src/flannel-0.5.5/flanneld
    - force: true
    - watch:
      - archive: flannel-tar

/etc/default/flannel:
  file.managed:
    - source: salt://kubernetes/files/flannel/default.pool
    - template: jinja
    - user: root
    - group: root
    - mode: 644

/etc/init.d/flannel:
  file.managed:
    - source: salt://kubernetes/files/flannel/initd
    - user: root
    - group: root
    - mode: 755

flannel:
  service.running:
    - enable: True
    - watch:
      - file: /usr/local/bin/flanneld
      - file: /etc/init.d/flannel
      - file: /etc/default/flannel

{%- endif %}
11
kubernetes/pool/init.sls
Normal file
@ -0,0 +1,11 @@
{%- from "kubernetes/map.jinja" import pool with context %}

include:
  - kubernetes.pool.service
{%- if pool.network.engine == "calico" %}
  - kubernetes.pool.calico
{%- endif %}
  - kubernetes.pool.kubelet
{%- if pool.network.engine == "flannel" %}
  - kubernetes.pool.flannel
{%- endif %}
  - kubernetes.pool.kube-proxy
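
The included states follow `pool.network.engine`; with flannel, for example, the calico state is skipped and kubernetes.pool.flannel is pulled in:

.. code-block:: yaml

    kubernetes:
      pool:
        network:
          engine: flannel
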
92
kubernetes/pool/kube-proxy.sls
Normal file
@ -0,0 +1,92 @@
{%- from "kubernetes/map.jinja" import pool with context %}
{%- if pool.enabled %}

{% if pillar.get('is_systemd') %}
{% set environment_file = '/etc/sysconfig/kube-proxy' %}
{% else %}
{% set environment_file = '/etc/default/kube-proxy' %}
{% endif %}

{{ environment_file }}:
  file.managed:
    - source: salt://kubernetes/files/kube-proxy/default
    - template: jinja
    - user: root
    - group: root
    - mode: 644

kube-proxy:
  group.present:
    - system: True
  user.present:
    - system: True
    - gid_from_name: True
    - shell: /sbin/nologin
    - home: /var/lib/kube-proxy
    - require:
      - group: kube-proxy

{% if pillar.get('is_systemd') %}

{{ pillar.get('systemd_system_path') }}/kube-proxy.service:
  file.managed:
    - source: salt://kubernetes/files/kube-proxy/kube-proxy.service
    - user: root
    - group: root
  cmd.wait:
    - name: /opt/kubernetes/helpers/services bounce kube-proxy
    - watch:
      - file: {{ environment_file }}
      - file: {{ pillar.get('systemd_system_path') }}/kube-proxy.service
      - file: /var/lib/kube-proxy/kubeconfig

{% else %}

/etc/init.d/kube-proxy:
  file.managed:
    - source: salt://kubernetes/files/kube-proxy/initd
    - user: root
    - group: root
    - mode: 755

{% endif %}

kube-proxy-service:
  service.running:
    - name: kube-proxy
    - enable: True
    - watch:
      - file: {{ environment_file }}
{% if pillar.get('is_systemd') %}
      - file: {{ pillar.get('systemd_system_path') }}/kube-proxy.service
{% else %}
      - file: /etc/init.d/kube-proxy
{% endif %}
      - file: /var/lib/kube-proxy/kubeconfig

/var/lib/kube-proxy/kubeconfig:
  file.managed:
    - source: salt://kubernetes/files/kube-proxy/proxy.kubeconfig
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true

/etc/kubernetes/manifests/kube-proxy.manifest:
  file.managed:
    - source: salt://kubernetes/files/manifest/kube-proxy.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755

/var/log/kube-proxy.log:
  file.managed:
    - user: root
    - group: root
    - mode: 644

{% endif %}
86
kubernetes/pool/kubelet.sls
Normal file
@ -0,0 +1,86 @@
{%- from "kubernetes/map.jinja" import pool with context %}
{%- if pool.enabled %}

{% if pillar.get('is_systemd') %}
{% set environment_file = '/etc/sysconfig/kubelet' %}
{% else %}
{% set environment_file = '/etc/default/kubelet' %}
{% endif %}

{{ environment_file }}:
  file.managed:
    - source: salt://kubernetes/files/kubelet/default.pool
    - template: jinja
    - user: root
    - group: root
    - mode: 644

/usr/local/bin/kubelet:
  file.managed:
    - user: root
    - group: root
    - mode: 755

/etc/kubernetes/kubelet.kubeconfig:
  file.managed:
    - source: salt://kubernetes/files/kubelet/kubelet.kubeconfig
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true

{% if pillar.get('is_systemd') %}

{{ pillar.get('systemd_system_path') }}/kubelet.service:
  file.managed:
    - source: salt://kubernetes/files/kubelet/kubelet.service
    - user: root
    - group: root

fix-service-kubelet:
  cmd.wait:
    - name: /opt/kubernetes/helpers/services bounce kubelet
    - watch:
      - file: /usr/local/bin/kubelet
      - file: {{ pillar.get('systemd_system_path') }}/kubelet.service
      - file: {{ environment_file }}
      - file: /etc/kubernetes/kubelet.kubeconfig

{% else %}

/etc/init.d/kubelet:
  file.managed:
    - source: salt://kubernetes/files/kubelet/initd
    - user: root
    - group: root
    - mode: 755

{% endif %}

/etc/kubernetes/manifests/cadvisor.manifest:
  file.managed:
    - source: salt://kubernetes/files/manifest/cadvisor.manifest
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - template: jinja

kubelet:
  service.running:
    - enable: True
    - watch:
      - file: /usr/local/bin/kubelet
{% if pillar.get('is_systemd') %}
      - file: {{ pillar.get('systemd_system_path') }}/kubelet.service
{% else %}
      - file: /etc/init.d/kubelet
{% endif %}
{% if grains['os_family'] == 'RedHat' %}
      - file: /usr/lib/systemd/system/kubelet.service
{% endif %}
      - file: {{ environment_file }}
      - file: /etc/kubernetes/kubelet.kubeconfig
{%- endif %}
18
kubernetes/pool/service.sls
Normal file
@ -0,0 +1,18 @@
{%- from "kubernetes/map.jinja" import pool with context %}
{%- from "kubernetes/map.jinja" import common with context %}
{%- if pool.enabled %}

include:
  - kubernetes._common

kubernetes_pool_binaries:
  cmd.run:
    - names:
      - "cp /root/apt.tcpcloud.eu/kubernetes/bin/{{ common.binaries_version }}/kube-proxy /usr/local/bin/"
      - "cp /root/apt.tcpcloud.eu/kubernetes/bin/{{ common.binaries_version }}/kubectl /usr/bin/"
      - "cp /root/apt.tcpcloud.eu/kubernetes/bin/{{ common.binaries_version }}/kubelet /usr/local/bin/"
    - unless: test -f /usr/local/bin/kubelet && test -f /usr/local/bin/kube-proxy && test -f /usr/bin/kubectl
    - require:
      - cmd: kubernetes_binaries

{%- endif %}
6
metadata/service/common.yml
Normal file
@ -0,0 +1,6 @@
parameters:
  kubernetes:
    common:
      binaries_version: v1.1.1
      network:
        engine: none
6
metadata/service/control/cluster.yml
Normal file
@ -0,0 +1,6 @@
applications:
- kubernetes
parameters:
  kubernetes:
    control:
      enabled: true
65
metadata/service/master/cluster.yml
Normal file
@ -0,0 +1,65 @@
applications:
- kubernetes
classes:
- service.kubernetes.support
- system.nginx.server.single
- service.kubernetes.common
parameters:
  nginx:
    server:
      site:
        kubernetes_master:
          enabled: true
          type: kubernetes
          name: master
          host:
            name: ${_param:nginx_kubernetes_master_host}
  kubernetes:
    master:
      enabled: true
      version: v1.2.0
      registry: tcpcloud
      service_addresses: 10.254.0.0/16
      admin:
        username: admin
        password: password
      kubelet:
        allow_privileged: True
      apiserver:
        address: ${_param:apiserver_address}
        port: 8080
      etcd:
        host: 10.10.6.187
        token: ca939ec9c2a17b0786f6d411fe019e9b
        name: ${linux:system:name}
        members:
        - host: ${_param:cluster_node01_address}
          name: ${_param:cluster_node01_hostname}
        - host: ${_param:cluster_node02_address}
          name: ${_param:cluster_node02_hostname}
        - host: ${_param:cluster_node03_address}
          name: ${_param:cluster_node03_hostname}
      addons:
        dns:
          enabled: true
          replicas: 1
          domain: cluster.local
          server: 10.254.0.10
        ui:
          enabled: true
          public_ip: 185.22.97.131
        heapster_influxdb:
          enabled: true
          public_ip: 185.22.97.132
      token:
        admin: DFvQ8GJ9JD4fKNfuyEddw3rjnFTkUKsv
        kubelet: 7bN5hJ9JD4fKjnFTkUKsvVNfuyEddw3r
        kube_proxy: DFvQ8GelB7afH3wClC9romaMPhquyyEe
        scheduler: HY1UUxEPpmjW4a1dDLGIANYQp1nZkLDk
        controller_manager: EreGh6AnWf8DxH8cYavB2zS029PUi7vx
        logging: MJkXKdbgqRmTHSa2ykTaOaMykgO6KcEf
        monitoring: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
        dns: RAFeVSE4UvsCz4gk3KYReuOI5jsZ1Xt3
      ca: kubernetes
      storage:
        engine: none
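
The `${_param:...}` references are reclass interpolation and must be resolvable at the cluster level; a hypothetical fragment of a cluster model that satisfies them (names and addresses are illustrative):

.. code-block:: yaml

    _param:
      apiserver_address: 10.10.6.187
      cluster_node01_address: 10.10.6.181
      cluster_node01_hostname: node01
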
61
metadata/service/master/single.yml
Normal file
@ -0,0 +1,61 @@
applications:
- kubernetes
classes:
- service.kubernetes.support
- system.nginx.server.single
- service.kubernetes.common
parameters:
  nginx:
    server:
      site:
        kubernetes_master:
          enabled: true
          type: kubernetes
          name: master
          host:
            name: ${_param:nginx_kubernetes_master_host}
  kubernetes:
    master:
      enabled: true
      version: v1.2.0
      registry: tcpcloud
      service_addresses: 10.254.0.0/16
      admin:
        username: admin
        password: password
      kubelet:
        allow_privileged: True
      apiserver:
        address: ${_param:apiserver_address}
        port: 8080
      etcd:
        host: 127.0.0.1
        token: ca939ec9c2a17b0786f6d411fe019e9b
        name: ${linux:system:name}
        members:
        - host: ${_param:apiserver_address}
          name: ${linux:system:name}
      addons:
        dns:
          enabled: true
          replicas: 1
          domain: cluster.local
          server: 10.254.0.10
        ui:
          enabled: true
          public_ip: 185.22.97.131
        heapster_influxdb:
          enabled: true
          public_ip: 185.22.97.132
      token:
        admin: DFvQ8GJ9JD4fKNfuyEddw3rjnFTkUKsv
        kubelet: 7bN5hJ9JD4fKjnFTkUKsvVNfuyEddw3r
        kube_proxy: DFvQ8GelB7afH3wClC9romaMPhquyyEe
        scheduler: HY1UUxEPpmjW4a1dDLGIANYQp1nZkLDk
        controller_manager: EreGh6AnWf8DxH8cYavB2zS029PUi7vx
        logging: MJkXKdbgqRmTHSa2ykTaOaMykgO6KcEf
        monitoring: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
        dns: RAFeVSE4UvsCz4gk3KYReuOI5jsZ1Xt3
      ca: kubernetes
      storage:
        engine: none
33
metadata/service/pool/cluster.yml
Normal file
@ -0,0 +1,33 @@
applications:
- kubernetes
classes:
- service.kubernetes.support
- service.kubernetes.common
parameters:
  kubernetes:
    pool:
      enabled: true
      version: v1.2.0
      master:
        host: ${_param:cluster_vip_address}
        apiserver:
          members:
          - host: ${_param:cluster_node01_address}
          - host: ${_param:cluster_node02_address}
          - host: ${_param:cluster_node03_address}
        etcd:
          members:
          - host: ${_param:cluster_node01_address}
          - host: ${_param:cluster_node02_address}
          - host: ${_param:cluster_node03_address}
      address: 0.0.0.0
      cluster_dns: 10.254.0.10
      cluster_domain: cluster.local
      kubelet:
        config: /etc/kubernetes/manifests
        allow_privileged: True
        frequency: 5s
      token:
        kubelet: 7bN5hJ9JD4fKjnFTkUKsvVNfuyEddw3r
        kube_proxy: DFvQ8GelB7afH3wClC9romaMPhquyyEe
      ca: kubernetes
30
metadata/service/pool/single.yml
Normal file
@ -0,0 +1,30 @@
applications:
- kubernetes
classes:
- service.kubernetes.support
- service.kubernetes.common
parameters:
  kubernetes:
    pool:
      enabled: true
      version: v1.2.0
      master:
        host: ${_param:master_address}
        apiserver:
          members:
          - host: ${_param:master_address}
        etcd:
          members:
          - host: ${_param:master_address}
      address: 0.0.0.0
      cluster_dns: 10.254.0.10
      allow_privileged: True
      cluster_domain: cluster.local
      kubelet:
        config: /etc/kubernetes/manifests
        allow_privileged: True
        frequency: 5s
      token:
        kubelet: 7bN5hJ9JD4fKjnFTkUKsvVNfuyEddw3r
        kube_proxy: DFvQ8GelB7afH3wClC9romaMPhquyyEe
      ca: kubernetes
11
metadata/service/support.yml
Normal file
@ -0,0 +1,11 @@
parameters:
  kubernetes:
    _support:
      collectd:
        enabled: false
      heka:
        enabled: false
      sensu:
        enabled: false
      sphinx:
        enabled: true