update sources of k8s and calico installation

move basic k8s setup to common
copy cni from hyperkube
configurable calico node image
use calico/cni image for obtaining CNIs
use calico/ctl image for obtaining calicoctl binary
add cross requirement for k8s services and hyperkube
update metadata for new pillar model
update manifests to use hyperkube from common

Change-Id: I6e5e84cf252399120141ad941ac7aa6dae7c16ac
This commit is contained in:
Tomáš Kukrál 2017-01-18 14:30:09 +01:00
parent 32190a432f
commit 189da4b92b
23 changed files with 219 additions and 130 deletions

View File

@ -1,6 +1,18 @@
kubernetes formula
==================
2017.1.1 (2017-01-18)
- move basic k8s setup to common
- copy cni from hyperkube
- configurable calico node image
- use calico/cni image for obtaining CNIs
- use calico/ctl image for obtaining calicoctl binary
- add cross requirement for k8s services and hyperkube
- update metadata for new pillar model
- update manifests to use hyperkube from common
2016.8.3 (2016-08-12)
- remove obsolete kube-addons scripts

View File

@ -16,6 +16,23 @@ Extended on Contrail contribution https://github.com/Juniper/kubernetes/blob/ope
Sample pillars
==============
**REQUIRED:** Define the images to use for hyperkube, the CNIs and calicoctl
.. code-block:: yaml
parameters:
kubernetes:
common:
hyperkube:
image: gcr.io/google_containers/hyperkube:v1.4.6
pool:
network:
calicoctl:
image: calico/ctl
cni:
image: calico/cni
Containers on pool definitions in pool.service.local
.. code-block:: yaml

View File

@ -1 +1 @@
2016.8.3
2017.1.1

View File

@ -16,4 +16,85 @@ flannel-tar:
- source_hash: md5=972c717254775bef528f040af804f2cc
- archive_format: tar
- if_missing: /usr/local/src/flannel/flannel-0.5.5/
{%- endif %}
{%- endif %}
{%- if common.hyperkube %}
/root/.bashrc:
file.append:
- text: alias kubectl="hyperkube kubectl"
- makedirs: True
/tmp/hyperkube:
file.directory:
- user: root
- group: root
hyperkube-copy:
dockerng.running:
- image: {{ common.hyperkube.image }}
- command: cp -v /hyperkube /tmp/hyperkube
- binds:
- /tmp/hyperkube/:/tmp/hyperkube/
- force: True
- require:
- file: /tmp/hyperkube
/usr/bin/hyperkube:
file.managed:
- source: /tmp/hyperkube/hyperkube
- mode: 751
- makedirs: true
- user: root
- group: root
- require:
- dockerng: hyperkube-copy
/etc/systemd/system/kubelet.service:
file.managed:
- source: salt://kubernetes/files/systemd/kubelet.service
- template: jinja
- user: root
- group: root
- mode: 644
/etc/kubernetes/config:
file.absent
/etc/kubernetes/manifests:
file.directory:
- user: root
- group: root
- mode: 0751
{%- if not pillar.kubernetes.pool is defined %}
/etc/default/kubelet:
file.managed:
- source: salt://kubernetes/files/kubelet/default.master
- template: jinja
- user: root
- group: root
- mode: 644
{%- else %}
/etc/default/kubelet:
file.managed:
- source: salt://kubernetes/files/kubelet/default.pool
- template: jinja
- user: root
- group: root
- mode: 644
{%- endif %}
kubelet_service:
service.running:
- name: kubelet
- enable: True
- watch:
- file: /etc/default/kubelet
- file: /usr/bin/hyperkube
{% endif %}

View File

@ -35,7 +35,7 @@ ExecStart=/usr/bin/docker run --net=host --privileged \
-v {{ volume }} \
{%- endfor %}
{%- endif %}
{{ pool.network.get('image', 'calico/node') }}:{{ pool.network.get('image', 'latest') }}
{{ pool.network.get('image', 'calico/node') }}
Restart=always
RestartSec=10s
@ -43,4 +43,4 @@ RestartSec=10s
ExecStop=-/usr/bin/docker stop calico-node
[Install]
WantedBy=multi-user.target
WantedBy=multi-user.target

View File

@ -1,4 +1,5 @@
{%- from "kubernetes/map.jinja" import master with context %}
{%- from "kubernetes/map.jinja" import common with context %}
apiVersion: v1
kind: Pod
metadata:
@ -11,7 +12,7 @@ spec:
terminationGracePeriodSeconds: 30
containers:
- name: kube-apiserver
image: {{ master.registry.host }}/hyperkube:{{ master.version }}
image: {{ common.hyperkube.image }}
command:
- /hyperkube
- apiserver

View File

@ -1,4 +1,5 @@
{%- from "kubernetes/map.jinja" import master with context %}
{%- from "kubernetes/map.jinja" import common with context %}
apiVersion: v1
kind: Pod
metadata:
@ -11,7 +12,7 @@ spec:
terminationGracePeriodSeconds: 30
containers:
- name: kube-controller-manager
image: {{ master.registry.host }}/hyperkube:{{ master.version }}
image: {{ common.hyperkube.image }}
command:
- /hyperkube
- controller-manager

View File

@ -1,4 +1,5 @@
{%- from "kubernetes/map.jinja" import pool with context %}
{%- from "kubernetes/map.jinja" import common with context %}
apiVersion: v1
kind: Pod
metadata:
@ -8,7 +9,7 @@ spec:
hostNetwork: true
containers:
- name: kube-proxy
image: {{ pool.registry.host }}/hyperkube:{{ pool.version }}
image: {{ common.hyperkube.image }}
resources:
requests:
cpu: 200m
@ -48,4 +49,4 @@ spec:
name: varlog
- hostPath:
path: /var/run/dbus/system_bus_socket
name: dbus
name: dbus

View File

@ -1,4 +1,5 @@
{%- from "kubernetes/map.jinja" import master with context %}
{%- from "kubernetes/map.jinja" import common with context %}
apiVersion: v1
kind: Pod
metadata:
@ -12,7 +13,7 @@ spec:
terminationGracePeriodSeconds: 30
containers:
- name: kube-scheduler
image: {{ master.registry.host }}/hyperkube:{{ master.version }}
image: {{ common.hyperkube.image }}
imagePullPolicy: IfNotPresent
command:
- hyperkube
@ -38,4 +39,4 @@ spec:
volumes:
- hostPath:
path: /var/log/kube-scheduler.log
name: logfile
name: logfile

View File

@ -124,47 +124,10 @@ master_services:
- file: /etc/default/kube-apiserver
- file: /etc/default/kube-scheduler
- file: /etc/default/kube-controller-manager
- file: /usr/bin/hyperkube
{%- endif %}
{%- if not pillar.kubernetes.pool is defined %}
/usr/bin/hyperkube:
file.managed:
- source: {{ master.hyperkube.get('source', 'http://apt.tcpcloud.eu/kubernetes/bin/') }}{{ master.version }}/hyperkube
- source_hash: md5={{ master.hyperkube.hash }}
- mode: 751
- makedirs: true
- user: root
- group: root
/etc/systemd/system/kubelet.service:
file.managed:
- source: salt://kubernetes/files/systemd/kubelet.service
- template: jinja
- user: root
- group: root
- mode: 644
/etc/kubernetes/config:
file.absent
/etc/default/kubelet:
file.managed:
- source: salt://kubernetes/files/kubelet/default.master
- template: jinja
- user: root
- group: root
- mode: 644
kubelet_service:
service.running:
- name: kubelet
- enable: True
- watch:
- file: /etc/default/kubelet
{%- endif %}
{%- for name,namespace in master.namespace.iteritems() %}
@ -204,4 +167,4 @@ kubelet_service:
{%- endif %}
{%- endif %}
{%- endif %}

View File

@ -7,9 +7,9 @@
kubernetes_addons_{{ addon_name }}:
cmd.run:
- name: |
hyperkube kubectl create -f /etc/kubernetes/addons/{{ addon_name }}
hyperkube kubectl apply -f /etc/kubernetes/addons/{{ addon_name }}
- unless: "hyperkube kubectl get rc {{ addon.get('name', addon_name) }} --namespace=kube-system"
{%- endif %}
{%- endfor %}
{%- endif %}
{%- endif %}

View File

@ -1,31 +1,52 @@
{%- from "kubernetes/map.jinja" import pool with context %}
{%- if pool.enabled %}
/tmp/calico/:
file.directory:
- user: root
- group: root
copy-calico-ctl:
dockerng.running:
- image: {{ pool.network.calicoctl.image }}
copy-calico-ctl-cmd:
cmd.run:
- name: docker cp copy-calico-ctl:calicoctl /tmp/calico/
- require:
- dockerng: copy-calico-ctl
/usr/bin/calicoctl:
file.managed:
- source: {{ pool.network.get('source', 'https://github.com/projectcalico/calico-containers/releases/download/') }}{{ pool.network.version }}/calicoctl
- source_hash: md5={{ pool.network.hash }}
- source: /tmp/calico/calicoctl
- mode: 751
- user: root
- group: root
- require:
- cmd: copy-calico-ctl-cmd
/opt/cni/bin/calico:
copy-calico-cni:
dockerng.running:
- image: {{ pool.network.cni.image }}
- command: cp -vr /opt/cni/bin/ /tmp/calico/
- binds:
- /tmp/calico/:/tmp/calico/
- force: True
{%- for filename in ['calico', 'calico-ipam'] %}
/opt/cni/bin/{{ filename }}:
file.managed:
- source: {{ pool.network.cni.get('source', 'https://github.com/projectcalico/calico-cni/releases/download/') }}{{ pool.network.cni.version }}/calico
- source_hash: md5={{ pool.network.cni.hash }}
- mode: 751
- makedirs: true
- user: root
- group: root
/opt/cni/bin/calico-ipam:
file.managed:
- source: {{ pool.network.ipam.get('source', 'https://github.com/projectcalico/calico-cni/releases/download/') }}{{ pool.network.ipam.version }}/calico-ipam
- source_hash: md5={{ pool.network.ipam.hash }}
- source: /tmp/calico/bin/{{ filename }}
- mode: 751
- makedirs: true
- user: root
- group: root
- require:
- dockerng: copy-calico-cni
- require_in:
- service: calico_node
{%- endfor %}
/etc/cni/net.d/10-calico.conf:
file.managed:
@ -64,4 +85,4 @@ calico_node:
- file: /etc/systemd/system/calico-node.service
{%- endif %}
{%- endif %}
{%- endif %}

View File

@ -1,14 +1,39 @@
{%- from "kubernetes/map.jinja" import pool with context %}
{%- from "kubernetes/map.jinja" import common with context %}
{%- if pool.enabled %}
cni-tar:
archive:
- extracted
- name: /opt/cni/bin
- source: https://github.com/containernetworking/cni/releases/download/{{ pool.cni.version }}/cni-{{ pool.cni.version }}.tgz
- tar_options: v
- source_hash: md5={{ pool.cni.hash }}
- archive_format: tar
- if_missing: /opt/cni/bin/loopback
{%- if common.hyperkube %}
/tmp/cni/:
file.directory:
- user: root
- group: root
copy-network-cni:
dockerng.running:
- image: {{ common.hyperkube.image }}
- command: cp -vr /opt/cni/bin/ /tmp/cni/
- binds:
- /tmp/cni/:/tmp/cni/
- force: True
- require:
- file: /tmp/cni/
{%- for filename in ['cnitool', 'flannel', 'tuning', 'bridge', 'ipvlan', 'loopback', 'macvlan', 'ptp', 'dhcp', 'host-local', 'noop'] %}
/opt/cni/bin/{{ filename }}:
file.managed:
- source: /tmp/cni/bin/{{ filename }}
- user: root
- group: root
- mode: 755
- makedirs: True
- watch_in:
- service: kubelet_service
- require:
- dockerng: copy-network-cni
{%- endfor %}
{%- endif %}
{%- endif %}

View File

@ -1,12 +1,12 @@
{%- from "kubernetes/map.jinja" import pool with context %}
include:
- kubernetes.pool.cni
- kubernetes.pool.service
{%- if pool.network.engine == "calico" %}
- kubernetes.pool.calico
{%- endif %}
- kubernetes.pool.cni
- kubernetes.pool.kubelet
{%- if pool.network.engine == "flannel" %}
- kubernetes.pool.flannel
{%- endif %}
- kubernetes.pool.kube-proxy
- kubernetes.pool.kube-proxy

View File

@ -45,6 +45,7 @@ pool_services:
- enable: True
- watch:
- file: /etc/default/kube-proxy
- file: /usr/bin/hyperkube
{%- endif %}

View File

@ -1,14 +1,6 @@
{%- from "kubernetes/map.jinja" import pool with context %}
{%- if pool.enabled %}
/etc/default/kubelet:
file.managed:
- source: salt://kubernetes/files/kubelet/default.pool
- template: jinja
- user: root
- group: root
- mode: 644
/etc/kubernetes/kubelet.kubeconfig:
file.managed:
- source: salt://kubernetes/files/kubelet/kubelet.kubeconfig
@ -18,9 +10,6 @@
- mode: 644
- makedirs: true
/etc/kubernetes/config:
file.absent
manifest-dir-create:
file.directory:
- name: /etc/kubernetes/manifests
@ -55,28 +44,4 @@ manifest-dir-create:
{%- endif %}
/usr/bin/hyperkube:
file.managed:
- source: {{ pool.hyperkube.get('source', {}).get('url', 'http://apt.tcpcloud.eu/kubernetes/bin/') }}{{ pool.version }}/hyperkube
- source_hash: md5={{ pool.hyperkube.hash }}
- mode: 751
- makedirs: true
- user: root
- group: root
/etc/systemd/system/kubelet.service:
file.managed:
- source: salt://kubernetes/files/systemd/kubelet.service
- template: jinja
- user: root
- group: root
- mode: 644
kubelet_service:
service.running:
- name: kubelet
- enable: True
- watch:
- file: /etc/default/kubelet
{%- endif %}

View File

@ -2,4 +2,4 @@ parameters:
kubernetes:
common:
network:
engine: none
engine: none

View File

@ -7,7 +7,6 @@ parameters:
kubernetes:
master:
enabled: true
version: ${_param:kubernetes_version}
registry:
host: tcpcloud
service_addresses: 10.254.0.0/16
@ -33,6 +32,10 @@ parameters:
replicas: 1
domain: cluster.local
server: 10.254.0.10
dashboard:
enabled: True
heapster_influxdb:
enabled: False
token:
admin: ${_param:kubernetes_admin_token}
kubelet: ${_param:kubernetes_kubelet_token}
@ -54,4 +57,4 @@ parameters:
- host: ${_param:cluster_node02_address}
port: 4001
- host: ${_param:cluster_node03_address}
port: 4001
port: 4001

View File

@ -7,7 +7,6 @@ parameters:
kubernetes:
master:
enabled: true
version: ${_param:kubernetes_version}
registry:
host: tcpcloud
service_addresses: 10.254.0.0/16
@ -29,6 +28,10 @@ parameters:
replicas: 1
domain: cluster.local
server: 10.254.0.10
dashboard:
enabled: True
heapster_influxdb:
enabled: False
token:
admin: ${_param:kubernetes_admin_token}
kubelet: ${_param:kubernetes_kubelet_token}
@ -46,4 +49,4 @@ parameters:
etcd:
members:
- host: ${_param:single_address}
port: 4001
port: 4001

View File

@ -7,7 +7,6 @@ parameters:
kubernetes:
pool:
enabled: true
version: ${_param:kubernetes_version}
registry:
host: tcpcloud
host:
@ -42,6 +41,3 @@ parameters:
port: 4001
- host: ${_param:cluster_node03_address}
port: 4001
cni:
version: v0.3.0
hash: 58237532e1b2b1be1fb3d12457da85f5

View File

@ -7,7 +7,6 @@ parameters:
kubernetes:
pool:
enabled: true
version: ${_param:kubernetes_version}
registry:
host: tcpcloud
host:
@ -35,6 +34,3 @@ parameters:
members:
- host: ${_param:master_address}
port: 4001
cni:
version: v0.3.0
hash: 58237532e1b2b1be1fb3d12457da85f5

View File

@ -2,6 +2,8 @@ kubernetes:
common:
network:
engine: none
hyperkube:
image: hyperkube-amd64:v1.5.0-beta.3-1
master:
addons:
dns:

View File

@ -2,6 +2,8 @@ kubernetes:
common:
network:
engine: none
hyperkube:
image: hyperkube-amd64:v1.5.0-beta.3-1
pool:
enabled: true
version: v1.2.0
@ -27,9 +29,10 @@ kubernetes:
kube_proxy: DFvQ8GelB7afH3wClC9romaMPhquyyEe
ca: kubernetes
network:
calicoctl:
image: calico/ctl
cni:
hash: 06550617ec199e89a57c94c88c891422
version: v1.3.1
image: calico/cni
engine: calico
hash: c15ae251b633109e63bf128c2fbbc34a
ipam:
@ -46,6 +49,3 @@ kubernetes:
port: 4001
hyperkube:
hash: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
cni:
version: v0.3.0
hash: 58237532e1b2b1be1fb3d12457da85f5