
Kubernetes workload

This workload accomplishes the following:

 1. Provision 3 nodes or the number of nodes configured by stack_size
 2. Create security group
 3. Add security rules to allow ping, ssh, and kubernetes ports
 4. Install common software onto each node such as docker
 5. Download all the required software onto the master node
 6. Setup the master node with kube-apiserver, kube-controller-manager
    and kube-scheduler and configure each Kubernetes service on the
    master node
 7. Download software for the worker nodes from the master node.
 8. Setup flanneld, docker, kubelet and kube-proxy on each worker node.
 9. Install kubernetes dashboard and dns services.
 10. Install cockroachdb.

Change-Id: I3f7ec234aaa72dd6b2542e53e7eae2673ef7b408
Tong Li committed 2 years ago (commit 2c89e57bc5)
31 changed files with 1683 additions and 0 deletions
  1. workloads/ansible/shade/k8s/.gitignore (+8, -0)
  2. workloads/ansible/shade/k8s/README.md (+230, -0)
  3. workloads/ansible/shade/k8s/ansible.cfg (+3, -0)
  4. workloads/ansible/shade/k8s/group_vars/all.yml (+7, -0)
  5. workloads/ansible/shade/k8s/hosts (+1, -0)
  6. workloads/ansible/shade/k8s/roles/common/tasks/main.yml (+66, -0)
  7. workloads/ansible/shade/k8s/roles/common/templates/k8s.conf.j2 (+1, -0)
  8. workloads/ansible/shade/k8s/roles/common/templates/k8s.service.j2 (+13, -0)
  9. workloads/ansible/shade/k8s/roles/master/tasks/main.yml (+134, -0)
  10. workloads/ansible/shade/k8s/roles/master/templates/etcd.coreos.j2 (+15, -0)
  11. workloads/ansible/shade/k8s/roles/master/templates/etcd.ubuntu.j2 (+15, -0)
  12. workloads/ansible/shade/k8s/roles/post/tasks/apply.yml (+30, -0)
  13. workloads/ansible/shade/k8s/roles/post/tasks/destroy.yml (+1, -0)
  14. workloads/ansible/shade/k8s/roles/post/templates/cockroachdb.j2 (+146, -0)
  15. workloads/ansible/shade/k8s/roles/post/templates/dashboard.j2 (+80, -0)
  16. workloads/ansible/shade/k8s/roles/post/templates/dnscontroller.j2 (+151, -0)
  17. workloads/ansible/shade/k8s/roles/postprovision/tasks/apply.yml (+87, -0)
  18. workloads/ansible/shade/k8s/roles/postprovision/tasks/destroy.yml (+20, -0)
  19. workloads/ansible/shade/k8s/roles/prepare/tasks/apply.yml (+92, -0)
  20. workloads/ansible/shade/k8s/roles/prepare/tasks/destroy.yml (+34, -0)
  21. workloads/ansible/shade/k8s/roles/provision/tasks/apply.yml (+72, -0)
  22. workloads/ansible/shade/k8s/roles/provision/tasks/destroy.yml (+27, -0)
  23. workloads/ansible/shade/k8s/roles/provision/templates/coreos.j2 (+2, -0)
  24. workloads/ansible/shade/k8s/roles/provision/templates/ubuntu.j2 (+6, -0)
  25. workloads/ansible/shade/k8s/roles/worker/tasks/main.yml (+150, -0)
  26. workloads/ansible/shade/k8s/roles/worker/templates/docker.coreos.j2 (+27, -0)
  27. workloads/ansible/shade/k8s/roles/worker/templates/docker.ubuntu.j2 (+25, -0)
  28. workloads/ansible/shade/k8s/roles/worker/templates/kubeconfig.j2 (+15, -0)
  29. workloads/ansible/shade/k8s/site.yml (+131, -0)
  30. workloads/ansible/shade/k8s/vars/coreos.yml (+47, -0)
  31. workloads/ansible/shade/k8s/vars/ubuntu.yml (+47, -0)

workloads/ansible/shade/k8s/.gitignore (+8, -0)

@@ -0,0 +1,8 @@
1
+*.out
2
+vars/*
3
+run/*
4
+site.retry
5
+*/**/*.log
6
+*/**/.DS_Store
7
+*/**/._
8
+*/**/*.tfstate*

workloads/ansible/shade/k8s/README.md (+230, -0)

@@ -0,0 +1,230 @@
1
+# Kubernetes Ansible deployments on OpenStack Cloud
2
+
3
+This ansible playbook will install a 3 node kubernetes cluster. The first
4
+node will be used as the master node, and the rest of the nodes will be used
5
+as kubernetes worker nodes.
6
+
7
+Once the script finishes, a kubernetes cluster should be ready for use.
8
+
9
+## Status
10
+
11
+In progress
12
+
13
+## Requirements
14
+
15
+- [Install Ansible](http://docs.ansible.com/ansible/intro_installation.html)
16
+- [Install openstack shade](http://docs.openstack.org/infra/shade/installation.html)
17
+- Make sure there is an Ubuntu cloud image available on your cloud.
18
+- Clone this project into a directory.
19
+
20
+
21
+If you are using an Ubuntu system as the Ansible controller, then you can
22
+easily set up the environment by running the following commands. If you have
23
+another system as your Ansible controller, you can follow similar steps; the
24
+commands may not be exactly the same, but the steps you need to perform
25
+should be identical.
26
+
27
+    sudo apt-get update
28
+
29
+    sudo apt-get install python-dev python-pip libssl-dev libffi-dev -y
30
+    sudo pip install --upgrade pip
31
+
32
+    sudo pip install six==1.10.0
33
+    sudo pip install shade==1.16.0
34
+    sudo pip install ansible==2.2.1.0
35
+    sudo ansible-galaxy install vmware.coreos-bootstrap
36
+
37
+    git clone https://github.com/openstack/interop-workloads.git
38
+
39
+This workload requires that you use Ansible version 2.2.0.0 or above due to
40
+floating IP allocation upgrades in Ansible OpenStack cloud modules.
41
+
42
+### Prep
43
+
44
+#### Deal with ssh keys for Openstack Authentication
45
+
46
+If you do not have an ssh key, you should create one; an example command to
47
+do that is provided below. Once you have a key pair, ensure your local
48
+ssh-agent is running and your ssh key has been added. This step is required;
49
+otherwise you will have to enter the passphrase manually whenever the script
50
+connects, and the script can fail. If you really do not want to deal with a
51
+passphrase, you can create a key pair without one::
52
+
53
+    ssh-keygen -t rsa -f ~/.ssh/interop
54
+    eval $(ssh-agent -s)
55
+    ssh-add ~/.ssh/interop
56
+
57
+#### General Openstack Settings
58
+
59
+Ansible's OpenStack cloud modules are used to provision compute resources
60
+against an OpenStack cloud. Before you run the script, the cloud environment
61
+will have to be specified. Sample files are provided in the vars directory.
62
+If you target ubuntu, use vars/ubuntu.yml as the sample; if you target
63
+coreos, use vars/coreos.yml as the sample to create your own environment
64
+file. Here is an example of the file::
65
+
66
+    auth: {
67
+      auth_url: "http://x.x.x.x:5000/v3",
68
+      username: "demo",
69
+      password: "{{ password }}",
70
+      domain_name: "default",
71
+      project_name: "demo"
72
+    }
73
+
74
+    app_env: {
75
+      target_os: "ubuntu",
76
+      image_name: "ubuntu-16.04",
77
+      region_name: "RegionOne",
78
+      availability_zone: "nova",
79
+      validate_certs: True,
+      ssh_user: "ubuntu",
80
+      private_net_name: "my_tenant_net",
81
+      flavor_name: "m1.medium",
82
+      public_key_file: "/home/ubuntu/.ssh/interop.pub",
83
+      private_key_file: "/home/ubuntu/.ssh/interop",
84
+      stack_size: 3,
85
+      volume_size: 2,
86
+      block_device_name: "/dev/vdb",
87
+
88
+      domain: "cluster.local",
89
+      pod_network: {
90
+        Network: "172.17.0.0/16",
91
+        SubnetLen: 24,
92
+        SubnetMin: "172.17.0.0",
93
+        SubnetMax: "172.17.255.0",
94
+        Backend: {
95
+          Type: "udp",
96
+          Port: 8285
97
+        }
98
+      },
99
+      service_ip_range: "172.16.0.0/24",
100
+      dns_service_ip: "172.16.0.4",
+      dashboard_service_ip: "172.16.0.5",
101
+
102
+      cockroachdb_repo: "",
+      flannel_repo: "https://github.com/coreos/flannel/releases/download/v0.7.0/flannel-v0.7.0-linux-amd64.tar.gz",
103
+      k8s_repo: "https://storage.googleapis.com/kubernetes-release/release/v1.5.3/bin/linux/amd64/"
104
+    }
105
+
106
+The values of the auth section should be provided by your cloud provider. When
107
+using the keystone v2.0 API, you will not need to set the domain name. You can
108
+leave region_name empty if you have just one region. You can also leave
109
+private_net_name empty if your cloud does not support tenant networks or you
110
+only have one tenant network. The private_net_name is only needed when you
111
+have multiple tenant networks. validate_certs should normally be set to True
112
+when your cloud uses TLS (SSL) and is not using a self-signed certificate. If
113
+your cloud is using a self-signed certificate, the certificate cannot be
114
+easily validated by ansible; you can skip validation by setting the parameter
115
+to False. Currently the only values available for target_os are ubuntu and
116
+coreos. Supported ubuntu releases are 16.04 and 16.10. The supported coreos
117
+image is the stable CoreOS OpenStack image.
118
+
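+For example, against a keystone v2.0 endpoint the auth section might look
+like the following (note there is no domain_name; the auth_url shown here is
+just a placeholder)::
+
+    auth: {
+      auth_url: "http://x.x.x.x:5000/v2.0",
+      username: "demo",
+      password: "{{ password }}",
+      project_name: "demo"
+    }
+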
119
+You should use a network for your OpenStack VMs which will be able to access
120
+the internet. In the example above, the parameter private_net_name was
121
+configured as my_tenant_net; this is the network that all your VMs will be
122
+connected to, and the network should be attached to a router which routes
123
+traffic to the external network.
124
+
125
+stack_size is set to 3 in the example configuration file. You can change that
126
+to any number you wish, but it must be 2 at minimum, in which case you will
127
+have one master node and one worker node for the k8s cluster. If you set
128
+stack_size to a bigger number, one node will be used as the master and the
129
+rest of the nodes will be used as workers. Please note that the master node
130
+will also act as a worker node.
131
+
132
+The public and private key files should be created before you run the workload.
133
+These keys can be located in any directory you prefer, as long as they are readable.
134
+
135
+volume_size and block_device_name are parameters that you can set to allow
136
+the workload script to provision the right size of cinder volume for
137
+k8s volumes. A cinder volume will be created, partitioned, formatted, and
138
+mounted on each worker and master node. The mount point is /storage. A pod or
139
+service should use hostPath to use the volume, as shown in the example below.
140
+
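+As a rough example, a pod spec could mount a directory under /storage with a
+hostPath volume such as the following (the sub-directory name here is only an
+illustration)::
+
+    volumes:
+    - name: datadir
+      hostPath:
+        path: /storage/mydata
+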
141
+The workload is currently developed using the flannel udp backend for k8s
142
+networking. Other networking configurations can be used by changing the
143
+Backend section of the pod_network parameter, but before you change the
144
+values, make sure that the underlying networking is configured correctly.
145
+
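+For instance, to try the flannel vxlan backend instead of udp, the Backend
+section of pod_network could be changed as below (8472 is the usual flannel
+vxlan port; this assumes your cloud network passes vxlan/UDP traffic)::
+
+    Backend: {
+      Type: "vxlan",
+      Port: 8472
+    }
+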
146
+The flannel_repo and k8s_repo parameters point to the official repositories
147
+of each component. You may choose to set up a local repository to avoid long
148
+download times, especially when your cloud is far away from these official
149
+repositories. To do that, you only need to set up an http server and place
150
+the following binaries in your http server directory.
151
+
152
+    - kubelet
153
+    - kubectl
154
+    - kube-proxy
155
+    - kube-apiserver
156
+    - kube-controller-manager
157
+    - kube-scheduler
158
+    - flannel-v0.7.0-linux-amd64.tar.gz
159
+
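+As a simple example, assuming the binaries above have been collected under
+~/k8s-repo on a host reachable from your cloud, a throwaway http server can
+be started with::
+
+    cd ~/k8s-repo
+    python -m SimpleHTTPServer 8080
+
+flannel_repo and k8s_repo can then be pointed at http://<server_ip>:8080/.
+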
160
+## Run the script to create a kubernetes cluster using coreos image
161
+
162
+Coreos images do not have python installed, so the nodes need to be
163
+bootstrapped. To do that, install a bootstrap role on your ansible controller
164
+first by executing the following command; this only needs to be done once. We
165
+simply use the vmware coreos bootstrap; you can choose other ones, but this is
166
+the one we have been using for testing.
167
+
168
+    ansible-galaxy install vmware.coreos-bootstrap
169
+
170
+With your cloud environment set, you should be able to run the script::
171
+
172
+    ansible-playbook -e "action=apply env=coreos password=XXXXX" site.yml
173
+
174
+The above command will stand up a kubernetes cluster in the environment
175
+defined in the vars/coreos.yml file. Replace XXXXX with your own password.
176
+
177
+## Run the script to create a kubernetes cluster using ubuntu image
178
+
179
+With your cloud environment set, you should be able to run the script::
180
+
181
+    ansible-playbook -e "action=apply env=ubuntu password=XXXXX" site.yml
182
+
183
+The above command will stand up a kubernetes cluster in the environment
184
+defined in the vars/ubuntu.yml file. Replace XXXXX with your own password.
185
+
186
+
187
+## The results of a successful workload run
188
+
189
+If everything goes well, it will accomplish the following::
190
+
191
+    1. Provision 3 nodes or the number of nodes configured by stack_size
192
+    2. Create security group
193
+    3. Add security rules to allow ping, ssh, and kubernetes ports
194
+    4. Install common software onto each node such as docker
195
+    5. Download all the required software onto the master node
196
+    6. Setup the master node with kube-apiserver, kube-controller-manager and
197
+       kube-scheduler and configure each Kubernetes service on the master node
198
+    7. Download software for the worker nodes from the master node.
199
+    8. Setup flanneld, docker, kubelet and kube-proxy on each worker node.
200
+    9. Install kubernetes dashboard and dns services.
201
+
202
+
203
+## The method to run just a play, not the entire playbook
204
+
205
+The script will create an ansible inventory file named runhosts during the
206
+very first play; the inventory file will be placed in the run directory of
207
+the playbook root. If you would like to run only specific plays, you can run
208
+the playbook like the following:
209
+
210
+    ansible-playbook -i run/runhosts -e "action=apply env=ubuntu password=XXXXX" site.yml \
211
+        --tags "common,master"
212
+
213
+The above command will use the runhosts inventory file and only run the plays
214
+named common and master; all other plays in the playbook will be skipped.
215
+
216
+
217
+## Next Steps
218
+
219
+### Check that it is up
220
+
221
+If there are no errors, you can use kubectl to work with your kubernetes
222
+cluster.
223
+
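+For example, the following commands, run on the master node, should list the
+cluster nodes and the addon pods deployed by this workload::
+
+    /opt/bin/kubectl get nodes
+    /opt/bin/kubectl get pods --all-namespaces
+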
224
+## Cleanup
225
+
226
+Once you're done with it, don't forget to nuke the whole thing::
227
+
228
+    ansible-playbook -e "action=destroy env=ubuntu password=XXXXX" site.yml
229
+
230
+The above command will destroy all the resources created.

workloads/ansible/shade/k8s/ansible.cfg (+3, -0)

@@ -0,0 +1,3 @@
1
+[defaults]
2
+inventory = ./hosts
3
+host_key_checking = False

workloads/ansible/shade/k8s/group_vars/all.yml (+7, -0)

@@ -0,0 +1,7 @@
1
+---
2
+k8suser: "k8suser"
3
+k8spass: "{{ lookup('password',
4
+             '/tmp/k8spassword chars=ascii_letters,digits length=8') }}"
5
+
6
+proxy_env: {
7
+}
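+# Note: proxy_env is passed to the plays via "environment:". If your nodes
+# need an http proxy to reach the internet, entries can be added above, e.g.
+# (illustrative values only):
+#   http_proxy: "http://proxy.example.com:3128"
+#   https_proxy: "http://proxy.example.com:3128"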

workloads/ansible/shade/k8s/hosts (+1, -0)

@@ -0,0 +1 @@
1
+cloud ansible_host=127.0.0.1 ansible_python_interpreter=python

workloads/ansible/shade/k8s/roles/common/tasks/main.yml (+66, -0)

@@ -0,0 +1,66 @@
1
+---
2
+- name: Setup the service path for coreos
3
+  set_fact:
4
+    service_path: "/etc/systemd/system/"
5
+  when: app_env.target_os == "coreos"
6
+
7
+- name: Setup the service path for ubuntu
8
+  set_fact:
9
+    service_path: "/lib/systemd/system/"
10
+  when: app_env.target_os == "ubuntu"
11
+
12
+- name: Install Docker Engine
13
+  apt:
14
+    name: docker.io
15
+    update_cache: no
16
+  when: app_env.target_os == "ubuntu"
17
+
18
+- name: Ensure config directories are present
19
+  file:
20
+    path: "{{ item }}"
21
+    state: directory
22
+    mode: 0755
23
+    owner: root
24
+  with_items:
25
+    - "/etc/kubernetes"
26
+    - "/opt"
27
+    - "/opt/bin"
28
+    - "~/.ssh"
29
+    - "~/.kube"
30
+
31
+- name: Place the certificate in the right place
32
+  copy:
33
+    src: "{{ item.src }}"
34
+    dest: "{{ item.target }}"
35
+    mode: 0400
36
+  with_items:
37
+    - { src: "{{ app_env.public_key_file }}", target: "~/.ssh/id_rsa.pub" }
38
+    - { src: "{{ app_env.private_key_file }}", target: "~/.ssh/id_rsa" }
39
+
40
+- name: List all k8s service on the node
41
+  stat:
42
+    path: "{{ service_path }}{{ item }}.service"
43
+  with_items:
44
+    - kubelet
45
+    - kube-proxy
46
+    - kube-controller-manager
47
+    - kube-scheduler
48
+    - kube-apiserver
49
+    - docker
50
+    - flanneld
51
+  register: k8s_services
52
+
53
+- name: Stop k8s related services if they exist
54
+  service:
55
+    name: "{{ item.item }}"
56
+    state: stopped
57
+  with_items: "{{ k8s_services.results }}"
58
+  when: item.stat.exists == true
59
+  no_log: True
60
+
61
+- name: Setup /etc/hosts on every node
62
+  lineinfile:
63
+    dest: /etc/hosts
64
+    line: "{{ item }}"
65
+    state: present
66
+  with_lines: cat "{{ playbook_dir }}/run/k8shosts"

workloads/ansible/shade/k8s/roles/common/templates/k8s.conf.j2 (+1, -0)

@@ -0,0 +1 @@
1
+DAEMON_ARGS="{{ item.value }}"

workloads/ansible/shade/k8s/roles/common/templates/k8s.service.j2 (+13, -0)

@@ -0,0 +1,13 @@
1
+[Unit]
2
+Description=Kubernetes on OpenStack {{ item }} Service
3
+
4
+[Service]
5
+EnvironmentFile=/etc/kubernetes/{{ item }}
6
+ExecStart=/opt/bin/{{ item }} "$DAEMON_ARGS"
7
+Restart=always
8
+RestartSec=2s
9
+StartLimitInterval=0
10
+KillMode=process
11
+
12
+[Install]
13
+WantedBy=multi-user.target

workloads/ansible/shade/k8s/roles/master/tasks/main.yml (+134, -0)

@@ -0,0 +1,134 @@
1
+---
2
+- name: Setup public and private IP variables
3
+  set_fact:
4
+    public_ip: "{{ ansible_host }}"
5
+    private_ip: "{{ hostvars[ansible_host].inter_ip }}"
6
+
7
+- name: Setup service path variables for coreos
8
+  set_fact:
9
+    service_path: "/etc/systemd/system/"
10
+  when: app_env.target_os == "coreos"
11
+
12
+- name: Setup service path variables for ubuntu
13
+  set_fact:
14
+    service_path: "/lib/systemd/system/"
15
+  when: app_env.target_os == "ubuntu"
16
+
17
+- name: Install etcd
18
+  apt:
19
+    name: etcd
20
+    update_cache: no
21
+  when: app_env.target_os == "ubuntu"
22
+
23
+- name: Download flannel package
24
+  get_url:
25
+    url: "{{ app_env.flannel_repo }}"
26
+    dest: /opt/bin/flanneld.tar.gz
27
+    force: no
28
+
29
+- name: Unpack flannel binaries
30
+  unarchive:
31
+    src: /opt/bin/flanneld.tar.gz
32
+    dest: /opt/bin
33
+    exclude:
34
+      - README.md
35
+      - mk-docker-opts.sh
36
+    copy: no
37
+
38
+- name: List all k8s binaries on the node
39
+  stat: "path=/opt/bin/{{ item }}"
40
+  with_items:
41
+    - kubelet
42
+    - kubectl
43
+    - kube-proxy
44
+    - kube-apiserver
45
+    - kube-controller-manager
46
+    - kube-scheduler
47
+  register: k8s_binaries
48
+
49
+- name: Download k8s binary files if they are not already on the master node
50
+  get_url:
51
+    url: "{{ app_env.k8s_repo }}{{ item.item }}"
52
+    dest: "/opt/bin/{{ item.item }}"
53
+    mode: "0555"
54
+  with_items: "{{ k8s_binaries.results }}"
55
+  when: item.stat.exists == false
56
+  no_log: True
57
+
58
+- name: Config services
59
+  template:
60
+    src: "roles/master/templates/etcd.{{ app_env.target_os }}.j2"
61
+    dest: "{{ service_path }}etcd.service"
62
+    mode: 0644
63
+
64
+- name: Reload services
65
+  command: systemctl daemon-reload
66
+
67
+- name: Enable and start etcd services
68
+  service:
69
+    name: "etcd"
70
+    enabled: yes
71
+    state: restarted
72
+
73
+- name: Reset etcd
74
+  uri:
75
+    url: "http://{{ private_ip }}:2379/v2/keys/{{ item }}?recursive=true"
76
+    method: DELETE
77
+    status_code: 200,202,204,404
78
+  with_items:
79
+    - coreos.com
80
+    - registry
81
+
82
+- name: Initialize the flanneld configuration in etcd
83
+  uri:
84
+    url: http://{{ private_ip }}:2379/v2/keys/coreos.com/network/config
85
+    method: PUT
86
+    body: >-
87
+      value={{ app_env.pod_network | to_nice_json(indent=2) }}
88
+    status_code: 200,201
89
+
90
+- name: Setup service parameters
91
+  set_fact:
92
+    apiserver_params: >-
93
+      --etcd-servers=http://{{ private_ip }}:2379
94
+      --service-cluster-ip-range={{ app_env.service_ip_range }}
95
+      --advertise-address={{ public_ip }}
96
+      --bind-address={{ private_ip }}
97
+      --insecure-bind-address={{ private_ip }}
98
+    controller_params: >-
99
+      --master=http://{{ private_ip }}:8080
100
+      --cluster-cidr={{ app_env.pod_network.Network }}
101
+      --cluster-name=k8sonos
102
+    scheduler_params: >-
103
+      --master=http://{{ private_ip }}:8080
104
+
105
+- name: Configure the services
106
+  template:
107
+    src: roles/common/templates/k8s.conf.j2
108
+    dest: "/etc/kubernetes/{{ item.name }}"
109
+    mode: 0644
110
+  with_items:
111
+    - { name: "kube-apiserver", value: "{{ apiserver_params }}" }
112
+    - { name: "kube-controller-manager", value: "{{ controller_params }}" }
113
+    - { name: "kube-scheduler", value: "{{ scheduler_params }}"}
114
+
115
+- name: Setup services for master node
116
+  template:
117
+    src: "roles/common/templates/k8s.service.j2"
118
+    dest: "{{ service_path }}{{ item }}.service"
119
+    mode: 0644
120
+  with_items:
121
+    - kube-apiserver
122
+    - kube-controller-manager
123
+    - kube-scheduler
124
+
125
+- name: Enable and start the services
126
+  service:
127
+    name: "{{ item }}"
128
+    enabled: yes
129
+    state: restarted
130
+  with_items:
131
+    - kube-apiserver
132
+    - kube-controller-manager
133
+    - kube-scheduler
134
+

workloads/ansible/shade/k8s/roles/master/templates/etcd.coreos.j2 (+15, -0)

@@ -0,0 +1,15 @@
1
+[Unit]
2
+Description=etcd2, even though the unit is named etcd
3
+
4
+[Service]
5
+Type=notify
6
+ExecStart=/bin/etcd2 \
7
+  --advertise-client-urls=http://{{ private_ip }}:2379 \
8
+  --listen-client-urls=http://{{ private_ip }}:2379
9
+Restart=always
10
+RestartSec=10s
11
+LimitNOFILE=40000
12
+TimeoutStartSec=0
13
+
14
+[Install]
15
+WantedBy=multi-user.target

workloads/ansible/shade/k8s/roles/master/templates/etcd.ubuntu.j2 (+15, -0)

@@ -0,0 +1,15 @@
1
+[Unit]
2
+Description=etcd
3
+
4
+[Service]
5
+Type=notify
6
+ExecStart=/usr/bin/etcd \
7
+  --advertise-client-urls=http://{{ private_ip }}:2379 \
8
+  --listen-client-urls=http://{{ private_ip }}:2379
9
+Restart=always
10
+RestartSec=10s
11
+LimitNOFILE=40000
12
+TimeoutStartSec=0
13
+
14
+[Install]
15
+WantedBy=multi-user.target

workloads/ansible/shade/k8s/roles/post/tasks/apply.yml (+30, -0)

@@ -0,0 +1,30 @@
1
+---
2
+- name: Setup public and private IP variables
3
+  set_fact:
4
+    public_ip: "{{ ansible_host }}"
5
+    private_ip: "{{ hostvars[ansible_host].inter_ip }}"
6
+
7
+- name: Upload addon service configuration files
8
+  template:
9
+    src: "roles/post/templates/{{ item }}.j2"
10
+    dest: "/etc/kubernetes/{{ item }}.yaml"
11
+    mode: 0644
12
+  with_items:
13
+    - dnscontroller
14
+    - dashboard
15
+    - cockroachdb
16
+
17
+- name: Label the master node
18
+  command: >-
19
+    /opt/bin/kubectl --server={{ private_ip }}:8080 label --overwrite=true
20
+    nodes master dashboardId=master
21
+
22
+- name: Create addon services
23
+  command: >-
24
+    /opt/bin/kubectl --server={{ private_ip }}:8080 create
25
+    -f /etc/kubernetes/{{ item }}.yaml
26
+  with_items:
27
+    - dnscontroller
28
+    - dashboard
29
+    - cockroachdb
30
+

workloads/ansible/shade/k8s/roles/post/tasks/destroy.yml (+1, -0)

@@ -0,0 +1 @@
1
+---

workloads/ansible/shade/k8s/roles/post/templates/cockroachdb.j2 (+146, -0)

@@ -0,0 +1,146 @@
1
+# Claim: This deployment file was originally developed by cockroachdb Lab
2
+#
3
+# For details, please follow the following link:
4
+# https://github.com/cockroachdb/cockroach/tree/master/cloud/kubernetes
5
+#
6
+
7
+apiVersion: v1
8
+kind: Service
9
+metadata:
10
+  name: cockroachdb-public
11
+  labels:
12
+    app: cockroachdb
13
+spec:
14
+  type: NodePort
15
+  ports:
16
+  - port: 26257
17
+    targetPort: 26257
18
+    nodePort: 32257
19
+    name: grpc
20
+  - port: 8080
21
+    targetPort: 8080
22
+    nodePort: 32256
23
+    name: http
24
+  selector:
25
+    app: cockroachdb
26
+---
27
+apiVersion: v1
28
+kind: Service
29
+metadata:
30
+  name: cockroachdb
31
+  labels:
32
+    app: cockroachdb
33
+  annotations:
34
+    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
35
+    prometheus.io/scrape: "true"
36
+    prometheus.io/path: "_status/vars"
37
+    prometheus.io/port: "8080"
38
+spec:
39
+  ports:
40
+  - port: 26257
41
+    targetPort: 26257
42
+    name: grpc
43
+  - port: 8080
44
+    targetPort: 8080
45
+    name: http
46
+  clusterIP: None
47
+  selector:
48
+    app: cockroachdb
49
+---
50
+apiVersion: policy/v1beta1
51
+kind: PodDisruptionBudget
52
+metadata:
53
+  name: cockroachdb-budget
54
+  labels:
55
+    app: cockroachdb
56
+spec:
57
+  selector:
58
+    matchLabels:
59
+      app: cockroachdb
60
+  minAvailable: 67%
61
+---
62
+apiVersion: apps/v1beta1
63
+kind: StatefulSet
64
+metadata:
65
+  name: cockroachdb
66
+spec:
67
+  serviceName: "cockroachdb"
68
+  replicas: {{ app_env.stack_size - 1 }}
69
+  template:
70
+    metadata:
71
+      labels:
72
+        app: cockroachdb
73
+      annotations:
74
+        scheduler.alpha.kubernetes.io/affinity: >
75
+            {
76
+              "podAntiAffinity": {
77
+                "preferredDuringSchedulingIgnoredDuringExecution": [{
78
+                  "weight": 100,
79
+                  "labelSelector": {
80
+                    "matchExpressions": [{
81
+                      "key": "app",
82
+                      "operator": "In",
83
+                      "values": ["cockroachdb"]
84
+                    }]
85
+                  },
86
+                  "topologyKey": "kubernetes.io/hostname"
87
+                }]
88
+              }
89
+            }
90
+        pod.alpha.kubernetes.io/init-containers: '[
91
+            {
92
+                "name": "bootstrap",
93
+                "image": "cockroachdb/cockroach-k8s-init",
94
+                "imagePullPolicy": "IfNotPresent",
95
+                "args": [
96
+                  "-on-start=/on-start.sh",
97
+                  "-service=cockroachdb"
98
+                ],
99
+                "env": [
100
+                  {
101
+                      "name": "POD_NAMESPACE",
102
+                      "valueFrom": {
103
+                          "fieldRef": {
104
+                              "apiVersion": "v1",
105
+                              "fieldPath": "metadata.namespace"
106
+                          }
107
+                      }
108
+                   }
109
+                ],
110
+                "volumeMounts": [
111
+                    {
112
+                        "name": "datadir",
113
+                        "mountPath": "/cockroach/cockroach-data"
114
+                    }
115
+                ]
116
+            }
117
+        ]'
118
+    spec:
119
+      containers:
120
+      - name: cockroachdb
121
+        image: cockroachdb/cockroach
122
+        imagePullPolicy: IfNotPresent
123
+        ports:
124
+        - containerPort: 26257
125
+          name: grpc
126
+        - containerPort: 8080
127
+          name: http
128
+        volumeMounts:
129
+        - name: datadir
130
+          mountPath: /cockroach/cockroach-data
131
+        command:
132
+          - "/bin/bash"
133
+          - "-ecx"
134
+          - |
135
+            CRARGS=("start" "--logtostderr" "--insecure" "--host" "$(hostname -f)" "--http-host" "0.0.0.0")
136
+            if [ ! "$(hostname)" == "cockroachdb-0" ] || \
137
+               [ -e "/cockroach/cockroach-data/cluster_exists_marker" ]
138
+            then
139
+              CRARGS+=("--join" "cockroachdb-public")
140
+            fi
141
+            exec /cockroach/cockroach ${CRARGS[*]}
142
+      terminationGracePeriodSeconds: 60
143
+      volumes:
144
+      - name: datadir
145
+        hostPath:
146
+          path: /storage/cockroachdb

workloads/ansible/shade/k8s/roles/post/templates/dashboard.j2 (+80, -0)

@@ -0,0 +1,80 @@
1
+# Copyright 2015 Google Inc. All Rights Reserved.
2
+#
3
+# Licensed under the Apache License, Version 2.0 (the "License");
4
+# you may not use this file except in compliance with the License.
5
+# You may obtain a copy of the License at
6
+#
7
+#     http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS,
11
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+# See the License for the specific language governing permissions and
13
+# limitations under the License.
14
+
15
+# Configuration to deploy release version of the Dashboard UI.
16
+#
17
+# Example usage: kubectl create -f <this_file>
18
+
19
+kind: Deployment
20
+apiVersion: extensions/v1beta1
21
+metadata:
22
+  labels:
23
+    app: kubernetes-dashboard
24
+  name: kubernetes-dashboard
25
+  namespace: kube-system
26
+spec:
27
+  replicas: 1
28
+  selector:
29
+    matchLabels:
30
+      app: kubernetes-dashboard
31
+  template:
32
+    metadata:
33
+      labels:
34
+        app: kubernetes-dashboard
35
+      # Comment the following annotation if Dashboard must not be deployed on master
36
+      annotations:
37
+        scheduler.alpha.kubernetes.io/tolerations: |
38
+          [
39
+            {
40
+              "key": "dedicated",
41
+              "operator": "Equal",
42
+              "value": "master",
43
+              "effect": "NoSchedule"
44
+            }
45
+          ]
46
+    spec:
47
+      nodeSelector:
48
+        dashboardId: master
49
+      containers:
50
+      - name: kubernetes-dashboard
51
+        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.0
52
+        imagePullPolicy: Always
53
+        ports:
54
+        - containerPort: 9090
55
+          protocol: TCP
56
+        args:
57
+          - --apiserver-host=http://{{ private_ip }}:8080
58
+        livenessProbe:
59
+          httpGet:
60
+            path: /
61
+            port: 9090
62
+          initialDelaySeconds: 30
63
+          timeoutSeconds: 30
64
+---
65
+kind: Service
66
+apiVersion: v1
67
+metadata:
68
+  labels:
69
+    app: kubernetes-dashboard
70
+  name: kubernetes-dashboard
71
+  namespace: kube-system
72
+spec:
73
+  type: NodePort
74
+  clusterIP: {{ app_env.dashboard_service_ip }}
75
+  ports:
76
+  - port: 80
77
+    targetPort: 9090
78
+    nodePort: 30000
79
+  selector:
80
+    app: kubernetes-dashboard

workloads/ansible/shade/k8s/roles/post/templates/dnscontroller.j2 (+151, -0)

@@ -0,0 +1,151 @@
1
+apiVersion: extensions/v1beta1
2
+kind: Deployment
3
+metadata:
4
+  name: kube-dns
5
+  namespace: kube-system
6
+  labels:
7
+    k8s-app: kube-dns
8
+    kubernetes.io/cluster-service: "true"
9
+spec:
10
+  strategy:
11
+    rollingUpdate:
12
+      maxSurge: 10%
13
+      maxUnavailable: 0
14
+  selector:
15
+    matchLabels:
16
+      k8s-app: kube-dns
17
+  template:
18
+    metadata:
19
+      labels:
20
+        k8s-app: kube-dns
21
+      annotations:
22
+        scheduler.alpha.kubernetes.io/critical-pod: ''
23
+        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
24
+    spec:
25
+      dnsPolicy: Default
26
+      volumes:
27
+      - name: kube-dns-config
28
+        hostPath:
29
+          path: /root/.kube/config
30
+      nodeSelector:
31
+        dashboardId: master
32
+      containers:
33
+      - name: kubedns
34
+        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.13.0
35
+        resources:
36
+          limits:
37
+            memory: 170Mi
38
+          requests:
39
+            cpu: 100m
40
+            memory: 70Mi
41
+        livenessProbe:
42
+          httpGet:
43
+            path: /healthcheck/kubedns
44
+            port: 10054
45
+            scheme: HTTP
46
+          initialDelaySeconds: 60
47
+          timeoutSeconds: 5
48
+          successThreshold: 1
49
+          failureThreshold: 2
50
+        readinessProbe:
51
+          httpGet:
52
+            path: /readiness
53
+            port: 8081
54
+            scheme: HTTP
55
+          initialDelaySeconds: 3
56
+          timeoutSeconds: 5
57
+        args:
58
+        - --domain={{ app_env.domain }}.
59
+        - --dns-port=10053
60
+        - --kubecfg-file=/kube-dns-config
61
+        - --kube-master-url=http://{{ private_ip }}:8080
62
+        - --v=2
63
+        env:
64
+        - name: PROMETHEUS_PORT
65
+          value: "10055"
66
+        ports:
67
+        - containerPort: 10053
68
+          name: dns-local
69
+          protocol: UDP
70
+        - containerPort: 10053
71
+          name: dns-tcp-local
72
+          protocol: TCP
73
+        - containerPort: 10055
74
+          name: metrics
75
+          protocol: TCP
76
+        volumeMounts:
77
+        - name: kube-dns-config
78
+          mountPath: /kube-dns-config
79
+      - name: dnsmasq
80
+        image: gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.13.0
81
+        livenessProbe:
82
+          httpGet:
83
+            path: /healthcheck/dnsmasq
84
+            port: 10054
85
+            scheme: HTTP
86
+          initialDelaySeconds: 60
87
+          timeoutSeconds: 5
88
+          successThreshold: 1
89
+          failureThreshold: 5
90
+        args:
91
+        - --cache-size=1000
92
+        - --server=/{{ app_env.domain }}/127.0.0.1#10053
93
+        - --server=/in-addr.arpa/127.0.0.1#10053
94
+        - --server=/ip6.arpa/127.0.0.1#10053
95
+        - --log-facility=-
96
+        ports:
97
+        - containerPort: 53
98
+          name: dns
99
+          protocol: UDP
100
+        - containerPort: 53
101
+          name: dns-tcp
102
+          protocol: TCP
103
+        resources:
104
+          requests:
105
+            cpu: 150m
106
+            memory: 10Mi
107
+      - name: sidecar
108
+        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.13.0
109
+        livenessProbe:
110
+          httpGet:
111
+            path: /metrics
112
+            port: 10054
113
+            scheme: HTTP
114
+          initialDelaySeconds: 60
115
+          timeoutSeconds: 5
116
+          successThreshold: 1
117
+          failureThreshold: 5
118
+        args:
119
+        - --v=2
120
+        - --logtostderr
121
+        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ app_env.domain }},5,A
122
+        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ app_env.domain }},5,A
123
+        ports:
124
+        - containerPort: 10054
125
+          name: metrics
126
+          protocol: TCP
127
+        resources:
128
+          requests:
129
+            memory: 20Mi
130
+            cpu: 10m
131
+---
132
+apiVersion: v1
133
+kind: Service
134
+metadata:
135
+  name: kube-dns
136
+  namespace: kube-system
137
+  labels:
138
+    k8s-app: kube-dns
139
+    kubernetes.io/cluster-service: "true"
140
+    kubernetes.io/name: "KubeDNS"
141
+spec:
142
+  selector:
143
+    k8s-app: kube-dns
144
+  clusterIP: {{ app_env.dns_service_ip }}
145
+  ports:
146
+  - name: dns
147
+    port: 53
148
+    protocol: UDP
149
+  - name: dns-tcp
150
+    port: 53
151
+    protocol: TCP

workloads/ansible/shade/k8s/roles/postprovision/tasks/apply.yml (+87, -0)

@@ -0,0 +1,87 @@
1
+---
2
+- name: Setup node group name for coreos
3
+  set_fact:
4
+    target_interpreter: "/home/core/bin/python"
5
+    wgroups: "cworkers"
6
+    mgroups: "cmasters"
7
+  when: app_env.target_os == "coreos"
8
+
9
+- name: Setup node group name for ubuntu
10
+  set_fact:
11
+    target_interpreter: "python"
12
+    wgroups: "uworkers"
13
+    mgroups: "umasters"
14
+  when: app_env.target_os == "ubuntu"
15
+
16
+- name: Remove the runhosts file
17
+  file:
18
+    path: "{{ playbook_dir }}/run/runhosts"
19
+    state: absent
20
+
21
+- name: Setup host cloud
22
+  lineinfile:
23
+    dest: "{{ playbook_dir }}/run/runhosts"
24
+    create: yes
25
+    insertafter: EOF
26
+    line: "cloud ansible_host=127.0.0.1 ansible_python_interpreter=python"
27
+
28
+- name: Add the node to host group with private IP
29
+  add_host:
30
+    name: "{{ hostvars[item].public_ip }}"
31
+    inter_ip: "{{ hostvars[item].private_ip }}"
32
+    inter_name: "{{ item }}"
33
+    ansible_python_interpreter: "{{ hostvars[item].target_interpreter }}"
34
+    groups: "{{ hostvars[item].targetgroup }}"
35
+  with_items: "{{ groups['prohosts'] }}"
36
+
37
+- name: Remove the k8shosts file
38
+  file:
39
+    path: "{{ playbook_dir }}/run/k8shosts"
40
+    state: absent
41
+
42
+- name: Build up hosts file
43
+  lineinfile:
44
+    dest: "{{ playbook_dir }}/run/k8shosts"
45
+    line: "{{ hostvars[item].inter_ip }} {{ hostvars[item].inter_name }}"
46
+    state: present
47
+    create: yes
48
+  with_flattened:
49
+    - '{{ groups[mgroups] }}'
50
+    - '{{ groups[wgroups] }}'
51
+
52
+- name: Add all the hosts to the file
53
+  lineinfile:
54
+    dest: "{{ playbook_dir }}/run/runhosts"
55
+    create: yes
56
+    insertafter: EOF
57
+    line: >-
58
+      {{ item }} inter_ip={{ hostvars[item].inter_ip }}
59
+      inter_name={{ hostvars[item].inter_name }}
60
+      ansible_python_interpreter={{ target_interpreter }}
61
+  with_items:
62
+    - '{{ groups[mgroups] }}'
63
+    - '{{ groups[wgroups] }}'
64
+
65
+- name: Setup groups in the inventory file
66
+  lineinfile:
67
+    dest: "{{ playbook_dir }}/run/runhosts"
68
+    insertafter: EOF
69
+    line: "{{ item }}"
70
+  with_items:
71
+    - '[{{ mgroups }}]'
72
+    - '{{ groups[mgroups] }}'
73
+    - '[{{ wgroups }}]'
74
+    - '{{ groups[wgroups] }}'
75
+
76
+- name: Wait until servers are up and running
77
+  wait_for:
78
+    host: "{{ item }}"
79
+    port: 22
80
+    state: started
81
+    delay: 15
82
+    connect_timeout: 20
83
+    timeout: 300
84
+  with_items:
85
+    - "{{ groups[mgroups] }}"
86
+    - "{{ groups[wgroups] }}"
87
+

workloads/ansible/shade/k8s/roles/postprovision/tasks/destroy.yml (+20, -0)

@@ -0,0 +1,20 @@
1
+---
2
+- name: Delete key pairs
3
+  os_keypair:
4
+    state: "absent"
5
+    auth: "{{ auth }}"
6
+    region_name: "{{ app_env.region_name }}"
7
+    availability_zone: "{{ app_env.availability_zone }}"
8
+    validate_certs: "{{ app_env.validate_certs }}"
9
+    name: "k8s"
10
+    public_key_file: "{{ app_env.public_key_file }}"
11
+
12
+- name: Delete security group
13
+  os_security_group:
14
+    state: absent
15
+    auth: "{{ auth }}"
16
+    region_name: "{{ app_env.region_name }}"
17
+    availability_zone: "{{ app_env.availability_zone }}"
18
+    validate_certs: "{{ app_env.validate_certs }}"
19
+    name: k8s_sg
20
+    description: security group for k8s

workloads/ansible/shade/k8s/roles/prepare/tasks/apply.yml (+92, -0)

@@ -0,0 +1,92 @@
1
+---
2
+- name: Ensure we have a working directory to save runtime files
3
+  file: "path={{ playbook_dir }}/run state=directory"
4
+
5
+- name: Setup host variables for coreos
6
+  set_fact:
7
+    target_interpreter: "/home/core/bin/python"
8
+    wgroups: "cworkers"
9
+    mgroups: "cmasters"
10
+  when: app_env.target_os == "coreos"
11
+
12
+- name: Setup host variables for ubuntu
13
+  set_fact:
14
+    target_interpreter: "python"
15
+    wgroups: "uworkers"
16
+    mgroups: "umasters"
17
+  when: app_env.target_os == "ubuntu"
18
+
19
+- name: Retrieve specified flavor
20
+  os_flavor_facts:
21
+    auth: "{{ auth }}"
22
+    region_name: "{{ app_env.region_name }}"
23
+    availability_zone: "{{ app_env.availability_zone }}"
24
+    validate_certs: "{{ app_env.validate_certs }}"
25
+    name: "{{ app_env.flavor_name }}"
26
+
27
+- name: Create a key-pair
28
+  os_keypair:
29
+    state: "present"
30
+    auth: "{{ auth }}"
31
+    region_name: "{{ app_env.region_name }}"
32
+    availability_zone: "{{ app_env.availability_zone }}"
33
+    validate_certs: "{{ app_env.validate_certs }}"
34
+    name: "k8s"
35
+    public_key_file: "{{ app_env.public_key_file }}"
36
+
37
+- name: Create security group
38
+  os_security_group:
39
+    state: present
40
+    auth: "{{ auth }}"
41
+    region_name: "{{ app_env.region_name }}"
42
+    availability_zone: "{{ app_env.availability_zone }}"
43
+    validate_certs: "{{ app_env.validate_certs }}"
44
+    name: k8s_sg
45
+    description: security group for k8s
46
+
47
+- name: Add security rules
48
+  os_security_group_rule:
49
+    state: present
50
+    auth: "{{ auth }}"
51
+    region_name: "{{ app_env.region_name }}"
52
+    availability_zone: "{{ app_env.availability_zone }}"
53
+    validate_certs: "{{ app_env.validate_certs }}"
54
+    security_group: k8s_sg
55
+    protocol: "{{ item.protocol }}"
56
+    direction: "{{ item.dir }}"
57
+    port_range_min: "{{ item.p_min }}"
58
+    port_range_max: "{{ item.p_max }}"
59
+    remote_ip_prefix: 0.0.0.0/0
60
+  with_items:
61
+    - { p_min: 22, p_max: 22, dir: ingress, protocol: tcp }
62
+    - { p_min: 80, p_max: 80, dir: ingress, protocol: tcp }
63
+    - { p_min: 53, p_max: 53, dir: ingress, protocol: udp }
64
+    - { p_min: 53, p_max: 53, dir: egress, protocol: udp }
65
+    - { p_min: 8080, p_max: 8080, dir: ingress, protocol: tcp }
66
+    - { p_min: 8285, p_max: 8285, dir: ingress, protocol: udp }
67
+    - { p_min: 2379, p_max: 2380, dir: ingress, protocol: tcp }
68
+    - { p_min: 2379, p_max: 2380, dir: egress, protocol: tcp }
69
+    - { p_min: 10250, p_max: 10250, dir: ingress, protocol: tcp }
70
+    - { p_min: 30000, p_max: 32767, dir: ingress, protocol: tcp }
71
+    - { p_min: -1, p_max: -1, dir: ingress, protocol: icmp }
72
+    - { p_min: -1, p_max: -1, dir: egress, protocol: icmp }
73
+
74
+- name: Add provisioning host group
75
+  add_host:
76
+    name: "worker-{{ item }}"
77
+    targetgroup: "{{ wgroups }}"
78
+    ansible_host: "127.0.0.1"
79
+    ansible_python_interpreter: "python"
80
+    groups: "prohosts"
81
+  with_sequence: count={{ app_env.stack_size - 1 }}
82
+  no_log: True
83
+
84
+- name: Add provisioning host group
85
+  add_host:
86
+    name: "master"
87
+    targetgroup: "{{ mgroups }}"
88
+    ansible_host: "127.0.0.1"
89
+    ansible_python_interpreter: "python"
90
+    groups: "prohosts"
91
+  no_log: True
92
+

workloads/ansible/shade/k8s/roles/prepare/tasks/destroy.yml (+34, -0)

@@ -0,0 +1,34 @@
1
+---
2
+- name: Setup host variables for coreos
3
+  set_fact:
4
+    target_interpreter: "/home/core/bin/python"
5
+    wgroups: "cworkers"
6
+    mgroups: "cmasters"
7
+  when: app_env.target_os == "coreos"
8
+
9
+- name: Setup host variables for ubuntu
10
+  set_fact:
11
+    target_interpreter: "python"
12
+    wgroups: "uworkers"
13
+    mgroups: "umasters"
14
+  when: app_env.target_os == "ubuntu"
15
+
16
+- name: Add provisioning host group
17
+  add_host:
18
+    name: "worker-{{ item }}"
19
+    targetgroup: "{{ wgroups }}"
20
+    ansible_host: "127.0.0.1"
21
+    ansible_python_interpreter: "python"
22
+    groups: "prohosts"
23
+  with_sequence: count={{ app_env.stack_size - 1 }}
24
+  no_log: True
25
+
26
+- name: Add provisioning host group
27
+  add_host:
28
+    name: "master"
29
+    targetgroup: "{{ mgroups }}"
30
+    ansible_host: "127.0.0.1"
31
+    ansible_python_interpreter: "python"
32
+    groups: "prohosts"
33
+  no_log: True
34
+

workloads/ansible/shade/k8s/roles/provision/tasks/apply.yml (+72, -0)

@@ -0,0 +1,72 @@
1
+---
2
+- name: Setup variables
3
+  set_fact:
4
+    target_interpreter: "/home/core/bin/python"
5
+    tp_path: "roles/provision/templates/{{ app_env.target_os }}.j2"
6
+  when: app_env.target_os == "coreos"
7
+
8
+- name: Setup variables
9
+  set_fact:
10
+    target_interpreter: "python"
11
+    tp_path: "roles/provision/templates/{{ app_env.target_os }}.j2"
12
+  when: app_env.target_os == "ubuntu"
13
+
14
+- name: Create an OpenStack virtual machine
15
+  os_server:
16
+    state: "present"
17
+    auth: "{{ auth }}"
18
+    region_name: "{{ app_env.region_name }}"
19
+    availability_zone: "{{ app_env.availability_zone }}"
20
+    validate_certs: "{{ app_env.validate_certs }}"
21
+    name: "{{ inventory_hostname }}"
22
+    image: "{{ app_env.image_name }}"
23
+    key_name: "k8s"
24
+    timeout: 200
25
+    flavor: "{{ app_env.flavor_name }}"
26
+    network: "{{ app_env.private_net_name }}"
27
+    floating_ip_pools: "{{ app_env.public_net_name | default(omit) }}"
28
+    reuse_ips: False
29
+    userdata: "{{ lookup('template', tp_path) }}"
30
+    config_drive: "{{ app_env.config_drive | default('no') }}"
31
+    security_groups: k8s_sg
32
+    meta:
33
+      hostname: "{{ inventory_hostname }}"
34
+  register: osvm
35
+
36
+- name: Setup variables for generating host groups
37
+  set_fact:
38
+    inter_name: "{{ osvm.openstack.name }}"
39
+    public_ip: "{{ osvm.openstack.public_v4 }}"
40
+    private_ip: "{{ osvm.openstack.private_v4 }}"
41
+
42
+- name: Use public ip address when private ip is empty
43
+  set_fact:
44
+    private_ip: "{{ osvm.openstack.public_v4 }}"
45
+  when: osvm.openstack.private_v4 == ""
46
+
47
+- name: Use private ip address when public ip is empty
48
+  set_fact:
49
+    public_ip: "{{ osvm.openstack.private_v4 }}"
50
+  when: osvm.openstack.public_v4 == ""
51
+
52
+- name: Create volumes for the node
53
+  os_volume:
54
+    state: present
55
+    auth: "{{ auth }}"
56
+    region_name: "{{ app_env.region_name }}"
57
+    availability_zone: "{{ app_env.availability_zone }}"
58
+    validate_certs: "{{ app_env.validate_certs }}"
59
+    size: "{{ app_env.volume_size }}"
60
+    wait: yes
61
+    display_name: "{{ inventory_hostname }}_volume"
62
+
63
+- name: Attach a volume to the node
64
+  os_server_volume:
65
+    state: present
66
+    auth: "{{ auth }}"
67
+    region_name: "{{ app_env.region_name }}"
68
+    availability_zone: "{{ app_env.availability_zone }}"
69
+    validate_certs: "{{ app_env.validate_certs }}"
70
+    server: "{{ inventory_hostname }}"
71
+    volume: "{{ inventory_hostname }}_volume"
72
+    device: "{{ app_env.block_device_name }}"

workloads/ansible/shade/k8s/roles/provision/tasks/destroy.yml (+27, -0)

@@ -0,0 +1,27 @@
1
+---
2
+
3
+- name: Destroy the OpenStack VM
4
+  os_server:
5
+    state: "absent"
6
+    auth: "{{ auth }}"
7
+    region_name: "{{ app_env.region_name }}"
8
+    availability_zone: "{{ app_env.availability_zone }}"
9
+    validate_certs: "{{ app_env.validate_certs }}"
10
+    name: "{{ inventory_hostname }}"
11
+    image: "{{ app_env.image_name }}"
12
+    delete_fip: True
13
+    key_name: "k8s"
14
+    timeout: 200
15
+    network: "{{ app_env.private_net_name }}"
16
+    meta:
17
+      hostname: "{{ inventory_hostname }}"
18
+
19
+- name: Destroy the OpenStack volume
20
+  os_volume:
21
+    state: absent
22
+    auth: "{{ auth }}"
23
+    region_name: "{{ app_env.region_name }}"
24
+    availability_zone: "{{ app_env.availability_zone }}"
25
+    validate_certs: "{{ app_env.validate_certs }}"
26
+    wait: yes
27
+    display_name: "{{ inventory_hostname }}_volume"

workloads/ansible/shade/k8s/roles/provision/templates/coreos.j2 (+2, -0)

@@ -0,0 +1,2 @@
1
+#cloud-config
2
+hostname: {{ inventory_hostname }}.

workloads/ansible/shade/k8s/roles/provision/templates/ubuntu.j2 (+6, -0)

@@ -0,0 +1,6 @@
1
+#cloud-config
2
+hostname: {{ inventory_hostname }}.
3
+packages:
4
+ - python
5
+ - bridge-utils
6
+

workloads/ansible/shade/k8s/roles/worker/tasks/main.yml (+150, -0)

@@ -0,0 +1,150 @@
1
+---
2
+- name: Setup a few variables for the coreos target
3
+  set_fact:
4
+    public_ip: "{{ groups['cmasters'][0] }}"
5
+    private_ip: "{{ hostvars[groups['cmasters'][0]].inter_ip }}"
6
+    this_ip: "{{ hostvars[ansible_host].inter_ip }}"
7
+    service_path: "/etc/systemd/system/"
8
+  when: app_env.target_os == "coreos"
9
+
10
+- name: Setup a few variables for the ubuntu target
11
+  set_fact:
12
+    public_ip: "{{ groups['umasters'][0] }}"
13
+    private_ip: "{{ hostvars[groups['umasters'][0]].inter_ip }}"
14
+    this_ip: "{{ hostvars[ansible_host].inter_ip }}"
15
+    service_path: "/lib/systemd/system/"
16
+  when: app_env.target_os == "ubuntu"
17
+
18
+- stat: path=/tmp/diskflag
19
+  register: diskflag
20
+
21
+- shell: parted -s "{{ app_env.block_device_name }}" mklabel msdos
22
+  when: diskflag.stat.exists == false
23
+
24
+- shell: parted -s "{{ app_env.block_device_name }}" mkpart primary ext4 1049kb 100%
25
+  when: diskflag.stat.exists == false
26
+
27
+- lineinfile: dest=/tmp/diskflag line="disk is now partitioned!" create=yes
28
+
29
+- name: Create file system on the volume
30
+  filesystem: fstype=ext4 dev="{{ app_env.block_device_name }}1"
31
+
32
+- name: Mount the volume at /storage
33
+  mount: name=/storage src="{{ app_env.block_device_name }}1" fstype=ext4 state=mounted
34
+
35
+- name: Get the network interface name
36
+  shell: >-
37
+    ip -4 -o addr | grep "{{ this_ip }}" | awk '{print $2}'
38
+  register: nodeif_name
39
+
40
+- name: List all k8s service on the node
41
+  stat: "path=/opt/bin/{{ item }}"
42
+  with_items:
43
+    - kubelet
44
+    - kubectl
45
+    - kube-proxy
46
+    - flanneld
47
+  register: k8s_binaries
48
+
49
+- name: Pull k8s binaries from the master
50
+  command: >-
51
+    scp -i "~/.ssh/id_rsa" -o "StrictHostKeyChecking no" "{{ app_env.
52
+    ssh_user }}@{{ private_ip }}:/opt/bin/{{ item.item }}"
53
+    "/opt/bin/{{ item.item }}"
54
+  with_items: " {{ k8s_binaries.results }} "
55
+  when: item.stat.exists == false
56
+  no_log: True
57
+
58
+- name: Setup services for worker node
59
+  template:
60
+    src: roles/common/templates/k8s.service.j2
61
+    dest: "{{ service_path }}{{ item }}.service"
62
+    mode: 0644
63
+  with_items:
64
+    - flanneld
65
+    - kubelet
66
+    - kube-proxy
67
+
68
+- name: Setup kubeconfig for each node
69
+  template:
70
+    src: roles/worker/templates/kubeconfig.j2
71
+    dest: "~/.kube/config"
72
+    mode: 0600
73
+
74
+- name: Setup worker node service variables
75
+  set_fact:
76
+    kubelet_params: >-
77
+      --api-servers={{ private_ip }}:8080
78
+      --container-runtime=docker
79
+      --cluster-dns={{ app_env.dns_service_ip }}
80
+      --cluster-domain={{ app_env.domain }}
81
+      --hostname-override={{ inter_name }}
82
+      --resolv-conf=''
83
+    proxy_params: >-
84
+      --master={{ private_ip }}:8080
85
+      --cluster-cidr={{ app_env.pod_network.Network }}
86
+    flanneld_params: >-
87
+      -iface={{ nodeif_name.stdout }}
88
+      -etcd-endpoints=http://{{ private_ip }}:2379
89
+      -ip-masq=false
90
+      -etcd-prefix=/coreos.com/network/
91
+
92
+- name: Configure the worker node services
93
+  template:
94
+    src: roles/common/templates/k8s.conf.j2
95
+    dest: "/etc/kubernetes/{{ item.name }}"
96
+    mode: 0644
97
+  with_items:
98
+    - { name: "kubelet", value: "{{ kubelet_params }}" }
99
+    - { name: "kube-proxy", value: "{{ proxy_params }}" }
100
+    - { name: "flanneld", value: "{{ flanneld_params }}" }
101
+
102
+- name: Start the flanneld service
103
+  service:
104
+    name: flanneld
105
+    enabled: yes
106
+    state: started
107
+
108
+- name: Wait for the flannel to setup the subnets
109
+  wait_for:
110
+    path: /run/flannel/subnet.env
111
+    search_regex: FLANNEL_SUBNET
112
+
113
+- name: Get the bip address
114
+  shell: >-
115
+    . /run/flannel/subnet.env && echo $FLANNEL_SUBNET
116
+  register: bip
117
+
118
+- name: Get the mtu
119
+  shell: >-
120
+    . /run/flannel/subnet.env && echo $FLANNEL_MTU
121
+  register: mtu
122
+
123
+- name: Setup Docker service file
124
+  template:
125
+    src: "roles/worker/templates/docker.{{ app_env.target_os }}.j2"
126
+    dest: "{{ service_path }}docker.service"
127
+
128
+- name: Reload daemon service
129
+  command: systemctl daemon-reload
130
+
131
+- name: Start the worker services
132
+  service:
133
+    name: "{{ item }}"
134
+    enabled: yes
135
+    state: restarted
136
+  with_items:
137
+    - docker.socket
138
+    - docker
139
+    - kubelet
140
+    - kube-proxy
141
+
142
+- name: Load cockroachdb images
143
+  command: "{{ item }}"
144
+  with_items:
145
+    - "wget -q -O /opt/bin/cockroachdb.tar.gz {{ app_env.cockroachdb_repo }}"
146
+    - "tar xf /opt/bin/cockroachdb.tar.gz -C /opt/bin"
147
+    - "docker load --input /opt/bin/cockroachdb.tar"
148
+  when: app_env.cockroachdb_repo != ""
149
+  no_log: True
150
+

workloads/ansible/shade/k8s/roles/worker/templates/docker.coreos.j2 (+27, -0)

@@ -0,0 +1,27 @@
1
+[Unit]
2
+Description=Docker Application Container Engine
3
+Documentation=http://docs.docker.com
4
+After=containerd.service docker.socket network.target
5
+Requires=containerd.service docker.socket
6
+
7
+[Service]
8
+Type=notify
9
+Environment="DOCKER_OPT_BIP=--bip={{ bip.stdout }}"
10
+Environment="DOCKER_OPT_MTU=--mtu={{ mtu.stdout }}"
11
+
12
+ExecStart=/usr/lib/coreos/dockerd --host=fd:// \
13
+  --containerd=/var/run/docker/libcontainerd/docker-containerd.sock \
14
+  $DOCKER_OPTS $DOCKER_CGROUPS $DOCKER_OPT_BIP $DOCKER_OPT_MTU \
15
+  $DOCKER_OPT_IPMASQ
16
+
17
+ExecReload=/bin/kill -s HUP $MAINPID
18
+
19
+LimitNOFILE=1048576
20
+LimitNPROC=infinity
21
+LimitCORE=infinity
22
+TasksMax=infinity
23
+TimeoutStartSec=0
24
+Delegate=yes
25
+
26
+[Install]
27
+WantedBy=multi-user.target

workloads/ansible/shade/k8s/roles/worker/templates/docker.ubuntu.j2 (+25, -0)

@@ -0,0 +1,25 @@
1
+[Unit]
2
+Description=Docker Application Container Engine
3
+Documentation=http://docs.docker.com
4
+After=docker.socket network.target
5
+Requires=docker.socket
6
+
7
+[Service]
8
+Type=notify
9
+Environment="DOCKER_OPT_BIP=--bip={{ bip.stdout }}"
10
+Environment="DOCKER_OPT_MTU=--mtu={{ mtu.stdout }}"
11
+
12
+ExecStart=/usr/bin/dockerd -H fd:// \
13
+  $DOCKER_OPTS $DOCKER_OPT_BIP $DOCKER_OPT_MTU $DOCKER_OPT_IPMASQ
14
+
15
+ExecReload=/bin/kill -s HUP $MAINPID
16
+
17
+LimitNOFILE=1048576
18
+LimitNPROC=infinity
19
+LimitCORE=infinity
20
+TasksMax=infinity
21
+TimeoutStartSec=0
22
+Delegate=yes
23
+
24
+[Install]
25
+WantedBy=multi-user.target

workloads/ansible/shade/k8s/roles/worker/templates/kubeconfig.j2 (+15, -0)

@@ -0,0 +1,15 @@
1
+apiVersion: v1
2
+clusters:
3
+- cluster:
4
+    insecure-skip-tls-verify: true
5
+    server: http://{{ private_ip }}:8080
6
+  name: k8sonos
7
+contexts:
8
+- context:
9
+    cluster: k8sonos
10
+    user: ""
11
+  name: k8s
12
+current-context: k8s
13
+kind: Config
14
+preferences: {}
15
+users: []

workloads/ansible/shade/k8s/site.yml (+131, -0)

@@ -0,0 +1,131 @@
1
+---
2
+- name: Get start timestamp
3
+  hosts: cloud
4
+  connection: local
5
+  tasks:
6
+    - set_fact:
7
+        starttime: "{{ ansible_date_time }}"
8
+  tags: "info"
9
+
10
+- name: Prepare to run the workload
11
+  hosts: cloud
12
+  connection: local
13
+  vars_files:
14
+    - "vars/{{ env }}.yml"
15
+  tasks:
16
+    - include: "roles/prepare/tasks/{{ action }}.yml"
17
+  roles:
18
+    - prepare
19
+  tags: "{{ action }}"
20
+
21
+- name: Provision servers
22
+  hosts: prohosts
23
+  connection: local
24
+  strategy: free
25
+  vars_files:
26
+    - "vars/{{ env }}.yml"
27
+  tasks:
28
+    - include: "roles/provision/tasks/{{ action }}.yml"
29
+  roles:
30
+    - provision
31
+  tags: "{{ action }}"
32
+
33
+- name: Post provision process
34
+  hosts: cloud
35
+  connection: local
36
+  vars_files:
37
+    - "vars/{{ env }}.yml"
38
+  tasks:
39
+    - include: "roles/postprovision/tasks/{{ action }}.yml"
40
+  roles:
41
+    - postprovision
42
+  tags: "{{ action }}"
43
+
44
+- name: Bootstrap all the target nodes
45
+  hosts: cmasters, cworkers
46
+  gather_facts: False
47
+  user: "{{ app_env.ssh_user }}"
48
+  become: true
49
+  become_user: root
50
+  strategy: free
51
+  vars_files:
52
+    - "vars/{{ env }}.yml"
53
+  roles:
54
+    - vmware.coreos-bootstrap
55
+  tags: "apply"
56
+
57
+- name: Install required packages for all nodes
58
+  hosts: cworkers, cmasters, uworkers, umasters
59
+  gather_facts: False
60
+  user: "{{ app_env.ssh_user }}"
61
+  become: true
62
+  become_user: root
63
+  strategy: free
64
+  vars_files:
65
+    - "vars/{{ env }}.yml"
66
+  roles:
67
+    - common
68
+  environment: "{{ proxy_env }}"
69
+  tags: "common"
70
+
71
+- name: Setup master
72
+  hosts: cmasters, umasters
73
+  gather_facts: true
74
+  user: "{{ app_env.ssh_user }}"
75
+  become: true
76
+  become_user: root
77
+  vars_files:
78
+    - "vars/{{ env }}.yml"
79
+  roles:
80
+    - master
81
+  environment: "{{ proxy_env }}"
82
+  tags: "master"
83
+
84
+- name: Setup workers
85
+  hosts: cworkers, cmasters, uworkers, umasters
86
+  gather_facts: true
87
+  user: "{{ app_env.ssh_user }}"
88
+  become: true
89
+  become_user: root
90
+  strategy: free
91
+  vars_files:
92
+    - "vars/{{ env }}.yml"
93
+  roles:
94
+    - worker
95
+  environment: "{{ proxy_env }}"
96
+  tags: "worker"
97
+
98
+- name: Post configurations
99
+  hosts: cmasters, umasters
100
+  gather_facts: true
101
+  user: "{{ app_env.ssh_user }}"
102
+  become: true
103
+  become_user: root
104
+  vars_files:
105
+    - "vars/{{ env }}.yml"
106
+  tasks:
107
+    - include: "roles/post/tasks/{{ action }}.yml"
108
+  roles:
109
+    - post
110
+  environment: "{{ proxy_env }}"
111
+  tags: "post"
112
+
113
+- name: Inform the installer
114
+  hosts: cloud
115
+  connection: local
116
+  tasks:
117
+    - debug:
118
+        msg: >-
119
+          Access kubernetes dashboard at
120
+          http://{{ groups['umasters'][0] }}:30000
121
+      when: groups['umasters'] is defined
122
+    - debug:
123
+        msg: >-
124
+          Access kubernetes dashboard at
125
+          http://{{ groups['cmasters'][0] }}:30000
126
+      when: groups['cmasters'] is defined
127
+    - debug:
128
+        msg: >-
129
+          The workload started at {{ hostvars.cloud.starttime.time }},
130
+          ended at {{ ansible_date_time.time }}
131
+  tags: "info"

workloads/ansible/shade/k8s/vars/coreos.yml (+47, -0)

@@ -0,0 +1,47 @@
1
+---
2
+# This is an example configuration file when using the coreos image.
3
+horizon_url: "http://9.30.217.9"
4
+
5
+auth: {
6
+  auth_url: "http://9.30.217.9:5000/v3",
7
+  username: "demo",
8
+  password: "{{ password }}",
9
+  domain_name: "default",
10
+  project_name: "demo"
11
+}
12
+
13
+app_env: {
14
+  target_os: "coreos",
15
+  image_name: "coreos",
16
+  region_name: "RegionOne",
17
+  availability_zone: "nova",
18
+  validate_certs: False,
19
+  ssh_user: "core",
20
+  private_net_name: "demonet",
21
+  flavor_name: "m1.large",
22
+  public_key_file: "/home/ubuntu/.ssh/interop.pub",
23
+  private_key_file: "/home/ubuntu/.ssh/interop",
24
+  stack_size: 4,
25
+  volume_size: 1,
26
+  block_device_name: "/dev/vdb",
27
+
28
+  domain: "cluster.local",
29
+  pod_network: {
30
+    Network: "172.17.0.0/16",
31
+    SubnetLen: 24,
32
+    SubnetMin: "172.17.0.0",
33
+    SubnetMax: "172.17.255.0",
34
+    Backend: {
35
+      Type: "udp",
36
+      Port: 8285
37
+    }
38
+  },
39
+  service_ip_range: "172.16.0.0/24",
40
+  dns_service_ip: "172.16.0.4",
41
+  dashboard_service_ip: "172.16.0.5",
42
+
43
+  # The following section shows an example when using a local repo.
44
+  cockroachdb_repo: "http://10.0.10.12/cockroachdb.tar.gz",
45
+  flannel_repo: "http://10.0.10.12/flannel-v0.7.0-linux-amd64.tar.gz",
46
+  k8s_repo: "http://10.0.10.12/v1.5.4/"
47
+}

workloads/ansible/shade/k8s/vars/ubuntu.yml (+47, -0)

@@ -0,0 +1,47 @@
1
+---
2
+# This is an example configuration file when using the ubuntu image.
3
+horizon_url: "http://9.30.217.9"
4
+
5
+auth: {
6
+  auth_url: "http://9.30.217.9:5000/v3",
7
+  username: "demo",
8
+  password: "{{ password }}",
9
+  domain_name: "default",
10
+  project_name: "demo"
11
+}
12
+
13
+app_env: {
14
+  target_os: "ubuntu",
15
+  image_name: "ubuntu-16.04",
16
+  region_name: "RegionOne",
17
+  availability_zone: "nova",
18
+  validate_certs: False,
19
+  ssh_user: "ubuntu",
20
+  private_net_name: "demonet",
21
+  flavor_name: "m1.medium",
22
+  public_key_file: "/home/ubuntu/.ssh/interop.pub",
23
+  private_key_file: "/home/ubuntu/.ssh/interop",
24
+  stack_size: 3,
25
+  volume_size: 1,
26
+  block_device_name: "/dev/vdb",
27
+
28
+  domain: "cluster.local",
29
+  pod_network: {
30
+    Network: "172.17.0.0/16",
31
+    SubnetLen: 24,
32
+    SubnetMin: "172.17.0.0",
33
+    SubnetMax: "172.17.255.0",
34
+    Backend: {
35
+      Type: "udp",
36
+      Port: 8285
37
+    }
38
+  },
39
+  service_ip_range: "172.16.0.0/24",
40
+  dns_service_ip: "172.16.0.4",
41
+  dashboard_service_ip: "172.16.0.5",
42
+
43
+  # The following section shows an example when using remote repos.
44
+  cockroachdb_repo: "",
45
+  flannel_repo: "https://github.com/coreos/flannel/releases/download/v0.7.0/flannel-v0.7.0-linux-amd64.tar.gz",
46
+  k8s_repo: "https://storage.googleapis.com/kubernetes-release/release/v1.5.3/bin/linux/amd64/"
47
+}
