
Added cockroachdb cluster setup across multiple clouds

This patch does the following:
1. Added the apps role so that k8s app deployments can be placed in it.
2. Added the cockroachdb-init-pod deployment in the post role, since it has
   to be deployed first.
3. Added the cockroachdb-pod deployment in the apps role.
4. Allowed this workload to stand up a standalone CockroachDB cluster or to
   join an existing one (see the example after this list).
5. Added the CockroachDB load generator container so that once the cluster
   is started, some load is generated.
6. Added a way to pre-allocate floating IP addresses so that the workload
   can use pre-allocated floating IPs for the VMs.
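
For illustration, the choice between standalone and join is driven by the new
app_setting block (see vars/coreos.yml and vars/ubuntu.yml below). A
hypothetical configuration for a second cloud that joins a cluster whose first
node is reachable at 172.24.4.10 (an example address, not part of this patch)
would look roughly like:

  app_setting: {
    public_node: "172.24.4.10",
    own_cluster: False,
    ips: {
      master: [],
      worker-1: [],
      worker-2: []
    }
  },

With own_cluster set to True and public_node left empty, the workload
bootstraps its own cluster instead.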

Change-Id: Ifa9eeb9d761d9801cab580445e6c43c8cf1dfdaa
Tong Li · 2 years ago · f455064066

workloads/ansible/shade/k8s/roles/apps/tasks/main.yml  (+42 / -0)

@@ -0,0 +1,42 @@
+---
+- name: Setup few variables for coreos target
+  set_fact:
+    master_ip: "{{ groups['cmasters'][0] }}"
+    public_ip: "{{ ansible_host }}"
+  when: app_env.target_os == "coreos"
+
+- name: Setup few variables for ubuntu target
+  set_fact:
+    master_ip: "{{ groups['umasters'][0] }}"
+    public_ip: "{{ ansible_host }}"
+  when: app_env.target_os == "ubuntu"
+
+- name: Setup private IP variable for master node
+  set_fact:
+    private_ip: "{{ hostvars[master_ip].inter_ip }}"
+
+- name: Setup first node IP when stand alone
+  set_fact:
+    first_node_ip: "{{ master_ip }}"
+  when: app_env.app_setting.own_cluster == True
+
+- name: Setup first node IP when join others
+  set_fact:
+    first_node_ip: "{{ app_env.app_setting.public_node }}"
+  when: app_env.app_setting.own_cluster == False
+
+- name: Upload app configuration files
+  template:
+    src: "roles/apps/templates/{{ item }}.j2"
+    dest: "/etc/kubernetes/{{ item }}.yaml"
+    mode: 0644
+  with_items:
+    - cockroachdb-pod
+
+- name: Create the app pod
+  command: >-
+    /opt/bin/kubectl --server="{{ private_ip }}:8080" create
+    -f "/etc/kubernetes/{{ item }}.yaml"
+  with_items:
+    - cockroachdb-pod
+
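
As a quick sanity check after this role runs, a follow-up task in the same
style could list the pods; this is only an illustrative sketch (not part of
the patch) that reuses the private_ip fact set above and the app=cockroachdb
label applied by the templates:

# Illustrative only: list the cockroachdb pods via the master's insecure API port
- name: Check that the cockroachdb pods are running
  command: >-
    /opt/bin/kubectl --server="{{ private_ip }}:8080"
    get pods -l app=cockroachdb -o wide
  register: pod_status

- name: Show the pod status
  debug: var=pod_status.stdout_lines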

workloads/ansible/shade/k8s/roles/apps/templates/cockroachdb-pod.j2  (+45 / -0)

@@ -0,0 +1,45 @@
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cockroachdb-{{ inter_name }}
+  labels:
+    app: cockroachdb
+spec:
+  nodeName: {{ inter_name }}
+  restartPolicy: Always
+  containers:
+  - name: cockroachdb
+    image: cockroachdb/cockroach
+    imagePullPolicy: IfNotPresent
+    ports:
+    - containerPort: 26257
+      hostPort: 26257
+      name: grpc
+    - containerPort: 8080
+      hostPort: 8090
+      name: http
+    volumeMounts:
+    - name: datadir
+      mountPath: /cockroach/cockroach-data
+    command:
+      - "/bin/bash"
+      - "-ecx"
+      - |
+        CRARGS=("start" "--logtostderr" "--insecure" "--http-host" "0.0.0.0" )
+        CRARGS+=("--advertise-host" "{{ public_ip }}" )
+        CRARGS+=("--locality" "cloud={{ env }}")
+        CRARGS+=("--join" "{{ first_node_ip }}")
+        exec /cockroach/cockroach ${CRARGS[*]}
+  - name: loadgen
+    image: cockroachdb/interop-demo:1.1
+    imagePullPolicy: IfNotPresent
+    command:
+    - /kv
+    - --read-percent=50
+    - --max-rate=500
+  terminationGracePeriodSeconds: 30
+  volumes:
+  - name: datadir
+    hostPath:
+      path: /storage/cockroachdb
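
To make the Jinja2 variables concrete: assuming env is coreos, this node's
floating IP (public_ip) is 172.24.4.11, and the first node (first_node_ip) is
172.24.4.10 (all illustrative values), the command block above renders roughly
to:

    command:
      - "/bin/bash"
      - "-ecx"
      - |
        CRARGS=("start" "--logtostderr" "--insecure" "--http-host" "0.0.0.0" )
        CRARGS+=("--advertise-host" "172.24.4.11" )
        CRARGS+=("--locality" "cloud=coreos")
        CRARGS+=("--join" "172.24.4.10")
        exec /cockroach/cockroach ${CRARGS[*]}

Note that the admin UI listens on 8080 inside the container but is published
on host port 8090, which is why the prepare role change below also opens 8090
in the security group.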

workloads/ansible/shade/k8s/roles/post/tasks/apply.yml  (+6 / -2)

@@ -12,7 +12,7 @@
   with_items:
     - dnscontroller
     - dashboard
-    - cockroachdb
+    - cockroachdb-init-pod
 
 - name: Label the master node
   command: >-
@@ -26,5 +26,9 @@
   with_items:
     - dnscontroller
     - dashboard
-    - cockroachdb
 
+- name: Setup first cockroachdb node
+  command: >-
+    /opt/bin/kubectl --server={{ private_ip }}:8080 create
+    -f /etc/kubernetes/cockroachdb-init-pod.yaml
+  when: app_env.app_setting.own_cluster == True

workloads/ansible/shade/k8s/roles/post/templates/cockroachdb-init-pod.j2  (+44 / -0)

@@ -0,0 +1,44 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cockroachdb-{{ inter_name }}
+  labels:
+    app: cockroachdb
+spec:
+  nodeName: {{ inter_name }}
+  restartPolicy: Always
+  containers:
+  - name: cockroachdb
+    image: cockroachdb/cockroach
+    imagePullPolicy: IfNotPresent
+    ports:
+    - containerPort: 26257
+      hostPort: 26257
+      name: grpc
+    - containerPort: 8080
+      hostPort: 8090
+      name: http
+    volumeMounts:
+    - name: datadir
+      mountPath: /cockroach/cockroach-data
+    command:
+      - "/bin/bash"
+      - "-ecx"
+      - |
+        CRARGS=("start" "--logtostderr" "--insecure" "--http-host")
+        CRARGS+=("0.0.0.0" "--advertise-host" "{{ public_ip }}")
+        CRARGS+=("--locality" "cloud={{ env }}")
+        exec /cockroach/cockroach ${CRARGS[*]}
+  - name: loadgen
+    image: cockroachdb/interop-demo:1.1
+    imagePullPolicy: IfNotPresent
+    command:
+    - /kv
+    - --splits=100
+    - --read-percent=50
+    - --max-rate=500
+  terminationGracePeriodSeconds: 30
+  volumes:
+  - name: datadir
+    hostPath:
+      path: /storage/cockroachdb
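
Compared to the cockroachdb-pod template in the apps role, this init pod
passes no --join flag, so the first node bootstraps a brand-new cluster; the
only other difference is the extra --splits=100 argument given to the load
generator. Once the init pod is up, membership could be checked from the
master with a task like the one below. This is only a hedged sketch, not part
of the patch: the pod name depends on inter_name (shown here as the
hypothetical cockroachdb-master) and it assumes the stock cockroach CLI inside
the container.

# Illustrative only: ask the first node for its view of cluster membership
- name: Check cockroachdb cluster membership
  command: >-
    /opt/bin/kubectl --server={{ private_ip }}:8080
    exec cockroachdb-master -c cockroachdb --
    /cockroach/cockroach node status --insecure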

workloads/ansible/shade/k8s/roles/prepare/tasks/apply.yml  (+2 / -1)

@@ -63,11 +63,12 @@
     - { p_min: 53, p_max: 53, dir: ingress, protocol: udp }
     - { p_min: 53, p_max: 53, dir: egress, protocol: udp }
     - { p_min: 8080, p_max: 8080, dir: ingress, protocol: tcp }
+    - { p_min: 8090, p_max: 8090, dir: ingress, protocol: tcp }
     - { p_min: 8285, p_max: 8285, dir: ingress, protocol: udp }
     - { p_min: 2379, p_max: 2380, dir: ingress, protocol: tcp }
     - { p_min: 2379, p_max: 2380, dir: egress, protocol: tcp }
     - { p_min: 10250, p_max: 10250, dir: ingress, protocol: tcp }
-    - { p_min: 30000, p_max: 32767, dir: ingress, protocol: tcp }
+    - { p_min: 20000, p_max: 32767, dir: ingress, protocol: tcp }
     - { p_min: -1, p_max: -1, dir: ingress, protocol: icmp }
     - { p_min: -1, p_max: -1, dir: egress, protocol: icmp }
 

workloads/ansible/shade/k8s/roles/provision/tasks/apply.yml  (+1 / -0)

@@ -25,6 +25,7 @@
     flavor: "{{ app_env.flavor_name }}"
     network: "{{ app_env.private_net_name }}"
     floating_ip_pools: "{{ app_env.public_net_name | default(omit) }}"
+    floating_ips: "{{ app_env.app_setting.ips[inventory_hostname] }}"
     reuse_ips: False
     userdata: "{{ lookup('template', tp_path) }}"
     config_drive: "{{ app_env.config_drive | default('no') }}"

workloads/ansible/shade/k8s/roles/provision/tasks/destroy.yml  (+5 / -1)

@@ -1,5 +1,9 @@
 ---
 
+- name: Setup release floating IP flag
+  set_fact:
+    ip_flag: "{{ app_env.app_setting.ips[inventory_hostname] == '' }}"
+
 - name: Destroy the OpenStack VM
   os_server:
     state: "absent"
@@ -9,7 +13,7 @@
     validate_certs: "{{ app_env.validate_certs }}"
     name: "{{ inventory_hostname }}"
     image: "{{ app_env.image_name }}"
-    delete_fip: True
+    delete_fip: "{{ ip_flag }}"
     key_name: "k8s"
     timeout: 200
     network: "{{ app_env.private_net_name }}"

workloads/ansible/shade/k8s/site.yml  (+13 / -0)

@@ -110,6 +110,19 @@
   environment: "{{ proxy_env }}"
   tags: "post"
 
+- name: Start up applications
+  hosts: cworkers, uworkers
+  gather_facts: true
+  user: "{{ app_env.ssh_user }}"
+  become: true
+  become_user: root
+  vars_files:
+    - "vars/{{ env }}.yml"
+  roles:
+    - apps
+  environment: "{{ proxy_env }}"
+  tags: "apps"
+
 - name: Inform the installer
   hosts: cloud
   connection: local
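
Because the new play is tagged "apps", the application rollout can in
principle be rerun on its own once the stack is up, for example by restricting
an ansible-playbook run of site.yml with --tags "apps" and the same extra
variables as the full run; this is a usage note rather than part of the patch.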

workloads/ansible/shade/k8s/vars/coreos.yml  (+18 / -0)

@@ -40,7 +40,25 @@ app_env: {
   dns_service_ip: "172.16.0.4",
   dashboard_service_ip: "172.16.0.5",
 
+  app_setting: {
+    public_node: "",
+    own_cluster: True,
+    # The following section shows how to pre-allocate floating IPs for each
+    # server. If you do not wish to pre-allocate floating IPs, or your cloud
+    # does not support floating IPs, leave the lists empty. The stack_size
+    # above dictates how many worker entries are needed. For example, if
+    # your stack_size is 10, you will need to add worker-1 to worker-9.
+    ips: {
+      master: [],
+      worker-1: [],
+      worker-2: []
+    }
+  },
+
   # The following section shows an example when use a local repo.
+  # If you have exported some container images, such as the images used
+  # by this workload, you can point cimages_repo at the URL of the
+  # exported tar.gz file.
   cimages_repo: "http://10.0.10.12/cimages.tar.gz",
   flannel_repo: "http://10.0.10.12/flannel-v0.7.0-linux-amd64.tar.gz",
   k8s_repo: "http://10.0.10.12/v1.5.4/"
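
As a purely illustrative example of pre-allocating floating IPs for a
three-node stack, the ips map could be filled in with addresses reserved from
the cloud's public pool (the 172.24.4.x values are placeholders):

    ips: {
      master: ["172.24.4.10"],
      worker-1: ["172.24.4.11"],
      worker-2: ["172.24.4.12"]
    }

Each entry is looked up by inventory_hostname in the provision role above and
handed to os_server as floating_ips; during destroy, delete_fip is derived
from whether an entry was left empty, so pre-allocated addresses are not
released along with the VM.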

workloads/ansible/shade/k8s/vars/ubuntu.yml  (+18 / -0)

@@ -40,7 +40,25 @@ app_env: {
   dns_service_ip: "172.16.0.4",
   dashboard_service_ip: "172.16.0.5",
 
+  app_setting: {
+    public_node: "",
+    own_cluster: True,
+    # The following section shows how to pre-allocate floating IPs for each
+    # server. If you do not wish to pre-allocate floating IPs, or your cloud
+    # does not support floating IPs, leave the lists empty. The stack_size
+    # above dictates how many worker entries are needed. For example, if
+    # your stack_size is 10, you will need to add worker-1 to worker-9.
+    ips: {
+      master: [],
+      worker-1: [],
+      worker-2: []
+    }
+  },
+
   # The following section shows an example when use a remote repo.
+  # If you have exported some container images, such as the images used
+  # by this workload, you can point cimages_repo at the URL of the
+  # exported tar.gz file.
   cimages_repo: "",
   flannel_repo: "https://github.com/coreos/flannel/releases/download/v0.7.0/flannel-v0.7.0-linux-amd64.tar.gz",
   k8s_repo: "https://storage.googleapis.com/kubernetes-release/release/v1.5.3/bin/linux/amd64/"
