
move both ansible and terraform existing workloads to this repository

Change-Id: Ie1091cb4c5bbefa7da8f3d095be33860a56432cc
Tong Li committed 2 years ago · commit c2d9031536
56 changed files with 2337 additions and 0 deletions
1. workloads/ansible/shade/dockerswarm/.gitignore  (+4, -0)
2. workloads/ansible/shade/dockerswarm/ansible.cfg  (+3, -0)
3. workloads/ansible/shade/dockerswarm/hosts  (+1, -0)
4. workloads/ansible/shade/dockerswarm/roles/post_apply/tasks/main.yml  (+19, -0)
5. workloads/ansible/shade/dockerswarm/roles/post_destroy/tasks/main.yml  (+27, -0)
6. workloads/ansible/shade/dockerswarm/roles/prep_apply/tasks/main.yml  (+96, -0)
7. workloads/ansible/shade/dockerswarm/roles/prep_apply/templates/cloudinit.j2  (+47, -0)
8. workloads/ansible/shade/dockerswarm/roles/prep_destroy/tasks/main.yml  (+13, -0)
9. workloads/ansible/shade/dockerswarm/roles/prov_apply/tasks/main.yml  (+39, -0)
10. workloads/ansible/shade/dockerswarm/roles/prov_apply/templates/bootstrap1.j2  (+31, -0)
11. workloads/ansible/shade/dockerswarm/roles/prov_apply/templates/bootstrap2.j2  (+32, -0)
12. workloads/ansible/shade/dockerswarm/roles/prov_apply/templates/dockerservice.j2  (+2, -0)
13. workloads/ansible/shade/dockerswarm/roles/prov_apply/templates/openssl.cnf  (+8, -0)
14. workloads/ansible/shade/dockerswarm/roles/prov_destroy/tasks/main.yml  (+14, -0)
15. workloads/ansible/shade/dockerswarm/roles/vm_apply/tasks/main.yml  (+21, -0)
16. workloads/ansible/shade/dockerswarm/roles/vm_destroy/tasks/main.yml  (+1, -0)
17. workloads/ansible/shade/dockerswarm/site.yml  (+33, -0)
18. workloads/ansible/shade/dockerswarm/vars/bluebox.yml  (+23, -0)
19. workloads/ansible/shade/dockerswarm/vars/dreamhost.yml  (+21, -0)
20. workloads/ansible/shade/dockerswarm/vars/leap.yml  (+24, -0)
21. workloads/ansible/shade/dockerswarm/vars/osic.yml  (+24, -0)
22. workloads/ansible/shade/dockerswarm/vars/ovh.yml  (+23, -0)
23. workloads/ansible/shade/lampstack/.gitignore  (+6, -0)
24. workloads/ansible/shade/lampstack/ansible.cfg  (+3, -0)
25. workloads/ansible/shade/lampstack/group_vars/all.yml  (+7, -0)
26. workloads/ansible/shade/lampstack/hosts  (+1, -0)
27. workloads/ansible/shade/lampstack/roles/apply/tasks/main.yml  (+193, -0)
28. workloads/ansible/shade/lampstack/roles/apply/templates/userdata.j2  (+4, -0)
29. workloads/ansible/shade/lampstack/roles/balancer/tasks/main.yml  (+53, -0)
30. workloads/ansible/shade/lampstack/roles/balancer/templates/haproxy.cfg.j2  (+33, -0)
31. workloads/ansible/shade/lampstack/roles/balancer/templates/haproxy_fedora.cfg.j2  (+34, -0)
32. workloads/ansible/shade/lampstack/roles/cleaner/tasks/apply.yml  (+23, -0)
33. workloads/ansible/shade/lampstack/roles/cleaner/tasks/destroy.yml  (+1, -0)
34. workloads/ansible/shade/lampstack/roles/common/tasks/main.yml  (+19, -0)
35. workloads/ansible/shade/lampstack/roles/database/tasks/main.yml  (+164, -0)
36. workloads/ansible/shade/lampstack/roles/destroy/tasks/main.yml  (+79, -0)
37. workloads/ansible/shade/lampstack/roles/webserver/tasks/main.yml  (+147, -0)
38. workloads/ansible/shade/lampstack/roles/wordpress/tasks/main.yml  (+73, -0)
39. workloads/ansible/shade/lampstack/site.yml  (+96, -0)
40. workloads/terraform/shade/dockerswarm/.gitignore  (+9, -0)
41. workloads/terraform/shade/dockerswarm/README.md  (+217, -0)
42. workloads/terraform/shade/dockerswarm/_securitygroups.tf  (+60, -0)
43. workloads/terraform/shade/dockerswarm/files/ssl/generate-ssl.sh  (+12, -0)
44. workloads/terraform/shade/dockerswarm/files/ssl/openssl.cnf  (+8, -0)
45. workloads/terraform/shade/dockerswarm/swarm.tf  (+109, -0)
46. workloads/terraform/shade/dockerswarm/templates/10-docker-service.conf  (+2, -0)
47. workloads/terraform/shade/dockerswarm/templates/cloud-init  (+48, -0)
48. workloads/terraform/shade/dockerswarm/vars-coreos.tf  (+4, -0)
49. workloads/terraform/shade/dockerswarm/vars-openstack.tf  (+46, -0)
50. workloads/terraform/shade/dockerswarm/vars-swarm.tf  (+21, -0)
51. workloads/terraform/shade/lampstack/README.md  (+162, -0)
52. workloads/terraform/shade/lampstack/lampstack.tf  (+108, -0)
53. workloads/terraform/shade/lampstack/onvm/app/index.php  (+15, -0)
54. workloads/terraform/shade/lampstack/onvm/scripts/installapache.sh  (+19, -0)
55. workloads/terraform/shade/lampstack/onvm/scripts/installdb.sh  (+21, -0)
56. workloads/terraform/shade/lampstack/vars_lampstack.tf  (+34, -0)

+4 -0  workloads/ansible/shade/dockerswarm/.gitignore

@@ -0,0 +1,4 @@
+*.out
+*/**/*.log
+*/**/.DS_Store
+*/**/._

+3 -0  workloads/ansible/shade/dockerswarm/ansible.cfg

@@ -0,0 +1,3 @@
+[defaults]
+inventory = ./hosts
+host_key_checking = False

+1 -0  workloads/ansible/shade/dockerswarm/hosts

@@ -0,0 +1 @@
+cloud ansible_host=127.0.0.1 ansible_python_interpreter=python

+19 -0  workloads/ansible/shade/dockerswarm/roles/post_apply/tasks/main.yml

@@ -0,0 +1,19 @@
+---
+- debug:
+    msg: >-
+      export DOCKER_HOST=tcp://{{ hostvars.swarmnode1.swarmnode.openstack.public_v4 }}:2375;
+      export DOCKER_TLS_VERIFY=1;
+      export DOCKER_CERT_PATH=/tmp/{{ env }}/keys
+  when: hostvars.swarmnode1.swarmnode.openstack.public_v4 != ""
+
+- debug:
+    msg: >-
+      export DOCKER_HOST=tcp://{{ hostvars.swarmnode1.swarmnode.openstack.private_v4 }}:2375;
+      export DOCKER_TLS_VERIFY=1;
+      export DOCKER_CERT_PATH=/tmp/{{ env }}/keys
+  when: hostvars.swarmnode1.swarmnode.openstack.public_v4 == ""
+
+- debug:
+    msg: >-
+      The workload test started at {{ starttime.time }},
+      ended at {{ ansible_date_time.time }}

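The debug output above is meant to be pasted into a local shell; with those variables exported, the stock Docker client should be able to reach the swarm manager. A minimal sketch, with placeholder IP and environment name:

```
$ export DOCKER_HOST=tcp://<manager-ip>:2375
$ export DOCKER_TLS_VERIFY=1
$ export DOCKER_CERT_PATH=/tmp/<env>/keys
$ docker info
```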
+27 -0  workloads/ansible/shade/dockerswarm/roles/post_destroy/tasks/main.yml

@@ -0,0 +1,27 @@
+---
+- name: Remove security group
+  os_security_group:
+    state: absent
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: dockerswarm_sg
+    description: security group for dockerswarm
+
+- name: Delete discovery url directory
+  file: path="/tmp/{{ env }}" state=absent
+
+- name: Delete a key-pair
+  os_keypair:
+    state: absent
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: "dockerswarm"
+
+- debug:
+    msg: >-
+      The workload test started at {{ starttime.time }},
+      ended at {{ ansible_date_time.time }}

+96 -0  workloads/ansible/shade/dockerswarm/roles/prep_apply/tasks/main.yml

@@ -0,0 +1,96 @@
+---
+- name: Get start timestamp
+  set_fact: starttime="{{ ansible_date_time }}"
+
+- name: Create certificate directory
+  file: path="/tmp/{{ env }}/keys" state=directory
+
+- stat: path="/tmp/{{ env }}/discovery_url"
+  register: discovery_url_flag
+
+- name: Get docker discovery url
+  get_url:
+    url: "https://discovery.etcd.io/new?size={{ app_env.swarm_size }}"
+    dest: "/tmp/{{ env }}/discovery_url"
+  when: discovery_url_flag.stat.exists == false
+
+- shell: openssl genrsa -out "/tmp/{{ env }}/keys/ca-key.pem" 2048
+- shell: openssl genrsa -out "/tmp/{{ env }}/keys/key.pem" 2048
+
+- shell: >-
+      openssl req -x509 -new -nodes -key /tmp/{{ env }}/keys/ca-key.pem
+      -days 10000 -out /tmp/{{ env }}/keys/ca.pem -subj '/CN=docker-CA'
+
+- shell: >-
+      openssl req -new -key /tmp/{{ env }}/keys/key.pem
+      -out /tmp/{{ env }}/keys/cert.csr
+      -subj '/CN=docker-client' -config ./roles/prov_apply/templates/openssl.cnf
+
+- shell: >-
+      openssl x509 -req -in /tmp/{{ env }}/keys/cert.csr
+      -CA /tmp/{{ env }}/keys/ca.pem -CAkey /tmp/{{ env }}/keys/ca-key.pem
+      -CAcreateserial -out /tmp/{{ env }}/keys/cert.pem -days 365
+      -extensions v3_req -extfile ./roles/prov_apply/templates/openssl.cnf
+
+- name: Retrieve specified flavor
+  os_flavor_facts:
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: "{{ app_env.flavor_name }}"
+
+- name: Create a key-pair
+  os_keypair:
+    state: "present"
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: "dockerswarm"
+    public_key_file: "{{ app_env.public_key_file }}"
+
+- name: Create security group
+  os_security_group:
+    state: present
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: dockerswarm_sg
+    description: security group for dockerswarm
+
+- name: Add security rules
+  os_security_group_rule:
+    state: present
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    security_group: dockerswarm_sg
+    protocol: "{{ item.protocol }}"
+    direction: "{{ item.dir }}"
+    port_range_min: "{{ item.p_min }}"
+    port_range_max: "{{ item.p_max }}"
+    remote_ip_prefix: 0.0.0.0/0
+  with_items:
+    - { p_min: 22, p_max: 22, dir: ingress, protocol: tcp }
+    - { p_min: 2375, p_max: 2376, dir: ingress, protocol: tcp }
+    - { p_min: 2379, p_max: 2380, dir: ingress, protocol: tcp }
+    - { p_min: 2379, p_max: 2380, dir: egress, protocol: tcp }
+    - { p_min: -1, p_max: -1, dir: ingress, protocol: icmp }
+    - { p_min: -1, p_max: -1, dir: egress, protocol: icmp }
+
+- name: Create cloudinit file for all nodes
+  template:
+    src: templates/cloudinit.j2
+    dest: "/tmp/{{ env }}/cloudinit"
+
+- name: Add nodes to host group
+  add_host:
+    name: "swarmnode{{ item }}"
+    hostname: "127.0.0.1"
+    groups: dockerswarm
+    host_no: "{{ item }}"
+  with_sequence: count={{ app_env.swarm_size }}
+  no_log: True

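The CA and client certificate produced by the openssl tasks above can be sanity-checked by hand; a minimal example, assuming the same `/tmp/<env>/keys` layout (`<env>` stands in for the `env` variable):

```
$ openssl verify -CAfile /tmp/<env>/keys/ca.pem /tmp/<env>/keys/cert.pem
/tmp/<env>/keys/cert.pem: OK
```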
+47 -0  workloads/ansible/shade/dockerswarm/roles/prep_apply/templates/cloudinit.j2

@@ -0,0 +1,47 @@
+#cloud-config
+coreos:
+  units:
+    - name: etcd.service
+      mask: true
+    - name: etcd2.service
+      command: start
+    - name: docker.service
+      command: start
+    - name: swarm-agent.service
+      content: |
+        [Unit]
+        Description=swarm agent
+        Requires=docker.service
+        After=docker.service
+
+        [Service]
+        EnvironmentFile=/etc/environment
+        TimeoutStartSec=20m
+        ExecStartPre=/usr/bin/docker pull swarm:latest
+        ExecStartPre=-/usr/bin/docker rm -f swarm-agent
+        ExecStart=/bin/sh -c "/usr/bin/docker run --rm --name swarm-agent swarm:latest join --addr=$COREOS_PRIVATE_IPV4:2376 etcd://$COREOS_PRIVATE_IPV4:2379/docker"
+        ExecStop=/usr/bin/docker stop swarm-agent
+    - name: swarm-manager.service
+      content: |
+        [Unit]
+        Description=swarm manager
+        Requires=docker.service
+        After=docker.service
+
+        [Service]
+        EnvironmentFile=/etc/environment
+        TimeoutStartSec=20m
+        ExecStartPre=/usr/bin/docker pull swarm:latest
+        ExecStartPre=-/usr/bin/docker rm -f swarm-manager
+        ExecStart=/bin/sh -c "/usr/bin/docker run --rm --name swarm-manager -v /etc/docker/ssl:/etc/docker/ssl --net=host swarm:latest manage --tlsverify --tlscacert=/etc/docker/ssl/ca.pem --tlscert=/etc/docker/ssl/cert.pem --tlskey=/etc/docker/ssl/key.pem etcd://$COREOS_PRIVATE_IPV4:2379/docker"
+        ExecStop=/usr/bin/docker stop swarm-manager
+  etcd2:
+    discovery: {{ lookup('file', '/tmp/'+env+'/discovery_url') }}
+    advertise-client-urls: http://$private_ipv4:2379
+    initial-advertise-peer-urls: http://$private_ipv4:2380
+    listen-client-urls: http://0.0.0.0:2379
+    listen-peer-urls: http://$private_ipv4:2380
+    data-dir: /var/lib/etcd2
+    initial-cluster-token: openstackinterop
+  update:
+    reboot-strategy: "off"

+13 -0  workloads/ansible/shade/dockerswarm/roles/prep_destroy/tasks/main.yml

@@ -0,0 +1,13 @@
+---
+- name: Get start timestamp
+  set_fact: starttime="{{ ansible_date_time }}"
+
+- name: Add swarm nodes to the dockerswarm host group
+  add_host:
+    name: "swarmnode{{ item }}"
+    hostname: "127.0.0.1"
+    groups: dockerswarm
+    host_no: "{{ item }}"
+  with_sequence: count={{ app_env.swarm_size }}
+  no_log: True
+

+39 -0  workloads/ansible/shade/dockerswarm/roles/prov_apply/tasks/main.yml

@@ -0,0 +1,39 @@
+---
+- name: Get public IP
+  set_fact: node_ip="{{ swarmnode.openstack.public_v4 }}"
+  when: swarmnode.openstack.public_v4 != ""
+
+- name: Get private IP
+  set_fact: node_ip="{{ swarmnode.openstack.private_v4 }}"
+  when: swarmnode.openstack.public_v4 == ""
+
+- name: Make certificate configuration file
+  copy:
+    src: templates/openssl.cnf
+    dest: "/tmp/{{ env }}/{{ node_ip }}/keys/"
+
+- name: Make service file
+  template:
+    src: templates/dockerservice.j2
+    dest: "/tmp/{{ env }}/{{ node_ip }}/keys/dockerservice.cnf"
+
+- name: Create bootstrap file
+  template:
+    src: templates/bootstrap1.j2
+    dest: "/tmp/{{ env }}/{{ node_ip }}/keys/bootstrap.sh"
+  when: swarmnode.openstack.private_v4 == ""
+
+- name: Create bootstrap file
+  template:
+    src: templates/bootstrap2.j2
+    dest: "/tmp/{{ env }}/{{ node_ip }}/keys/bootstrap.sh"
+  when: swarmnode.openstack.private_v4 != ""
+
+- name: Transfer configuration
+  shell: scp -r "/tmp/{{ env }}/{{ node_ip }}/keys" "core@{{ node_ip }}:/home/core"
+
+- name: Transfer certificate file over to the nodes
+  shell: scp -r "/tmp/{{ env }}/keys" "core@{{ node_ip }}:/home/core"
+
+- name: Start services
+  shell: ssh "core@{{ node_ip }}" "sh keys/bootstrap.sh"

+31 -0  workloads/ansible/shade/dockerswarm/roles/prov_apply/templates/bootstrap1.j2

@@ -0,0 +1,31 @@
+mkdir -p /home/core/.docker
+cp /home/core/keys/ca.pem /home/core/.docker/
+cp /home/core/keys/cert.pem /home/core/.docker/
+cp /home/core/keys/key.pem /home/core/.docker/
+
+echo 'subjectAltName = @alt_names' >> /home/core/keys/openssl.cnf
+echo '[alt_names]' >> /home/core/keys/openssl.cnf
+
+cd /home/core/keys
+
+echo 'IP.1 = {{ swarmnode.openstack.public_v4 }}' >> openssl.cnf
+echo 'DNS.1 = {{ app_env.fqdn }}' >> openssl.cnf
+echo 'DNS.2 = {{ swarmnode.openstack.public_v4 }}.xip.io' >> openssl.cnf
+
+openssl req -new -key key.pem -out cert.csr -subj '/CN=docker-client' -config openssl.cnf
+openssl x509 -req -in cert.csr -CA ca.pem -CAkey ca-key.pem \
+  -CAcreateserial -out cert.pem -days 365 -extensions v3_req -extfile openssl.cnf
+
+sudo mkdir -p /etc/docker/ssl
+sudo cp ca.pem /etc/docker/ssl/
+sudo cp cert.pem /etc/docker/ssl/
+sudo cp key.pem /etc/docker/ssl/
+
+# Apply localized settings to services
+sudo mkdir -p /etc/systemd/system/{docker,swarm-agent,swarm-manager}.service.d
+
+sudo mv /home/core/keys/dockerservice.cnf /etc/systemd/system/docker.service.d/10-docker-service.conf
+sudo systemctl daemon-reload
+sudo systemctl restart docker.service
+sudo systemctl start swarm-agent.service
+sudo systemctl start swarm-manager.service

+32 -0  workloads/ansible/shade/dockerswarm/roles/prov_apply/templates/bootstrap2.j2

@@ -0,0 +1,32 @@
+mkdir -p /home/core/.docker
+cp /home/core/keys/ca.pem /home/core/.docker/
+cp /home/core/keys/cert.pem /home/core/.docker/
+cp /home/core/keys/key.pem /home/core/.docker/
+
+echo 'subjectAltName = @alt_names' >> /home/core/keys/openssl.cnf
+echo '[alt_names]' >> /home/core/keys/openssl.cnf
+
+cd /home/core/keys
+
+echo 'IP.1 = {{ swarmnode.openstack.private_v4 }}' >> openssl.cnf
+echo 'IP.2 = {{ swarmnode.openstack.public_v4 }}' >> openssl.cnf
+echo 'DNS.1 = {{ app_env.fqdn }}' >> openssl.cnf
+echo 'DNS.2 = {{ swarmnode.openstack.public_v4 }}.xip.io' >> openssl.cnf
+
+openssl req -new -key key.pem -out cert.csr -subj '/CN=docker-client' -config openssl.cnf
+openssl x509 -req -in cert.csr -CA ca.pem -CAkey ca-key.pem \
+  -CAcreateserial -out cert.pem -days 365 -extensions v3_req -extfile openssl.cnf
+
+sudo mkdir -p /etc/docker/ssl
+sudo cp ca.pem /etc/docker/ssl/
+sudo cp cert.pem /etc/docker/ssl/
+sudo cp key.pem /etc/docker/ssl/
+
+# Apply localized settings to services
+sudo mkdir -p /etc/systemd/system/{docker,swarm-agent,swarm-manager}.service.d
+
+sudo mv /home/core/keys/dockerservice.cnf /etc/systemd/system/docker.service.d/10-docker-service.conf
+sudo systemctl daemon-reload
+sudo systemctl restart docker.service
+sudo systemctl start swarm-agent.service
+sudo systemctl start swarm-manager.service

+2 -0  workloads/ansible/shade/dockerswarm/roles/prov_apply/templates/dockerservice.j2

@@ -0,0 +1,2 @@
+[Service]
+Environment="DOCKER_OPTS=-H=0.0.0.0:2376 -H unix:///var/run/docker.sock --tlsverify --tlscacert=/etc/docker/ssl/ca.pem --tlscert=/etc/docker/ssl/cert.pem --tlskey=/etc/docker/ssl/key.pem --cluster-advertise {{app_env.net_device}}:2376 --cluster-store etcd://127.0.0.1:2379/docker"

+8 -0  workloads/ansible/shade/dockerswarm/roles/prov_apply/templates/openssl.cnf

@@ -0,0 +1,8 @@
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = clientAuth, serverAuth

+14 -0  workloads/ansible/shade/dockerswarm/roles/prov_destroy/tasks/main.yml

@@ -0,0 +1,14 @@
+---
+- name: Remove docker swarm nodes
+  os_server:
+    state: "absent"
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: docker-swarm-{{ host_no }}
+    key_name: "dockerswarm"
+    timeout: 200
+    security_groups: dockerswarm_sg
+    meta:
+      hostname: docker-swarm-{{ host_no }}

+21 -0  workloads/ansible/shade/dockerswarm/roles/vm_apply/tasks/main.yml

@@ -0,0 +1,21 @@
+---
+- name: Create docker swarm nodes
+  os_server:
+    state: "present"
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: docker-swarm-{{ host_no }}
+    image: "{{ app_env.image_name }}"
+    key_name: "dockerswarm"
+    timeout: 200
+    flavor: "{{ hostvars.cloud.openstack_flavors[0].id }}"
+    network: "{{ app_env.private_net_name }}"
+    auto_ip: yes
+    userdata: "{{ lookup('file', '/tmp/' +env+ '/cloudinit') }}"
+    security_groups: dockerswarm_sg
+    meta:
+      hostname: docker-swarm-{{ host_no }}
+  register: swarmnode
+

+1 -0  workloads/ansible/shade/dockerswarm/roles/vm_destroy/tasks/main.yml

@@ -0,0 +1 @@
+---

+33 -0  workloads/ansible/shade/dockerswarm/site.yml

@@ -0,0 +1,33 @@
+---
+- name: prepare for provision
+  hosts: cloud
+  connection: local
+  vars_files:
+    - "vars/{{ env }}.yml"
+  roles:
+    - "prep_{{ action }}"
+
+- name: provision swarm nodes
+  hosts: dockerswarm
+  serial: 1
+  connection: local
+  vars_files:
+    - "vars/{{ env }}.yml"
+  roles:
+    - "vm_{{ action }}"
+
+- name: setup swarm nodes
+  hosts: dockerswarm
+  connection: local
+  vars_files:
+    - "vars/{{ env }}.yml"
+  roles:
+    - "prov_{{ action }}"
+
+- name: post provisioning
+  hosts: cloud
+  connection: local
+  vars_files:
+    - "vars/{{ env }}.yml"
+  roles:
+    - "post_{{ action }}"

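Given the `env` and `action` variables threaded through this site.yml, and the `{{ password }}` reference in the vars files below, a plausible invocation (values are placeholders; `env` must match one of the files under `vars/`) would be:

```
$ ansible-playbook -e "action=apply env=leap password=<your-password>" site.yml
$ ansible-playbook -e "action=destroy env=leap password=<your-password>" site.yml
```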
+23 -0  workloads/ansible/shade/dockerswarm/vars/bluebox.yml

@@ -0,0 +1,23 @@
+---
+horizon_url: "https://salesdemo-sjc.openstack.blueboxgrid.com"
+
+auth: {
+  auth_url: "https://salesdemo-sjc.openstack.blueboxgrid.com:5000/v2.0",
+  username: "litong01",
+  password: "{{ password }}",
+  project_name: "Interop"
+}
+
+app_env: {
+  image_name: "coreos",
+  private_net_name: "interopnet",
+  net_device: "eth0",
+  flavor_name: "m1.small",
+  swarm_version: "latest",
+  swarm_size: 3,
+  region_name: "",
+  availability_zone: "",
+  validate_certs: True,
+  fqdn: "swarm.example.com",
+  public_key_file: "/home/tong/.ssh/id_rsa.pub"
+}

+21 -0  workloads/ansible/shade/dockerswarm/vars/dreamhost.yml

@@ -0,0 +1,21 @@
+---
+horizon_url: "https://iad2.dreamcompute.com"
+
+auth: {
+  auth_url: "https://iad2.dream.io:5000/v2.0",
+  username: "stemaf4",
+  password: "{{ password }}",
+  project_name: "dhc2131831"
+}
+
+app_env: {
+  region_name: "RegionOne",
+  image_name: "CoreOS Sept16",
+  private_net_name: "",
+  flavor_name: "gp1.subsonic",
+  public_key_file: "/home/reed/.ssh/id_rsa.pub",
+  swarm_version: "latest",
+  swarm_size: 3,
+  fqdn: "swarm.example.com",
+  net_device: "eth0"
+}

+24 -0  workloads/ansible/shade/dockerswarm/vars/leap.yml

@@ -0,0 +1,24 @@
+---
+horizon_url: "http://9.30.217.9"
+
+auth: {
+  auth_url: "http://9.30.217.9:5000/v3",
+  username: "demo",
+  password: "{{ password }}",
+  domain_name: "default",
+  project_name: "demo"
+}
+
+app_env: {
+  image_name: "coreos",
+  private_net_name: "Bluebox",
+  net_device: "eth0",
+  flavor_name: "m1.small",
+  swarm_version: "latest",
+  swarm_size: 3,
+  region_name: "RegionOne",
+  availability_zone: "nova",
+  validate_certs: False,
+  fqdn: "swarm.example.com",
+  public_key_file: "/home/tong/.ssh/id_rsa.pub"
+}

+24 -0  workloads/ansible/shade/dockerswarm/vars/osic.yml

@@ -0,0 +1,24 @@
+---
+horizon_url: "https://cloud1.osic.org"
+
+auth: {
+  auth_url: "https://cloud1.osic.org:5000/v3",
+  username: "litong01",
+  password: "{{ password }}",
+  domain_name: "default",
+  project_name: "interop_challenge"
+}
+
+app_env: {
+  image_name: "coreos",
+  private_net_name: "interopnet",
+  net_device: "eth0",
+  flavor_name: "m1.small",
+  swarm_version: "latest",
+  swarm_size: 3,
+  region_name: "",
+  availability_zone: "",
+  validate_certs: True,
+  fqdn: "swarm.example.com",
+  public_key_file: "/home/tong/.ssh/id_rsa.pub"
+}

+23 -0  workloads/ansible/shade/dockerswarm/vars/ovh.yml

@@ -0,0 +1,23 @@
+---
+horizon_url: "https://horizon.cloud.ovh.net"
+
+auth: {
+  auth_url: "https://auth.cloud.ovh.net/v2.0",
+  username: "SXYbmFhC4aqQ",
+  password: "{{ password }}",
+  project_name: "2487610196015734"
+}
+
+app_env: {
+  image_name: "coreos",
+  private_net_name: "",
+  net_device: "eth0",
+  flavor_name: "eg-15-ssd",
+  swarm_version: "latest",
+  swarm_size: 3,
+  region_name: "BHS1",
+  availability_zone: "",
+  validate_certs: True,
+  fqdn: "swarm.example.com",
+  public_key_file: "/home/tong/.ssh/id_rsa.pub"
+}

+6 -0  workloads/ansible/shade/lampstack/.gitignore

@@ -0,0 +1,6 @@
+*.out
+vars/*
+*/**/*.log
+*/**/.DS_Store
+*/**/._
+*/**/*.tfstate*

+3 -0  workloads/ansible/shade/lampstack/ansible.cfg

@@ -0,0 +1,3 @@
+[defaults]
+inventory = ./hosts
+host_key_checking = False

+7 -0  workloads/ansible/shade/lampstack/group_vars/all.yml

@@ -0,0 +1,7 @@
+---
+db_user: "wpdbuser"
+db_pass: "{{ lookup('password',
+             '/tmp/sqlpassword chars=ascii_letters,digits length=8') }}"
+
+proxy_env: {
+}

+1 -0  workloads/ansible/shade/lampstack/hosts

@@ -0,0 +1 @@
+cloud ansible_host=127.0.0.1 ansible_python_interpreter=python

+193 -0  workloads/ansible/shade/lampstack/roles/apply/tasks/main.yml

@@ -0,0 +1,193 @@
+---
+- name: Get start timestamp
+  set_fact: starttime="{{ ansible_date_time }}"
+
+- name: Retrieve specified flavor
+  os_flavor_facts:
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: "{{ app_env.flavor_name }}"
+
+- name: Create a key-pair
+  os_keypair:
+    state: "present"
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: "lampstack"
+    public_key_file: "{{ app_env.public_key_file }}"
+
+- name: Create volume
+  os_volume:
+    state: present
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    size: "{{ app_env.volume_size }}"
+    wait: yes
+    display_name: db_volume
+
+- name: Create security group
+  os_security_group:
+    state: present
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: lampstack_sg
+    description: security group for lampstack
+
+- name: Add security rules
+  os_security_group_rule:
+    state: present
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    security_group: lampstack_sg
+    protocol: "{{ item.protocol }}"
+    direction: "{{ item.dir }}"
+    port_range_min: "{{ item.p_min }}"
+    port_range_max: "{{ item.p_max }}"
+    remote_ip_prefix: 0.0.0.0/0
+  with_items:
+    - { p_min: 22, p_max: 22, dir: ingress, protocol: tcp }
+    - { p_min: 80, p_max: 80, dir: ingress, protocol: tcp }
+    - { p_min: 2049, p_max: 2049, dir: ingress, protocol: tcp }
+    - { p_min: 2049, p_max: 2049, dir: egress, protocol: tcp }
+    - { p_min: 3306, p_max: 3306, dir: ingress, protocol: tcp }
+    - { p_min: -1, p_max: -1, dir: ingress, protocol: icmp }
+    - { p_min: -1, p_max: -1, dir: egress, protocol: icmp }
+
+- name: Create database node
+  os_server:
+    state: "present"
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: database
+    image: "{{ app_env.image_name }}"
+    key_name: "lampstack"
+    timeout: 200
+    flavor: "{{ app_env.flavor_name }}"
+    network: "{{ app_env.private_net_name }}"
+    userdata: "{{ lookup('file', 'templates/userdata.j2') }}"
+    config_drive: "{{ app_env.config_drive | default('no') }}"
+    security_groups: lampstack_sg
+    floating_ip_pools: "{{ app_env.public_net_name | default(omit) }}"
+    meta:
+      hostname: database
+  register: database
+
+- name: Add database node to the dbservers host group
+  add_host:
+    name: "{{ database.openstack.public_v4 }}"
+    groups: dbservers
+  when: database.openstack.public_v4 != ""
+
+- name: Add database node to the dbservers host group
+  add_host:
+    name: "{{ database.openstack.private_v4 }}"
+    groups: dbservers
+  when: database.openstack.public_v4 == ""
+
+- name: Create balancer node
+  os_server:
+    state: "present"
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: balancer
+    image: "{{ app_env.image_name }}"
+    key_name: "lampstack"
+    timeout: 200
+    flavor: "{{ app_env.flavor_name }}"
+    network: "{{ app_env.private_net_name }}"
+    userdata: "{{ lookup('file', 'templates/userdata.j2') }}"
+    config_drive: "{{ app_env.config_drive | default('no') }}"
+    security_groups: lampstack_sg
+    floating_ip_pools: "{{ app_env.public_net_name | default(omit) }}"
+    meta:
+      hostname: balancer
+  register: balancer
+
+- name: Add balancer node to the balancers host group
+  add_host:
+    name: "{{ balancer.openstack.public_v4 }}"
+    groups: balancers
+  when: balancer.openstack.public_v4 != ""
+
+- name: Add balancer node to the balancers host group
+  add_host:
+    name: "{{ balancer.openstack.private_v4 }}"
+    groups: balancers
+  when: balancer.openstack.public_v4 == ""
+
+- name: Create a volume for database to save data
+  os_server_volume:
+    state: present
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    server: database
+    volume: db_volume
+    device: "{{ app_env.block_device_name }}"
+
+- name: Create web server nodes to host application
+  os_server:
+    state: "present"
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: apache-{{ item }}
+    image: "{{ app_env.image_name }}"
+    key_name: "lampstack"
+    timeout: 200
+    flavor: "{{ app_env.flavor_name }}"
+    network: "{{ app_env.private_net_name }}"
+    floating_ip_pools: "{{ app_env.public_net_name | default(omit) }}"
+    userdata: "{{ lookup('file', 'templates/userdata.j2') }}"
+    config_drive: "{{ app_env.config_drive | default('no') }}"
+    security_groups: lampstack_sg
+    meta:
+      hostname: apache-{{ item }}
+  with_sequence: count={{ app_env.stack_size - 2 }}
+  register: webserver
+
+- name: Add web servers to webservers host group
+  add_host:
+    name: "{{ item.openstack.public_v4 }}"
+    groups: webservers
+  when: item.openstack.public_v4 != ""
+  with_items: "{{ webserver.results }}"
+  no_log: True
+
+- name: Add web servers to webservers host group
+  add_host:
+    name: "{{ item.openstack.private_v4 }}"
+    groups: webservers
+  when: item.openstack.public_v4 == ""
+  with_items: "{{ webserver.results }}"
+  no_log: True
+
+- name: Add one web server to the wps host group
+  add_host:
+    name: "{{ webserver.results[0].openstack.public_v4 }}"
+    groups: wps
+  when: webserver.results[0].openstack.public_v4 != ""
+  no_log: True
+
+- name: Add one web server to the wps host group
+  add_host:
+    name: "{{ webserver.results[0].openstack.private_v4 }}"
+    groups: wps
+  when: webserver.results[0].openstack.public_v4 == ""
+  no_log: True

+4 -0  workloads/ansible/shade/lampstack/roles/apply/templates/userdata.j2

@@ -0,0 +1,4 @@
+#cloud-config
+runcmd:
+ - addr=$(ip -4 -o addr | grep -v '127.0.0.1' | awk 'NR==1{print $4}' | cut -d '/' -f 1)
+ - echo $addr `hostname` >> /etc/hosts

+53 -0  workloads/ansible/shade/lampstack/roles/balancer/tasks/main.yml

@@ -0,0 +1,53 @@
+---
+- name: Haproxy install
+  package:
+    name="{{ item }}"
+    state=latest
+    update_cache=yes
+  with_items:
+    - haproxy
+  when: ansible_distribution == 'Ubuntu'
+
+- name: Haproxy install
+  package:
+    name="{{ item }}"
+    state=latest
+  with_items:
+    - haproxy
+  when: ansible_distribution == 'Fedora'
+
+- name: Enable haproxy service
+  replace:
+    dest: /etc/default/haproxy
+    regexp: "ENABLED=0"
+    replace: "ENABLED=1"
+    backup: no
+  when: ansible_distribution == 'Ubuntu'
+
+- name: Place the haproxy configuration file
+  copy:
+    src: templates/haproxy.cfg.j2
+    dest: /etc/haproxy/haproxy.cfg
+    owner: root
+    group: root
+  when: ansible_distribution == 'Ubuntu'
+
+- name: Place the haproxy configuration file
+  copy:
+    src: templates/haproxy_fedora.cfg.j2
+    dest: /etc/haproxy/haproxy.cfg
+    owner: root
+    group: root
+  when: ansible_distribution == 'Fedora'
+
+- name: Add web servers to the haproxy
+  lineinfile:
+    dest: /etc/haproxy/haproxy.cfg
+    line: "    server ws{{ item[0].openstack[item[1]] }} {{ item[0].openstack[item[1]] }}:80 check"
+  with_nested:
+    - "{{ hostvars.cloud.webserver.results }}"
+    - ["private_v4", "public_v4"]
+  when: item[0].openstack[item[1]] != ''
+  no_log: True
+
+- service: name=haproxy state=restarted enabled=yes

+33 -0  workloads/ansible/shade/lampstack/roles/balancer/templates/haproxy.cfg.j2

@@ -0,0 +1,33 @@
+global
+        log /dev/log    local0
+        log /dev/log    local1 notice
+        chroot /var/lib/haproxy
+        user haproxy
+        group haproxy
+        daemon
+
+defaults
+        log     global
+        mode    http
+        option  httplog
+        option  dontlognull
+        option  redispatch
+        retries 3
+        contimeout 5000
+        clitimeout 50000
+        srvtimeout 50000
+        errorfile 400 /etc/haproxy/errors/400.http
+        errorfile 403 /etc/haproxy/errors/403.http
+        errorfile 408 /etc/haproxy/errors/408.http
+        errorfile 500 /etc/haproxy/errors/500.http
+        errorfile 502 /etc/haproxy/errors/502.http
+        errorfile 503 /etc/haproxy/errors/503.http
+        errorfile 504 /etc/haproxy/errors/504.http
+
+listen webfarm 0.0.0.0:80
+    mode http
+    stats enable
+    stats uri /haproxy?stats
+    balance roundrobin
+    option httpclose
+    option forwardfor

+34 -0  workloads/ansible/shade/lampstack/roles/balancer/templates/haproxy_fedora.cfg.j2

@@ -0,0 +1,34 @@
+global
+        log /dev/log    local0
+        log /dev/log    local1 notice
+        chroot /var/lib/haproxy
+        user haproxy
+        group haproxy
+        daemon
+
+defaults
+        log     global
+        mode    http
+        option  httplog
+        option  dontlognull
+        option  redispatch
+        retries 3
+        contimeout 5000
+        clitimeout 50000
+        srvtimeout 50000
+        errorfile 400 /usr/share/haproxy/400.http
+        errorfile 403 /usr/share/haproxy/403.http
+        errorfile 408 /usr/share/haproxy/408.http
+        errorfile 500 /usr/share/haproxy/500.http
+        errorfile 502 /usr/share/haproxy/502.http
+        errorfile 503 /usr/share/haproxy/503.http
+        errorfile 504 /usr/share/haproxy/504.http
+
+listen webfarm
+    bind 0.0.0.0:80
+    mode http
+    stats enable
+    stats uri /haproxy?stats
+    balance roundrobin
+    option httpclose
+    option forwardfor

+23 -0  workloads/ansible/shade/lampstack/roles/cleaner/tasks/apply.yml

@@ -0,0 +1,23 @@
+---
+- os_floating_ip:
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    state: absent
+    floating_ip_address: "{{ database.openstack.public_v4 }}"
+    server: "{{ database.openstack.name }}"
+  when: database.openstack.private_v4 != ""
+  no_log: True
+
+- os_floating_ip:
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    state: absent
+    floating_ip_address: "{{ item.openstack.public_v4 }}"
+    server: "{{ item.openstack.name }}"
+  with_items: "{{ webserver.results }}"
+  when: item.openstack.private_v4 != ""
+  no_log: True

+1 -0  workloads/ansible/shade/lampstack/roles/cleaner/tasks/destroy.yml

@@ -0,0 +1 @@
+---

+19 -0  workloads/ansible/shade/lampstack/roles/common/tasks/main.yml

@@ -0,0 +1,19 @@
+---
+- name: Wait until server is up and running
+  local_action: wait_for port=22 host="{{ ansible_ssh_host | default(inventory_hostname) }}" search_regex=OpenSSH delay=10
+  become: no
+
+- name: Check if running on Fedora
+  raw: "[ -f /etc/fedora-release ]"
+  register: fedora_release
+  ignore_errors: yes
+
+- name: Install python2 for Ansible
+  raw: dnf install -y python2 python2-dnf libselinux-python
+  register: result
+  until: result|success
+  when: fedora_release.rc == 0
+
+- name: Set SELinux to permissive
+  selinux: policy=targeted state=permissive
+  when: fedora_release.rc == 0

+164 -0  workloads/ansible/shade/lampstack/roles/database/tasks/main.yml

@@ -0,0 +1,164 @@
+---
+- stat: path=/tmp/diskflag
+  register: diskflag
+
+- name: update apt cache
+  apt: update_cache=yes
+  when: ansible_os_family == "Debian"
+
+- name: install scsitools
+  package: name=scsitools state=latest
+  when: ansible_distribution == 'Ubuntu'
+
+- name: install sg3_utils
+  package: name=sg3_utils state=latest
+  when: ansible_distribution == 'Fedora'
+
+- shell: /sbin/rescan-scsi-bus
+  when: diskflag.stat.exists == false and ansible_distribution == 'Ubuntu'
+
+- shell: /bin/rescan-scsi-bus.sh
+  when: diskflag.stat.exists == false and ansible_distribution == 'Fedora'
+
+- shell: parted -s "{{ app_env.block_device_name }}" mklabel msdos
+  when: diskflag.stat.exists == false
+
+- shell: parted -s "{{ app_env.block_device_name }}" mkpart primary ext4 1049kb 100%
+  when: diskflag.stat.exists == false
+
+- lineinfile: dest=/tmp/diskflag line="disk is now partitioned!" create=yes
+
+- filesystem: fstype=ext4 dev="{{ app_env.block_device_name }}1"
+- mount: name=/storage src="{{ app_env.block_device_name }}1" fstype=ext4 state=mounted
+
+- shell: ip -4 -o addr | grep -v '127.0.0.1' | awk 'NR==1{print $4}' | cut -d '/' -f 1
+  register: local_ip
+
+- name: Create share directory for database
+  file: path=/storage/sqldatabase state=directory
+
+- name: Create share directory for wpcontent
+  file: path=/storage/wpcontent state=directory
+
+- name: Create directory for database mounting point
+  file: path=/var/lib/mysql state=directory
+
+- name: Install NFS server
+  package:
+    name=nfs-kernel-server
+    state=latest
+    update_cache=yes
+  when: ansible_distribution == 'Ubuntu'
+
+- name: Install NFS server
+  package: name=nfs-utils state=latest
+  when: ansible_distribution == 'Fedora'
+
+- name: Setup NFS database access
+  lineinfile:
+    dest: /etc/exports
+    line: "/storage/sqldatabase {{ local_ip.stdout }}/32(rw,sync,no_root_squash,no_subtree_check)"
+
+- name: Setup NFS webserver access
+  lineinfile:
+    dest: /etc/exports
+    line: "/storage/wpcontent {{ item[0].openstack[item[1]] }}/32(rw,sync,no_root_squash,no_subtree_check)"
+  with_nested:
+    - "{{ hostvars.cloud.webserver.results }}"
+    - ["private_v4", "public_v4"]
+  when: item[0].openstack[item[1]] != ''
+  no_log: True
+
+- name: nfs export
+  shell: exportfs -a
+
+- service: name=nfs-kernel-server state=restarted enabled=yes
+  when: ansible_distribution == 'Ubuntu'
+
+- service: name=nfs-server state=restarted enabled=yes
+  when: ansible_distribution == 'Fedora'
+
+- name: Mount the database data directory
+  mount:
+    name: /var/lib/mysql
+    src: "{{ local_ip.stdout }}:/storage/sqldatabase"
+    state: mounted
+    fstype: nfs
+
+- name: Install mysql and libraries
+  package:
+    name="{{ item }}"
+    state=latest
+    update_cache=yes
+  with_items:
+    - mysql-server
+    - python-mysqldb
+  when: ansible_distribution == 'Ubuntu'
+
+- name: Install mysql and libraries
+  package:
+    name="{{ item }}"
+    state=latest
+  with_items:
+    - mariadb-server
+    - python2-mysql
+  when: ansible_distribution == 'Fedora'
+
+- service: name=mysql state=stopped enabled=yes
+  when: ansible_distribution == 'Ubuntu'
+
+- service: name=mariadb state=stopped enabled=yes
+  when: ansible_distribution == 'Fedora'
+
+- stat: path=/etc/mysql/my.cnf
+  register: mysqlflag
+
+- name: Configure mysql 5.5
+  replace:
+    dest: "/etc/mysql/my.cnf"
+    regexp: '^bind-address[ \t]*=[ ]*127\.0\.0\.1'
+    replace: "bind-address  = {{ local_ip.stdout }}"
+    backup: no
+  when: mysqlflag.stat.exists == true
+
+- stat: path=/etc/mysql/mysql.conf.d/mysqld.cnf
+  register: mysqlflag
+
+- name: Configure mysql 5.6+
+  replace:
+    dest: "/etc/mysql/mysql.conf.d/mysqld.cnf"
+    regexp: '^bind-address[ \t]*=[ ]*127\.0\.0\.1'
+    replace: "bind-address  = {{ local_ip.stdout }}"
+    backup: no
+  when: mysqlflag.stat.exists == true
+
+- stat: path=/etc/my.cnf
+  register: mariadbflag
+
+- name: Configure MariaDB 10.1
+  ini_file:
+    dest=/etc/my.cnf
+    section=mysqld
+    option=bind-address
+    value={{ local_ip.stdout }}
+  when: mariadbflag.stat.exists == true
+
+- service: name=mysql state=started enabled=yes
+  when: ansible_distribution == 'Ubuntu'
+
+- service: name=mariadb state=started enabled=yes
+  when: ansible_distribution == 'Fedora'
+
+# Cycle present -> absent -> present so reruns start from an empty database
+- name: create wordpress database
+  mysql_db:
+    name: "decision2016"
+    state: "{{ item }}"
+  with_items:
+    - ['present', 'absent', 'present']
+
+- name: Add a user
+  mysql_user:
+    name: "{{ db_user }}"
+    password: "{{ db_pass }}"
+    host: "%"
+    priv: 'decision2016.*:ALL'
+    state: present

+79 -0  workloads/ansible/shade/lampstack/roles/destroy/tasks/main.yml

@@ -0,0 +1,79 @@
+---
+- name: Get start timestamp
+  set_fact: starttime="{{ ansible_date_time }}"
+
+- name: Delete key pairs
+  os_keypair:
+    state: "absent"
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: "lampstack"
+    public_key_file: "{{ app_env.public_key_file }}"
+
+- name: Delete database node
+  os_server:
+    state: "absent"
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: database
+    image: "{{ app_env.image_name }}"
+    key_name: "lampstack"
+    timeout: 200
+    network: "{{ app_env.private_net_name }}"
+    meta:
+      hostname: database
+
+- name: Delete balancer node
+  os_server:
+    state: "absent"
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: balancer
+    image: "{{ app_env.image_name }}"
+    key_name: "lampstack"
+    timeout: 200
+    network: "{{ app_env.private_net_name }}"
+    meta:
+      hostname: balancer
+
+- name: Delete web server nodes
+  os_server:
+    state: "absent"
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: apache-{{ item }}
+    image: "{{ app_env.image_name }}"
+    key_name: "lampstack"
+    timeout: 200
+    network: "{{ app_env.private_net_name }}"
+    meta:
+      hostname: apache-{{ item }}
+  with_sequence: count={{ app_env.stack_size - 2 }}
+
+- name: Delete security group
+  os_security_group:
+    state: absent
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    name: lampstack_sg
+    description: security group for lampstack
+
+- name: Delete cinder volume
+  os_volume:
+    state: absent
+    auth: "{{ auth }}"
+    region_name: "{{ app_env.region_name }}"
+    availability_zone: "{{ app_env.availability_zone }}"
+    validate_certs: "{{ app_env.validate_certs }}"
+    wait: yes
+    display_name: db_volume

+147 -0  workloads/ansible/shade/lampstack/roles/webserver/tasks/main.yml

@@ -0,0 +1,147 @@
+---
+- name: Apache and php 5
+  package:
+    name="{{ item }}"
+    state=latest
+    update_cache=yes
+  with_items:
+    - apache2
+    - php5
+    - php5-mysql
+    - nfs-common
+    - unzip
+    - ssmtp
+  when: ansible_distribution == 'Ubuntu'
+
+- name: Apache and php 5
+  package:
+    name="{{ item }}"
+    state=latest
+  with_items:
+    - httpd
+    - php
+    - php-mysqlnd
+    - nfs-utils
+    - unzip
+    - ssmtp
+  when: ansible_distribution == 'Fedora'
+
+- shell: rm -rf /var/www/html/index.html
+  args:
+    warn: no
+
+- name: Create share directory for wpcontent
+  file:
+    path: /var/www/html/wp-content/uploads
+    state: directory
+    owner: www-data
+    group: www-data
+  when: ansible_distribution == 'Ubuntu'
+
+- name: Create share directory for wpcontent
+  file:
+    path: /var/www/html/wp-content/uploads
+    state: directory
+    owner: apache
+    group: apache
+  when: ansible_distribution == 'Fedora'
+
+- name: Mount the directory using private IP
+  mount:
+    name: /var/www/html/wp-content/uploads
+    src: "{{ hostvars.cloud.database.openstack.private_v4 }}:/storage/wpcontent"
+    state: mounted
+    fstype: nfs
+  when: hostvars.cloud.database.openstack.private_v4 != ""
+
+- name: Mount the directory using public IP
+  mount:
+    name: /var/www/html/wp-content/uploads
+    src: "{{ hostvars.cloud.database.openstack.public_v4 }}:/storage/wpcontent"
+    state: mounted
+    fstype: nfs
+  when: hostvars.cloud.database.openstack.private_v4 == ""
+
+- lineinfile: dest=/etc/apache2/apache2.conf line="ServerName localhost"
+  when: ansible_distribution == 'Ubuntu'
+
+- lineinfile: dest=/etc/httpd/conf/httpd.conf line="ServerName localhost"
+  when: ansible_distribution == 'Fedora'
+
+- name: Download wordpress
+  get_url:
+    url: "{{ app_env.wp_latest | default('https://wordpress.org/latest.tar.gz') }}"
+    dest: /var/www/latest.tar.gz
+
+- name: Unpack latest wordpress
+  shell: tar -xf /var/www/latest.tar.gz -C /var/www/html --strip-components=1
+  args:
+    warn: no
+
+- name: Create wordpress configuration
+  shell: cp /var/www/html/wp-config-sample.php /var/www/html/wp-config.php
+  args:
+    warn: no
+
+- name: Configure wordpress database, username and password
+  replace:
+    dest: /var/www/html/wp-config.php
+    regexp: "'{{ item.then }}'"
+    replace: "'{{ item.now }}'"
+    backup: no
+  with_items:
+    - { then: 'database_name_here', now: 'decision2016' }
+    - { then: 'username_here', now: "{{ db_user }}" }
+    - { then: 'password_here', now: "{{ db_pass }}" }
+
+- name: Configure wordpress network access using private IP
+  replace:
+    dest: /var/www/html/wp-config.php
+    regexp: "'localhost'"
+    replace: "'{{ hostvars.cloud.database.openstack.private_v4 }}'"
+    backup: no
+  when: hostvars.cloud.database.openstack.private_v4 != ""
+
+- name: Configure wordpress network access using public IP
+  replace:
+    dest: /var/www/html/wp-config.php
+    regexp: "'localhost'"
+    replace: "'{{ hostvars.cloud.database.openstack.public_v4 }}'"
+    backup: no
+  when: hostvars.cloud.database.openstack.private_v4 == ""
+
+- name: Change ownership of wordpress
+  shell: chown -R www-data:www-data /var/www/html
+  args:
+    warn: no
+  when: ansible_distribution == 'Ubuntu'
+
+- name: Change ownership of wordpress
+  shell: chown -R apache:apache /var/www/html
+  args:
+    warn: no
+  when: ansible_distribution == 'Fedora'
+
+- service: name=apache2 state=restarted enabled=yes
+  when: ansible_distribution == 'Ubuntu'
+
+- service: name=httpd state=restarted enabled=yes
+  when: ansible_distribution == 'Fedora'
+
+- name: Install wordpress command line tool
+  get_url:
+    url: "{{ app_env.wp_cli | default('https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar') }}"
+    dest: /usr/local/bin/wp
+    mode: "a+x"
+    force: no
+
+- name: Download a wordpress theme
+  get_url:
+    url: "{{ app_env.wp_theme }}"
+    dest: /tmp/wptheme.zip
+    force: yes
+
+- name: Install the theme
+  shell: unzip -o -q /tmp/wptheme.zip -d /var/www/html/wp-content/themes
+  args:
+    warn: no

+73 -0  workloads/ansible/shade/lampstack/roles/wordpress/tasks/main.yml

@@ -0,0 +1,73 @@
+---
+- name: Install wordpress
+  command: >
+    wp core install --path=/var/www/html
+    --url="http://{{ hostvars.cloud.balancer.openstack.public_v4 }}"
+    --title='OpenStack Interop Challenge'
+    --admin_user=wpuser
+    --admin_password="{{ db_pass }}"
+    --admin_email='interop@openstack.org'
+  when: hostvars.cloud.balancer.openstack.public_v4 != ""
+
+- name: Install wordpress
+  command: >
+    wp core install --path=/var/www/html
+    --url="http://{{ hostvars.cloud.balancer.openstack.private_v4 }}"
+    --title='OpenStack Interop Challenge'
+    --admin_user=wpuser
+    --admin_password="{{ db_pass }}"
+    --admin_email='interop@openstack.org'
+  when: hostvars.cloud.balancer.openstack.public_v4 == ""
+
+- name: Activate wordpress theme
+  command: >
+    wp --path=/var/www/html theme activate
+    "{{ app_env.wp_theme.split('/').pop().split('.')[0] }}"
+
+- name: Download wordpress importer plugin
+  get_url:
+    url: "{{ app_env.wp_importer | default('http://downloads.wordpress.org/plugin/wordpress-importer.0.6.3.zip') }}"
+    dest: "/tmp/wordpress-importer.zip"
+    force: "yes"
+
+- name: Install wordpress importer plugin
+  command: >
+    sudo -u www-data wp --path=/var/www/html plugin install /tmp/wordpress-importer.zip --activate
+  args:
+    warn: "no"
+  when: ansible_distribution == 'Ubuntu'
+
+- name: Install wordpress importer plugin
+  command: >
+    sudo -u apache /usr/local/bin/wp --path=/var/www/html plugin install /tmp/wordpress-importer.zip
+  args:
+    warn: "no"
+  when: ansible_distribution == 'Fedora'
+
+- name: Enable wordpress importer plugin
+  command: >
+    sudo -u apache /usr/local/bin/wp --path=/var/www/html plugin activate wordpress-importer
+  args:
+    warn: "no"
+  when: ansible_distribution == 'Fedora'
+
+- name: Download wordpress sample posts
+  get_url:
+    url: "{{ app_env.wp_posts }}"
+    dest: "/tmp/wpposts.zip"
+    force: "yes"
+
+- name: Unpack the posts
+  command: unzip -o -q /tmp/wpposts.zip -d /tmp/posts
+  args:
+    warn: "no"
+
+- name: Import wordpress posts
+  command: >
+    sudo -u www-data wp --path=/var/www/html import /tmp/posts/*.xml --authors=create --quiet
+  when: ansible_distribution == 'Ubuntu'
+
+- name: Import wordpress posts
+  shell: >
+    sudo -u apache /usr/local/bin/wp --path=/var/www/html import /tmp/posts/*.xml --authors=create --quiet
+  when: ansible_distribution == 'Fedora'

+96 -0  workloads/ansible/shade/lampstack/site.yml

@@ -0,0 +1,96 @@
+---
+- name: provision servers
+  hosts: cloud
+  connection: local
+  vars_files:
+    - "vars/{{ env }}.yml"
+  roles:
+    - "{{ action }}"
+
+- name: Install python2 for ansible to work
+  hosts: dbservers, webservers, balancers, wps
+  gather_facts: false
+  user: "{{ app_env.ssh_user }}"
+  become: true
+  become_user: root
+  vars_files:
+    - "vars/{{ env }}.yml"
+  roles:
+    - common
+  environment: "{{ proxy_env }}"
+
+- name: setup database
+  hosts: dbservers
+  user: "{{ app_env.ssh_user }}"
+  become: true
+  become_user: root
+  vars_files:
+    - "vars/{{ env }}.yml"
+  roles:
+    - database
+  environment: "{{ proxy_env }}"
+
+- name: setup web servers
+  hosts: webservers
+  user: "{{ app_env.ssh_user }}"
+  become: true
+  become_user: root
+  vars_files:
+    - "vars/{{ env }}.yml"
+  roles:
+    - webserver
+  environment: "{{ proxy_env }}"
+
+- name: setup load balancer servers
+  hosts: balancers
+  user: "{{ app_env.ssh_user }}"
+  become: true
+  become_user: root
+  vars_files:
+    - "vars/{{ env }}.yml"
+  roles:
+    - balancer
+  environment: "{{ proxy_env }}"
+
+- name: install wordpress
+  hosts: wps
+  user: "{{ app_env.ssh_user }}"
+  vars_files:
+    - "vars/{{ env }}.yml"
+  roles:
+    - wordpress
+  environment: "{{ proxy_env }}"
+
+- name: clean up resources
+  hosts: cloud
+  connection: local
+  vars_files:
+    - "vars/{{ env }}.yml"
+  tasks:
+    - include: "roles/cleaner/tasks/{{ action }}.yml"
+  roles:
+    - cleaner
+  environment: "{{ proxy_env }}"
+
+- name: Inform the installer
+  hosts: cloud
+  connection: local
+  tasks:
+    - debug:
+        msg: >-
+          Access wordpress at
+          http://{{ hostvars.cloud.balancer.openstack.public_v4 }}.
+          wordpress userid is wpuser, password is {{ db_pass }}
+      when: hostvars.cloud.balancer is defined and
+            hostvars.cloud.balancer.openstack.public_v4 != ""
+    - debug:
+        msg: >-
+          Access wordpress at
+          http://{{ hostvars.cloud.balancer.openstack.private_v4 }}.
+          wordpress userid is wpuser, password is {{ db_pass }}
+      when: hostvars.cloud.balancer is defined and
+            hostvars.cloud.balancer.openstack.public_v4 == ""
+    - debug:
+        msg: >-
+          The workload test started at {{ hostvars.cloud.starttime.time }},
+          ended at {{ ansible_date_time.time }}

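As with the dockerswarm playbook, this site.yml keys everything off `env` and `action`; since the lampstack vars files are gitignored (`vars/*` above), the cloud name depends on whatever vars file you supply. One plausible run, with placeholders:

```
$ ansible-playbook -e "action=apply env=<cloud> password=<your-password>" site.yml
```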
+9 -0  workloads/terraform/shade/dockerswarm/.gitignore

@@ -0,0 +1,9 @@
+*.tfvars
+*.tfstate
+*.backup
+
+files/ssl/*.pem
+files/ssl/*.csr
+files/ssl/*.srl
+
+templates/discovery_url

+217 -0  workloads/terraform/shade/dockerswarm/README.md

@@ -0,0 +1,217 @@
+# Docker Swarm on Openstack with Terraform
+
+Provision a Docker Swarm cluster with [Terraform](https://www.terraform.io) on Openstack.
+
+## Status
+
+This will install a fully HA docker swarm cluster on an OpenStack cloud. It is tested on an OpenStack cloud provided by [BlueBox](https://www.blueboxcloud.com/) and should work on most modern installs of OpenStack that support the basic services.
+
+It also supports overlay networks using the `docker network` command; see the documentation below.
+
11
+
12
+## Requirements
13
+
14
+- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
15
+- Upload a CoreOS image to glance and remember the image name.
16
+
17
+## Terraform
18
+
19
+Terraform will be used to provision all of the OpenStack resources required to run Docker Swarm.   It is also used to deploy and provision the software requirements.
20
+
21
+### Prep
22
+
23
+#### Openstack Authentication
24
+
25
+Ensure your local ssh-agent is running and your ssh key has been added. This step is required by the terraform provisioner.
26
+
27
+```
28
+$ eval $(ssh-agent -s)
29
+$ ssh-add ~/.ssh/id_rsa
30
+```
31
+
32
+Ensure that you have your Openstack credentials loaded into Terraform environment variables. Likely via a command similar to:
33
+
34
+```
35
+$ source ~/.stackrc
36
+$ export TF_VAR_username=${OS_USERNAME} 
37
+$ export TF_VAR_password=${OS_PASSWORD}
38
+$ export TF_VAR_tenant=${OS_TENANT_NAME}
39
+$ export TF_VAR_auth_url=${OS_AUTH_URL}
40
+
41
+```
42
+
43
+#### General Openstack Settings
44
+
45
+By default security_groups will allow certain traffic from `0.0.0.0/0`.  If you want to restrict it to a specific network you can set the terraform variable `whitelist_network`.  I like to set it to only allow my current IP:
46
+
47
+```
48
+$ export TF_VAR_whitelist_network=$(curl -s icanhazip.com)/32
49
+```
50
+
51
+You also want to specify the name of your CoreOS `glance` image as well as flavor,networks, and keys.  Since these do not change often I like to add them to `terraform.tfvars`:
52
+
53
+```
54
+image_name = "coreos-alpha-884-0-0"
55
+network_name = "internal"
56
+net_device = "eth0"
57
+floatingip_pool = "external"
58
+flavor = "m1.medium"
59
+public_key_path = "~/.ssh/id_rsa.pub"
60
+```
61
+
62
+_Remove the `*.tfvars` line from `.gitignore` if you wish to save this file into source control_
63
+
64
+see `vars-openstack.tf` for the full list of variables you can set.
65
+
66
+#### Docker Swarm Settings
67
+
68
+You can alter the number of instances to be built and added to the cluster by modifying the `cluster_size` variable (default is 3).
69
+
70
+If you have a FQDN you plan at pointing at one of more of the swarm-manager hosts you can set it via the `fqdn` variable.
71
+
72
+Terraform will attempt to run `openssl` commands to create a CA and server/client certificates used to secure the docker/swarm endpoints.  If you do not have `openssl` on your local machine or want to re-use existing CA / Client certificates you can set the TF variable `generate_ssl` to `0`.  The certificates are created in `files/ssl`.
73
+
74
+see `vars-swarm.tf` for the full list of variables you can set.
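For example, a run that builds a five-node cluster behind a DNS name and reuses previously generated certificates might export these alongside the other `TF_VAR_*` settings (a sketch; the values are placeholders, the variable names come from `vars-swarm.tf` below):

```
$ export TF_VAR_cluster_size=5
$ export TF_VAR_fqdn=swarm.example.com
$ export TF_VAR_generate_ssl=0
```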
75
+
76
+#### CoreOS Settings
77
+
78
+Terraform will attempt to generate an etcd discovery token by running `curl` against the etcd discovery service.  If do not have `curl` or do not wish to generate a new discovery url you can set `generate_discovery_url` to `0` and create a file `templates/discovery_url` which contains the discovery url you wish to use.
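A minimal sketch of that manual path, assuming you already have a discovery token to reuse (the token below is a placeholder):

```
$ export TF_VAR_generate_discovery_url=0
$ echo 'https://discovery.etcd.io/<your-existing-token>' > templates/discovery_url
```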
79
+
80
+## Provision the Docker Swarm
81
+
82
+With all your TF vars set you should be able to run `terraform apply` but lets check with `terraform plan` that things look correct first:
83
+
84
+
85
+```
86
+$ terraform plan
87
+Refreshing Terraform state prior to plan...
88
+...
89
+...
90
++ template_file.discovery_url
91
+    rendered: "" => "<computed>"
92
+    template: "" => "templates/discovery_url"
93
+
94
+Plan: 14 to add, 0 to change, 0 to destroy.
95
+```
96
+
97
+With no errors showing here we can go ahead and run
98
+
99
+```
100
+$ terraform apply
101
+...
102
+...
103
+Apply complete! Resources: 14 added, 0 changed, 0 destroyed.
104
+
105
+The state of your infrastructure has been saved to the path
106
+below. This state is required to modify and destroy your
107
+infrastructure, so keep it safe. To inspect the complete state
108
+use the `terraform show` command.
109
+
110
+State path: terraform.tfstate
111
+
112
+Outputs:
113
+
114
+  swarm_cluster = 
115
+Environment Variables for accessing Docker Swarm via floating IP of first host:
116
+export DOCKER_HOST=tcp://x.x.x.x:2375
117
+export DOCKER_TLS_VERIFY=1
118
+export DOCKER_CERT_PATH=/home/bacon/development/personal/terraform-dockerswarm-coreos/files/ssl
119
+```
120
+
121
+_the final output uses the floating IP of the first Host. You could point at any of the hosts, or use a FQDN with round robin DNS pointing at all the hosts.  I avoided using neutron's load balancing service as it is not yet standard on OpenStack installs._
122
+
123
+## Next Steps
124
+
125
+### Check its up
126
+
127
+copy and paste the above output into your shell and attempt to run `docker info`:
128
+
129
+```
130
+$ export DOCKER_HOST=tcp://x.x.x.x:2375
131
+$ export DOCKER_TLS_VERIFY=1
132
+$ export DOCKER_CERT_PATH=/home/bacon/development/personal/terraform-dockerswarm-coreos/files/ssl
133
+
134
+$ docker info
135
+Containers: 6
136
+Images: 6
137
+Engine Version: 
138
+Role: primary
139
+Strategy: spread
140
+Filters: health, port, dependency, affinity, constraint
141
+Nodes: 3
142
+ swarm-testing-0.novalocal: 10.230.7.171:2376
143
+  └ Status: Healthy
144
+  └ Containers: 2
145
+  └ Reserved CPUs: 0 / 2
146
+  └ Reserved Memory: 0 B / 4.057 GiB
147
+  └ Labels: executiondriver=native-0.2, kernelversion=4.3.0-coreos, operatingsystem=CoreOS 884.0.0, storagedriver=overlay
148
+ swarm-testing-1.novalocal: 10.230.7.172:2376
149
+  └ Status: Healthy
150
+  └ Containers: 2
151
+  └ Reserved CPUs: 0 / 2
152
+  └ Reserved Memory: 0 B / 4.057 GiB
153
+  └ Labels: executiondriver=native-0.2, kernelversion=4.3.0-coreos, operatingsystem=CoreOS 884.0.0, storagedriver=overlay
154
+ swarm-testing-2.novalocal: 10.230.7.173:2376
155
+  └ Status: Healthy
156
+  └ Containers: 2
157
+  └ Reserved CPUs: 0 / 2
158
+  └ Reserved Memory: 0 B / 4.057 GiB
159
+  └ Labels: executiondriver=native-0.2, kernelversion=4.3.0-coreos, operatingsystem=CoreOS 884.0.0, storagedriver=overlay
160
+CPUs: 6
161
+Total Memory: 12.17 GiB
162
+Name: swarm-testing-0.novalocal
163
+```
164
+
165
+### Create an overlay network and run a container
166
+
167
+Create a network overlay called `my-net`
168
+
169
+```
170
+$ docker network create --driver overlay my-net
171
+ecfefdff938f506b09c5ea5b505ee8ace0ee7297d9d617d06b9bbaac5bf10fea
172
+$ docker network ls
173
+NETWORK ID          NAME                               DRIVER
174
+38338f0ec63a        swarm-testing-1.novalocal/host     host                
175
+c41436d91f29        swarm-testing-0.novalocal/none     null                
176
+e29c4451483f        swarm-testing-0.novalocal/bridge   bridge              
177
+400130ea105b        swarm-testing-2.novalocal/none     null                
178
+c8f15676b2a5        swarm-testing-2.novalocal/host     host                
179
+493127ad6577        swarm-testing-2.novalocal/bridge   bridge              
180
+74f862f34921        swarm-testing-1.novalocal/none     null                
181
+ecfefdff938f        my-net                             overlay             
182
+b09a38662087        swarm-testing-0.novalocal/host     host                
183
+cfbcfbd7de02        swarm-testing-1.novalocal/bridge   bridge              
184
+```
185
+
186
+Run a container on the network on a specific host, then try to access it from another:
187
+
188
+```
189
+$ docker run -itd --name=web --net=my-net --env="constraint:node==swarm-testing-1.novalocal" nginx
190
+53166b97adf2397403f00a2ffcdba635a7f08852c5fe4f452d6ca8c6f40bb80c
191
+$ docker run -it --rm --net=my-net --env="constraint:node==swarm-testing-2.novalocal" busybox wget -O- http://web
192
+Connecting to web (10.0.0.2:80)
193
+<!DOCTYPE html>
194
+<html>
195
+...
196
+...
197
+<p><em>Thank you for using nginx.</em></p>
198
+</body>
199
+</html>
200
+
201
+```
202
+
203
+## Cleanup
204
+
205
+Once you're done with it, don't forget to nuke the whole thing.
206
+
207
+```
208
+$ terraform destroy \
209
+Do you really want to destroy?
210
+  Terraform will delete all your managed infrastructure.
211
+  There is no undo. Only 'yes' will be accepted to confirm.
212
+
213
+  Enter a value: yes
214
+...
215
+...
216
+Apply complete! Resources: 0 added, 0 changed, 14 destroyed.
217
+```

+ 60 - 0  workloads/terraform/shade/dockerswarm/_securitygroups.tf

@@ -0,0 +1,60 @@
+resource "openstack_compute_secgroup_v2" "swarm_base" {
+  name = "${var.cluster_name}_swarm_base"
+  description = "${var.cluster_name} - Docker Swarm Security Group"
+  # SSH
+  rule {
+    ip_protocol = "tcp"
+    from_port = "22"
+    to_port = "22"
+    cidr = "${var.whitelist_network}"
+  }
+  # DOCKER SWARM
+  rule {
+    ip_protocol = "tcp"
+    from_port = "2375"
+    to_port = "2375"
+    cidr = "${var.whitelist_network}"
+  }
+  # DOCKER
+  rule {
+    ip_protocol = "tcp"
+    from_port = "2376"
+    to_port = "2376"
+    cidr = "${var.whitelist_network}"
+  }
+  # INTERNAL Communication only
+  rule {
+    ip_protocol = "icmp"
+    from_port = "-1"
+    to_port = "-1"
+    self = true
+  }
+  rule {
+    ip_protocol = "tcp"
+    from_port = "1"
+    to_port = "65535"
+    self = true
+  }
+  rule {
+    ip_protocol = "udp"
+    from_port = "1"
+    to_port = "65535"
+    self = true
+  }
+
+  # DANGER DANGER DANGER
+  # Uncomment these if you want to allow
+  # unrestricted inbound access
+  #rule {
+  #  ip_protocol = "tcp"
+  #  from_port = "1"
+  #  to_port = "65535"
+  #  cidr = "${var.whitelist_network}"
+  #}
+  #rule {
+  #  ip_protocol = "udp"
+  #  from_port = "1"
+  #  to_port = "65535"
+  #  cidr = "${var.whitelist_network}"
+  #}
+}

+ 12 - 0  workloads/terraform/shade/dockerswarm/files/ssl/generate-ssl.sh

@@ -0,0 +1,12 @@
+#!/bin/bash
+
+# Create the CA key and a self-signed CA certificate.
+openssl genrsa -out files/ssl/ca-key.pem 2048
+
+openssl req -x509 -new -nodes -key files/ssl/ca-key.pem -days 10000 -out files/ssl/ca.pem -subj '/CN=docker-CA'
+
+# Create the client key and CSR, then sign the certificate with the CA.
+openssl genrsa -out files/ssl/key.pem 2048
+
+openssl req -new -key files/ssl/key.pem -out files/ssl/cert.csr -subj '/CN=docker-client' -config files/ssl/openssl.cnf
+
+openssl x509 -req -in files/ssl/cert.csr -CA files/ssl/ca.pem -CAkey files/ssl/ca-key.pem \
+  -CAcreateserial -out files/ssl/cert.pem -days 365 -extensions v3_req -extfile files/ssl/openssl.cnf

+ 8 - 0  workloads/terraform/shade/dockerswarm/files/ssl/openssl.cnf

@@ -0,0 +1,8 @@
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = clientAuth, serverAuth

+ 109 - 0  workloads/terraform/shade/dockerswarm/swarm.tf

@@ -0,0 +1,109 @@
+resource "null_resource" "discovery_url_template" {
+    count = "${var.generate_discovery_url}"
+    provisioner "local-exec" {
+        command = "curl -s 'https://discovery.etcd.io/new?size=${var.cluster_size}' > templates/discovery_url"
+    }
+}
+
+resource "null_resource" "generate_ssl" {
+    count = "${var.generate_ssl}"
+    provisioner "local-exec" {
+        command = "bash files/ssl/generate-ssl.sh"
+    }
+}
+
+resource "template_file" "discovery_url" {
+    template = "templates/discovery_url"
+    depends_on = [
+        "null_resource.discovery_url_template"
+    ]
+}
+
+resource "template_file" "cloud_init" {
+    template = "templates/cloud-init"
+    vars {
+        cluster_token = "${var.cluster_name}"
+        discovery_url = "${template_file.discovery_url.rendered}"
+        swarm_version = "${var.swarm_version}"
+    }
+}
+
+resource "template_file" "10_docker_service" {
+    template = "templates/10-docker-service.conf"
+    vars {
+        net_device = "${ var.net_device }"
+    }
+}
+
+resource "openstack_networking_floatingip_v2" "coreos" {
+    count = "${var.cluster_size}"
+    pool = "${var.floatingip_pool}"
+}
+
+resource "openstack_compute_keypair_v2" "coreos" {
+    name = "swarm-${var.cluster_name}"
+    public_key = "${file(var.public_key_path)}"
+}
+
+resource "openstack_compute_instance_v2" "coreos" {
+    name = "swarm-${var.cluster_name}-${count.index}"
+    count = "${var.cluster_size}"
+    image_name = "${var.image_name}"
+    flavor_name = "${var.flavor}"
+    key_pair = "${openstack_compute_keypair_v2.coreos.name}"
+    network {
+        name = "${var.network_name}"
+    }
+    security_groups = [
+        "${openstack_compute_secgroup_v2.swarm_base.name}"
+    ]
+    floating_ip = "${element(openstack_networking_floatingip_v2.coreos.*.address, count.index)}"
+    user_data = "${template_file.cloud_init.rendered}"
+    provisioner "file" {
+        source = "files"
+        destination = "/tmp/files"
+        connection {
+            user = "core"
+        }
+    }
+    provisioner "remote-exec" {
+        inline = [
+            # Create TLS certs
+            "mkdir -p /home/core/.docker",
+            "cp /tmp/files/ssl/ca.pem /home/core/.docker/",
+            "cp /tmp/files/ssl/cert.pem /home/core/.docker/",
+            "cp /tmp/files/ssl/key.pem /home/core/.docker/",
+            "echo 'subjectAltName = @alt_names' >> /tmp/files/ssl/openssl.cnf",
+            "echo '[alt_names]' >> /tmp/files/ssl/openssl.cnf",
+            "echo 'IP.1 = ${self.network.0.fixed_ip_v4}' >> /tmp/files/ssl/openssl.cnf",
+            "echo 'IP.2 = ${element(openstack_networking_floatingip_v2.coreos.*.address, count.index)}' >> /tmp/files/ssl/openssl.cnf",
+            "echo 'DNS.1 = ${var.fqdn}' >> /tmp/files/ssl/openssl.cnf",
+            "echo 'DNS.2 = ${element(openstack_networking_floatingip_v2.coreos.*.address, count.index)}.xip.io' >> /tmp/files/ssl/openssl.cnf",
+            "openssl req -new -key /tmp/files/ssl/key.pem -out /tmp/files/ssl/cert.csr -subj '/CN=docker-client' -config /tmp/files/ssl/openssl.cnf",
+            "openssl x509 -req -in /tmp/files/ssl/cert.csr -CA /tmp/files/ssl/ca.pem -CAkey /tmp/files/ssl/ca-key.pem \\",
+            "-CAcreateserial -out /tmp/files/ssl/cert.pem -days 365 -extensions v3_req -extfile /tmp/files/ssl/openssl.cnf",
+            "sudo mkdir -p /etc/docker/ssl",
+            "sudo cp /tmp/files/ssl/ca.pem /etc/docker/ssl/",
+            "sudo cp /tmp/files/ssl/cert.pem /etc/docker/ssl/",
+            "sudo cp /tmp/files/ssl/key.pem /etc/docker/ssl/",
+            # Apply localized settings to services
+            "sudo mkdir -p /etc/systemd/system/{docker,swarm-agent,swarm-manager}.service.d",
+            "cat <<'EOF' > /tmp/10-docker-service.conf\n${template_file.10_docker_service.rendered}\nEOF",
+            "sudo mv /tmp/10-docker-service.conf /etc/systemd/system/docker.service.d/",
+            "sudo systemctl daemon-reload",
+            "sudo systemctl restart docker.service",
+            "sudo systemctl start swarm-agent.service",
+            "sudo systemctl start swarm-manager.service",
+        ]
+        connection {
+            user = "core"
+        }
+    }
+    depends_on = [
+        "template_file.cloud_init"
+    ]
+}
+
+output "swarm_cluster" {
+    value = "\nEnvironment Variables for accessing Docker Swarm via floating IP of first host:\nexport DOCKER_HOST=tcp://${openstack_networking_floatingip_v2.coreos.0.address}:2375\nexport DOCKER_TLS_VERIFY=1\nexport DOCKER_CERT_PATH=${path.module}/files/ssl"
+}

+ 2 - 0  workloads/terraform/shade/dockerswarm/templates/10-docker-service.conf

@@ -0,0 +1,2 @@
+[Service]
+Environment="DOCKER_OPTS=-H=0.0.0.0:2376 -H unix:///var/run/docker.sock --tlsverify --tlscacert=/etc/docker/ssl/ca.pem --tlscert=/etc/docker/ssl/cert.pem --tlskey=/etc/docker/ssl/key.pem --cluster-advertise ${net_device}:2376 --cluster-store etcd://127.0.0.1:2379/docker"

+ 48 - 0  workloads/terraform/shade/dockerswarm/templates/cloud-init

@@ -0,0 +1,48 @@
+#cloud-config
+
+coreos:
+  units:
+    - name: etcd.service
+      mask: true
+    - name: etcd2.service
+      command: start
+    - name: docker.service
+      command: start
+    - name: swarm-agent.service
+      content: |
+        [Unit]
+        Description=swarm agent
+        Requires=docker.service
+        After=docker.service
+
+        [Service]
+        EnvironmentFile=/etc/environment
+        TimeoutStartSec=20m
+        ExecStartPre=/usr/bin/docker pull swarm:${swarm_version}
+        ExecStartPre=-/usr/bin/docker rm -f swarm-agent
+        ExecStart=/bin/sh -c "/usr/bin/docker run --rm --name swarm-agent swarm:${swarm_version} join --addr=$COREOS_PRIVATE_IPV4:2376 etcd://$COREOS_PRIVATE_IPV4:2379/docker"
+        ExecStop=/usr/bin/docker stop swarm-agent
+    - name: swarm-manager.service
+      content: |
+        [Unit]
+        Description=swarm manager
+        Requires=docker.service
+        After=docker.service
+
+        [Service]
+        EnvironmentFile=/etc/environment
+        TimeoutStartSec=20m
+        ExecStartPre=/usr/bin/docker pull swarm:${swarm_version}
+        ExecStartPre=-/usr/bin/docker rm -f swarm-manager
+        ExecStart=/bin/sh -c "/usr/bin/docker run --rm --name swarm-manager -v /etc/docker/ssl:/etc/docker/ssl --net=host swarm:${swarm_version} manage --tlsverify --tlscacert=/etc/docker/ssl/ca.pem --tlscert=/etc/docker/ssl/cert.pem --tlskey=/etc/docker/ssl/key.pem etcd://$COREOS_PRIVATE_IPV4:2379/docker"
+        ExecStop=/usr/bin/docker stop swarm-manager
+  etcd2:
+    discovery: ${discovery_url}
+    advertise-client-urls: http://$private_ipv4:2379
+    initial-advertise-peer-urls: http://$private_ipv4:2380
+    listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
+    listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001
+    data-dir: /var/lib/etcd2
+    initial-cluster-token: ${cluster_token}
+  update:
+    reboot-strategy: "off"

+ 4 - 0  workloads/terraform/shade/dockerswarm/vars-coreos.tf

@@ -0,0 +1,4 @@
+variable "generate_discovery_url" {
+    default = 1
+    description = "set to 0 if you do not want to autogenerate the discovery url"
+}

+ 46 - 0  workloads/terraform/shade/dockerswarm/vars-openstack.tf

@@ -0,0 +1,46 @@
+variable "image_name" {
+    default = "coreos"
+}
+
+variable "network_name" {
+    default = "internal"
+}
+
+variable "floatingip_pool" {
+    default = "external"
+}
+
+variable "net_device" {
+  description = "Network interface device in the system"
+  default = "eth0"
+}
+
+variable "flavor" {
+    default = "m1.medium"
+}
+
+variable "username" {
+  description = "Your openstack username"
+}
+
+variable "password" {
+  description = "Your openstack password"
+}
+
+variable "tenant" {
+  description = "Your openstack tenant/project"
+}
+
+variable "auth_url" {
+  description = "Your openstack auth URL"
+}
+
+variable "public_key_path" {
+  description = "The path of the ssh pub key"
+  default = "~/.ssh/id_rsa.pub"
+}
+
+variable "whitelist_network" {
+  description = "network to allow connectivity from"
+  default = "0.0.0.0/0"
+}

+ 21 - 0  workloads/terraform/shade/dockerswarm/vars-swarm.tf

@@ -0,0 +1,21 @@
+variable "cluster_size" {
+    default = 3
+}
+
+variable "cluster_name" {
+    default = "testing"
+}
+
+variable "swarm_version" {
+    default = "latest"
+}
+
+variable "generate_ssl" {
+  description = "set to 0 if you want to reuse ssl certs"
+  default = 1
+}
+
+variable "fqdn" {
+  description = "Fully Qualified DNS to add to TLS certs"
+  default = "swarm.example.com"
+}

+ 162 - 0  workloads/terraform/shade/lampstack/README.md

@@ -0,0 +1,162 @@
+# LAMPstack Terraform deployments
+
+## Status
+
+This will install a 3-node LAMP stack by default. Two nodes will be used as
+web servers and one node will be used as the database node.
+
+Once the script finishes, a set of URLs will be displayed at the end for
+verification.
+
+## Requirements
+
+- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
+- Make sure there is an Ubuntu image available on your cloud.
+
+## Terraform
+
+Terraform will be used to provision all of the OpenStack resources required by
+the LAMP stack and all required software.
+
+### Prep
+
+#### Deal with ssh keys for OpenStack Authentication
+
+Ensure your local ssh-agent is running and your ssh key has been added.
+This step is required by the terraform provisioner. Otherwise, you will have
+to use a key pair without a passphrase.
+
+```
+eval $(ssh-agent -s)
+ssh-add ~/.ssh/id_rsa
+```
+
+#### General OpenStack Settings
+
+The Terraform OpenStack provider requires a few environment variables to be set
+before you can run the scripts. In general, you can simply export the OS
+environment variables like the following:
+
+```
+export OS_REGION_NAME=RegionOne
+export OS_PROJECT_NAME=demo
+export OS_IDENTITY_API_VERSION=3
+export OS_PASSWORD=secret
+export OS_DOMAIN_NAME=default
+export OS_USERNAME=demo
+export OS_TENANT_NAME=demo
+export OS_PROJECT_DOMAIN_NAME=default
+export OS_AUTH_URL=http://9.30.217.9:5000/v3
+```
+
+The values of these variables should be provided by your cloud provider. When
+using the keystone 2.0 API, you will not need to set up the domain name.
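For reference, a minimal keystone v2.0 environment is a shorter list (a sketch; assuming your cloud still exposes a `/v2.0` endpoint, with placeholder values):

```
export OS_REGION_NAME=RegionOne
export OS_TENANT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=secret
export OS_AUTH_URL=http://9.30.217.9:5000/v2.0
```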
+
+#### LAMP Stack Settings
+
+You most likely will need to specify the name of your Ubuntu `glance` image,
+flavor, LAMP stack size (how many nodes in the stack), private and public
+network names, and keys. Here is the list of the default values defined in the
+file `vars_lampstack.tf`:
+
+```
+image_name = "ubuntu-14.04"
+private_net = "internal"
+public_net = "external"
+flavor = "m1.medium"
+public_key_path = "~/.ssh/id_rsa.pub"
+stack_size = 3
+db_username = "dbuser"
+db_password = "dbpass"
+```
+
+You can change the settings in the file or you can simply set them in
+environment variables like the following:
+
+```
+export TF_VAR_image_name="trusty 1404"
+export TF_VAR_private_net=Bluebox
+export TF_VAR_public_net=internet
+export TF_VAR_flavor="m1.small"
+export TF_VAR_public_key_path="~/.ssh/id_rsa.pub"
+export TF_VAR_stack_size=5
+export TF_VAR_db_username=george
+export TF_VAR_db_password=secret
+```
+
+## Provision the LAMP stack
+
+With all your OpenStack and TF vars set, you should be able to run
+`terraform apply`. But let's check with `terraform plan` that things look
+correct first:
+
+```
+$ terraform plan
+Refreshing Terraform state prior to plan...
+...
+...
++ openstack_networking_floatingip_v2.database
+    address:   "<computed>"
+    fixed_ip:  "<computed>"
+    pool:      "internet"
+    port_id:   "<computed>"
+    region:    "RegionOne"
+    tenant_id: "<computed>"
+
+Plan: 8 to add, 0 to change, 0 to destroy.
+```
+
+If there are no errors showing, we can go ahead and run
+
+```
+$ terraform apply
+...
+...
+Outputs:
+
+lampstack = Success!!!
+
+Access service at the following URLs:
+http://99.30.217.44
+http://99.30.217.42
+```
+
+The above results show that the LAMP stack provisioned correctly and that the
+LAMP application is up and running and can be accessed at either of the URLs.
+
+## Next Steps
+
+### Check it's up
+
+Use the access URLs to access the application. Since there are multiple web
+server nodes, any of the URLs should work.
+
+```
+$ curl http://99.30.217.44
+$ curl http://99.30.217.42
+```
+
+## Cleanup
+
+Once you're done with it, don't forget to nuke the whole thing.
+
+```
+$ terraform destroy
+Do you really want to destroy?
+  Terraform will delete all your managed infrastructure.
+  There is no undo. Only 'yes' will be accepted to confirm.
+
+  Enter a value: yes
+...
+...
+Apply complete! Resources: 0 added, 0 changed, 8 destroyed.
+```

+ 108 - 0  workloads/terraform/shade/lampstack/lampstack.tf

@@ -0,0 +1,108 @@
+# The terraform to stand up the LAMP stack
+
+resource "openstack_compute_keypair_v2" "lampstack_key" {
+  name = "lampstack_key"
+  public_key = "${file(var.public_key_path)}"
+}
+
+resource "openstack_compute_secgroup_v2" "lampstack_sg" {
+  name = "lampstack_sg"
+  description = "lampstack security group"
+  rule {
+    from_port = 22
+    to_port = 22
+    ip_protocol = "tcp"
+    cidr = "0.0.0.0/0"
+  }
+  rule {
+    from_port = 80
+    to_port = 80
+    ip_protocol = "tcp"
+    cidr = "0.0.0.0/0"
+  }
+  rule {
+    from_port = 3306
+    to_port = 3306
+    ip_protocol = "tcp"
+    cidr = "0.0.0.0/0"
+  }
+}
+
+resource "openstack_networking_floatingip_v2" "database" {
+  count = 1
+  pool = "${var.public_net}"
+}
+
+resource "openstack_compute_instance_v2" "database" {
+  name = "database"
+  image_name = "${var.image_name}"
+  flavor_name = "${var.flavor}"
+  key_pair = "lampstack_key"
+  security_groups = ["${openstack_compute_secgroup_v2.lampstack_sg.name}"]
+  network {
+    name = "${var.private_net}"
+  }
+  floating_ip = "${openstack_networking_floatingip_v2.database.0.address}"
+
+  connection {
+    user = "ubuntu"
+    timeout = "30s"
+  }
+
+  provisioner "file" {
+    source = "onvm"
+    destination = "/tmp/onvm"
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "echo ${self.network.0.fixed_ip_v4} database > /tmp/onvm/hostname",
+      "chmod +x /tmp/onvm/scripts/*",
+      "/tmp/onvm/scripts/installdb.sh ${var.db_username} ${var.db_password}"
+    ]
+  }
+}
+
+resource "openstack_networking_floatingip_v2" "apache" {
+  count = "${var.stack_size - 1}"
+  pool = "${var.public_net}"
+}
+
+resource "openstack_compute_instance_v2" "apache" {
+  name = "apache_${count.index}"
+  count = "${var.stack_size - 1}"
+  image_name = "${var.image_name}"
+  flavor_name = "${var.flavor}"
+  key_pair = "lampstack_key"
+  security_groups = ["${openstack_compute_secgroup_v2.lampstack_sg.name}"]
+  network {
+    name = "${var.private_net}"
+  }
+  floating_ip = "${element(openstack_networking_floatingip_v2.apache.*.address, count.index)}"
+
+  depends_on = [ "openstack_compute_instance_v2.database" ]
+
+  connection {
+    user = "ubuntu"
+    timeout = "30s"
+  }
+
+  provisioner "file" {
+    source = "onvm"
+    destination = "/tmp/onvm"
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "echo ${openstack_compute_instance_v2.database.network.0.fixed_ip_v4} database > /tmp/onvm/hostname",
+      "echo ${self.network.0.fixed_ip_v4} apache-${count.index} >> /tmp/onvm/hostname",
+      "chmod +x /tmp/onvm/scripts/*",
+      "/tmp/onvm/scripts/installapache.sh ${var.db_username} ${var.db_password}"
+    ]
+  }
+}
+
+output "lampstack" {
+    value = "Success!!!\n\nAccess service at the following URLs:\nhttp://${join("\nhttp://",openstack_compute_instance_v2.apache.*.floating_ip)}"
+}

+ 15 - 0  workloads/terraform/shade/lampstack/onvm/app/index.php

@@ -0,0 +1,15 @@
+<?php
+// TTTFFFdbuser / TTTFFFdbpass are placeholders; installapache.sh
+// substitutes the real credentials at provision time.
+$servername = "database";
+$username = "TTTFFFdbuser";
+$password = "TTTFFFdbpass";
+$dbname = "decision2016";
+
+try {
+  $conn = new PDO("mysql:host=$servername;dbname=$dbname", $username, $password);
+  $conn->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);
+  echo "Connected successfully";
+}
+catch(PDOException $e) {
+  echo "Connection failed: " . $e->getMessage();
+}
+?>

+ 19 - 0  workloads/terraform/shade/lampstack/onvm/scripts/installapache.sh

@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# $1 db_username
+# $2 db_password
+
+cat /tmp/onvm/hostname | sudo tee -a /etc/hosts >/dev/null
+echo 'Installing apache2 and php 5...'
+sudo apt-get -qqy update
+sudo apt-get -qqy install apache2 php5 php5-mysql
+echo 'ServerName localhost' | sudo tee -a /etc/apache2/apache2.conf >/dev/null
+
+sudo mv /tmp/onvm/app/* /var/www/html
+sudo chown -R www-data:www-data /var/www/html
+sudo rm -r -f /var/www/html/index.html
+
+# Replace the TTTFFF placeholders in index.php with the real credentials.
+cmdStr=$(echo "s/TTTFFFdbuser/$1/g")
+sudo sed -i -e "${cmdStr}" /var/www/html/index.php
+
+cmdStr=$(echo "s/TTTFFFdbpass/$2/g")
+sudo sed -i -e "${cmdStr}" /var/www/html/index.php

+ 21 - 0  workloads/terraform/shade/lampstack/onvm/scripts/installdb.sh

@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# $1 db_username
+# $2 db_password
+
+cat /tmp/onvm/hostname | sudo tee -a /etc/hosts >/dev/null
+# Generate a random root password for the local MySQL install.
+pw=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 10 | head -n 1)
+sudo apt-get -qqy update
+sudo debconf-set-selections <<< "mysql-server mysql-server/root_password password $pw"
+sudo debconf-set-selections <<< "mysql-server mysql-server/root_password_again password $pw"
+sudo apt-get -qqy install mysql-server
+echo 'Creating a database...'
+
+mysql -uroot -p$pw -e "CREATE DATABASE decision2016;"
+mysql -uroot -p$pw -e "use decision2016; GRANT ALL PRIVILEGES ON decision2016.* TO '$1'@'localhost' IDENTIFIED BY '$2';"
+mysql -uroot -p$pw -e "use decision2016; GRANT ALL PRIVILEGES ON decision2016.* TO '$1'@'%' IDENTIFIED BY '$2';"
+mysql -uroot -p$pw -e "FLUSH PRIVILEGES;"
+
+# Bind MySQL to the "database" hostname instead of loopback so web nodes can connect.
+cmdStr=$(echo 's/127.0.0.1/database/g')
+sudo sed -i -e "${cmdStr}" /etc/mysql/my.cnf
+
+sudo service mysql restart

+ 34 - 0  workloads/terraform/shade/lampstack/vars_lampstack.tf

@@ -0,0 +1,34 @@
+variable "image_name" {
+    default = "ubuntu-14.04"
+}
+
+variable "private_net" {
+    default = "internal"
+}
+
+variable "public_net" {
+    default = "external"
+}
+
+variable "flavor" {
+    default = "m1.medium"
+}
+
+variable "public_key_path" {
+  description = "The path of the ssh pub key"
+  default = "~/.ssh/id_rsa.pub"
+}
+
+variable "stack_size" {
+  default = 3
+}
+
+variable "db_username" {
+  description = "The lamp stack database user for remote access"
+  default = "dbuser"
+}
+
+variable "db_password" {
+  description = "The lamp stack database user password for remote access"
+  default = "dbpass"
+}
