Multinode provision role

Role to provision subnodes for reproducing multinode jobs

Change-Id: I3d5ae3ba04dd5d0109762d1d4ef778160751a2cc
This commit is contained in:
Sagi Shnaidman 2017-11-01 00:03:08 +02:00
parent a2668a3277
commit 95a322fee1
9 changed files with 526 additions and 0 deletions

@ -0,0 +1,6 @@
---
# Entry-point playbook: provisions cloud subnodes for multinode CI jobs
# by applying the "multinodes" role from the local machine.
- name: Multinode
  hosts: localhost
  # No local facts are needed; the role only drives the cloud API.
  # "false" instead of "no": canonical YAML boolean (yamllint `truthy`).
  gather_facts: false
  roles:
    - role: multinodes

@ -0,0 +1,69 @@
multinodes
==========
Provision openstack cloud with subnodes for multinode jobs
Requirements
------------
This Ansible role sets up a configurable number of subnodes for reproducing
CI jobs on them.
Role Variables
--------------
* `os_region`: OS region, by default is taken from environment variable OS_REGION_NAME
* `os_tenant`: OS tenant ID, by default is taken from environment variables OS_TENANT_ID
* `os_identity_api_version`: OS identity API version, by default is taken from environment variable OS_IDENTITY_API_VERSION
* `os_password`: OS password, by default is taken from environment variable OS_PASSWORD
* `os_auth_url`: OS auth URL, by default is taken from environment variable OS_AUTH_URL
* `os_username`: OS username, by default is taken from environment variable OS_USERNAME
* `os_tenant_name`: OS tenant name, by default is taken from environment variable OS_TENANT_NAME
* `os_endpoint_type`: OS endpoint type, by default is taken from environment variable OS_ENDPOINT_TYPE
* `prefix`: (default: '') prefix for stack and hosts names
* `remove_previous_stack`: bool, (default: true) whether to remove previous stack with same name
* `stack_log`: log file for this role
* `key_name`: (default: multinode_admins) keypair name to inject into subnodes, if not present will be
created from "key_location"
* `private_key_location`: (default: ~/.ssh/id_rsa) users private key
* `key_location`: (default: ~/.ssh/id_rsa.pub) users public key, used for creating keypair
* `stack_name`: (default: multinode_stack) name of Heat stack to create
* `public_net_name`: (default: 38.145.32.0/22) name of public network on the cloud
* `private_net_name`: (default: private_net) name of private network in stack
* `private_net_base`: (default: 192.168.54) base IP range for private network
* `private_net_cidr`: (default: 192.168.54.0/24) CIDR for private network
* `private_net_gateway`: (default: 192.168.54.1) gateway address for private network
* `private_net_pool_start`: (default: 192.168.54.5) DHCP pool start for private network
* `private_net_pool_end`: (default: 192.168.54.150) DHCP pool end for private network
* `subnode_count`: (default: 2) how many nodes to create
* `subnode_flavor`: (default: m1.large) flavor for nodes
* `subnode_groups`: (default: subnodes) ansible host group names for subnodes
* `image_id`: (default: last image from the cloud) Image ID or name in the cloud, by default
it's image with property "latest" in public images in the cloud
Dependencies
------------
No dependencies
Example Playbook
----------------
---
- name: Multinode
hosts: localhost
gather_facts: no
roles:
- role: multinodes
For deleting stack it's possible to run playbook with "--tags delete".
License
-------
Apache 2.0
Author Information
------------------
RDO-CI Team

@ -0,0 +1,32 @@
---
# Defaults for the "multinodes" role. OS_* credentials fall back to the
# environment so a sourced openrc file works out of the box.
#
# NOTE: lookup('env', ...) returns an empty string (not "undefined") when the
# variable is unset, so default() needs its second argument set to true,
# otherwise the fallback value is never applied.
os_region: "{{ lookup('env', 'OS_REGION_NAME')|default('regionOne', true) }}"
os_tenant: "{{ lookup('env', 'OS_TENANT_ID') }}"
os_identity_api_version: "{{ lookup('env', 'OS_IDENTITY_API_VERSION')|default(2, true) }}"
os_password: "{{ lookup('env', 'OS_PASSWORD') }}"
os_auth_url: "{{ lookup('env', 'OS_AUTH_URL')|default('https://phx2.cloud.rdoproject.org:13000/v2.0', true) }}"
os_username: "{{ lookup('env', 'OS_USERNAME') }}"
os_tenant_name: "{{ lookup('env', 'OS_TENANT_NAME') }}"
os_endpoint_type: "{{ lookup('env', 'OS_ENDPOINT_TYPE')|default('publicURL', true) }}"

# Prefix for stack and host names (lets several stacks coexist in one tenant).
prefix: ''
# Whether to delete an existing stack with the same name before creating.
remove_previous_stack: true

local_working_dir: "{{ lookup('env', 'HOME') }}/.quickstart"
stack_log: "{{ local_working_dir }}/{{ prefix }}provision_multinode_stack.log"

# Keypair injected into subnodes; created from key_location when missing.
key_name: multinode_admins
private_key_location: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa"
key_location: "{{ private_key_location }}.pub"
# image_id:  # optional pin; otherwise the image with property
#            # latest=centos-7-latest is discovered at runtime

stack_name: "{{ prefix }}multinode_stack"
# Name of the public network on the cloud (quoted: the value looks like a CIDR).
public_net_name: "38.145.32.0/22"
private_net_name: private_net
# Base /24 prefix from which CIDR, gateway and DHCP pool below are derived.
private_net_base: "192.168.54"
private_net_cidr: "{{ private_net_base }}.0/24"
private_net_gateway: "{{ private_net_base }}.1"
private_net_pool_start: "{{ private_net_base }}.5"
private_net_pool_end: "{{ private_net_base }}.150"

subnode_count: 2
subnode_flavor: m1.large
# Inventory group name(s) the provisioned subnodes are added to.
subnode_groups:
  - subnodes
# CI user created on blank images (see the user-setup block in tasks/main.yml).
nodepool_user: zuul

@ -0,0 +1,30 @@
# Tear down an existing multinode Heat stack and wait for it to disappear.
# All CLI output is appended to stack_log. The loop polls "stack show" up to
# 10 times with a 30 s pause; a FAILED delete is reported via a marker string
# on stdout (the script itself always exits 0 on that path).
- name: Delete stack
  shell: >
    echo "Executing 'openstack stack delete {{ stack_name }} --yes'" >> {{ stack_log }};
    openstack stack delete {{ stack_name }} --yes &>> {{ stack_log }} && {
    export COUNTER=0;
    while openstack stack show {{ stack_name }} &>> {{ stack_log }} ; do
    if [ $COUNTER -lt 10 ]; then
    sleep 30;
    if openstack stack show {{ stack_name }} | grep FAILED ; then
    echo "Failed deleting OVB stack" | tee -a {{ stack_log }};
    openstack stack show {{ stack_name }} &>> {{ stack_log }};
    openstack stack failures list --long {{ stack_name }} &>> {{ stack_log }} || echo 'Failed to list stack failures' >> {{ stack_log }};
    exit 0;
    fi;
    COUNTER=$((COUNTER+1));
    fi;
    done; } || echo "No stack {{ stack_name }} present" >> {{ stack_log }}
  environment:
    OS_USERNAME: "{{ os_username }}"
    OS_PASSWORD: "{{ os_password }}"
    OS_TENANT_NAME: "{{ os_tenant_name }}"
    OS_AUTH_URL: "{{ os_auth_url }}"
  args:
    # &>> redirection requires bash, not sh.
    executable: /bin/bash
  register: delete_stack

# Turn the marker string emitted above into a hard task failure.
- name: Fail if stack was not deleted properly
  fail:
    msg: 'Deletion of stack {{ stack_name }} failed, log: {{ stack_log }}'
  when: '"Failed deleting OVB stack" in delete_stack.stdout'

@ -0,0 +1,202 @@
---
# Sanity checks: credentials and the openstack CLI must be available before
# anything runs. Tagged "delete" so they also guard `--tags delete` runs.
- name: Fail if no auth is provided
  fail:
    msg: 'Please source credential rc file from your tenant on cloud'
  when: not os_username or not os_password or not os_tenant_name
  tags:
    - delete

- name: Check if openstack client is installed
  shell: command -v openstack 2>/dev/null
  register: openstack_client_installed
  # Pure probe: never reports "changed", and a non-zero rc must not abort the
  # play here — the next task inspects rc and fails with a clear message.
  changed_when: false
  failed_when: false
  tags:
    - delete

- name: Fail if no openstack client is installed
  fail:
    msg: 'Please install openstack client as it is required for running this role'
  when: openstack_client_installed.rc != 0
  tags:
    - delete
# Working directory for rendered templates and the role log.
- name: Ensure local working dir exists
  file:
    dest: "{{ local_working_dir }}"
    state: directory
  tags:
    - delete

# Truncate the role log so every run starts with a fresh file.
- name: Clean log for this role
  copy:
    dest: "{{ stack_log }}"
    content: ''
  tags:
    - delete

# Static include: the "delete" tag propagates to every task in the included
# file, which is what makes `--tags delete` run only the teardown path.
- include: delete_stack.yml
  when: remove_previous_stack|bool
  tags:
    - delete
# Resolve the image only when the caller did not pin image_id: pick the ID of
# the image published with property latest=centos-7-latest.
# NOTE(review): these shell tasks have no environment: block — they rely on
# OS_* being exported in the controller's shell; confirm that is intended.
- name: Image to use
  block:
    - name: find image
      shell: openstack image list --long -f value -c ID --property latest=centos-7-latest
      register: image_id_latest
    - name: Set fact for image
      set_fact:
        image_id: "{{ image_id_latest.stdout }}"
  when: image_id is not defined

# Upload the public key as a keypair unless one with that name already exists
# ("keypair show" succeeding short-circuits the create).
- name: Add keypair
  shell: >
    set -o pipefail &&
    openstack keypair show {{ key_name }} && echo "{{ key_name }} already exists" ||
    openstack keypair create --public-key {{ key_location }} {{ key_name }} 2>&1 | tee -a {{ stack_log }}
  environment:
    OS_USERNAME: "{{ os_username }}"
    OS_PASSWORD: "{{ os_password }}"
    OS_TENANT_NAME: "{{ os_tenant_name }}"
    OS_AUTH_URL: "{{ os_auth_url }}"
  args:
    # pipefail is a bashism.
    executable: /bin/bash
# Render the Heat parent template, the per-subnode nested template, and the
# parameters environment file into the local working dir. One looped task
# instead of three: same three files, same sources, same destinations.
- name: Render stack templates and parameters
  template:
    src: "{{ item.src }}"
    dest: "{{ local_working_dir }}/{{ item.dest }}"
  with_items:
    - { src: stack.yaml, dest: multinode_stack.yaml }
    - { src: subnode.yaml, dest: subnode.yaml }
    - { src: params.yaml.j2, dest: params_multinode.yaml }
# Create the stack and poll (up to 30 x 10 s) until CREATE_COMPLETE appears.
# A FAILED status is logged and signalled through a marker string on stdout
# (the script exits 0 on that path), mirroring delete_stack.yml; the follow-up
# fail task converts the marker into a task failure.
- name: Create stack
  shell: >
    echo "Executing 'openstack stack create {{ stack_name }} --template {{ local_working_dir }}/multinode_stack.yaml
    -e {{ local_working_dir }}/params_multinode.yaml &>> {{ stack_log }}'" | tee -a {{ stack_log }};
    openstack stack create {{ stack_name }} --template {{ local_working_dir }}/multinode_stack.yaml
    -e {{ local_working_dir }}/params_multinode.yaml &>> {{ stack_log }} || exit 1;
    export COUNTER=0;
    while ! openstack stack show {{ stack_name }} | grep CREATE_COMPLETE; do
    if [ $COUNTER -lt 30 ]; then
    sleep 10;
    if openstack stack show {{ stack_name }} | grep FAILED ; then
    echo "Failed to create OVB stack {{ stack_name }}" | tee -a {{ stack_log }};
    openstack stack show {{ stack_name }} >> {{ stack_log }} 2>&1;
    openstack stack failures list --long {{ stack_name }} >> {{ stack_log }} 2>&1 || echo 'Failed to list stack failures' >> {{ stack_log }};
    exit 0;
    fi;
    COUNTER=$((COUNTER+1));
    fi;
    done;
    openstack stack show "{{ stack_name }}" >> {{ stack_log }};
  environment:
    OS_USERNAME: "{{ os_username }}"
    OS_PASSWORD: "{{ os_password }}"
    OS_TENANT_NAME: "{{ os_tenant_name }}"
    OS_AUTH_URL: "{{ os_auth_url }}"
  args:
    # &>> redirection requires bash.
    executable: /bin/bash
  register: create_stack

- name: Fail if stack failed to create
  fail:
    msg: "Failed to create OVB stack {{ stack_name }}, check log: {{ stack_log }}"
  when: '"Failed to create OVB stack" in create_stack.stdout'

# Tagged "delete" so the log location is printed on teardown-only runs too.
- name: Log
  debug:
    msg: "Log from stacks management is in {{ stack_log }}"
  tags:
    - delete
# Fetch the stack's subnode_ip_pairs output: a list of
# [private_ip, public_ip] pairs (see outputs in stack.yaml).
- name: Get output from nodes
  shell: openstack stack output show {{ stack_name }} subnode_ip_pairs -c output_value -f value
  register: subnode_ips

# Register each subnode in the in-memory inventory under subnode_groups.
# item.0 is the node index, item.1.0 the private IP, item.1.1 the public IP.
# NOTE(review): subnode_ips.stdout is a string; this presumably relies on
# Jinja coercing the list-shaped output back into a list during templating —
# confirm against the Ansible version in use.
- name: Add hosts
  add_host:
    name: "subnode-{{ item.0 }}"
    hostname: "{{ item.1.1 }}"
    groups: "{{ subnode_groups| join(',') }}"
    ansible_user: centos
    ansible_host: "{{ item.1.1 }}"
    ansible_fqdn: "subnode-{{ item.0 }}"
    ansible_private_key_file: "{{ private_key_location }}"
    subnode_public_ip: "{{ item.1.1 }}"
    subnode_private_ip: "{{ item.1.0 }}"
  with_indexed_items:
    - "{{ subnode_ips.stdout }}"
# Poll every registered host over SSH until it accepts connections
# (up to 100 tries x 5 s). BatchMode makes ssh fail fast instead of
# prompting for a password.
# NOTE(review): hostvars.keys() also includes localhost — presumably its
# ansible_host resolves harmlessly; confirm.
- name: Wait for provisioned hosts to become reachable
  command: >-
    ssh -o BatchMode=yes -o "StrictHostKeyChecking=no" centos@{{ hostvars[item].ansible_host }} -i "{{ private_key_location }}"
  register: result
  # "result|success" filter syntax is deprecated (removed in later Ansible);
  # checking the exit code directly is portable across versions.
  until: result.rc == 0
  retries: 100
  delay: 5
  # Probing reachability never changes the target.
  changed_when: false
  with_items:
    - "{{ hostvars.keys() }}"
# In case of blank centos image we need to create CI user and update inventory.
# Runs only when the subnodes are still registered with the stock user.
- block:
    - name: Setup user
      user:
        name: "{{ nodepool_user }}"
        groups: wheel
        append: true
      become: true
      delegate_to: "{{ item }}"
      with_items:
        - "{{ hostvars.keys() }}"

    - name: Setup keys
      authorized_key:
        user: "{{ nodepool_user }}"
        state: present
        # Bare variable as the lookup argument: nesting "{{ }}" inside an
        # expression is unsupported and would look up a file literally named
        # "{{ key_location }}".
        key: "{{ lookup('file', key_location) }}"
      become: true
      become_user: "{{ nodepool_user }}"
      delegate_to: "{{ item }}"
      with_items:
        - "{{ hostvars.keys() }}"

    - name: Allow user to have passwordless sudo
      lineinfile:
        dest: /etc/sudoers
        state: present
        line: '{{ nodepool_user }} ALL=(ALL) NOPASSWD:ALL'
        # Never install a sudoers file that visudo rejects.
        validate: 'visudo -cf %s'
      become: true
      delegate_to: "{{ item }}"
      with_items:
        - "{{ hostvars.keys() }}"

    # Re-register the subnodes with the CI user so later plays connect as it.
    - name: Add hosts
      add_host:
        name: "subnode-{{ item.0 }}"
        hostname: "{{ item.1.1 }}"
        groups: "{{ subnode_groups| join(',') }}"
        ansible_user: "{{ nodepool_user }}"
        ansible_host: "{{ item.1.1 }}"
        ansible_fqdn: "subnode-{{ item.0 }}"
        ansible_private_key_file: "{{ private_key_location }}"
        subnode_public_ip: "{{ item.1.1 }}"
        subnode_private_ip: "{{ item.1.0 }}"
      with_indexed_items:
        - "{{ subnode_ips.stdout }}"
  # when: is already a Jinja expression — "{{ }}" inside it is redundant
  # and deprecated; compare against the bare variable.
  when: hostvars['subnode-0'].ansible_user != nodepool_user
# Persist the in-memory inventory to disk so follow-up runs (or the user)
# can reach the subnodes without re-provisioning.
- name: Dump hosts to file
  template:
    src: multinode_hosts.j2
    dest: '{{ local_working_dir }}/multinode_hosts'

@ -0,0 +1,14 @@
{# Renders a flat Ansible INI inventory: one line per registered host with
   its connection variables (plus subnode IPs when set via add_host),
   followed by a [section] per non-default group. #}
{% for h in groups['all'] %}
{{ h }} ansible_host={{ hostvars[h].ansible_host }} ansible_user={{ hostvars[h].ansible_user }} ansible_private_key_file={{ hostvars[h].ansible_private_key_file }}{% if hostvars[h].get('subnode_private_ip') %} subnode_private_ip={{ hostvars[h].subnode_private_ip }}{% endif %}{% if hostvars[h].get('subnode_public_ip') %} subnode_public_ip={{ hostvars[h].subnode_public_ip }}{% endif %}
{% endfor %}
{% for group in groups %}
{% if group not in ['ungrouped', 'all'] %}
[{{ group }}]
{% for host in groups[group] %}
{{ host }}
{% endfor %}
{% endif %}
{% endfor %}

@ -0,0 +1,12 @@
# Heat environment file feeding the parameters of multinode_stack.yaml.
# Templated values are quoted so empty or special-looking expansions still
# parse as strings; subnode_count is left bare so it renders as a number.
parameters:
  image: "{{ image_id }}"
  public_net_name: "{{ public_net_name }}"
  key_name: "{{ key_name }}"
  private_net_name: "{{ private_net_name }}"
  private_net_cidr: "{{ private_net_cidr }}"
  private_net_gateway: "{{ private_net_gateway }}"
  private_net_pool_start: "{{ private_net_pool_start }}"
  private_net_pool_end: "{{ private_net_pool_end }}"
  subnode_count: {{ subnode_count }}
  subnode_flavor: "{{ subnode_flavor }}"

@ -0,0 +1,99 @@
# Jinja-templated Heat parent template: builds a private network with a
# router uplink to the public net, a security group, and a ResourceGroup of
# identical subnodes defined in subnode.yaml.
heat_template_version: newton
description: >
  This is for multinode
parameters:
  key_name:
    type: string
    description: Name of keypair to assign to servers
  image:
    type: string
    description: Name of image to use for servers
  public_net_name:
    type: string
    description: >
      ID or name of public network for which floating IP addresses will be allocated
  private_net_name:
    type: string
    description: Name of private network to be created
  private_net_cidr:
    type: string
    description: Private network address (CIDR notation)
  private_net_gateway:
    type: string
    description: Private network gateway address
  private_net_pool_start:
    type: string
    description: Start of private network IP address allocation pool
  private_net_pool_end:
    type: string
    description: End of private network IP address allocation pool
  subnode_flavor:
    type: string
    description: Flavor for subnode node
  subnode_count:
    type: string
    description: How many subnodes
resources:
  private_net:
    type: OS::Neutron::Net
    properties:
      name: { get_param: private_net_name }
  private_subnet:
    type: OS::Neutron::Subnet
    properties:
      network_id: { get_resource: private_net }
      cidr: { get_param: private_net_cidr }
      gateway_ip: { get_param: private_net_gateway }
      allocation_pools:
        - start: { get_param: private_net_pool_start }
          end: { get_param: private_net_pool_end }
  # Router uplinking the private subnet to the public network so floating
  # IPs can be bound to subnode ports.
  router:
    type: OS::Neutron::Router
    properties:
      external_gateway_info:
        network: { get_param: public_net_name }
  router_interface:
    type: OS::Neutron::RouterInterface
    properties:
      router_id: { get_resource: router }
      subnet_id: { get_resource: private_subnet }
  # Inbound SSH (tcp/22) and ICMP from anywhere; attached to each subnode
  # port alongside the "default" group (see subnode.yaml).
  server_security_group:
    type: OS::Neutron::SecurityGroup
    properties:
      description: Add security group rules for server
      name: subnode_security_group
      rules:
        - remote_ip_prefix: 0.0.0.0/0
          protocol: tcp
          port_range_min: 22
          port_range_max: 22
        - remote_ip_prefix: 0.0.0.0/0
          protocol: icmp
  # One nested stack per subnode; the template path and name prefix are
  # substituted by Ansible when this file is rendered into the working dir.
  subnodes:
    type: OS::Heat::ResourceGroup
    properties:
      count: { get_param: subnode_count }
      resource_def:
        type: "{{ local_working_dir }}/subnode.yaml"
        properties:
          name: "{{ prefix }}subnode-%index%"
          private_net: { get_resource: private_net }
          private_subnet: { get_resource: private_subnet }
          server_security_group: {get_resource: server_security_group}
          subnode_flavor: { get_param: subnode_flavor }
          public_net_name: { get_param: public_net_name }
          key_name: { get_param: key_name }
          image: { get_param: image }
outputs:
  # Per-subnode address pairs collected from the nested stacks; consumed by
  # tasks/main.yml ("Get output from nodes") to build the inventory.
  subnode_ip_pairs:
    value: { get_attr: [subnodes, subnode_ip_pair] }

@ -0,0 +1,62 @@
# Jinja-templated Heat nested template for one subnode: a port on the
# private network, a floating IP on the public network, and the server.
# Instantiated once per node by the ResourceGroup in stack.yaml.
heat_template_version: newton
description: >
  Subnode config
parameters:
  subnode_flavor:
    type: string
    description: Flavor for subnode
  public_net_name:
    type: string
    description: >
      ID or name of public network for which floating IP addresses will be allocated
  key_name:
    type: string
    description: Name of keypair to assign to servers
  image:
    type: string
    description: Name of image to use for servers
  name:
    type: string
    description: Index for subnodes counting
  # IDs passed down from the parent stack's resources.
  private_net:
    type: string
  private_subnet:
    type: string
  server_security_group:
    type: string
resources:
  subnode_port:
    type: OS::Neutron::Port
    properties:
      network_id: { get_param: private_net }
      fixed_ips:
        - subnet_id: { get_param: private_subnet }
      security_groups:
        - "default"
        - { get_param: server_security_group }
  subnode_floating_ip:
    type: OS::Neutron::FloatingIP
    # depends_on: router_interface, subnode_port
    properties:
      floating_network: { get_param: public_net_name }
      port_id: { get_resource: subnode_port }
  subnode:
    type: OS::Nova::Server
    properties:
      name: {get_param: name}
      image: {get_param: image}
      flavor: { get_param: subnode_flavor }
      key_name: { get_param: key_name }
      networks:
        - port: { get_resource: subnode_port }
outputs:
  subnode_ip_pair:
    description: IP addresses of subnode in network
    # {{ private_net_name }} is substituted by Ansible at render time, so the
    # attribute path reads the server's addresses on the private network.
    # NOTE(review): tasks/main.yml treats this as [private_ip, public_ip] —
    # confirm the floating IP is included by this Heat version's networks attr.
    value: { get_attr: [ subnode, networks, {{ private_net_name }} ] }