Move healthcheck playbooks to the collection

Depends-On: https://review.opendev.org/c/openstack/openstack-ansible-plugins/+/933610
Change-Id: I888aee7544cf16402e5909de01034674928fce92
Dmitriy Rabotyagov 2024-10-28 20:00:13 +01:00
parent 742d933dc8
commit 3486f2dbfa
4 changed files with 6 additions and 1087 deletions
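
Each healthcheck playbook kept in this repository is now a thin shim that imports its counterpart from the openstack.osa collection (provided by openstack-ansible-plugins, per the Depends-On above), for example:

    - name: Importing healthcheck.hosts playbook
      import_playbook: openstack.osa.healthcheck.hosts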

playbooks/healthcheck-hosts.yml

@@ -13,116 +13,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Importing healthcheck.hosts playbook
  import_playbook: openstack.osa.healthcheck.hosts

# This playbook is meant to run after setup-hosts.
# To succeed, it expects the setup-hosts playbook to have run successfully.

# Test if the openstack-hosts-setup play was a success.
# TO BE IMPLEMENTED

# Test if security-hardening was a success.
# TO BE IMPLEMENTED

# Test if containers-deploy was a success.
# Ensure the lxc containers are properly setup
- name: Ensuring hosts good behavior
  hosts: lxc_hosts
  gather_facts: yes
  tasks:
    - name: Looking for dnsmasq process
      command: pgrep dnsmasq
      changed_when: false

- name: Ensuring containers creation, connection and good behavior
  hosts: all_containers
  gather_facts: yes
  tasks:
    - name: Gather additional facts
      setup:
        gather_subset: "!all,network"
        filter: ansible_interfaces
      delegate_to: "{{ physical_host }}"
      delegate_facts: true

    - name: Ensure the physical host has all the proper interfaces defined
      assert:
        that:
          - item.value.bridge in hostvars[physical_host]['ansible_facts']['interfaces']
      with_dict: "{{ container_networks }}"

    - name: Check if DNS resolution and external connectivity are fine
      get_url:
        url: https://opendev.org/openstack/openstack-ansible/raw/ansible-role-requirements.yml
        dest: /tmp/osa-master-requirements
        mode: "0600"
      environment: "{{ deployment_environment_variables | default({}) }}"
# Test extra settings before setup-infrastructure
- name: Ensure the internal_lb_vip_address is in the right range
  hosts: localhost
  gather_facts: no
  tasks:
    - name: Check your internal network is using private IPs
      assert:
        that:
          - internal_lb_vip_address | ansible.utils.ipaddr('private')
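    # For illustration (example addresses assumed, not from any deployment):
    # ansible.utils.ipaddr('private') returns the address itself (truthy) for
    # an RFC1918 address such as 172.29.236.9, and False for a public one such
    # as 203.0.113.10, so the assert only passes for a private VIP.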
# Test openstack_hosts role
- name: Playbook for role testing
  hosts: localhost
  become: true
  gather_facts: true
  tasks:
    - name: Open modules file
      slurp:
        src: "{{ (ansible_facts['os_family'] | lower == 'debian') | ternary('/etc/modules', '/etc/modules-load.d/openstack-ansible.conf') }}"
      register: modules_file

    - name: Open sysctl file
      slurp:
        src: /etc/sysctl.conf
      register: sysctl_file

    - name: Open hosts file
      slurp:
        src: /etc/hosts
      register: hosts_file

    - name: Open /etc/environment file
      slurp:
        src: /etc/environment
      register: environment_file

    - name: Read files
      set_fact:
        modules_content: "{{ modules_file.content | b64decode }}"
        sysctl_content: "{{ sysctl_file.content | b64decode }}"
        hosts_content: "{{ hosts_file.content | b64decode }}"
        environment_content: "{{ environment_file.content | b64decode }}"

    - name: Check for release file
      stat:
        path: /etc/openstack-release
      register: release_file

    - name: Check for sysstat file
      stat:
        path: "{{ (ansible_facts['os_family'] | lower == 'debian') | ternary('/etc/default/sysstat', '/etc/sysconfig/sysstat') }}"
      register: systat_file

    - name: Check for ssh dir
      stat:
        path: "{{ ansible_facts['env']['HOME'] }}/.ssh"
      register: ssh_dir

    - name: Check role functions
      assert:
        that:
          - "'dm_multipath' in modules_content"
          - "'ebtables' in modules_content"
          - "'vm.swappiness' in sysctl_content"
          - "('172.29.236.100 ' ~ ansible_facts['fqdn'] ~ ' ' ~ ansible_facts['hostname']) in hosts_content"
          - "(hostvars[groups['galera_all'][0]]['management_address'] ~ ' ' ~ hostvars[groups['galera_all'][0]]['ansible_facts']['hostname'] ~ '.openstack.local ' ~ hostvars[groups['galera_all'][0]]['ansible_facts']['hostname'] ~ ((hostvars[groups['galera_all'][0]]['ansible_facts']['hostname'] != groups['galera_all'][0]) | ternary(' ' ~ groups['galera_all'][0], ''))) in hosts_content" # noqa: yaml[line-length]
          - "release_file.stat.exists"
          - "systat_file.stat.exists"
          - "'PATH=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' in environment_content"
          - "ssh_dir.stat.isdir"

playbooks/healthcheck-infrastructure.yml

@@ -13,416 +13,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Importing healthcheck.infrastructure playbook
  import_playbook: openstack.osa.healthcheck.infrastructure

# This playbook is meant to run after setup-infrastructure, and expects
# the infrastructure bits to have properly deployed to succeed.

# Test unbound-install.yml
# TO BE IMPLEMENTED

# Test repo-install.yml
- name: Ensure all repo-servers are built and are accessible by hosts.
  hosts: all_containers[0]:physical_hosts[0]
  gather_facts: yes
  vars:
    repo_requirements_file: "constraints/upper_constraints_cached.txt"
  tasks:
    - name: Setup installation variables
      include_role:
        name: openstack.osa.install_defaults
        defaults_from: "{{ install_method }}"
        public: true

    - name: Check the repo sync file on each repo server
      uri:
        url: "{{ openstack_repo_protocol }}://{{ hostvars[item]['management_address'] }}:{{ repo_server_port }}/{{ repo_requirements_file }}"
      with_inventory_hostnames: "{{ groups['repo_all'] }}"
      when: install_method == 'source'
  tags:
    - healthcheck
    - healthcheck-repo-install
# Test haproxy-install.yml
- name: Ensuring haproxy runs
  hosts: haproxy
  gather_facts: yes
  tasks:
    - name: Check if host can connect to external keepalived ping IP
      command: "ping -c 2 {{ keepalived_external_ping_address }}"
      changed_when: false

    - name: Check if host can connect to internal keepalived ping IP
      command: "ping -c 2 {{ keepalived_internal_ping_address }}"
      changed_when: false

    - name: Checking if keepalived is running
      command: "pgrep keepalived"
      changed_when: false
      when: groups['haproxy'] | length > 1

    - name: Install netcat
      package:
        name: "{% if ansible_facts['os_family'] | lower == 'redhat' %}nmap-ncat{% else %}netcat-openbsd{% endif %}"
        state: present

    # Fails if HAProxy is not running
    - name: Recording haproxy stats as a way to ensure haproxy runs
      shell: 'echo "show info;show stat" | nc -U /var/run/haproxy.stat'
      changed_when: false
      register: haproxy_stats

    # Run this playbook with -v and you'll see your DOWN issues
    - name: Printing the output of haproxy stats
      debug:
        var: haproxy_stats
        verbosity: 1
  tags:
    - healthcheck
    - healthcheck-haproxy-install
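# For manual debugging, the same admin socket can be queried directly. A sketch
# (socket path and CSV column positions assume a default haproxy stats setup):
#   echo "show stat" | nc -U /var/run/haproxy.stat | cut -d, -f1,2,18
# prints the proxy name, server name and UP/DOWN status of each backend.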
# Test repo-use.yml
- name: Ensure all the containers can connect to the repos
  hosts: all_containers
  gather_facts: yes
  # The serial batches make the first three containers hit the load balancer
  # at the same time, so the requests land on three different repo servers.
  # Once that works, the remaining nodes are checked all at once.
  serial:
    - 3
    - 100%
  tasks:
    - name: Run check only for source method
      when:
        - install_method == 'source'
      block:
        - name: Setup installation variables
          include_role:
            name: openstack.osa.install_defaults
            defaults_from: "{{ install_method }}"
            public: true

        # Repo release path points to the internal LB vip
        - name: Check the presence of upper constraints on your repos and check load balancing
          uri:
            url: "{{ openstack_repo_url }}/constraints/upper_constraints_cached.txt"
  tags:
    - healthcheck
    - healthcheck-repo-use
# Test utility-install.yml
- name: Ensure the service setup host is ready to run openstack calls
  hosts: "{{ openstack_service_setup_host | default('localhost') }}"
  gather_facts: no
  vars:
    ansible_python_interpreter: "{{ openstack_service_setup_host_python_interpreter | default(ansible_facts['python']['executable']) }}"
  pre_tasks:
    - name: Setup installation variables
      import_role:
        name: openstack.osa.install_defaults
        defaults_from: "{{ install_method | default('source') }}"
  tasks:
    - name: Get openstack client config
      openstack.cloud.config:

    - name: Show openstack client config
      debug:
        var: openstack.clouds
        verbosity: 1
  tags:
    - healthcheck
    - healthcheck-utility-install
# Test memcached-install.yml
- name: Check memcached for keystone
  hosts: keystone_all
  gather_facts: no
  tasks:
    - name: Set facts about memcached
      setup:
      delegate_to: "{{ item }}"
      delegate_facts: true
      with_items: "{{ groups['memcached'] }}"

    - name: Install netcat
      package:
        name: "{% if ansible_facts['os_family'] | lower == 'redhat' %}nmap-ncat{% else %}netcat-openbsd{% endif %}"
        state: present

    - name: Connect to remote memcache servers (full mesh testing)
      shell: "echo stats | nc -w 3 {{ hostvars[memcached_host]['management_address'] }} {{ memcached_port }}"
      changed_when: false
      register: memcache_stats
      with_items: "{{ groups['memcached'] }}"
      loop_control:
        loop_var: memcached_host

    - name: Output memcache stats if in verbose mode
      debug:
        var: memcache_stats
        verbosity: 1
  tags:
    - healthcheck
    - healthcheck-memcached-install
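# The memcached "stats" command used above prints counters such as
# curr_connections and get_hits; the tasks only assert that every node answers
# over the management network, not any particular counter values.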
# Test galera-install.yml
- name: Sanity checks for all containers
  hosts: all_containers:physical_hosts
  gather_facts: no
  tasks:
    - name: Connect to galera port
      wait_for:
        port: 3306
        host: "{{ internal_lb_vip_address }}"
        state: started
  tags:
    - healthcheck
    - healthcheck-galera-install
- name: Run functional tests
  hosts: galera_all
  user: root
  gather_facts: true
  vars:
    _mariadb_client_binary: "{{ galera_mariadb_client_binary | default('mariadb') }}"
  tasks:
    - name: Wait for cluster to be ready
      block:
        - name: Wait for cluster ready state
          command: |
            {{ _mariadb_client_binary }} -h {{ management_address }} \
                -u "{{ galera_root_user | default('root') }}" \
                -p"{{ galera_root_password }}" \
                -e "show status like 'wsrep_incoming_addresses';" \
                --silent \
                --skip-column-names
          register: mysql_instance_ready
          retries: 20
          delay: 5
          changed_when: false
          until: mysql_instance_ready is success and mysql_instance_ready.stdout.split()[-1].split(',') | length == groups['galera_all'] | length
      rescue:
        - name: Restarting misbehaving mariadb instance
          service:
            name: mariadb
            state: restarted

        - name: Wait for cluster ready state
          command: |
            {{ _mariadb_client_binary }} -h {{ management_address }} \
                -u "{{ galera_root_user | default('root') }}" \
                -p"{{ galera_root_password }}" \
                -e "show status like 'wsrep_incoming_addresses';" \
                --silent \
                --skip-column-names
          register: mysql_instance_ready
          retries: 20
          delay: 5
          changed_when: false
          until: mysql_instance_ready is success and mysql_instance_ready.stdout.split()[-1].split(',') | length == groups['galera_all'] | length

    - name: Check cluster local state
      command: |
        {{ _mariadb_client_binary }} -h {{ management_address }} \
            -u "{{ galera_root_user | default('root') }}" \
            -p"{{ galera_root_password }}" \
            -e "show status like 'wsrep_local_state_comment';" \
            --silent \
            --skip-column-names
      register: wsrep_local_state_comment
      changed_when: false
      tags:
        - skip_ansible_lint

    - name: Check cluster evs state
      command: |
        {{ _mariadb_client_binary }} -h {{ management_address }} \
            -u "{{ galera_root_user | default('root') }}" \
            -p"{{ galera_root_password }}" \
            -e "show status like 'wsrep_evs_state';" \
            --silent \
            --skip-column-names
      register: wsrep_evs_state
      changed_when: false
      tags:
        - skip_ansible_lint

    - name: Check contents
      assert:
        that:
          - "'Synced' in wsrep_local_state_comment.stdout"
          - "'OPERATIONAL' in wsrep_evs_state.stdout"
    - name: Create DB for service on "{{ groups['galera_all'][0] }}"
      community.mysql.mysql_db:
        login_user: "{{ galera_root_user | default('root') }}"
        login_password: "{{ galera_root_password }}"
        login_host: "{{ management_address }}"
        name: "OSA-test"
        state: "present"
        check_hostname: true
      when: inventory_hostname == groups['galera_all'][0]
      tags:
        - skip_ansible_lint

    - name: Grant access to the DB on "{{ groups['galera_all'][-1] }}"
      community.mysql.mysql_user:
        login_user: "{{ galera_root_user | default('root') }}"
        login_password: "{{ galera_root_password }}" # noqa no-log-password
        login_host: "{{ management_address }}"
        name: "osa-tester"
        password: "tester-secrete" # noqa no-log-password
        host: "{{ item }}"
        state: "present"
        priv: "OSA-test.*:ALL"
        check_hostname: true
      with_items:
        - "localhost"
        - "%"
      when: inventory_hostname == groups['galera_all'][-1]

    - name: Try to login with user to DB
      delegate_to: "{{ groups['utility_all'][0] }}"
      command: |
        {{ _mariadb_client_binary }} -h {{ internal_lb_vip_address }} \
            -p"tester-secrete" \
            -u osa-tester \
            OSA-test \
            -e "SHOW TABLES;"
      changed_when: false
      when: inventory_hostname == groups['galera_all'][-1]

    - name: Remove created user
      community.mysql.mysql_user:
        login_user: "{{ galera_root_user | default('root') }}"
        login_password: "{{ galera_root_password }}" # noqa no-log-password
        login_host: "{{ management_address }}"
        name: "osa-tester"
        state: "absent"
        host: "{{ item }}"
        check_hostname: true
      with_items:
        - "localhost"
        - "%"
      when: inventory_hostname == groups['galera_all'][-1]

    - name: Remove created DB
      community.mysql.mysql_db:
        login_user: "{{ galera_root_user | default('root') }}"
        login_password: "{{ galera_root_password }}" # noqa no-log-password
        login_host: "{{ management_address }}"
        name: "OSA-test"
        state: "absent"
        check_hostname: true
      when: inventory_hostname == groups['galera_all'][0]
      tags:
        - skip_ansible_lint
# Test rabbitmq-install.yml
- name: Add a user for rabbitmq
  hosts: rabbitmq_all[0]
  gather_facts: no
  tasks:
    - name: Configure Rabbitmq vhost
      community.rabbitmq.rabbitmq_vhost:
        name: "/testvhost"
        state: "present"

    - name: Configure Rabbitmq user
      community.rabbitmq.rabbitmq_user:
        user: "testguest"
        password: "secrete" # noqa no-log-password
        vhost: "/testvhost"
        configure_priv: ".*"
        read_priv: ".*"
        write_priv: ".*"
        state: "present"
  tags:
    - healthcheck
    - healthcheck-rabbitmq-install
- name: Ensure all the usual openstack containers can connect to rabbit
  hosts: all_containers:!galera_all:!memcached:!haproxy:!rabbitmq_all:!unbound:!repo_all
  gather_facts: no
  vars:
    venv_path: /tmp/rabbitmqtest
  post_tasks:
    - name: Setup installation variables
      include_role:
        name: openstack.osa.install_defaults
        defaults_from: "{{ install_method }}"
        public: true

    - name: Generate venv for rabbitmq testing
      include_role:
        name: "python_venv_build"
      vars:
        venv_install_destination_path: "{{ venv_path }}"
        venv_pip_packages:
          - pika

    - name: Copying test script
      copy:
        src: "../scripts/rabbitmq-test.py"
        dest: "{{ venv_path }}/rabbitmq-test.py"
        mode: "0755"

    - name: Connect to rabbitmq
      command: "{{ venv_path }}/bin/python {{ venv_path }}/rabbitmq-test.py {{ hostvars[groups['rabbitmq_all'][0]]['management_address'] }}"
      changed_when: false
  tags:
    - healthcheck
    - healthcheck-rabbitmq-install
- name: Remove guest user for rabbitmq
  hosts: rabbitmq_all[0]
  gather_facts: no
  tasks:
    - name: Remove test user
      community.rabbitmq.rabbitmq_user:
        user: testguest
        password: secrete
        vhost: "/testvhost"
        state: absent
      no_log: true

    - name: Remove test vhost
      community.rabbitmq.rabbitmq_vhost:
        name: "/testvhost"
        state: "absent"
  tags:
    - healthcheck
    - healthcheck-rabbitmq-install
    - healthcheck-teardown
# Test zookeeper-install
- name: Ensure coordination is running and accepting connections
  hosts: utility_all[0]
  tasks:
    - name: Probing TCP connection to zookeeper
      wait_for:
        host: "{{ hostvars[item]['management_address'] }}"
        port: "{{ coordination_port | default(2181) }}"
      with_items: "{{ groups[coordination_host_group | default('zookeeper_all')] }}"

- name: Ensure zookeeper is healthy
  hosts: "zookeeper_all"
  tasks:
    - name: Ensuring netcat is installed
      package:
        name: "{% if ansible_facts['os_family'] | lower == 'redhat' %}nmap-ncat{% else %}netcat-openbsd{% endif %}"
        state: present

    - name: Gathering zookeeper state
      shell: "echo ruok | nc localhost {{ coordination_port | default(2181) }}"
      register: zookeeper_ok
      changed_when: false

    - name: Gathering zookeeper rw/ro
      shell: "echo isro | nc localhost {{ coordination_port | default(2181) }}"
      register: zookeeper_ro
      changed_when: false

    - name: Check zookeeper results
      assert:
        that:
          - "'imok' in zookeeper_ok.stdout"
          - "'rw' in zookeeper_ro.stdout"
# TODO: Other playbooks' tests.

playbooks/healthcheck-openstack.yml

@@ -15,517 +15,5 @@
#
# (c) 2018, Jean-Philippe Evrard <jean-philippe@evrard.me>

- name: Importing healthcheck.openstack playbook
  import_playbook: openstack.osa.healthcheck.openstack

# This playbook is meant to run after setup-openstack, and expects
# the openstack plays to have succeeded.

# Test os-keystone-install.yml
# Much of the keystone testing already happens in other playbooks, as
# keystone is used to set up all the other openstack services.
- name: Test OpenStack basic functionality
  gather_facts: no
  hosts: utility_all[0]
  vars:
    ansible_python_interpreter: "{{ utility_venv_bin }}/python"
  vars_files:
    - defaults/healthchecks-vars.yml
  tasks:
    - name: Test keystone
      block:
        - name: Authenticate to the cloud and retrieve the service catalog
          openstack.cloud.catalog_service_info:
            cloud: default
            interface: "{{ item }}"
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
          with_items:
            - internal
            - public
          register: service_catalog

        - name: Show service catalog
          debug:
            var: service_catalog.results
      tags:
        - healthchecks
        - healthchecks-keystone-install
    - name: Test Glance
      block:
        - name: Image(s) download
          get_url:
            url: "{{ item.url }}"
            dest: "{{ item.dest }}"
            checksum: "{{ item.checksum | default(omit) }}"
            mode: "0640"
          with_items: "{{ glance_images }}"
          register: fetch_url
          until: fetch_url is success
          retries: 6
          delay: 5

        - name: Upload tempest images to glance
          openstack.cloud.image:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            name: "{{ item.name | default(item.url | basename) }}"
            filename: "{{ item.dest }}"
            container_format: bare
            disk_format: "{{ item.format }}"
            is_public: True
          with_items: "{{ glance_images }}"
          register: image_create
          until: image_create is success
          retries: 5
          delay: 15
      when:
        # No point in doing glance tests if glance isn't deployed.
        - "groups['glance_all'] | length > 0"
      tags:
        - healthchecks
        - healthchecks-glance-install
    - name: Test cinder
      block:
        # Volume creation automatically waits for completion by default.
        # There is no module to poll the state of an ongoing creation, so
        # this has to run synchronously.
        - name: Create volumes
          openstack.cloud.volume:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            display_name: "{{ item.name }}"
            size: "{{ item.size }}"
            snapshot_id: "{{ item.snapshot_id | default(omit) }}"
            timeout: "{{ item.timeout | default(600) }}" # By default it's 180 but that's low.
            volume: "{{ item.volume | default(omit) }}"
            volume_type: "{{ item.volume_type | default(omit) }}"
          with_items: "{{ cinder_volumes }}"
      when:
        - groups['cinder_all'] | length > 0
      tags:
        - healthchecks
        - healthchecks-cinder-install
    - name: Test neutron
      block:
        - name: Create networks
          openstack.cloud.network:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            name: "{{ item.value.name }}"
            provider_network_type: "{{ item.value.pn_type }}"
            provider_physical_network: "{{ item.value.physical_network | default('') }}"
            provider_segmentation_id: "{{ item.value.segmentation_id | default(omit) }}"
            external: "{{ item.value.external | default(omit) }}"
            project: "{{ item.value.project | default(omit) }}"
          with_dict: "{{ neutron_networks }}"
          register: _create_net

        - name: Fail if network was not created successfully
          fail:
            msg: "Creating network failure"
          with_items: "{{ _create_net.results }}"
          when:
            - "item.msg is defined"
            - "'Error' in item.msg"
            - "not 'is in use' in item.msg"

        - name: Store facts to see if everything is ok
          openstack.cloud.networks_info:
            cloud: default
            interface: internal
            verify: no
          register: openstack_networks

        - name: Show networks
          debug:
            var: openstack_networks.networks

        - name: Ensure subnet exists
          openstack.cloud.subnet:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            network_name: "{{ item[0].name }}"
            name: "{{ item[1].name }}"
            ip_version: "{{ item[1].ip_version }}"
            cidr: "{{ item[1].cidr }}"
            gateway_ip: "{{ item[1].gateway_ip | default(omit) }}"
            enable_dhcp: "{{ item[1].enable_dhcp | default(false) }}"
            allocation_pool_start: "{{ item[1].allocation_pool_start | default(omit) }}"
            allocation_pool_end: "{{ item[1].allocation_pool_end | default(omit) }}"
            dns_nameservers: "{{ item[1].dns_nameservers | default([]) }}"
            project: "{{ item[0].project | default(omit) }}"
          with_subelements:
            - "{{ neutron_networks }}"
            - "subnets"

        - name: Create router
          openstack.cloud.router:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            name: router
            network: "{{ neutron_networks['public']['name'] }}"
            interfaces:
              - "{{ item.name }}"
          with_items: "{{ neutron_networks['private']['subnets'] }}"
      when:
        - "groups['neutron_all'] | length > 0"
      tags:
        - healthchecks
        - healthchecks-neutron-install
    # Test os-heat-install.yml
    - name: Test heat
      block:
        - name: Fetch minimum heat stack
          get_url:
            url: "{{ heat_stack['source_url'] }}"
            dest: "{{ heat_stack['dest_file'] }}"
            mode: "0640"

        - name: Create heat stack
          ignore_errors: True
          register: stack_create
          openstack.cloud.stack:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            name: "{{ heat_stack['name'] }}"
            tag: "{{ heat_stack['tag'] }}"
            state: present
            template: "{{ heat_stack['dest_file'] }}"
            parameters: "{{ heat_stack['parameters'] }}"
      when:
        - "groups['heat_all'] | length > 0"
      tags:
        - healthchecks
        - healthchecks-heat-install
    # Test os-nova-install.yml
    - name: Test nova
      block:
        - name: Create keypair for nova
          command: "ssh-keygen -b 2048 -t rsa -f {{ ssh_key }} -q -N ''" # noqa no-changed-when
          args:
            creates: "{{ ssh_key }}"

        - name: Upload keypair
          openstack.cloud.keypair:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            state: present
            name: "healthcheck"
            public_key_file: "{{ ssh_key }}.pub"

        - name: Create flavors of nova VMs
          openstack.cloud.compute_flavor:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            state: present
            name: "{{ item.name }}"
            ram: "{{ item.ram }}"
            vcpus: "{{ item.vcpus }}"
            disk: "{{ item.disk }}"
            swap: "{{ item.swap }}"
            ephemeral: "{{ item.ephemeral }}"
          with_items: "{{ nova_flavors }}"

        - name: Create security group for healthcheck
          openstack.cloud.security_group:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            name: "{{ security_group.name }}"
            state: present
            description: "Healthcheck servers"

        - name: Create security group rules
          openstack.cloud.security_group_rule:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            security_group: "{{ security_group.name }}"
            protocol: "{{ item.protocol }}"
            port_range_min: "{{ item.port_range_min }}"
            port_range_max: "{{ item.port_range_max }}"
            remote_ip_prefix: "{{ item.remote_ip_prefix }}"
            state: present
          with_items: "{{ security_group.rules }}"

        - name: Create instance in a network
          openstack.cloud.server:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            name: "{{ nova_vm.name }}"
            state: present
            image: "{{ nova_vm.image }}"
            flavor: "{{ nova_vm.flavor }}"
            network: "{{ nova_vm.network }}"
            floating_ip_pools: "{{ neutron_networks['public']['name'] }}"
            key_name: "healthcheck"
            # Ensure user_data is passed through correctly.
            userdata: |
              cp /etc/fstab /root/fstab
            security_groups:
              - default
              - "{{ security_group.name }}"

        - name: Attach volume to instance
          when: "groups['cinder_all'] | length > 0"
          openstack.cloud.server_volume:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            state: present
            server: "{{ nova_vm.name }}"
            volume: "{{ cinder_volumes[0]['name'] }}"

        - name: Get server facts
          openstack.cloud.server_info:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            server: "{{ nova_vm.name }}"

        - name: Show server facts
          debug:
            var: openstack_servers

        - name: Discover the healthcheck vm floating IP
          set_fact:
            _floating_ip: "{{ openstack_servers | json_query(_query) }}"
          vars:
            _query: "[?name == '{{ nova_vm.name }}'].addresses.{{ nova_vm.network }}[] | [?contains(*,'floating')].addr"
        - name: Ensure connection to node works
          command: "scp -o StrictHostKeyChecking=no -i {{ ssh_key }} cirros@{{ _floating_ip[0] }}:/etc/fstab /tmp/fstab"
          changed_when: false
      when:
        - "groups['nova_all'] | length > 0"
      tags:
        - healthchecks
        - healthchecks-nova-install
    # Test os-swift-install.yml
    - name: Test swift
      block:
        - name: Store data in swift
          openstack.cloud.object:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            state: present
            name: "{{ swift_object['name'] }}"
            container: "{{ swift_object['container'] }}"
            filename: "{{ swift_object['filename'] }}"
      when:
        - "groups['swift_all'] | length > 0"
      tags:
        - healthchecks
        - healthchecks-swift-install
    # Test os-barbican-install.yml
    # TO BE IMPLEMENTED -- there is no ansible module for that, so the CLI might be needed.
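    # A possible sketch (hypothetical, untested): exercise barbican through
    # the openstack CLI from the utility host, for example:
    # - name: Check barbican can list secrets
    #   command: openstack secret list
    #   changed_when: false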
    # Test os-horizon-install.yml
    # TO BE IMPLEMENTED

    # Test os-designate-install.yml
    # TO BE IMPLEMENTED with os_recordset
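    # A possible sketch (hypothetical, assumes a pre-created zone named
    # 'healthcheck.example.'):
    # - name: Create a test recordset in designate
    #   openstack.cloud.recordset:
    #     cloud: default
    #     zone: healthcheck.example.
    #     name: www.healthcheck.example.
    #     recordset_type: a
    #     records: ['192.0.2.1']
    #     state: present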
    # Test os-gnocchi-install.yml
    # TO BE IMPLEMENTED

    # Test os-ceilometer-install.yml
    # TO BE IMPLEMENTED

    # Test os-aodh-install.yml
    # TO BE IMPLEMENTED

    # Test os-ironic-install.yml
    # TO BE IMPLEMENTED with os_ironic

    # Test os-magnum-install.yml
    # TO BE IMPLEMENTED

    # Test os-trove-install.yml
    # TO BE IMPLEMENTED

    # Test os-sahara-install.yml
    # TO BE IMPLEMENTED

    # Test os-octavia-install.yml
    # TO BE IMPLEMENTED

    # Test os-tacker-install.yml
    # TO BE IMPLEMENTED

    # Test os-tempest-install.yml
    # Tempest already has a test suite, so nothing should be added here.
# Teardown
- name: Teardown
  gather_facts: no
  hosts: utility_all[0]
  vars:
    ansible_python_interpreter: "{{ utility_venv_bin }}/python"
  vars_files:
    - defaults/healthchecks-vars.yml
  tasks:
    - name: Teardown images tests
      block:
        - name: Remove glance downloads
          file:
            state: absent
            path: "{{ item.dest }}"
          with_items: "{{ glance_images }}"

        - name: Remove glance image from the cloud
          openstack.cloud.image:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            name: "{{ item.name | default(item.url | basename) }}"
            state: absent
          with_items: "{{ glance_images }}"
      when:
        - "groups['glance_all'] | length > 0"
        - healthchecks_teardown | default(True) | bool
      tags:
        - healthchecks-teardown-glance
    - name: Teardown volumes tests
      block:
        - name: Detach volume if attached
          when: "groups['nova_all'] | length > 0"
          openstack.cloud.server_volume:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            state: absent
            server: "{{ nova_vm.name }}"
            volume: "{{ cinder_volumes[0]['name'] }}"

        - name: Remove cinder volumes
          openstack.cloud.volume:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            display_name: "{{ item.name }}"
            state: absent
          with_items: "{{ cinder_volumes }}"
      when:
        - groups['cinder_all'] | length > 0
        - healthchecks_teardown | default(True) | bool
      tags:
        - healthchecks-teardown-cinder
    - name: Teardown heat tests
      block:
        - name: Remove heat downloads
          file:
            path: "{{ heat_stack['dest_file'] }}"
            state: absent

        - name: Remove heat stack
          ignore_errors: True
          register: _stack_destroy
          openstack.cloud.stack:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            name: "{{ heat_stack['name'] }}"
            tag: "{{ heat_stack['tag'] }}"
            state: absent

        - name: Show results of heat stack destroy
          debug:
            var: _stack_destroy
      when:
        - "groups['heat_all'] | length > 0"
        - healthchecks_teardown | default(True) | bool
      tags:
        - healthchecks-teardown-heat
    - name: Teardown nova tests
      block:
        - name: Remove nova flavor
          openstack.cloud.compute_flavor:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            state: absent
            name: "{{ item.name }}"
          with_items: "{{ nova_flavors }}"

        - name: Remove nova instance
          openstack.cloud.server:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            name: "{{ nova_vm['name'] }}"
            state: absent

        - name: Remove SSH key(s)
          file:
            path: "{{ item }}"
            state: absent
          with_items:
            - "{{ ssh_key }}"
            - "{{ ssh_key }}.pub"
            - "{{ ssh_key | dirname }}/known_hosts"

        - name: Remove uploaded keypair
          openstack.cloud.keypair:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            state: absent
            name: healthcheck
      when:
        - "groups['nova_all'] | length > 0"
        - healthchecks_teardown | default(True) | bool
      tags:
        - healthchecks-teardown-nova
    - name: Teardown swift tests
      block:
        - name: Teardown swift data
          openstack.cloud.object:
            cloud: default
            interface: internal
            verify: "{{ keystone_service_internaluri_insecure | ternary(false, true) }}"
            state: absent
            name: "{{ swift_object['name'] }}"
            container: "{{ swift_object['container'] }}"
      when:
        - "groups['swift_all'] | length > 0"
        - healthchecks_teardown | default(True) | bool
      tags:
        - healthchecks-teardown-swift
    # Template for future teardown blocks:
    # - block:
    #     - name: Remove
    #
    #   when:
    #     - "groups['_all'] | length > 0"
    #     - healthchecks_teardown | default(True) | bool
    #   tags:
    #     - healthchecks-teardown-
  tags:
    - healthchecks
    - healthchecks-teardown

scripts/rabbitmq-test.py

@@ -1,47 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2017, Jean-Philippe Evrard <jean-philippe.evrard@rackspace.co.uk>
#
"""Tests rabbitmq with our hardcoded test credentials"""
import argparse
import sys

try:
    import pika
except Exception:
    sys.exit("Can't import pika")


def rabbitmq_connect(ip=None):
    """Connects to ip using standard port and credentials."""
    credentials = pika.credentials.PlainCredentials('testguest', 'secrete')
    parameters = pika.ConnectionParameters(
        host=ip, virtual_host='/testvhost', credentials=credentials)
    try:
        connection = pika.BlockingConnection(parameters)
        connection.channel()
    except Exception:
        sys.exit("Can't connect to %s" % ip)
    else:
        print("Connected.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("ip", help="The IP to connect to")
    args = parser.parse_args()
    rabbitmq_connect(args.ip)
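
The healthcheck-infrastructure.yml play above installs pika into a throwaway venv, copies this script in, and runs it against the first rabbitmq node, e.g.:

    /tmp/rabbitmqtest/bin/python /tmp/rabbitmqtest/rabbitmq-test.py <rabbitmq management address>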