openstack-ansible/playbooks/healthcheck-infrastructure.yml
Jesse Pretorius 43a5c874ef Remove apt-cacher-ng
The repo container's package cache causes quite a bit of confusion
given that it's a 'hidden' feature which catches deployers off-guard
when they already have their own cache configured. This is really
the kind of service which people should manage outside of OSA. It
also makes no sense if the deployer is using their own local mirror
which is a fairly common practice. Adding to that, it seems that it
is broken in bionic, causing massive delays in package installs.
Finally, it also adds quite a bit of complexity because it lives in
a container: in the playbooks that run before the container exists,
we have to detect whether it's there and add or remove the config
accordingly.

Let's just remove it and let deployers manage their own caching
infrastructure if they want it.

Depends-On: https://review.openstack.org/608631
Change-Id: Ia0fb41266a6d62073b02c8ad6fa97b8b7d408a67
2018-10-08 17:14:10 +00:00

---
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This playbook is meant to run after setup-infrastructure, and expects
# the infrastructure components to have been deployed properly in order to succeed.
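# For example, the whole healthcheck can be run on its own after
# setup-infrastructure (the commands below assume the usual openstack-ansible
# wrapper and the playbooks directory as the working directory):
#   openstack-ansible healthcheck-infrastructure.yml
# A single check can be selected with the tags defined on each play, e.g.:
#   openstack-ansible healthcheck-infrastructure.yml --tags healthcheck-haproxy-install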
# Test unbound-install.yml
# TO BE IMPLEMENTED
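# A minimal sketch of what such a check could look like is kept below as a
# comment only; it is not part of this playbook. It assumes an 'unbound' host
# group and that 'dig' is available on those hosts, and the query target
# variable is hypothetical with a public fallback.
# - name: Ensure unbound resolvers answer queries
#   hosts: unbound
#   gather_facts: no
#   tasks:
#     - name: Query the local unbound resolver
#       command: "dig @127.0.0.1 {{ unbound_healthcheck_query | default('openstack.org') }} +time=3 +tries=1"
#       changed_when: false
#   tags:
#     - healthcheck
#     - healthcheck-unbound-install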
# Test repo-install.yml
- name: Ensure all repo-servers are built and are accessible by hosts.
  hosts: all_containers[0]:physical_hosts[0]
  gather_facts: yes
  vars:
    repo_requirements_file: "os-releases/{{ openstack_release }}/{{ os_distro_version }}/requirements_constraints.txt"
  tasks:
    - name: Check the upper constraint on each repo server
      uri:
        url: "http://{{ hostvars[item]['container_address'] }}:{{ repo_server_port }}/{{ repo_requirements_file }}"
      with_inventory_hostnames: "{{ groups['repo_all'] }}"
      when: install_method == 'source'
  tags:
    - healthcheck
    - healthcheck-repo-install
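# A rough manual equivalent of the check above, with placeholders for the
# container address and the release/distro path (repo_server_port is the
# configured repo server port):
#   curl -I http://<repo_container_address>:<repo_server_port>/os-releases/<openstack_release>/<os_distro_version>/requirements_constraints.txt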
# Test haproxy-install.yml
- name: Ensuring haproxy runs
  hosts: haproxy
  gather_facts: yes
  tasks:
    - name: Check if host can connect to keepalived ping IP
      command: "ping -c 2 {{ keepalived_ping_address }}"
      changed_when: false
    - name: Checking if keepalived is running
      command: "pgrep keepalived"
      changed_when: false
      when: groups['haproxy'] | length > 1
    - package:
        name: netcat
        state: present
    # Fails if HAProxy is not running
    - name: Recording haproxy stats as a way to ensure haproxy runs
      shell: 'echo "show info;show stat" | nc -U /var/run/haproxy.stat'
      changed_when: false
      register: haproxy_stats
    # Run this playbook with -v and you'll see your DOWN issues
    - name: Printing the output of haproxy stats
      debug:
        var: haproxy_stats
        verbosity: 1
  tags:
    - healthcheck
    - healthcheck-haproxy-install
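# The same stats can be read by hand on a haproxy node through the admin
# socket used above; for example, to list only the proxy/server names and
# their status (status is the 18th field of the CSV output):
#   echo "show stat" | nc -U /var/run/haproxy.stat | cut -d, -f1,2,18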
# Test repo-use.yml
- name: Ensure all the containers can connect to the repos
  hosts: all_containers
  gather_facts: yes
  # By using serial, the first three containers hit the load balancer at the
  # same time, which should spread the requests across three different repo
  # servers.
  # Once that is done, the remaining containers are checked all at once.
  serial:
    - 3
    - 100%
  vars_files:
    - defaults/source_install.yml
  tasks:
    # Repo release path points to the internal LB vip
    - name: Check the presence of upper constraints on your repos and check load balancing
      uri:
        url: "{{ repo_release_path }}/requirements_constraints.txt"
  tags:
    - healthcheck
    - healthcheck-repo-use
# Test utility-install.yml
- name: Ensure the service setup host is ready to run openstack calls
  hosts: "{{ openstack_service_setup_host | default('localhost') }}"
  gather_facts: no
  tasks:
    - name: Get openstack client config
      os_client_config:
    - name: Show openstack client config
      debug:
        var: openstack.clouds
        verbosity: 1
  tags:
    - healthcheck
    - healthcheck-utility-install
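# A manual follow-up from the same host, assuming a 'default' cloud entry
# exists in the clouds.yaml loaded above, would be something like:
#   openstack --os-cloud default token issue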
# Test memcached-install.yml
- name: Check memcached for keystone
  hosts: keystone_all
  gather_facts: no
  tasks:
    - name: Set facts about memcached
      setup:
      delegate_to: "{{ item }}"
      delegate_facts: true
      with_items: "{{ groups['memcached'] }}"
    - package:
        name: netcat
        state: present
    - name: Connect to remote memcache servers (full mesh testing)
      shell: "echo stats | nc -w 3 {{ hostvars[memcached_host]['container_address'] }} {{ memcached_port }}"
      changed_when: false
      register: memcache_stats
      with_items: "{{ groups['memcached'] }}"
      loop_control:
        loop_var: memcached_host
    - name: Output memcache stats if in verbose mode
      debug:
        var: memcache_stats
        verbosity: 1
  tags:
    - healthcheck
    - healthcheck-memcached-install
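# The same thing can be checked by hand from any keystone container, assuming
# netcat is installed and using the configured memcached_port:
#   echo stats | nc -w 3 <memcached_container_address> <memcached_port> | grep -E 'uptime|curr_connections'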
# Test galera-install.yml
- name: Sanity checks for all containers
  hosts: all_containers:physical_hosts
  gather_facts: no
  tasks:
    - name: Connect to galera port
      wait_for:
        port: 3306
        host: "{{ internal_lb_vip_address }}"
        state: started
  tags:
    - healthcheck
    - healthcheck-galera-install
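# This only proves that the LB VIP forwards TCP 3306. A deeper manual check
# can be run on a galera host, assuming client credentials are available
# there (for instance via /root/.my.cnf):
#   mysql -e "SHOW STATUS WHERE Variable_name IN ('wsrep_cluster_size', 'wsrep_cluster_status');"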
# Test rabbitmq-install.yml
- name: Add a user for rabbitmq
  hosts: rabbitmq_all[0]
  gather_facts: no
  tasks:
    - name: Configure Rabbitmq vhost
      rabbitmq_vhost:
        name: "/testvhost"
        state: "present"
    - name: Configure Rabbitmq user
      rabbitmq_user:
        user: "testguest"
        password: "secrete"
        vhost: "/testvhost"
        configure_priv: ".*"
        read_priv: ".*"
        write_priv: ".*"
        state: "present"
      no_log: True
  tags:
    - healthcheck
    - healthcheck-rabbitmq-install
- name: Ensure all the usual openstack containers can connect to rabbit
  hosts: all_containers:!etcd_all:!galera_all:!memcached:!haproxy:!rabbitmq_all:!rsyslog:!unbound:!repo_all
  gather_facts: no
  vars:
    venv_path: /tmp/rabbitmqtest
  post_tasks:
    - name: Generate venv for rabbitmq testing
      include_role:
        name: "python_venv_build"
        private: yes
      vars:
        venv_install_destination_path: "{{ venv_path }}"
        venv_pip_packages:
          - pika
        venv_build_host_wheel_path: "{{ repo_pypiserver_package_path | default('/var/www/repo/pools') }}"
        venv_pip_install_args: >-
          --index-url {{ repo_build_pip_default_index | default('https://pypi.python.org/simple') }}
          --trusted-host {{ (repo_build_pip_default_index | default('https://pypi.python.org/simple')) | netloc_no_port }}
    - name: Copying test script
      copy:
        src: "../scripts/rabbitmq-test.py"
        dest: "{{ venv_path }}/rabbitmq-test.py"
        mode: 0755
    - name: Connect to rabbitmq
      command: "{{ venv_path }}/bin/python2 {{ venv_path }}/rabbitmq-test.py {{ hostvars[groups['rabbitmq_all'][0]]['container_address'] }}"
  tags:
    - healthcheck
    - healthcheck-rabbitmq-install
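# A quick manual cross-check on a rabbitmq host, assuming rabbitmqctl is in
# PATH, is to confirm the test vhost and user created above:
#   rabbitmqctl list_vhosts
#   rabbitmqctl authenticate_user testguest secrete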
- name: Remove guest user for rabbitmq
  hosts: rabbitmq_all[0]
  gather_facts: no
  tasks:
    - name: Remove test user
      rabbitmq_user:
        user: testguest
        password: secrete
        vhost: "/testvhost"
        state: absent
      no_log: true
    - name: Remove test vhost
      rabbitmq_vhost:
        name: "/testvhost"
        state: "absent"
  tags:
    - healthcheck
    - healthcheck-rabbitmq-install
    - healthcheck-teardown
# TODO: Tests for the other playbooks.