kolla-ansible/tests/pre.yml
Martin Hiner 53e8b80ed3 Add container engine option to scripts
This patch adds a way to choose the container engine in the tool and test
scripts. This is done in preparation for the Podman introduction, but it
still leaves Docker as the default container engine.

Signed-off-by: Martin Hiner <m.hiner@partner.samsung.com>
Change-Id: I395d2bdb0dfb4b325b6ad197c8893c8a0f768324
2023-04-28 16:16:55 +02:00
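For this playbook, the change amounts to forwarding a container engine choice to the
log collection script. A minimal sketch with hypothetical values (the real usage is the
"Run diagnostics script" task below): the CI job is assumed to define container_engine,
and the task exports it as CONTAINER_ENGINE to the shell script, with Docker remaining
the default.

    vars:
      container_engine: "podman"      # hypothetical override; "docker" stays the default
    tasks:
      - name: Run a tool script with the selected container engine
        environment:
          CONTAINER_ENGINE: "{{ container_engine }}"
        script: get_logs.sh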


---
- hosts: all
  any_errors_fatal: true
  vars:
    logs_dir: "/tmp/logs"
  roles:
    - multi-node-firewall
    - role: multi-node-vxlan-overlay
      vars:
        vxlan_interface_name: "{{ api_interface_name }}"
        vxlan_vni: 10000
    - role: multi-node-managed-addressing
      vars:
        managed_interface_name: "{{ api_interface_name }}"
        managed_network_prefix: "{{ api_network_prefix }}"
        managed_network_prefix_length: "{{ api_network_prefix_length }}"
        managed_network_address_family: "{{ address_family }}"
    # NOTE(yoctozepto): no addressing for neutron_external_interface in here
    # because it is enslaved by a bridge
    - role: multi-node-vxlan-overlay
      vars:
        vxlan_interface_name: "{{ neutron_external_vxlan_interface_name }}"
        vxlan_vni: 10001
    - role: bridge
      vars:
        bridge_name: "{{ neutron_external_bridge_name }}"
        bridge_member_name: "{{ neutron_external_vxlan_interface_name }}"
    # TODO(mnasiadka): Update ipv6 jobs to test ipv6 in Neutron
    - role: multi-node-managed-addressing
      vars:
        managed_interface_name: "{{ neutron_external_bridge_name }}"
        managed_network_prefix: "{{ neutron_external_network_prefix }}"
        managed_network_prefix_length: "{{ neutron_external_network_prefix_length }}"
        managed_network_address_family: "ipv4"
    - role: veth
      vars:
        veth_pair:
          - "veth-{{ neutron_external_bridge_name }}"
          - "veth-{{ neutron_external_bridge_name }}-ext"
        bridge_name: "{{ neutron_external_bridge_name }}"
  tasks:
    # NOTE(yoctozepto): we use gawk to add time to each logged line
    # outside of Ansible (e.g. for init-runonce)
    - name: Install gawk and required Python modules
      become: true
      package:
        name:
          - gawk
          - python3-pip
          - python3-setuptools
    - name: Install lvm on storage scenarios
      become: true
      package:
        name: lvm2
      when: scenario in ['cephadm', 'zun', 'swift']
    - name: Ensure /tmp/logs/ dir
      file:
        path: "{{ logs_dir }}"
        state: "directory"
    - name: Ensure /tmp/logs/pre dir
      file:
        path: "{{ logs_dir }}/pre"
        state: "directory"
    - name: Run diagnostics script
      environment:
        LOG_DIR: "{{ logs_dir }}/pre"
        KOLLA_INTERNAL_VIP_ADDRESS: "{{ kolla_internal_vip_address }}"
        CONTAINER_ENGINE: "{{ container_engine }}"
      script: get_logs.sh
      register: get_logs_result
      become: true
      failed_when: false
    - name: Print get_logs output
      debug:
        msg: "{{ get_logs_result.stdout }}"
    - name: Ensure node directories
      file:
        path: "{{ logs_dir }}/{{ item }}"
        state: "directory"
        mode: 0777
      with_items:
        - "container_logs"
        - "kolla_configs"
        - "system_logs"
        - "kolla"
        - "ansible"
    # NOTE(yoctozepto): let's observe forwarding behavior
    - name: iptables - LOG FORWARD
      become: true
      iptables:
        state: present
        action: append
        chain: FORWARD
        jump: LOG
        log_prefix: 'iptables FORWARD: '
    - name: Set new hostname based on ansible inventory file
      hostname:
        name: "{{ inventory_hostname }}"
        use: systemd
      become: true
    # NOTE(wxy): There are some issues on openEuler; fix them by hand.
    # 1. iptables-legacy is used by default.
    # 2. NTP sync doesn't work by default.
    - block:
        # The Ubuntu 22.04 containers use iptables-nft, while the openEuler
        # 22.03 host defaults to iptables-legacy. Update the host so both
        # use the same iptables backend.
        - name: Set iptables from legacy to nft for container
          shell:
            cmd: |
              dnf install -y iptables-nft
              iptables-save > iptables.txt
              iptables-nft-restore < iptables.txt
              update-alternatives --set iptables /usr/sbin/iptables-nft
          become: true
        # The command `timedatectl status` always times out if the command
        # `timedatectl show-timesync` is not run first.
        - name: Install systemd-timesyncd
          package:
            name: systemd-timesyncd
            state: present
          become: true
        - name: Let ntp service work
          shell: timedatectl show-timesync
          become: true
      when: ansible_facts.distribution == 'openEuler'
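    # Poll until the clock reports as synchronised; with 90 retries and a
    # 10-second delay this allows up to 15 minutes for NTP to catch up.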
    - name: Wait for ntp time sync
      command: timedatectl status
      register: timedatectl_status
      changed_when: false
      until: "'synchronized: yes' in timedatectl_status.stdout"
      retries: 90
      delay: 10