CI: Use VXLAN overlay network

VXLAN is necessary to run HA in CI (due to the floating VIP
address handled by keepalived).
It also turned out to be required for private
IPv6 address assignments.
This patch is based on Linux bridge rather than OVS
to avoid problems with OVS deployed in containers.
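
For context, the per-node setup added in the pre tasks boils down to
roughly the following commands (a sketch; the 10.0.0.x underlay
addresses and the peer list are hypothetical, the real values come
from nodepool.private_ipv4 and the job vars below):

    # create the overlay interface on top of the node's private (underlay) address
    ip link add vxlan0 type vxlan id 10001 local 10.0.0.1 dstport 4789
    # leave 50 bytes for VXLAN encapsulation (assuming a 1500-byte parent MTU)
    ip link set vxlan0 mtu 1450
    # emulate BUM traffic by flooding to each peer via unicast
    bridge fdb append 00:00:00:00:00:00 dev vxlan0 dst 10.0.0.2
    bridge fdb append 00:00:00:00:00:00 dev vxlan0 dst 10.0.0.3
    # assign the per-node API network address, allow and bring it up
    ip address add 192.0.2.1/24 broadcast 192.0.2.255 dev vxlan0
    iptables -I INPUT -i vxlan0 -j ACCEPT
    ip link set vxlan0 up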

This patch enables haproxy in multinode jobs.

Includes saving of Linux networking details in the collected logs.

Makes DASHBOARD_URL agree with OS_AUTH_URL so that the
pre-upgrade value is properly used for testing.

Co-authored-by: Radosław Piliszek <radoslaw.piliszek@gmail.com>
Depends-on: https://review.opendev.org/683068
Depends-on: https://review.opendev.org/682957
Change-Id: I66888712da80c3d6f84ee4949762961664d3adea
Mark Goddard 2019-07-13 10:45:18 +01:00 committed by Radosław Piliszek
parent e2f511b7d9
commit 8e40629161
7 changed files with 98 additions and 18 deletions

@@ -30,6 +30,26 @@ copy_logs() {
    mount > ${LOG_DIR}/system_logs/mount.txt
    env > ${LOG_DIR}/system_logs/env.txt
    (set -x
     ip a
     ip l
     ip r
     ping -c 4 ${KOLLA_INTERNAL_VIP_ADDRESS}) &> ${LOG_DIR}/system_logs/ip.txt
    (set -x
     iptables -t raw -v -n -L
     iptables -t mangle -v -n -L
     iptables -t nat -v -n -L
     iptables -t filter -v -n -L) &> ${LOG_DIR}/system_logs/iptables.txt
    (set -x
     ip6tables -t raw -v -n -L
     ip6tables -t mangle -v -n -L
     ip6tables -t nat -v -n -L
     ip6tables -t filter -v -n -L) &> ${LOG_DIR}/system_logs/ip6tables.txt
    ss -putona > ${LOG_DIR}/system_logs/ss.txt
    if [ `command -v dpkg` ]; then
        dpkg -l > ${LOG_DIR}/system_logs/dpkg-l.txt
    fi

@@ -9,6 +9,8 @@
        dest: "{{ logs_dir }}/facts.json"
    - name: Run diagnostics script
      environment:
        KOLLA_INTERNAL_VIP_ADDRESS: "{{ kolla_internal_vip_address }}"
      script: get_logs.sh
      register: get_logs_result
      become: true

@@ -38,5 +38,69 @@
      hostname:
        name: "{{ inventory_hostname }}"
      become: true
    # NOTE(yoctozepto): start VXLAN interface config
    - name: Set VXLAN interface facts
      set_fact:
        api_interface_address: "{{ api_network_prefix }}{{ groups['all'].index(inventory_hostname) + 1 }}"
        api_interface_tunnel_vni: 10001
        tunnel_local_address: "{{ nodepool.private_ipv4 }}"
    - name: Create VXLAN interface
      become: true
      command: ip link add {{ api_interface_name }} type vxlan id {{ api_interface_tunnel_vni }} local {{ tunnel_local_address }} dstport 4789
    - name: Set VXLAN interface MTU
      become: true
      vars:
        # Find the parent interface
        parent_interface: >-
          {{ ansible_interfaces |
             map('extract', ansible_facts) |
             selectattr('ipv4.address', 'defined') |
             selectattr('ipv4.address', 'equalto', tunnel_local_address) |
             first }}
        # Allow 50 bytes of overhead for the VXLAN headers.
        mtu: "{{ parent_interface.mtu | int - 50 }}"
      command: ip link set {{ api_interface_name }} mtu {{ mtu }}
    # Emulate BUM traffic by replicating it to the unicast targets.
    - name: Add fdb entries for BUM traffic
      become: true
      vars:
        dest_ip: "{{ hostvars[item].tunnel_local_address }}"
      command: bridge fdb append 00:00:00:00:00:00 dev {{ api_interface_name }} dst {{ dest_ip }}
      with_inventory_hostnames: all
      when: item != inventory_hostname
    - name: Add IP address for VXLAN network
      become: true
      vars:
        api_network_cidr: "{{ api_interface_address }}/{{ api_network_prefix_length }}"
        # NOTE(yoctozepto): we have to compute and explicitly set the broadcast address,
        # otherwise bifrost fails its pre-bootstrap sanity checks: without it, Ansible
        # picks up the scope ('global') as the interface's broadcast address, which
        # breaks the checks logic.
        api_network_broadcast_address: "{{ api_network_cidr | ipaddr('broadcast') }}"
      command: ip address add {{ api_network_cidr }} broadcast {{ api_network_broadcast_address }} dev {{ api_interface_name }}
    - name: Accept traffic on the VXLAN network
      become: true
      iptables:
        state: present
        action: insert
        chain: INPUT
        ip_version: ipv4
        in_interface: "{{ api_interface_name }}"
        jump: ACCEPT
    - name: Bring VXLAN interface up
      become: true
      command: ip link set {{ api_interface_name }} up
    - name: Ping across VXLAN
      command: ping -c1 {{ hostvars[item].api_interface_address }}
      with_inventory_hostnames: all
  roles:
    - multi-node-firewall
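
For reference, the 50-byte MTU allowance corresponds to the IPv4 VXLAN
encapsulation overhead: 14 (outer Ethernet) + 20 (outer IPv4) + 8 (UDP)
+ 8 (VXLAN) bytes. To inspect the result on a node, something like the
following can be used (a sketch; assumes the vxlan0 name set in the job
vars below):

    ip -d link show vxlan0       # shows the VNI, dstport 4789 and the lowered MTU
    bridge fdb show dev vxlan0   # lists the appended all-zero entries, one per peer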

@@ -1,10 +1,12 @@
---
- hosts: all
  tasks:
    # NOTE(yoctozepto): ensure we pick up fact changes from pre
    - name: Refresh facts
      setup:
    # NOTE(yoctozepto): setting vars as facts for all to have them around in all the plays
    - name: set facts for commonly used variables
      vars:
        api_interface_address: "{{ nodepool.private_ipv4 }}"
      set_fact:
        kolla_inventory_path: "/etc/kolla/inventory"
        logs_dir: "/tmp/logs"
@@ -14,17 +16,7 @@
        build_image_tag: "change_{{ zuul.change | default('none') }}"
        is_upgrade: "{{ 'upgrade' in scenario }}"
        is_ceph: "{{ 'ceph' in scenario }}"
        api_interface_address: "{{ api_interface_address }}"
        # FIXME: in multi node env, api_interface may be different on each node.
        api_interface_name: >-
          {{ (ansible_interfaces |
              map('replace', '-', '_') |
              map('extract', ansible_facts) |
              selectattr('ipv4.address', 'defined') |
              selectattr('ipv4.address', 'equalto', api_interface_address) |
              first).device }}
        # We use HAProxy and a VIP for single node, not for multinode jobs.
        kolla_internal_vip_address: "{{ api_interface_address if hostvars | length > 2 else '169.254.169.10' }}"
        primary_address: "{{ hostvars.primary['ansible_' + api_interface_name].ipv4.address }}"
    - name: Prepare disks for Ceph or LVM
      script: "setup_disks.sh {{ disk_type }}"
@@ -216,7 +208,6 @@
        chdir: "{{ kolla_ansible_src_dir }}"
      environment:
        ACTION: "{{ scenario }}"
        DASHBOARD_URL: "http://{{ kolla_internal_vip_address }}"
      when: scenario not in ['ironic', 'scenario_nfv']
    - name: Run test-zun.sh script
@@ -361,7 +352,6 @@
        chdir: "{{ kolla_ansible_src_dir }}"
      environment:
        ACTION: "{{ scenario }}"
        DASHBOARD_URL: "http://{{ kolla_internal_vip_address }}"
    - name: Run test-zun.sh script
      shell:

@@ -10,7 +10,6 @@ keepalived_virtual_router_id: "{{ 250 | random(1) }}"
{% if enable_core_openstack | bool %}
kolla_internal_vip_address: "{{ kolla_internal_vip_address }}"
enable_haproxy: "{{ 'no' if hostvars | length > 2 else 'yes' }}"
neutron_external_interface: "fake_interface"
openstack_logging_debug: "True"
openstack_service_workers: "1"
@@ -19,7 +18,7 @@ openstack_service_workers: "1"
{% if need_build_image and not is_previous_release %}
# NOTE(Jeffrey4l): use a different docker namespace name in case it pulls images from hub.docker.io when deploying
docker_namespace: "lokolla"
docker_registry: "{{ api_interface_address }}:4000"
docker_registry: "{{ primary_address }}:4000"
openstack_release: "{{ build_image_tag }}"
{% else %}
# use docker hub images
@@ -29,7 +28,7 @@ docker_namespace: "kolla"
# will be the source of images during the upgrade.
# NOTE(yoctozepto): this is required here for CI because we run the templating
# of the docker systemd command only once
docker_custom_option: "--insecure-registry {{ api_interface_address }}:4000"
docker_custom_option: "--insecure-registry {{ primary_address }}:4000"
{% endif %}
{% if not is_previous_release %}
openstack_release: "{{ zuul.branch | basename }}"
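
For reference, the registry settings now render against primary_address.
A sketch of the resulting values, assuming a hypothetical primary address
of 192.0.2.1 and that the CI registry speaks the standard registry v2 API:

    # rendered values in globals.yml:
    #   docker_registry: "192.0.2.1:4000"
    #   docker_custom_option: "--insecure-registry 192.0.2.1:4000"
    # reachability check from any node:
    curl http://192.0.2.1:4000/v2/_catalog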

@@ -74,6 +74,7 @@ function test_instance_boot {
function check_dashboard {
    # Query the dashboard, and check that the returned page looks like a login
    # page.
    DASHBOARD_URL=${OS_AUTH_URL%:*}
    output_path=$1
    if ! curl --include --location --fail $DASHBOARD_URL > $output_path; then
        return 1
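
The %:* expansion drops everything from the last colon onwards, so the
dashboard is queried on the same host as Keystone, on the scheme's
default port. A sketch with a hypothetical pre-upgrade auth URL:

    OS_AUTH_URL=http://192.0.2.10:5000/v3   # hypothetical value
    echo "${OS_AUTH_URL%:*}"                # prints http://192.0.2.10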

@@ -19,6 +19,10 @@
    vars:
      scenario: aio
      enable_core_openstack: yes
      api_network_prefix: "192.0.2."
      api_network_prefix_length: "24"
      api_interface_name: vxlan0
      kolla_internal_vip_address: "192.0.2.10"
    roles:
      - zuul: zuul/zuul-jobs
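
For illustration, with a hypothetical three-node inventory these defaults
resolve as follows (192.0.2.0/24 is the TEST-NET-1 range reserved for
documentation and testing):

    # a sketch, assuming groups['all'] = [primary, secondary1, secondary2]
    #   groups['all'].index(inventory_hostname) + 1  ->  1, 2, 3
    #   api_interface_address                        ->  192.0.2.1, 192.0.2.2, 192.0.2.3
    #   kolla_internal_vip_address (keepalived VIP)  ->  192.0.2.10, outside the per-node range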