Add container-with-siblings functional test

This adds an initial container-based test. We use the "siblings"
containers, which makes this job roughly equivalent to the existing
non-container "-src" jobs that install dependencies from source (a
follow-on change, If510238c6ab2b8f6570848f76e84383925c73d87, will add
jobs based on released dependencies).

Change-Id: I0b8c309fe3284a2824a38d343fca168441f20471
Ian Wienand 2019-11-08 13:23:48 +11:00
parent 2d9958f738
commit ceb6e2bbdb
8 changed files with 568 additions and 1 deletion

.zuul.yaml

@@ -151,6 +151,8 @@
name: nodepool-build-image
parent: opendev-build-docker-image
description: Build Docker images.
dependencies:
- opendev-buildset-registry
vars: &nodepool_image_vars
docker_images:
- context: .
@@ -186,8 +188,10 @@
# NOTE(ianw) : kept separate as these aren't intended to be released
- job:
name: nodepool-build-image-siblings
dependencies:
- opendev-buildset-registry
parent: opendev-build-docker-image
- description: Build docker images, with required projects as sibling from source
+ description: Build container images, with required projects as siblings from source
required-projects:
- openstack/diskimage-builder
- openstack/openstacksdk
@@ -209,9 +213,66 @@
- opendev.org/openstack/openstacksdk
- opendev.org/openstack/diskimage-builder
- job:
name: nodepool-functional-container-openstack-base
abstract: true
description: |
Test Nodepool containers with an OpenStack cloud.
This is similar to nodepool-functional-openstack-base, but
uses containers to deploy nodepool.
Note this is an abstract job and does not define an image to
build. If you wish to test released dependencies, you should
inherit from this job and define nodepool_diskimage. If you
wish to test against dependencies installed from source, you
should inherit from
nodepool-functional-container-openstack-siblings-base.
timeout: 5400
required-projects:
- zuul/nodepool
pre-run: playbooks/nodepool-functional-container-openstack/pre.yaml
run: playbooks/nodepool-functional-container-openstack/run.yaml
post-run: playbooks/nodepool-functional-container-openstack/post.yaml
vars:
zuul_copy_output:
/var/log/nodepool: logs
- job:
name: nodepool-functional-container-openstack-siblings-base
abstract: true
description: |
Test Nodepool containers and OpenStack, with some projects from source
Note this job does not define an image to build; child jobs
should inherit from it and define nodepool_diskimage.
parent: nodepool-functional-container-openstack-base
dependencies:
- nodepool-build-image-siblings
required-projects:
- name: opendev/glean # note, installed by dib
- name: zuul/nodepool
- job:
name: nodepool-functional-container-openstack-siblings
description: |
Test Nodepool containers and OpenStack, with some projects from source
This builds, uploads and boots a CentOS 7 image into an
OpenStack cloud.
parent: nodepool-functional-container-openstack-siblings-base
vars:
nodepool_diskimage:
base_element: centos-minimal
release: 7
mirror: "http://{{ zuul_site_mirror_fqdn }}/centos"
env-vars:
DIB_SIMPLE_INIT_NETWORKMANAGER: '1'
- project:
check:
jobs:
- opendev-buildset-registry
- nodepool-build-image
- nodepool-build-image-siblings
- zuul-tox-docs
@@ -227,6 +288,7 @@
- nodepool-functional-openstack-src:
vars:
nodepool_debug: true
- nodepool-functional-container-openstack-siblings
- nodepool-functional-k8s
- nodepool-functional-openshift
- zuul-quick-start:
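
The abstract base job above deliberately leaves nodepool_diskimage
undefined. As a sketch of the released-dependencies case (the job name
here is hypothetical; the real jobs arrive in the follow-on change
referenced in the commit message), a child job only needs to inherit
and define the image:

- job:
    name: nodepool-functional-container-openstack-release  # hypothetical
    description: Test released Nodepool containers with an OpenStack cloud.
    parent: nodepool-functional-container-openstack-base
    vars:
      nodepool_diskimage:
        base_element: centos-minimal
        release: 7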

playbooks/nodepool-functional-container-openstack/check.sh

@@ -0,0 +1,151 @@
#!/bin/bash -ex
LOGDIR=/home/zuul/zuul-output/logs
# Set to indicate an error return
RETURN=0
FAILURE_REASON=""
NODEPOOL="docker exec nodepool_nodepool-launcher_1 nodepool"
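# NOTE: the grep/cut parsing below assumes the '|'-separated table
# output of "nodepool list": field 5 is the provider's server ID
# (passed to "nova show") and field 6 is the node's address (used
# as the ssh target).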
cat > /tmp/ssh_wrapper <<EOF
#!/bin/bash -ex
sudo -H -u zuul ssh -o StrictHostKeyChecking=no -i $HOME/.ssh/id_nodepool root@\$@
EOF
sudo chmod 0755 /tmp/ssh_wrapper
function sshintonode {
name=$1
state='ready'
node=`$NODEPOOL list | grep $name | grep $state | cut -d '|' -f6 | tr -d ' '`
/tmp/ssh_wrapper $node ls /
# Check that the root partition grew on boot; it should be a 5GiB
# partition (5368709120 bytes) minus some space for the boot
# partition. However, empirical evidence suggests there is some
# modulo maths going on (possibly with alignment?) that means the
# final size can vary by up to 64MiB. Thus we check against
# 5,000,000,000 bytes, which gives enough slop to avoid false
# matches but still indicates we resized up.
root_size=$(/tmp/ssh_wrapper $node -- lsblk -rbno SIZE /dev/vda1)
expected_root_size=$(( 5000000000 ))
if [[ $root_size -lt $expected_root_size ]]; then
echo "*** Root device does not appear to have grown: $root_size"
FAILURE_REASON="Root partition of $name does not appear to have grown: $root_size < $expected_root_size"
RETURN=1
fi
# Check we saw metadata deployed to the config-drive
/tmp/ssh_wrapper $node \
"dd status=none if=/dev/sr0 | tr -cd '[:print:]' | grep -q nodepool_devstack"
if [[ $? -ne 0 ]]; then
echo "*** Failed to find metadata in config-drive"
FAILURE_REASON="Failed to find meta-data in config-drive for $node"
RETURN=1
fi
}
function showserver {
name=$1
state='ready'
node_id=`$NODEPOOL list | grep $name | grep $state | cut -d '|' -f5 | tr -d ' '`
EXPECTED=$(mktemp)
RESULT=$(mktemp)
source /opt/devstack/openrc admin admin
nova show $node_id | grep -Eo "user_data[ ]+.*|[ ]*$" | awk {'print $3'} |\
base64 --decode > $RESULT
cat <<EOF >$EXPECTED
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
EOF
diff $EXPECTED $RESULT
if [[ $? -ne 0 ]]; then
echo "*** Failed to find userdata on server!"
FAILURE_REASON="Failed to find userdata on server for $node"
echo "Expected userdata:"
cat $EXPECTED
echo "Found userdata:"
cat $RESULT
RETURN=1
fi
}
function checknm {
name=$1
state='ready'
node=`$NODEPOOL list | grep $name | grep $state | cut -d '|' -f6 | tr -d ' '`
nm_output=$(/tmp/ssh_wrapper $node -- nmcli c)
# virtio device is eth0 on older, ens3 on newer
if [[ ! ${nm_output} =~ (eth0|ens3) ]]; then
echo "*** Failed to find interface in NetworkManager connections"
/tmp/ssh_wrapper $node -- nmcli c
/tmp/ssh_wrapper $node -- nmcli device
FAILURE_REASON="Failed to find interface in NetworkManager connections"
RETURN=1
fi
}
function waitforimage {
local name=$1
local state='ready'
local builds
while ! $NODEPOOL image-list | grep $name | grep $state; do
$NODEPOOL image-list > ${LOGDIR}/nodepool-image-list.txt
$NODEPOOL list --detail > ${LOGDIR}/nodepool-list.txt
builds=$(ls -l /var/log/nodepool/builds/ | grep $name | wc -l)
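# Each build attempt leaves a log under builds/, so four or more
# entries without a ready image means the build keeps failing.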
if [[ ${builds} -ge 4 ]]; then
echo "*** Build of $name failed at least 3 times, aborting"
exit 1
fi
sleep 10
done
}
function waitfornode {
name=$1
state='ready'
while ! $NODEPOOL list | grep $name | grep $state | grep "unlocked"; do
$NODEPOOL image-list > ${LOGDIR}/nodepool-image-list.txt
$NODEPOOL list --detail > ${LOGDIR}/nodepool-list.txt
sleep 10
done
}
# check that image built
waitforimage test-image
# check image was bootable
waitfornode test-image
# check ssh for root user
sshintonode test-image
# networkmanager check
# TODO(jeblair): This should not run in all cases; in fact, most of
# this checking should move into the dib repo
#checknm test-image
# userdata check
showserver test-image
set -o errexit
# Show the built nodes
$NODEPOOL list
# Try to delete the nodes that were just built
$NODEPOOL delete --now 0000000000
# show the deleted nodes (and their replacements may be building)
$NODEPOOL list
if [[ -n "${FAILURE_REASON}" ]]; then
echo "${FAILURE_REASON}"
fi
exit $RETURN

playbooks/nodepool-functional-container-openstack/post.yaml

@@ -0,0 +1,64 @@
- hosts: all
tasks:
- name: Copy nodepool log files
synchronize:
src: /var/log/nodepool
dest: '{{ zuul.executor.log_root }}'
mode: pull
- name: Copy nodepool config files
synchronize:
src: /etc/nodepool
dest: '{{ zuul.executor.log_root }}'
mode: pull
- name: Copy instance console logs
become: true
synchronize:
src: /opt/stack/data/nova/instances
dest: '{{ zuul.executor.log_root }}'
mode: pull
rsync_opts:
- "--include=*/"
- "--include=console.log"
- "--exclude=*"
- name: Export legacy syslog.txt
become: true
shell:
# The journal contains everything running under systemd, we'll
# build an old school version of the syslog with just the
# kernel and sudo messages.
cmd: |
journalctl \
-t kernel \
-t sudo \
--no-pager \
| gzip - > {{ ansible_user_dir }}/syslog.txt.gz
- name: Copy syslog
become: True
synchronize:
src: "{{ ansible_user_dir }}/syslog.txt.gz"
dest: '{{ zuul.executor.log_root }}'
mode: pull
- name: List containers
command: "docker ps -a --format '{{ '{{ .Names }}' }}'"
register: docker_containers
ignore_errors: true
- name: Create container log dir
file:
path: "{{ ansible_user_dir }}/docker"
state: directory
- name: Save container logs
loop: "{{ docker_containers.stdout_lines | default([]) }}"
shell: "docker logs {{ item }} &> {{ ansible_user_dir }}/docker/{{ item }}.txt"
args:
executable: /bin/bash
- name: Copy docker logs
become: True
synchronize:
src: '{{ ansible_user_dir }}/docker'
dest: '{{ zuul.executor.log_root }}'
mode: pull

playbooks/nodepool-functional-container-openstack/pre.yaml

@@ -0,0 +1,24 @@
- hosts: all
roles:
# for zookeeper, etc. on host
- role: bindep
bindep_dir: "{{ zuul.projects['opendev.org/zuul/nodepool'].src_dir }}"
- role: test-setup
zuul_work_dir: "{{ zuul.projects['opendev.org/zuul/nodepool'].src_dir }}"
- install-docker
# Note: keep after install-docker
- use-buildset-registry
- install-devstack
tasks:
- name: Install docker-compose dependencies
package:
name:
- docker-compose
become: yes
- name: Create a local user that matches the container UID
user:
name: nodepool
comment: nodepool
uid: 10001
become: yes
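
The fixed UID here matters because the nodepool containers run as UID
10001, so a matching host account keeps the bind-mounted config, log
and dib directories sensibly owned on the host. A sanity-check sketch
(illustrative only; it assumes the image has already been pulled and
will run a plain command):

- name: Confirm the container UID matches the local nodepool user
  become: yes
  command: docker run --rm zuul/nodepool-launcher:siblings id -u
  register: container_uid
  failed_when: container_uid.stdout != '10001'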

playbooks/nodepool-functional-container-openstack/run.yaml

@@ -0,0 +1,114 @@
- hosts: all
vars:
nodepool_config_dir: "/etc/nodepool"
nodepool_log_dir: "/var/log/nodepool"
NODEPOOL_KEY: "$HOME/.ssh/id_nodepool"
NODEPOOL_KEY_NAME: "root"
NODEPOOL_PUBKEY: "$HOME/.ssh/id_nodepool.pub"
NODEPOOL_INSTALL: "$HOME/nodepool-venv"
NODEPOOL_CACHE_GET_PIP: "/opt/stack/cache/files/get-pip.py"
NODEPOOL_CONFIG: "{{ nodepool_config_dir }}/nodepool.yaml"
NODEPOOL_DIB_BASE_PATH: "/opt/dib"
tasks:
- name: Write clouds.yaml
include_tasks: write_clouds_yaml.yaml
- name: Create nodepool flavors
args:
executable: /bin/bash
shell: |
source /opt/devstack/openrc admin admin
nova flavor-create nodepool-512 64 512 5 1
nova flavor-create nodepool-1024 128 1024 5 1
- name: Create security groups
args:
executable: /bin/bash
shell: |
source /opt/devstack/openrc admin admin
openstack --os-project-name demo --os-username demo security group rule create --ingress --protocol tcp --dst-port 1:65535 --remote-ip 0.0.0.0/0 default
openstack --os-project-name demo --os-username demo security group rule create --ingress --protocol udp --dst-port 1:65535 --remote-ip 0.0.0.0/0 default
- name: Create unmanaged VM
args:
executable: /bin/bash
shell: |
source /opt/devstack/openrc admin admin
openstack network list
cirros_image=$(openstack --os-project-name demo --os-username demo image list | grep cirros | awk '{print $4}' | head -n1)
openstack --os-project-name demo --os-username demo server create --flavor cirros256 --image $cirros_image unmanaged-vm --network public
- name: Create nodepool SSH keypair
args:
executable: /bin/bash
shell: |
source /opt/devstack/openrc admin admin
ssh-keygen -f {{ NODEPOOL_KEY }} -P ""
nova --os-project-name demo --os-username demo keypair-add --pub-key {{ NODEPOOL_PUBKEY }} {{ NODEPOOL_KEY_NAME }}
- name: Write nodepool elements
args:
executable: /bin/bash
shell:
cmd: |
sudo mkdir -p $(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/install.d
sudo mkdir -p $(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/root.d
cat > /tmp/40-nodepool-setup <<EOF
sudo mkdir -p /etc/nodepool
# Make it world writeable so nodepool can write here later.
sudo chmod 777 /etc/nodepool
EOF
cat > /tmp/50-apt-allow-unauthenticated <<EOF
if [ -d "\$TARGET_ROOT/etc/apt/apt.conf.d" ]; then
echo "APT::Get::AllowUnauthenticated \"true\";" | sudo tee \$TARGET_ROOT/etc/apt/apt.conf.d/95allow-unauthenticated
echo "Acquire::AllowInsecureRepositories \"true\";" | sudo tee -a \$TARGET_ROOT/etc/apt/apt.conf.d/95allow-unauthenticated
fi
EOF
sudo mv /tmp/40-nodepool-setup \
$(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/install.d/40-nodepool-setup
sudo chmod a+x \
$(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/install.d/40-nodepool-setup
sudo mv /tmp/50-apt-allow-unauthenticated \
$(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/root.d/50-apt-allow-unauthenticated
sudo chmod a+x \
$(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/root.d/50-apt-allow-unauthenticated
sudo mkdir -p {{ NODEPOOL_DIB_BASE_PATH }}/images
sudo mkdir -p {{ NODEPOOL_DIB_BASE_PATH }}/tmp
sudo mkdir -p {{ NODEPOOL_DIB_BASE_PATH }}/cache
sudo chown -R nodepool:nodepool {{ NODEPOOL_DIB_BASE_PATH }}
- name: Write nodepool config
become: true
template:
src: nodepool.yaml.j2
dest: "{{ NODEPOOL_CONFIG }}"
- name: Create nodepool runtime dirs
become: true
file:
path: '{{ item }}'
state: directory
owner: 'nodepool'
group: 'nodepool'
loop:
- '{{ nodepool_log_dir }}'
- name: Write docker-compose
template:
src: docker-compose.yaml.j2
dest: /etc/nodepool/docker-compose.yaml
mode: 0600
- name: Run docker compose pull
shell:
cmd: docker-compose pull
chdir: /etc/nodepool
- name: Run docker compose up
shell:
cmd: docker-compose up -d --timeout 60
chdir: /etc/nodepool
- name: Cleanup unused images
shell:
cmd: docker image prune -f
- name: Check nodepool functionality
command: "{{ zuul.projects['opendev.org/zuul/nodepool'].src_dir }}/playbooks/nodepool-functional-container-openstack/check.sh"

playbooks/nodepool-functional-container-openstack/templates/docker-compose.yaml.j2

@@ -0,0 +1,39 @@
version: '3.0'
services:
nodepool-builder:
image: zuul/nodepool-builder:siblings
network_mode: host
restart: always
privileged: true
environment:
- DEBUG=1
volumes:
# cloud config
- /etc/openstack:/etc/openstack:ro
# nodepool config
- /etc/nodepool:/etc/nodepool:ro
# large dib storage area
- /opt/dib:/opt/dib
# for dib to get source for glean installs, etc.
- /home/zuul:/home/zuul
# logs (builder + dib build logs under /build)
- /var/log/nodepool:/var/log/nodepool
nodepool-launcher:
image: zuul/nodepool-launcher:siblings
network_mode: host
restart: always
environment:
- DEBUG=1
volumes:
# cloud config
- /etc/openstack:/etc/openstack:ro
# nodepool config
- /etc/nodepool:/etc/nodepool:ro
# logs (builder + dib build logs under /build)
- /var/log/nodepool:/var/log/nodepool

playbooks/nodepool-functional-container-openstack/templates/nodepool.yaml.j2

@@ -0,0 +1,83 @@
elements-dir: {{ NODEPOOL_CONFIG | dirname }}/elements
images-dir: {{ NODEPOOL_DIB_BASE_PATH }}/images
zookeeper-servers:
- host: localhost
port: 2181
labels:
- name: test-image
min-ready: 1
providers:
- name: devstack
region-name: RegionOne
cloud: devstack
# Long boot timeout to deal with potentially nested virt.
boot-timeout: 600
launch-timeout: 900
rate: 0.25
diskimages:
- name: test-image
config-drive: true
pools:
- name: main
max-servers: 5
networks:
- private
labels:
- name: test-image
diskimage: test-image
min-ram: 512
flavor-name: 'nodepool'
console-log: True
key-name: {{ NODEPOOL_KEY_NAME }}
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
diskimages:
- name: test-image
rebuild-age: 86400
elements:
- {{ nodepool_diskimage.base_element }}
{% if 'extra_elements' in nodepool_diskimage %}
{% for item in nodepool_diskimage.extra_elements %}
- {{ item }}
{% endfor %}
{% endif %}
- vm
- simple-init
- growroot
- devuser
- openssh-server
- nodepool-setup
- journal-to-console
release: '{{ nodepool_diskimage.release }}'
env-vars:
TMPDIR: '{{ NODEPOOL_DIB_BASE_PATH }}/tmp'
DIB_CHECKSUM: '1'
DIB_SHOW_IMAGE_USAGE: '1'
DIB_IMAGE_CACHE: '{{ NODEPOOL_DIB_BASE_PATH }}/cache'
DIB_APT_LOCAL_CACHE: '0'
DIB_DISABLE_APT_CLEANUP: '1'
DIB_DEBIAN_COMPONENTS: 'main,universe'
DIB_DEV_USER_AUTHORIZED_KEYS: '{{ NODEPOOL_PUBKEY }}'
{% if 'mirror' in nodepool_diskimage %}
DIB_DISTRIBUTION_MIRROR: '{{ nodepool_diskimage.mirror }}'
{% endif %}
DIB_DEBOOTSTRAP_EXTRA_ARGS: '--no-check-gpg'
# TODO(corvus): cached $DIB_GET_PIP
{% if zuul.projects.get('opendev.org/opendev/glean', {}).get('required', False) %}
DIB_INSTALLTYPE_simple_init: 'repo'
DIB_REPOLOCATION_glean: "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/opendev/glean'].src_dir }}"
DIB_REPOREF_glean: "{{ zuul.projects['opendev.org/opendev/glean'].checkout }}"
{% endif %}
{% for k, v in nodepool_diskimage.get('env-vars', {}).items() %}
{{ k }}: "{{ v }}"
{% endfor %}
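
For reference, with the vars set by
nodepool-functional-container-openstack-siblings above (centos-minimal,
release 7, one extra env-var), the diskimages stanza renders to roughly
the following; the mirror and glean-from-source settings are elided
here since they depend on the job environment:

diskimages:
  - name: test-image
    rebuild-age: 86400
    elements:
      - centos-minimal
      - vm
      - simple-init
      - growroot
      - devuser
      - openssh-server
      - nodepool-setup
      - journal-to-console
    release: '7'
    env-vars:
      TMPDIR: '/opt/dib/tmp'
      DIB_CHECKSUM: '1'
      # ... other DIB_* settings from the template ...
      DIB_SIMPLE_INIT_NETWORKMANAGER: "1"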

playbooks/nodepool-functional-container-openstack/write_clouds_yaml.yaml

@@ -0,0 +1,30 @@
- name: Load clouds.yaml
become: true
slurp:
path: /etc/openstack/clouds.yaml
register: clouds_yaml
- name: Parse clouds.yaml
set_fact:
clouds_yaml: "{{ clouds_yaml.content | b64decode | from_yaml }}"
- name: Update clouds.yaml
vars:
new_config:
# cache:
# max_age: 3600
# class: dogpile.cache.dbm
# arguments:
# filename: .cache/openstack/shade.dbm
# expiration:
# floating-ip: 5
# server: 5
# port: 5
# TODO(pabelanger): Remove once glean fully supports IPv6.
client:
force_ipv4: True
set_fact:
clouds_yaml: "{{ clouds_yaml | combine(new_config) }}"
- name: Save clouds.yaml
become: true
copy:
content: "{{ clouds_yaml | to_nice_yaml }}"
dest: /etc/openstack/clouds.yaml
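
Since Ansible's combine() is a shallow merge by default, the net effect
is to add (or replace) a top-level client section next to the existing
clouds entries. Illustratively (cloud contents abbreviated):

clouds:
  devstack:
    auth:
      # ... unchanged ...
client:
  force_ipv4: true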