Switch functional testing to a devstack consumer job

Rather than implement functional OpenStack testing as a devstack
plugin, use devstack as a black-box OpenStack.  This allows us
to move nodepool to a new Zuul tenant without depending on the
devstack job and all its associated projects.  It will also let
us replace devstack with something potentially lighter weight in
the future.

The job permits customized settings for what images to build, so
that the DIB project can inherit from it and make child jobs for
each of the operating systems it cares about.

Change-Id: Ie6bc891cebd32b3d1bb646109f13ac2fd383bba5
James E. Blair 2019-06-12 14:45:50 -07:00
parent 75adc01f0a
commit 007f7e0b08
7 changed files with 523 additions and 4 deletions

.zuul.yaml

@@ -14,6 +14,90 @@
      zuul_work_dir: "{{ zuul.projects['opendev.org/zuul/zuul'].src_dir }}"
      tox_envlist: nodepool

- job:
    name: nodepool-functional-openstack-base
    description: |
      Test Nodepool with an OpenStack.

      Sets up a working OpenStack system, builds an image, launches a
      node, and verifies SSH access to the node.

      .. zuul:jobvar:: nodepool_extra_packages
         :type: list

         A list of extra packages to install at the start of the job
         (i.e., anything needed for the DIB build).

      .. zuul:jobvar:: nodepool_diskimage
         :type: dict

         .. zuul:jobvar:: base_element

            The base element to use when building the disk image.

         .. zuul:jobvar:: release

            The release number or name to use with the base element.

         .. zuul:jobvar:: mirror

            The URL for the package mirror to use when building the
            disk image.  This is optional.

         .. zuul:jobvar:: env-vars
            :type: dict

            A dictionary of environment variables and values to add
            to the DIB environment when building the image.
    timeout: 5400
    required-projects:
      - zuul/nodepool
    pre-run: playbooks/nodepool-functional-openstack/pre.yaml
    run: playbooks/nodepool-functional-openstack/run.yaml
    post-run: playbooks/nodepool-functional-openstack/post.yaml
    vars:
      zuul_copy_output:
        /var/log/nodepool: logs
- job:
    name: nodepool-functional-openstack
    description: Test Nodepool with an OpenStack
    parent: nodepool-functional-openstack-base
    vars: &nodepool_diskimage_vars
      nodepool_extra_packages:
        - rpm
        - yum-utils
      nodepool_diskimage:
        base_element: centos-minimal
        release: 7
        mirror: "http://{{ zuul_site_mirror_fqdn }}/centos"
        env-vars:
          DIB_SIMPLE_INIT_NETWORKMANAGER: '1'
- job:
    name: nodepool-functional-openstack-src-base
    description: Test Nodepool and OpenStack, with some projects from source
    parent: nodepool-functional-openstack-base
    required-projects:
      - name: github.com/sqlalchemy/dogpile.cache
      - name: opendev/glean
      - name: zuul/nodepool
      - name: openstack/diskimage-builder
      - name: openstack/openstacksdk

- job:
    name: nodepool-functional-openstack-src
    description: Test Nodepool and OpenStack, with some projects from source
    parent: nodepool-functional-openstack
    required-projects:
      - name: github.com/sqlalchemy/dogpile.cache
      - name: opendev/glean
      - name: zuul/nodepool
      - name: openstack/diskimage-builder
      - name: openstack/openstacksdk
    vars: *nodepool_diskimage_vars
# NOTE(ianw): this job doesn't really do anything unless you set
# "NODEPOOL_PAUSE_something_DIB: false" to unpause one or more builds.
- job:
@@ -219,10 +303,8 @@
         - tox-py36
         - nodepool-zuul-functional:
             voting: false
-        - nodepool-functional-py35:
-            voting: false
-        - nodepool-functional-py35-src:
-            voting: false
+        - nodepool-functional-openstack
+        - nodepool-functional-openstack-src
         - nodepool-functional-k8s
         - nodepool-functional-openshift
         - zuul-quick-start:
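
As an illustration of the customization point described in the commit
message, a consumer such as the DIB project could inherit from the base
job and select its own image. This is a hypothetical sketch (the job
name, base element, release, and mirror path are illustrative, not part
of this change):

  - job:
      name: dib-nodepool-functional-openstack-ubuntu-bionic
      parent: nodepool-functional-openstack-base
      description: Build an Ubuntu Bionic image with DIB and boot it with Nodepool.
      vars:
        nodepool_diskimage:
          base_element: ubuntu-minimal
          release: bionic
          mirror: "http://{{ zuul_site_mirror_fqdn }}/ubuntu"
          env-vars:
            DIB_DEBIAN_COMPONENTS: 'main,universe'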

playbooks/nodepool-functional-openstack/check.sh

@@ -0,0 +1,153 @@
#!/bin/bash -ex
LOGDIR=/home/zuul/zuul-output/logs
# Set to indicate an error return
RETURN=0
FAILURE_REASON=""
NODEPOOL_INSTALL=${NODEPOOL_INSTALL:-~/.venv}
NODEPOOL_CONFIG=${NODEPOOL_CONFIG:-/etc/nodepool/nodepool.yaml}
NODEPOOL="$NODEPOOL_INSTALL/bin/nodepool -c $NODEPOOL_CONFIG"
cat > /tmp/ssh_wrapper <<EOF
#!/bin/bash -ex
sudo -H -u zuul ssh -o StrictHostKeyChecking=no -i $HOME/.ssh/id_nodepool root@\$@
EOF
sudo chmod 0755 /tmp/ssh_wrapper
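
# Verify ssh access to the node named $1, that its root partition
# grew on boot, and that config-drive metadata was deployed.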
function sshintonode {
    name=$1
    state='ready'

    node=`$NODEPOOL list | grep $name | grep $state | cut -d '|' -f6 | tr -d ' '`
    /tmp/ssh_wrapper $node ls /

    # Check that the root partition grew on boot; it should be a 5GiB
    # partition minus some space for the boot partition.  However,
    # empirical evidence suggests there is some modulo maths going on
    # (possibly with alignment?) that means we can vary by up to even
    # 64MiB.  Thus we choose an expected value that gives us enough
    # slop to avoid false matches, but still indicates we resized up.
    root_size=$(/tmp/ssh_wrapper $node -- lsblk -rbno SIZE /dev/vda1)
    expected_root_size=$(( 5000000000 ))
    if [[ $root_size -lt $expected_root_size ]]; then
        echo "*** Root device does not appear to have grown: $root_size"
        FAILURE_REASON="Root partition of $name does not appear to have grown: $root_size < $expected_root_size"
        RETURN=1
    fi

    # Check we saw metadata deployed to the config-drive
    if ! /tmp/ssh_wrapper $node \
            "dd status=none if=/dev/sr0 | tr -cd '[:print:]' | grep -q nodepool_devstack"; then
        echo "*** Failed to find metadata in config-drive"
        FAILURE_REASON="Failed to find meta-data in config-drive for $node"
        RETURN=1
    fi
}
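
# Verify that the cloud-init userdata configured for the label reached
# the server booted from image $1.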
function showserver {
    name=$1
    state='ready'

    node_id=`$NODEPOOL list | grep $name | grep $state | cut -d '|' -f5 | tr -d ' '`
    EXPECTED=$(mktemp)
    RESULT=$(mktemp)
    source /opt/devstack/openrc admin admin

    nova show $node_id | grep -Eo "user_data[ ]+.*|[ ]*$" | awk {'print $3'} |\
        base64 --decode > $RESULT
    cat <<EOF >$EXPECTED
#cloud-config
write_files:
- content: |
    testpassed
  path: /etc/testfile_nodepool_userdata
EOF
    if ! diff $EXPECTED $RESULT; then
        echo "*** Failed to find userdata on server!"
        FAILURE_REASON="Failed to find userdata on server for $node"
        echo "Expected userdata:"
        cat $EXPECTED
        echo "Found userdata:"
        cat $RESULT
        RETURN=1
    fi
}
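
# Verify NetworkManager brought up a connection on the primary
# interface of the node named $1.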
function checknm {
    name=$1
    state='ready'

    node=`$NODEPOOL list | grep $name | grep $state | cut -d '|' -f6 | tr -d ' '`
    nm_output=$(/tmp/ssh_wrapper $node -- nmcli c)

    # virtio device is eth0 on older, ens3 on newer
    if [[ ! ${nm_output} =~ (eth0|ens3) ]]; then
        echo "*** Failed to find interface in NetworkManager connections"
        /tmp/ssh_wrapper $node -- nmcli c
        /tmp/ssh_wrapper $node -- nmcli device
        FAILURE_REASON="Failed to find interface in NetworkManager connections"
        RETURN=1
    fi
}
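
# Poll until the image named $1 is ready, capturing the current image
# and node lists for the logs; abort if the build keeps failing.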
function waitforimage {
    local name=$1
    local state='ready'
    local builds

    while ! $NODEPOOL image-list | grep $name | grep $state; do
        $NODEPOOL image-list > ${LOGDIR}/nodepool-image-list.txt
        $NODEPOOL list --detail > ${LOGDIR}/nodepool-list.txt

        builds=$(ls -l /var/log/nodepool/builds/ | grep $name | wc -l)
        if [[ ${builds} -ge 4 ]]; then
            echo "*** Build of $name failed at least 3 times, aborting"
            exit 1
        fi
        sleep 10
    done
}
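
# Poll until a node built from image $1 is ready and unlocked.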
function waitfornode {
    name=$1
    state='ready'

    while ! $NODEPOOL list | grep $name | grep $state | grep "unlocked"; do
        $NODEPOOL image-list > ${LOGDIR}/nodepool-image-list.txt
        $NODEPOOL list --detail > ${LOGDIR}/nodepool-list.txt
        sleep 10
    done
}
# Check that the image built
waitforimage test-image

# Check that the image was bootable
waitfornode test-image

# Check ssh for the root user
sshintonode test-image

# NetworkManager check
# TODO(jeblair): This should not run in all cases; in fact, most of
# this checking should move into the dib repo
#checknm test-image

# Userdata check
showserver test-image
set -o errexit

# Show the built nodes
$NODEPOOL list

# Try to delete the node that was just built
$NODEPOOL delete --now 0000000000

# Show the deleted node (and its replacement, which may be building)
$NODEPOOL list

if [[ -n "${FAILURE_REASON}" ]]; then
    echo "${FAILURE_REASON}"
fi
exit $RETURN

playbooks/nodepool-functional-openstack/post.yaml

@@ -0,0 +1,40 @@
- hosts: all
  tasks:
    - name: Copy nodepool log files
      synchronize:
        src: /var/log/nodepool
        dest: '{{ zuul.executor.log_root }}'
        mode: pull

    - name: Copy nodepool config files
      synchronize:
        src: /etc/nodepool
        dest: '{{ zuul.executor.log_root }}'
        mode: pull

    - name: Copy instance console logs
      become: true
      synchronize:
        src: /opt/stack/data/nova/instances
        dest: '{{ zuul.executor.log_root }}'
        mode: pull
        rsync_opts:
          - "--include=*/"
          - "--include=console.log"
          - "--exclude=*"

    - name: Export legacy syslog.txt
      become: true
      shell:
        # The journal contains everything running under systemd; we'll
        # build an old-school version of the syslog with just the
        # kernel and sudo messages.
        cmd: |
          journalctl \
              -t kernel \
              -t sudo \
              --no-pager \
              | gzip - > {{ ansible_user_dir }}/syslog.txt.gz

    - name: Copy syslog
      become: true
      synchronize:
        src: "{{ ansible_user_dir }}/syslog.txt.gz"
        dest: '{{ zuul.executor.log_root }}'
        mode: pull

playbooks/nodepool-functional-openstack/pre.yaml

@@ -0,0 +1,21 @@
- hosts: all
  roles:
    - role: bindep
      bindep_dir: "{{ zuul.projects['opendev.org/zuul/nodepool'].src_dir }}"
    - role: test-setup
      zuul_work_dir: "{{ zuul.projects['opendev.org/zuul/nodepool'].src_dir }}"
    - install-devstack
  tasks:
    # Create the virtualenv so we can control the python version
    - name: Create virtualenv
      pip:
        name: extras
        virtualenv: "{{ ansible_user_dir }}/.venv"
        virtualenv_python: python3

    - name: Install python projects from source
      include_role:
        name: install-if-python
      vars:
        zuul_work_dir: "{{ item.src_dir }}"
        error_on_failure: true
      loop: "{{ zuul.projects.values() | selectattr('required') | list }}"

playbooks/nodepool-functional-openstack/run.yaml

@@ -0,0 +1,116 @@
- hosts: all
  vars:
    NODEPOOL_KEY: "$HOME/.ssh/id_nodepool"
    NODEPOOL_KEY_NAME: "root"
    NODEPOOL_PUBKEY: "$HOME/.ssh/id_nodepool.pub"
    NODEPOOL_INSTALL: "$HOME/nodepool-venv"
    NODEPOOL_CACHE_GET_PIP: "/opt/stack/cache/files/get-pip.py"
    NODEPOOL_CONFIG: "/etc/nodepool/nodepool.yaml"
    NODEPOOL_DIB_BASE_PATH: "/opt/dib"
  tasks:
    - name: Write clouds.yaml
      include_tasks: write_clouds_yaml.yaml

    - name: Install software for DIB
      become: true
      package:
        name:
          - qemu-utils
          - kpartx

    - name: Install extra software for DIB
      when: nodepool_extra_packages is defined
      become: true
      package:
        name: "{{ nodepool_extra_packages }}"

    - name: Create cache directory
      file:
        path: .cache/openstack
        state: directory

    - name: Create nodepool flavors
      args:
        executable: /bin/bash
      shell: |
        source /opt/devstack/openrc admin admin
        nova flavor-create nodepool-512 64 512 5 1
        nova flavor-create nodepool-1024 128 1024 5 1

    - name: Create security groups
      args:
        executable: /bin/bash
      shell: |
        source /opt/devstack/openrc admin admin
        openstack --os-project-name demo --os-username demo security group rule create --ingress --protocol tcp --dst-port 1:65535 --remote-ip 0.0.0.0/0 default
        openstack --os-project-name demo --os-username demo security group rule create --ingress --protocol udp --dst-port 1:65535 --remote-ip 0.0.0.0/0 default

    - name: Create unmanaged VM
      args:
        executable: /bin/bash
      shell: |
        source /opt/devstack/openrc admin admin
        openstack network list
        cirros_image=$(openstack --os-project-name demo --os-username demo image list | grep cirros | awk '{print $4}' | head -n1)
        openstack --os-project-name demo --os-username demo server create --flavor cirros256 --image $cirros_image unmanaged-vm --network public

    - name: Create nodepool SSH keypair
      args:
        executable: /bin/bash
      shell: |
        source /opt/devstack/openrc admin admin
        ssh-keygen -f {{ NODEPOOL_KEY }} -P ""
        nova --os-project-name demo --os-username demo keypair-add --pub-key {{ NODEPOOL_PUBKEY }} {{ NODEPOOL_KEY_NAME }}

    - name: Write nodepool elements
      args:
        executable: /bin/bash
      shell:
        cmd: |
          sudo mkdir -p $(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/install.d
          sudo mkdir -p $(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/root.d
          cat > /tmp/40-nodepool-setup <<EOF
          sudo mkdir -p /etc/nodepool
          # Make it world writeable so nodepool can write here later.
          sudo chmod 777 /etc/nodepool
          EOF
          cat > /tmp/50-apt-allow-unauthenticated <<EOF
          if [ -d "\$TARGET_ROOT/etc/apt/apt.conf.d" ]; then
              echo "APT::Get::AllowUnauthenticated \"true\";" | sudo tee \$TARGET_ROOT/etc/apt/apt.conf.d/95allow-unauthenticated
              echo "Acquire::AllowInsecureRepositories \"true\";" | sudo tee -a \$TARGET_ROOT/etc/apt/apt.conf.d/95allow-unauthenticated
          fi
          EOF
          sudo mv /tmp/40-nodepool-setup \
              $(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/install.d/40-nodepool-setup
          sudo chmod a+x \
              $(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/install.d/40-nodepool-setup
          sudo mv /tmp/50-apt-allow-unauthenticated \
              $(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/root.d/50-apt-allow-unauthenticated
          sudo chmod a+x \
              $(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/root.d/50-apt-allow-unauthenticated
          sudo mkdir -p {{ NODEPOOL_DIB_BASE_PATH }}/images
          sudo mkdir -p {{ NODEPOOL_DIB_BASE_PATH }}/tmp
          sudo mkdir -p {{ NODEPOOL_DIB_BASE_PATH }}/cache
          sudo chown -R {{ ansible_user }}:{{ ansible_user }} {{ NODEPOOL_DIB_BASE_PATH }}
          sudo mkdir -p /var/log/nodepool
          sudo chown -R {{ ansible_user }}:{{ ansible_user }} /var/log/nodepool
          sudo mkdir -p /var/run/nodepool
          sudo chown -R {{ ansible_user }}:{{ ansible_user }} /var/run/nodepool

    - name: Write nodepool config
      become: true
      template:
        src: nodepool.yaml.j2
        dest: "{{ NODEPOOL_CONFIG }}"

    - name: Validate nodepool config
      command: "./.venv/bin/nodepool -c {{ NODEPOOL_CONFIG }} config-validate"

    # FIXME: don't use activate once this merges:
    # https://review.opendev.org/666177 ("Use the DIB installed in the
    # virtualenv if running there")
    - name: Start nodepool builder
      shell: |
        . ./.venv/bin/activate
        nodepool-builder -c {{ NODEPOOL_CONFIG }}

    - name: Start nodepool launcher
      command: "./.venv/bin/nodepool-launcher -c {{ NODEPOOL_CONFIG }}"

    - name: Check nodepool functionality
      command: "{{ zuul.projects['opendev.org/zuul/nodepool'].src_dir }}/playbooks/nodepool-functional-openstack/check.sh"

playbooks/nodepool-functional-openstack/templates/nodepool.yaml.j2

@@ -0,0 +1,77 @@
elements-dir: {{ NODEPOOL_CONFIG | dirname }}/elements
images-dir: {{ NODEPOOL_DIB_BASE_PATH }}/images

zookeeper-servers:
  - host: localhost
    port: 2181

labels:
  - name: test-image
    min-ready: 1

providers:
  - name: devstack
    region-name: RegionOne
    cloud: devstack
    # Long boot timeout to deal with potentially nested virt.
    boot-timeout: 600
    launch-timeout: 900
    rate: 0.25
    diskimages:
      - name: test-image
        config-drive: true
    pools:
      - name: main
        max-servers: 5
        networks:
          - private
        labels:
          - name: test-image
            diskimage: test-image
            min-ram: 512
            flavor-name: 'nodepool'
            console-log: True
            key-name: {{ NODEPOOL_KEY_NAME }}
            instance-properties:
              nodepool_devstack: testing
            userdata: |
              #cloud-config
              write_files:
              - content: |
                  testpassed
                path: /etc/testfile_nodepool_userdata

diskimages:
  - name: test-image
    rebuild-age: 86400
    elements:
      - {{ nodepool_diskimage.base_element }}
      - vm
      - simple-init
      - growroot
      - devuser
      - openssh-server
      - nodepool-setup
    release: '{{ nodepool_diskimage.release }}'
    env-vars:
      TMPDIR: '{{ NODEPOOL_DIB_BASE_PATH }}/tmp'
      DIB_CHECKSUM: '1'
      DIB_SHOW_IMAGE_USAGE: '1'
      DIB_IMAGE_CACHE: '{{ NODEPOOL_DIB_BASE_PATH }}/cache'
      DIB_APT_LOCAL_CACHE: '0'
      DIB_DISABLE_APT_CLEANUP: '1'
      DIB_DEBIAN_COMPONENTS: 'main,universe'
      DIB_DEV_USER_AUTHORIZED_KEYS: '{{ NODEPOOL_PUBKEY }}'
{% if 'mirror' in nodepool_diskimage %}
      DIB_DISTRIBUTION_MIRROR: '{{ nodepool_diskimage.mirror }}'
{% endif %}
      DIB_DEBOOTSTRAP_EXTRA_ARGS: '--no-check-gpg'
      # TODO(corvus): cached $DIB_GET_PIP
{% if zuul.projects.get('opendev.org/opendev/glean', {}).get('required', False) %}
      DIB_INSTALLTYPE_simple_init: 'repo'
      DIB_REPOLOCATION_glean: "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/opendev/glean'].src_dir }}"
      DIB_REPOREF_glean: "{{ zuul.projects['opendev.org/opendev/glean'].checkout }}"
{% endif %}
{% for k, v in nodepool_diskimage.get('env-vars', {}).items() %}
      {{ k }}: "{{ v }}"
{% endfor %}
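
With the vars from the nodepool-functional-openstack job above
(centos-minimal, release 7, a site mirror, and
DIB_SIMPLE_INIT_NETWORKMANAGER), the tail of the rendered env-vars
section would come out roughly like this (illustrative; the mirror
hostname is whatever zuul_site_mirror_fqdn resolves to at runtime):

      DIB_DEV_USER_AUTHORIZED_KEYS: '$HOME/.ssh/id_nodepool.pub'
      DIB_DISTRIBUTION_MIRROR: 'http://mirror.example.org/centos'
      DIB_DEBOOTSTRAP_EXTRA_ARGS: '--no-check-gpg'
      DIB_SIMPLE_INIT_NETWORKMANAGER: "1"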

playbooks/nodepool-functional-openstack/write_clouds_yaml.yaml

@@ -0,0 +1,30 @@
- name: Load clouds.yaml
  become: true
  slurp:
    path: /etc/openstack/clouds.yaml
  register: clouds_yaml

- name: Parse clouds.yaml
  set_fact:
    clouds_yaml: "{{ clouds_yaml.content | b64decode | from_yaml }}"

- name: Update clouds.yaml
  vars:
    new_config:
      # cache:
      #   max_age: 3600
      #   class: dogpile.cache.dbm
      #   arguments:
      #     filename: .cache/openstack/shade.dbm
      #     expiration:
      #       floating-ip: 5
      #       server: 5
      #       port: 5
      # TODO(pabelanger): Remove once glean fully supports IPv6.
      client:
        force_ipv4: True
  set_fact:
    clouds_yaml: "{{ clouds_yaml | combine(new_config) }}"

- name: Save clouds.yaml
  become: true
  copy:
    content: "{{ clouds_yaml | to_nice_yaml }}"
    dest: /etc/openstack/clouds.yaml
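
Since combine() merges at the top level, the saved file is devstack's
original clouds.yaml plus the new top-level client key (the cache block
above stays commented out). The result looks roughly like this (a
sketch; the cloud entries are whatever devstack generated):

  clouds:
    devstack:        # unchanged, as written by devstack
      auth: ...
  client:
    force_ipv4: true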