Fix gate for Focal issues

This fixes the libffi bindep installation on Ubuntu Focal.
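
For reference, Focal ships libffi7 rather than libffi6, so the bindep
entry is split per platform (a sketch of the resulting stanza; see the
bindep hunk below):

  libffi7 [platform:ubuntu-focal]
  libffi6 [platform:dpkg !platform:ubuntu-focal]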

The Python 3.6 tox tests are switched back to bionic, as Focal nodes
don't have Python 3.6.
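
For the unit test job, the hunk below just pins an explicit nodeset;
roughly (job name assumed from the check/gate lists further down):

  - job:
      name: nodepool-tox-py36
      parent: tox-py36
      nodeset: ubuntu-bionic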

Additionally, we squashed the following change into this one to
unblock the gate:

Remove nodepool-functional-openstack

This test installs devstack and then nodepool on a Bionic host (in
contrast to the container variants, which build a container from the
Dockerfile and install/run that).

Firstly, devstack support for Bionic is going away soon, so we would
have to update this.  We don't really need to test running on top of
a plain Bionic/Focal host: the tox jobs already test the various
Python versions for compatibility, so running here doesn't provide
any extra coverage.  DIB can't build many things on a plain
Bionic/Focal host due to updates or incompatibilities in the "alien"
versions of RPM, Zypper, debootstrap, etc.  The container
incorporates fixes as required and is where anyone will focus
attention if there are build issues; hence we're not testing anything
useful for the image-building paths either.  Finally, we also have
nodepool-zuul-functional, which brings up Zuul and nodepool on a
plain Bionic host; for the reasons above, that covers essentially the
same ground as this job.

openstacksdk is using this job on older branches; the dependent
changes below switch it to the container job.
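
A purely illustrative sketch of that swap in a consumer's Zuul config
(the exact jobs openstacksdk runs live in the dependent changes; the
names here are just the variants defined in this repo):

  check:
    jobs:
      # was: nodepool-functional-openstack-src
      - nodepool-functional-container-openstack-siblings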

Depends-On: https://review.opendev.org/c/openstack/openstacksdk/+/788414
Depends-On: https://review.opendev.org/c/openstack/openstacksdk/+/788416
Depends-On: https://review.opendev.org/c/openstack/openstacksdk/+/788418
Depends-On: https://review.opendev.org/c/openstack/openstacksdk/+/788420
Depends-On: https://review.opendev.org/c/openstack/diskimage-builder/+/788404

(was: Change-Id: I87318e9101b982f3cafcf82439fdcb68767b602b)

Change-Id: Ifc74e6958f64be70386cdb3e05768d94db75c3bb
Ian Wienand 2021-05-06 17:22:56 +10:00
parent 11029a788a
commit 26bad7f853
8 changed files with 5 additions and 475 deletions


@@ -14,102 +14,6 @@
zuul_work_dir: "{{ zuul.projects['opendev.org/zuul/zuul'].src_dir }}"
tox_envlist: nodepool
- job:
name: nodepool-functional-openstack-base
description: |
Test Nodepool with an OpenStack.
Sets up a working OpenStack system, builds an image, launches a
node, and verifies SSH access to the node.
.. zuul:jobvar:: nodepool_extra_packages
:type: list
A list of extra packages to install at the start of the job
(i.e., anything needed for the DIB build).
.. zuul:jobvar:: nodepool_diskimage
:type: dict
.. zuul:jobvar:: base_element
The base element to use when building the disk image.
.. zuul:jobvar:: elements-dir
:type: string
Path to directory with external elements
.. zuul:jobvar:: extra_elements
:type: list
:default: []
List of extra DIB elements to be added to image
configuration.
.. zuul:jobvar:: release
The release number or name to use with the base element.
.. zuul:jobvar:: mirror
The URL for the package mirror to use when building the
disk image. This is optional.
.. zuul:jobvar:: env-vars
:type: dict
A dictionary of environment variables and values to add
to the DIB environment when building the image.
.. zuul:jobvar:: nodepool_debug
Boolean flag to enable debug logging for the nodepool builder
and launcher services. By default we log at INFO level and
higher. This enables DEBUG and higher. This is optional.
timeout: 5400
required-projects:
- zuul/nodepool
pre-run: playbooks/nodepool-functional-openstack/pre.yaml
run: playbooks/nodepool-functional-openstack/run.yaml
post-run: playbooks/nodepool-functional-openstack/post.yaml
vars:
zuul_copy_output:
/var/log/nodepool: logs
- job:
name: nodepool-functional-openstack
description: Test Nodepool with an OpenStack
parent: nodepool-functional-openstack-base
vars: &nodepool_diskimage_vars
nodepool_extra_packages:
- rpm
- yum-utils
nodepool_diskimage:
base_element: centos-minimal
release: 8
mirror: "http://{{ zuul_site_mirror_fqdn }}/centos"
env-vars:
DIB_SIMPLE_INIT_NETWORKMANAGER: '1'
- job:
name: nodepool-functional-openstack-src-base
description: Test Nodepool and OpenStack, with some projects from source
parent: nodepool-functional-openstack-base
required-projects:
- name: github.com/sqlalchemy/dogpile.cache
- name: opendev/glean
- name: zuul/nodepool
- name: openstack/diskimage-builder
- name: openstack/openstacksdk
- job:
name: nodepool-functional-openstack-src
description: Test Nodepool and OpenStack, with some projects from source
parent: nodepool-functional-openstack-src-base
vars: *nodepool_diskimage_vars
- job:
description: |
Test that nodepool works with kubernetes.
@@ -256,9 +160,6 @@
description: |
Test Nodepool containers with an OpenStack.
This is similar to nodepool-functional-openstack-base, but
uses containers to deploy nodepool.
Note this is an abstract job and does not define an image to
build and test, and you should not inherit directly from this
job.
@@ -352,6 +253,7 @@
description: |
Nodepool unit tests with ZooKeeper running
parent: tox-py36
nodeset: ubuntu-bionic
pre-run: playbooks/nodepool-tox/pre.yaml
vars: &nodepool_tox_vars
tox_environment:
@@ -381,12 +283,6 @@
- nodepool-tox-py38
- nodepool-zuul-functional:
voting: false
- nodepool-functional-openstack:
vars:
nodepool_debug: true
- nodepool-functional-openstack-src:
vars:
nodepool_debug: true
- nodepool-functional-container-openstack-release
- nodepool-functional-container-openstack-siblings
- nodepool-functional-k8s
@@ -402,8 +298,6 @@
- tox-pep8
- nodepool-tox-py36
- nodepool-tox-py38
- nodepool-functional-openstack
- nodepool-functional-openstack-src
- nodepool-functional-k8s
- nodepool-functional-openshift
- zuul-quick-start:


@@ -3,10 +3,11 @@
 gcc [compile test]
 libc6-dev [compile test platform:dpkg]
 libffi-devel [platform:rpm]
-libffi-dev [compile test platform:apk platform:dpkg]
-libffi6 [platform:dpkg]
+libffi-dev [compile test platform:dpkg platform:apk]
 libffi [platform:apk]
+libffi7 [platform:ubuntu-focal]
+libffi6 [platform:dpkg !platform:ubuntu-focal]
 libffi-devel [compile test platform:rpm]
 libressl-dev [compile test platform:apk]
 libssl-dev [compile test platform:dpkg]
 linux-headers [compile test platform:apk]


@@ -1,31 +0,0 @@
- hosts: all
tasks:
- name: Copy logs
ignore_errors: yes
block:
- name: Copy nodepool log files
synchronize:
src: /var/log/nodepool
dest: '{{ zuul.executor.log_root }}'
mode: pull
- name: Copy nodepool config files
synchronize:
src: /etc/nodepool
dest: '{{ zuul.executor.log_root }}'
mode: pull
- name: Copy instance console logs
become: true
synchronize:
src: /opt/stack/data/nova/instances
dest: '{{ zuul.executor.log_root }}'
mode: pull
rsync_opts:
- "--include=*/"
- "--include=console.log"
- "--exclude=*"
- name: Copy syslog
become: True
synchronize:
src: "/var/log/syslog"
dest: '{{ zuul.executor.log_root }}'
mode: pull


@@ -1,24 +0,0 @@
- hosts: all
roles:
- role: bindep
bindep_dir: "{{ zuul.projects['opendev.org/zuul/nodepool'].src_dir }}"
- role: test-setup
zuul_work_dir: "{{ zuul.projects['opendev.org/zuul/nodepool'].src_dir }}"
- role: ensure-zookeeper
zookeeper_use_tls: true
- ensure-devstack
- ensure-virtualenv
tasks:
# Create the virtualenv so we can control the python version
- name: Create virtualenv
pip:
name: extras
virtualenv: "{{ ansible_user_dir }}/.venv"
virtualenv_python: python3
- name: Install python project from source
include_role:
name: ensure-if-python
vars:
zuul_work_dir: "{{ item.src_dir }}"
error_on_failure: true
loop: "{{ zuul.projects.values() | selectattr('required') | list }}"


@@ -1,145 +0,0 @@
- hosts: all
vars:
nodepool_config_dir: "/etc/nodepool"
nodepool_log_dir: "/var/log/nodepool"
NODEPOOL_KEY: "$HOME/.ssh/id_nodepool"
NODEPOOL_KEY_NAME: "root"
NODEPOOL_PUBKEY: "$HOME/.ssh/id_nodepool.pub"
NODEPOOL_INSTALL: "$HOME/nodepool-venv"
NODEPOOL_CACHE_GET_PIP: "/opt/stack/cache/files/get-pip.py"
NODEPOOL_CONFIG: "{{ nodepool_config_dir }}/nodepool.yaml"
NODEPOOL_DIB_BASE_PATH: "/opt/dib"
launcher_logging_config: "{{ nodepool_config_dir }}/launcher-logging.conf"
launcher_logging_arg: "{{ '-l '+ launcher_logging_config if nodepool_debug is defined and nodepool_debug else '' }}"
builder_logging_config: "{{ nodepool_config_dir }}/builder-logging.conf"
builder_logging_arg: "{{ '-l '+ builder_logging_config if nodepool_debug is defined and nodepool_debug else '' }}"
tasks:
- name: Write clouds.yaml
include_tasks: write_clouds_yaml.yaml
- name: Install software for DIB
become: true
package:
name:
- qemu-utils
- kpartx
- name: Install extra software for DIB
when: nodepool_extra_packages is defined
become: true
package:
name: "{{ nodepool_extra_packages }}"
- name: Create cache directory
file:
path: .cache/openstack
state: directory
- name: Create nodepool flavors
args:
executable: /bin/bash
shell: |
source /opt/devstack/openrc admin admin
nova flavor-create nodepool-512 64 512 5 1
nova flavor-create nodepool-1024 128 1024 5 1
- name: Create security groups
args:
executable: /bin/bash
shell: |
source /opt/devstack/openrc admin admin
openstack --os-project-name demo --os-username demo security group rule create --ingress --protocol tcp --dst-port 1:65535 --remote-ip 0.0.0.0/0 default
openstack --os-project-name demo --os-username demo security group rule create --ingress --protocol udp --dst-port 1:65535 --remote-ip 0.0.0.0/0 default
- name: Create unmanaged VM
args:
executable: /bin/bash
shell: |
source /opt/devstack/openrc admin admin
openstack network list
cirros_image=$(openstack --os-project-name demo --os-username demo image list | grep cirros | awk '{print $4}' | head -n1)
openstack --os-project-name demo --os-username demo server create --flavor cirros256 --image $cirros_image unmanaged-vm --network public
- name: Create nodepool SSH keypair
args:
executable: /bin/bash
shell: |
source /opt/devstack/openrc admin admin
ssh-keygen -f {{ NODEPOOL_KEY }} -P ""
nova --os-project-name demo --os-username demo keypair-add --pub-key {{ NODEPOOL_PUBKEY }} {{ NODEPOOL_KEY_NAME }}
- name: Write nodepool elements
args:
executable: /bin/bash
shell:
cmd: |
sudo mkdir -p $(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/install.d
sudo mkdir -p $(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/root.d
cat > /tmp/40-nodepool-setup <<EOF
sudo mkdir -p /etc/nodepool
# Make it world writeable so nodepool can write here later.
sudo chmod 777 /etc/nodepool
EOF
cat > /tmp/50-apt-allow-unauthenticated <<EOF
if [ -d "\$TARGET_ROOT/etc/apt/apt.conf.d" ]; then
echo "APT::Get::AllowUnauthenticated \"true\";" | sudo tee \$TARGET_ROOT/etc/apt/apt.conf.d/95allow-unauthenticated
echo "Acquire::AllowInsecureRepositories \"true\";" | sudo tee -a \$TARGET_ROOT/etc/apt/apt.conf.d/95allow-unauthenticated
fi
EOF
sudo mv /tmp/40-nodepool-setup \
$(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/install.d/40-nodepool-setup
sudo chmod a+x \
$(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/install.d/40-nodepool-setup
sudo mv /tmp/50-apt-allow-unauthenticated \
$(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/root.d/50-apt-allow-unauthenticated
sudo chmod a+x \
$(dirname {{ NODEPOOL_CONFIG }})/elements/nodepool-setup/root.d/50-apt-allow-unauthenticated
sudo mkdir -p {{ NODEPOOL_DIB_BASE_PATH }}/images
sudo mkdir -p {{ NODEPOOL_DIB_BASE_PATH }}/tmp
sudo mkdir -p {{ NODEPOOL_DIB_BASE_PATH }}/cache
sudo chown -R {{ ansible_user }}:{{ ansible_user }} {{ NODEPOOL_DIB_BASE_PATH }}
- name: Write nodepool config
become: true
template:
src: nodepool.yaml.j2
dest: "{{ NODEPOOL_CONFIG }}"
- name: Validate nodepool config
command: "./.venv/bin/nodepool -c {{ NODEPOOL_CONFIG }} config-validate"
- name: Create nodepool runtime dirs
become: true
file:
path: '{{ item }}'
state: directory
owner: '{{ ansible_user }}'
group: '{{ ansible_user }}'
loop:
- /var/run/nodepool
- '{{ nodepool_log_dir }}'
- name: Write builder logging config
become: true
template:
src: logging.conf.j2
dest: "{{ builder_logging_config }}"
vars:
log_file: "{{ nodepool_log_dir }}/nodepool-builder.log"
- name: Write launcher logging config
become: true
template:
src: logging.conf.j2
dest: "{{ launcher_logging_config }}"
vars:
log_file: "{{ nodepool_log_dir }}/nodepool-launcher.log"
# FIXME: don't use activate once this merges:
# https://review.opendev.org/666177 Use the DIB installed in the virtualenv if running there
- name: Start nodepool builder
shell: |
. ./.venv/bin/activate
nodepool-builder {{ builder_logging_arg}} -c {{ NODEPOOL_CONFIG }}
- name: Start nodepool launcher
command: "./.venv/bin/nodepool-launcher {{ launcher_logging_arg }} -c {{ NODEPOOL_CONFIG }}"
- name: Check nodepool functionality
command: "{{ zuul.projects['opendev.org/zuul/nodepool'].src_dir }}/tools/functional-test-check.sh"
environment:
NODEPOOL_FUNCTIONAL_CHECK: 'installed'


@@ -1,52 +0,0 @@
[formatters]
keys=simple
[loggers]
keys=root,nodepool,openstack,kazoo,keystoneauth,novaclient
[handlers]
keys=file
[logger_root]
level=WARNING
handlers=file
[logger_nodepool]
level=DEBUG
handlers=file
qualname=nodepool
propagate=0
[logger_openstack]
level=DEBUG
handlers=file
qualname=openstack
propagate=0
[logger_keystoneauth]
level=DEBUG
handlers=file
qualname=keystoneauth
propagate=0
[logger_novaclient]
level=DEBUG
handlers=file
qualname=novaclient
propagate=0
[logger_kazoo]
level=INFO
handlers=file
qualname=kazoo
propagate=0
[handler_file]
level=DEBUG
class=FileHandler
formatter=simple
args=('{{ log_file }}', 'w')
[formatter_simple]
format=%(asctime)s %(levelname)s %(name)s: %(message)s
datefmt=


@@ -1,83 +0,0 @@
elements-dir: {{ NODEPOOL_CONFIG | dirname }}/elements
images-dir: {{ NODEPOOL_DIB_BASE_PATH }}/images
zookeeper-servers:
- host: localhost
port: 2281
zookeeper-tls:
ca: /opt/zookeeper/ca/certs/cacert.pem
cert: /opt/zookeeper/ca/certs/client.pem
key: /opt/zookeeper/ca/keys/clientkey.pem
labels:
- name: test-image
min-ready: 1
providers:
- name: devstack
region-name: RegionOne
cloud: devstack
# Long boot timeout to deal with potentially nested virt.
boot-timeout: 600
launch-timeout: 900
rate: 0.25
diskimages:
- name: test-image
config-drive: true
pools:
- name: main
max-servers: 5
networks:
- private
labels:
- name: test-image
diskimage: test-image
min-ram: 512
flavor-name: 'nodepool'
console-log: True
key-name: {{ NODEPOOL_KEY_NAME }}
instance-properties:
nodepool_devstack: testing
userdata: |
#cloud-config
write_files:
- content: |
testpassed
path: /etc/testfile_nodepool_userdata
diskimages:
- name: test-image
rebuild-age: 86400
elements:
- {{ nodepool_diskimage.base_element }}
{% if 'extra_elements' in nodepool_diskimage %}
{% for item in nodepool_diskimage.extra_elements %}
- {{ item }}
{% endfor %}
{% endif %}
- vm
- simple-init
- growroot
- devuser
- openssh-server
- nodepool-setup
- journal-to-console
release: '{{ nodepool_diskimage.release }}'
env-vars:
TMPDIR: '{{ NODEPOOL_DIB_BASE_PATH }}/tmp'
DIB_CHECKSUM: '1'
DIB_SHOW_IMAGE_USAGE: '1'
DIB_IMAGE_CACHE: '{{ NODEPOOL_DIB_BASE_PATH }}/cache'
DIB_DEV_USER_AUTHORIZED_KEYS: '{{ NODEPOOL_PUBKEY }}'
{% if 'mirror' in nodepool_diskimage %}
DIB_DISTRIBUTION_MIRROR: '{{ nodepool_diskimage.mirror }}'
{% endif %}
{% if zuul.projects.get('opendev.org/opendev/glean', {}).get('required', False) %}
DIB_INSTALLTYPE_simple_init: 'repo'
DIB_REPOLOCATION_glean: "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/opendev/glean'].src_dir }}"
DIB_REPOREF_glean: "{{ zuul.projects['opendev.org/opendev/glean'].checkout }}"
{% endif %}
{% for k, v in nodepool_diskimage.get('env-vars', {}).items() %}
{{ k }}: "{{ v }}"
{% endfor %}


@@ -1,30 +0,0 @@
- name: Load clouds.yaml
become: true
slurp:
path: /etc/openstack/clouds.yaml
register: clouds_yaml
- name: Parse clouds.yaml
set_fact:
clouds_yaml: "{{ clouds_yaml.content | b64decode | from_yaml }}"
- name: Update clouds.yaml
vars:
new_config:
# cache:
# max_age: 3600
# class: dogpile.cache.dbm
# arguments:
# filename: .cache/openstack/shade.dbm
# expiration:
# floating-ip: 5
# server: 5
# port: 5
# TODO(pabelanger): Remove once glean fully supports IPv6.
client:
force_ipv4: True
set_fact:
clouds_yaml: "{{ clouds_yaml | combine(new_config) }}"
- name: Save clouds.yaml
become: true
copy:
content: "{{ clouds_yaml | to_nice_yaml }}"
dest: /etc/openstack/clouds.yaml