Remove devstack plugin functional test jobs

The dependent commits mark the point at which all external usage
of these jobs has been replaced with the new jobs, so these can
be removed.

Change-Id: Ie51f9c177668190a55563fca944b825253d90f3f
Depends-On: https://review.opendev.org/667225
Depends-On: https://review.opendev.org/667992
Depends-On: https://review.opendev.org/667993
This commit is contained in:
James E. Blair 2019-06-24 09:44:58 -07:00 committed by James E. Blair
parent 007f7e0b08
commit f0b4930ee5
6 changed files with 0 additions and 1332 deletions

View File

@ -98,122 +98,6 @@
- name: openstack/openstacksdk
vars: *nodepool_diskimage_vars
# NOTE(ianw): this job doesn't really do anything unless you set
# "NODEPOOL_PAUSE_something_DIB: false" to unpause one or more builds.
- job:
    name: nodepool-functional-base
    parent: devstack
    # NOTE(ianw): for now, keep these jobs as Xenial only. We will
    # add bionic variants, but we want to keep Xenial testing around
    # whilst we still run Xenial for upstream nodepool-builders too
    nodeset: openstack-single-node-xenial
    pre-run: playbooks/nodepool-functional-base/pre.yaml
    post-run: playbooks/nodepool-functional-base/post.yaml
    timeout: 5400
    required-projects:
      - zuul/nodepool
    vars:
      devstack_localrc:
        USE_PYTHON3: True
      devstack_services:
        horizon: false
        ceilometer-acentral: false
        ceilometer-acompute: false
        ceilometer-alarm-evaluator: false
        ceilometer-alarm-notifier: false
        ceilometer-anotification: false
        ceilometer-api: false
        ceilometer-collector: false
        cinder: false
        c-bak: false
        c-sch: false
        c-api: false
        c-vol: false
        s-account: false
        s-container: false
        s-object: false
        s-proxy: false
        # Nodepool doesn't need vnc access
        n-novnc: false
        # Nodepool testing uses config drive only
        n-api-meta: false
        q-meta: false
      devstack_plugins:
        nodepool: https://opendev.org/zuul/nodepool
      zuul_copy_output:
        '/var/log/nodepool/builds': 'logs'
- job:
    name: nodepool-functional-py35
    parent: nodepool-functional-base
    description: |
      Run nodepool functional tests for py35
    run: playbooks/nodepool-functional-py35/run.yaml
    vars:
      devstack_localrc:
        NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB: false

- job:
    name: nodepool-functional-py35-src-base
    parent: nodepool-functional-base
    run: playbooks/nodepool-functional-py35-src/run.yaml
    required-projects:
      - name: github.com/sqlalchemy/dogpile.cache
      - name: opendev/glean
      - name: zuul/nodepool
      - name: openstack/diskimage-builder
      - name: openstack/openstacksdk

- job:
    name: nodepool-functional-py35-src
    parent: nodepool-functional-py35-src-base
    vars:
      devstack_localrc:
        NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB: false

# Distro functional tests. This is used by projects such as
# diskimage-builder and glean as live-boot tests
- job:
    name: nodepool-functional-py35-redhat-src
    parent: nodepool-functional-py35-src-base
    vars:
      devstack_localrc:
        NODEPOOL_PAUSE_CENTOS_7_DIB: false
        NODEPOOL_PAUSE_FEDORA_29_DIB: false

- job:
    name: nodepool-functional-py35-suse-src
    parent: nodepool-functional-py35-src-base
    vars:
      devstack_localrc:
        NODEPOOL_PAUSE_OPENSUSE_423_DIB: false
        NODEPOOL_PAUSE_OPENSUSE_150_DIB: false
        NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB: false

- job:
    name: nodepool-functional-py35-gentoo-src
    parent: nodepool-functional-py35-src-base
    vars:
      devstack_localrc:
        NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB: false

- job:
    name: nodepool-functional-py35-ubuntu-src
    parent: nodepool-functional-py35-src-base
    vars:
      devstack_localrc:
        NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB: false
        NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB: false
        NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB: false

- job:
    name: nodepool-functional-py35-debian-src
    parent: nodepool-functional-py35-src-base
    vars:
      devstack_localrc:
        NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB: false
        NODEPOOL_PAUSE_DEBIAN_BUSTER_DIB: false
- job:
description: |
Test that nodepool works with kubernetes.
@ -326,9 +210,6 @@
promote:
jobs:
- nodepool-promote-image
experimental:
jobs:
- nodepool-functional-py35-debian-src
release:
jobs:
- release-zuul-python

View File

@ -1,7 +0,0 @@
qemu-utils
kpartx
debootstrap
yum-utils
zookeeperd
zypper
gnupg2

View File

@ -1,5 +0,0 @@
qemu-img
kpartx
debootstrap
zypper
gnupg2

View File

@ -1,892 +0,0 @@
#!/bin/bash
#
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Keypair nodepool uses to ssh into the nodes it boots.
NODEPOOL_KEY=$HOME/.ssh/id_nodepool
NODEPOOL_KEY_NAME=root
NODEPOOL_PUBKEY=$HOME/.ssh/id_nodepool.pub
# Dedicated virtualenv nodepool and its from-git libraries are
# installed into (kept out of the system site-packages).
NODEPOOL_INSTALL=$HOME/nodepool-venv
# get-pip.py pre-seeded on the node by the CI image build; passed
# through to dib builds when present.
NODEPOOL_CACHE_GET_PIP=/opt/stack/cache/files/get-pip.py
# Install diskimage-builder from git into the nodepool venv, but only
# when the job requested a from-git build via LIBS_FROM_GIT; otherwise
# the released pip requirement is used.
function install_diskimage_builder {
    local lib="diskimage-builder"
    if ! use_library_from_git "$lib"; then
        return 0
    fi
    GITREPO["$lib"]=$DISKIMAGE_BUILDER_REPO_URL
    GITDIR["$lib"]=$DEST/$lib
    GITBRANCH["$lib"]=$DISKIMAGE_BUILDER_REPO_REF
    git_clone_by_name "$lib"
    setup_dev_lib "$lib"
    $NODEPOOL_INSTALL/bin/pip install $DEST/$lib
}
# Install glean from git into the nodepool venv when the job requested
# a from-git build; otherwise leave the pip requirement alone.
function install_glean {
    local lib="glean"
    if ! use_library_from_git "$lib"; then
        return 0
    fi
    GITREPO["$lib"]=$GLEAN_REPO_URL
    GITDIR["$lib"]=$DEST/$lib
    GITBRANCH["$lib"]=$GLEAN_REPO_REF
    git_clone_by_name "$lib"
    setup_dev_lib "$lib"
    $NODEPOOL_INSTALL/bin/pip install $DEST/$lib
}
# Install openstacksdk from git into the nodepool venv when requested.
# openstacksdk already has devstack GITREPO defaults, so no repo
# bookkeeping is needed here — just clone and install.
function install_openstacksdk {
    if ! use_library_from_git "openstacksdk"; then
        return 0
    fi
    git_clone_by_name "openstacksdk"
    $NODEPOOL_INSTALL/bin/pip install $DEST/openstacksdk
}
# Install dogpile.cache from git into the nodepool venv when the job
# requested a from-git build (no setup_dev_lib: it is not a devstack
# library).
function install_dogpile_cache {
    local lib="dogpile.cache"
    if ! use_library_from_git "$lib"; then
        return 0
    fi
    GITREPO["$lib"]=$DOGPILE_CACHE_REPO_URL
    GITDIR["$lib"]=$DEST/$lib
    GITBRANCH["$lib"]=$DOGPILE_CACHE_REPO_REF
    git_clone_by_name "$lib"
    $NODEPOOL_INSTALL/bin/pip install $DEST/$lib
}
# Install nodepool code
# Builds the dedicated python3 virtualenv, installs nodepool and its
# optional from-git libraries into it, then applies the debootstrap
# backport needed to build buster images on xenial.
function install_nodepool {
    VENV="virtualenv -p python3"
    $VENV $NODEPOOL_INSTALL
    # Order matters below: dib/glean first so a from-git version is
    # already present when nodepool's requirements are resolved.
    install_diskimage_builder
    install_glean

    setup_develop $DEST/nodepool
    $NODEPOOL_INSTALL/bin/pip install $DEST/nodepool

    # TODO(mordred) Install openstacksdk after nodepool so that if we're
    # in the -src job we don't re-install from the requirement.
    # We should make this more resilient, probably using install-siblings.
    install_openstacksdk
    install_dogpile_cache

    # Log the resolved package versions for debugging.
    $NODEPOOL_INSTALL/bin/pbr freeze

    # NOTE(ianw): Hack for debian buster compatible deboostrap
    sudo add-apt-repository ppa:openstack-ci-core/debootstrap
    sudo apt-get update
    sudo apt-get install -y debootstrap
}
# requires some globals from devstack, which *might* not be stable api
# points. If things break, investigate changes in those globals first.

# Generate the passphrase-less keypair nodepool injects into test
# nodes, and a /tmp/ssh_wrapper helper the check scripts use to ssh
# into booted nodes as root via the stack user.
function nodepool_create_keypairs {
    if [[ ! -f $NODEPOOL_KEY ]]; then
        ssh-keygen -f $NODEPOOL_KEY -P ""
    fi

    cat > /tmp/ssh_wrapper <<EOF
#!/bin/bash -ex
sudo -H -u stack ssh -o StrictHostKeyChecking=no -i $NODEPOOL_KEY root@\$@
EOF
    sudo chmod 0755 /tmp/ssh_wrapper
}
# Write the local "nodepool-setup" dib element (creates a writable
# /etc/nodepool in the image and relaxes apt signature checking so
# mirror-based builds work), and create the dib work directories.
function nodepool_write_elements {
    sudo mkdir -p $(dirname $NODEPOOL_CONFIG)/elements/nodepool-setup/install.d
    sudo mkdir -p $(dirname $NODEPOOL_CONFIG)/elements/nodepool-setup/root.d
    cat > /tmp/40-nodepool-setup <<EOF
sudo mkdir -p /etc/nodepool
# Make it world writeable so nodepool can write here later.
sudo chmod 777 /etc/nodepool
EOF
    cat > /tmp/50-apt-allow-unauthenticated <<EOF
if [ -d "\$TARGET_ROOT/etc/apt/apt.conf.d" ]; then
echo "APT::Get::AllowUnauthenticated \"true\";" | sudo tee \$TARGET_ROOT/etc/apt/apt.conf.d/95allow-unauthenticated
echo "Acquire::AllowInsecureRepositories \"true\";" | sudo tee -a \$TARGET_ROOT/etc/apt/apt.conf.d/95allow-unauthenticated
fi
EOF
    sudo mv /tmp/40-nodepool-setup \
        $(dirname $NODEPOOL_CONFIG)/elements/nodepool-setup/install.d/40-nodepool-setup
    sudo chmod a+x \
        $(dirname $NODEPOOL_CONFIG)/elements/nodepool-setup/install.d/40-nodepool-setup
    sudo mv /tmp/50-apt-allow-unauthenticated \
        $(dirname $NODEPOOL_CONFIG)/elements/nodepool-setup/root.d/50-apt-allow-unauthenticated
    sudo chmod a+x \
        $(dirname $NODEPOOL_CONFIG)/elements/nodepool-setup/root.d/50-apt-allow-unauthenticated
    # Work/cache directories for the builder, owned by the stack user.
    sudo mkdir -p $NODEPOOL_DIB_BASE_PATH/images
    sudo mkdir -p $NODEPOOL_DIB_BASE_PATH/tmp
    sudo mkdir -p $NODEPOOL_DIB_BASE_PATH/cache
    sudo chown -R stack:stack $NODEPOOL_DIB_BASE_PATH
}
# Write out all of nodepool's configuration: the logging config, an
# (empty) secure config, the main nodepool.yaml, and cloud cache
# settings in clouds.yaml.  Also prepares glean for from-git image
# builds and passes CI mirror / get-pip locations into the dib
# environment when present.
#
# Fixes relative to the previous revision:
#  - the debian-buster pause branch set a misspelled variable
#    (NODEPOOL_DEBIAN_STRETCH_BUSTER_READY) instead of
#    NODEPOOL_DEBIAN_BUSTER_MIN_READY, so a paused buster build still
#    requested a ready node;
#  - the debian-buster diskimage was paused on the *stretch* flag
#    ($NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB), so the buster flag never
#    controlled its own build.
function nodepool_write_config {
    sudo mkdir -p $(dirname $NODEPOOL_CONFIG)
    sudo mkdir -p $(dirname $NODEPOOL_SECURE)

    cat > /tmp/logging.conf <<EOF
[formatters]
keys=simple

[loggers]
keys=root,nodepool,openstack,kazoo,keystoneauth,novaclient

[handlers]
keys=console

[logger_root]
level=WARNING
handlers=console

[logger_nodepool]
level=DEBUG
handlers=console
qualname=nodepool
propagate=0

[logger_openstack]
level=DEBUG
handlers=console
qualname=openstack
propagate=0

[logger_keystoneauth]
level=DEBUG
handlers=console
qualname=keystoneauth
propagate=0

[logger_novaclient]
level=DEBUG
handlers=console
qualname=novaclient
propagate=0

[logger_kazoo]
level=INFO
handlers=console
qualname=kazoo
propagate=0

[handler_console]
level=DEBUG
class=StreamHandler
formatter=simple
args=(sys.stdout,)

[formatter_simple]
format=%(message)s
datefmt=
EOF
    sudo mv /tmp/logging.conf $NODEPOOL_LOGGING

    cat > /tmp/secure.conf << EOF
# Empty
EOF
    sudo mv /tmp/secure.conf $NODEPOOL_SECURE

    sudo mkdir /var/log/nodepool
    sudo chown -R stack:stack /var/log/nodepool

    # For from-git glean builds, create a branch dib can point at and
    # set the DIB_* overrides that are substituted into env-vars below.
    if use_library_from_git "glean"; then
        git --git-dir=$DEST/glean/.git checkout -b devstack
        DIB_GLEAN_INSTALLTYPE="DIB_INSTALLTYPE_simple_init: 'repo'"
        DIB_GLEAN_REPOLOCATION="DIB_REPOLOCATION_glean: '$DEST/glean'"
        DIB_GLEAN_REPOREF="DIB_REPOREF_glean: 'devstack'"
    fi

    if [ -f $NODEPOOL_CACHE_GET_PIP ] ; then
        DIB_GET_PIP="DIB_REPOLOCATION_pip_and_virtualenv: file://$NODEPOOL_CACHE_GET_PIP"
    fi
    # In the gate, use the region-local package mirrors.
    if [ -f /etc/ci/mirror_info.sh ] ; then
        source /etc/ci/mirror_info.sh
        DIB_DISTRIBUTION_MIRROR_CENTOS="DIB_DISTRIBUTION_MIRROR: $NODEPOOL_CENTOS_MIRROR"
        DIB_DISTRIBUTION_MIRROR_DEBIAN="DIB_DISTRIBUTION_MIRROR: $NODEPOOL_DEBIAN_MIRROR"
        DIB_DISTRIBUTION_MIRROR_UBUNTU="DIB_DISTRIBUTION_MIRROR: $NODEPOOL_UBUNTU_MIRROR"
        DIB_DEBOOTSTRAP_EXTRA_ARGS="DIB_DEBOOTSTRAP_EXTRA_ARGS: '--no-check-gpg'"
    fi

    # Request one ready node for every image whose build is unpaused;
    # paused images get min-ready 0.
    NODEPOOL_CENTOS_7_MIN_READY=1
    NODEPOOL_DEBIAN_STRETCH_MIN_READY=1
    NODEPOOL_DEBIAN_BUSTER_MIN_READY=1
    NODEPOOL_FEDORA_29_MIN_READY=1
    NODEPOOL_UBUNTU_BIONIC_MIN_READY=1
    NODEPOOL_UBUNTU_TRUSTY_MIN_READY=1
    NODEPOOL_UBUNTU_XENIAL_MIN_READY=1
    NODEPOOL_OPENSUSE_423_MIN_READY=1
    NODEPOOL_OPENSUSE_150_MIN_READY=1
    NODEPOOL_OPENSUSE_TUMBLEWEED_MIN_READY=1
    NODEPOOL_GENTOO_17_0_SYSTEMD_MIN_READY=1

    if $NODEPOOL_PAUSE_CENTOS_7_DIB ; then
        NODEPOOL_CENTOS_7_MIN_READY=0
    fi
    if $NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB ; then
        NODEPOOL_DEBIAN_STRETCH_MIN_READY=0
    fi
    if $NODEPOOL_PAUSE_DEBIAN_BUSTER_DIB ; then
        NODEPOOL_DEBIAN_BUSTER_MIN_READY=0
    fi
    if $NODEPOOL_PAUSE_FEDORA_29_DIB ; then
        NODEPOOL_FEDORA_29_MIN_READY=0
    fi
    if $NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB ; then
        NODEPOOL_UBUNTU_BIONIC_MIN_READY=0
    fi
    if $NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB ; then
        NODEPOOL_UBUNTU_TRUSTY_MIN_READY=0
    fi
    if $NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB ; then
        NODEPOOL_UBUNTU_XENIAL_MIN_READY=0
    fi
    if $NODEPOOL_PAUSE_OPENSUSE_423_DIB ; then
        NODEPOOL_OPENSUSE_423_MIN_READY=0
    fi
    if $NODEPOOL_PAUSE_OPENSUSE_150_DIB ; then
        NODEPOOL_OPENSUSE_150_MIN_READY=0
    fi
    if $NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB ; then
        NODEPOOL_OPENSUSE_TUMBLEWEED_MIN_READY=0
    fi
    if $NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB; then
        NODEPOOL_GENTOO_17_0_SYSTEMD_MIN_READY=0
    fi

    cat > /tmp/nodepool.yaml <<EOF
# You will need to make and populate this path as necessary,
# cloning nodepool does not do this. Further in this doc we have an
# example element.
elements-dir: $(dirname $NODEPOOL_CONFIG)/elements
images-dir: $NODEPOOL_DIB_BASE_PATH/images

zookeeper-servers:
  - host: localhost
    port: 2181

labels:
  - name: centos-7
    min-ready: $NODEPOOL_CENTOS_7_MIN_READY
  - name: debian-stretch
    min-ready: $NODEPOOL_DEBIAN_STRETCH_MIN_READY
  - name: debian-buster
    min-ready: $NODEPOOL_DEBIAN_BUSTER_MIN_READY
  - name: fedora-29
    min-ready: $NODEPOOL_FEDORA_29_MIN_READY
  - name: ubuntu-bionic
    min-ready: $NODEPOOL_UBUNTU_BIONIC_MIN_READY
  - name: ubuntu-trusty
    min-ready: $NODEPOOL_UBUNTU_TRUSTY_MIN_READY
  - name: ubuntu-xenial
    min-ready: $NODEPOOL_UBUNTU_XENIAL_MIN_READY
  - name: opensuse-423
    min-ready: $NODEPOOL_OPENSUSE_423_MIN_READY
  - name: opensuse-150
    min-ready: $NODEPOOL_OPENSUSE_150_MIN_READY
  - name: opensuse-tumbleweed
    min-ready: $NODEPOOL_OPENSUSE_TUMBLEWEED_MIN_READY
  - name: gentoo-17-0-systemd
    min-ready: $NODEPOOL_GENTOO_17_0_SYSTEMD_MIN_READY

providers:
  - name: devstack
    region-name: '$REGION_NAME'
    cloud: devstack
    # Long boot timeout to deal with potentially nested virt.
    boot-timeout: 600
    launch-timeout: 900
    rate: 0.25
    diskimages:
      - name: centos-7
        config-drive: true
      - name: debian-stretch
        config-drive: true
      - name: debian-buster
        config-drive: true
      - name: fedora-29
        config-drive: true
      - name: ubuntu-bionic
        config-drive: true
      - name: ubuntu-trusty
        config-drive: true
      - name: ubuntu-xenial
        config-drive: true
      - name: opensuse-423
        config-drive: true
      - name: opensuse-150
        config-drive: true
      - name: opensuse-tumbleweed
        config-drive: true
      - name: gentoo-17-0-systemd
        config-drive: true
    pools:
      - name: main
        max-servers: 5
        labels:
          - name: centos-7
            diskimage: centos-7
            min-ram: 1024
            flavor-name: 'nodepool'
            console-log: True
            key-name: $NODEPOOL_KEY_NAME
            instance-properties:
              nodepool_devstack: testing
            userdata: |
              #cloud-config
              write_files:
              - content: |
                  testpassed
                path: /etc/testfile_nodepool_userdata
          - name: debian-stretch
            diskimage: debian-stretch
            min-ram: 512
            flavor-name: 'nodepool'
            console-log: True
            key-name: $NODEPOOL_KEY_NAME
            instance-properties:
              nodepool_devstack: testing
            userdata: |
              #cloud-config
              write_files:
              - content: |
                  testpassed
                path: /etc/testfile_nodepool_userdata
          - name: debian-buster
            diskimage: debian-buster
            min-ram: 512
            flavor-name: 'nodepool'
            console-log: True
            key-name: $NODEPOOL_KEY_NAME
            instance-properties:
              nodepool_devstack: testing
            userdata: |
              #cloud-config
              write_files:
              - content: |
                  testpassed
                path: /etc/testfile_nodepool_userdata
          - name: fedora-29
            diskimage: fedora-29
            min-ram: 1024
            flavor-name: 'nodepool'
            console-log: True
            key-name: $NODEPOOL_KEY_NAME
            instance-properties:
              nodepool_devstack: testing
            userdata: |
              #cloud-config
              write_files:
              - content: |
                  testpassed
                path: /etc/testfile_nodepool_userdata
          - name: ubuntu-bionic
            diskimage: ubuntu-bionic
            min-ram: 512
            flavor-name: 'nodepool'
            console-log: True
            key-name: $NODEPOOL_KEY_NAME
            instance-properties:
              nodepool_devstack: testing
            userdata: |
              #cloud-config
              write_files:
              - content: |
                  testpassed
                path: /etc/testfile_nodepool_userdata
          - name: ubuntu-trusty
            diskimage: ubuntu-trusty
            min-ram: 512
            flavor-name: 'nodepool'
            console-log: True
            key-name: $NODEPOOL_KEY_NAME
            instance-properties:
              nodepool_devstack: testing
            userdata: |
              #cloud-config
              write_files:
              - content: |
                  testpassed
                path: /etc/testfile_nodepool_userdata
          - name: ubuntu-xenial
            diskimage: ubuntu-xenial
            min-ram: 512
            flavor-name: 'nodepool'
            console-log: True
            key-name: $NODEPOOL_KEY_NAME
            instance-properties:
              nodepool_devstack: testing
            userdata: |
              #cloud-config
              write_files:
              - content: |
                  testpassed
                path: /etc/testfile_nodepool_userdata
          - name: opensuse-423
            diskimage: opensuse-423
            min-ram: 512
            flavor-name: 'nodepool'
            console-log: True
            key-name: $NODEPOOL_KEY_NAME
            instance-properties:
              nodepool_devstack: testing
            userdata: |
              #cloud-config
              write_files:
              - content: |
                  testpassed
                path: /etc/testfile_nodepool_userdata
          - name: opensuse-150
            diskimage: opensuse-150
            min-ram: 512
            flavor-name: 'nodepool'
            console-log: True
            key-name: $NODEPOOL_KEY_NAME
            instance-properties:
              nodepool_devstack: testing
            userdata: |
              #cloud-config
              write_files:
              - content: |
                  testpassed
                path: /etc/testfile_nodepool_userdata
          - name: opensuse-tumbleweed
            diskimage: opensuse-tumbleweed
            min-ram: 512
            flavor-name: 'nodepool'
            console-log: True
            key-name: $NODEPOOL_KEY_NAME
            instance-properties:
              nodepool_devstack: testing
            userdata: |
              #cloud-config
              write_files:
              - content: |
                  testpassed
                path: /etc/testfile_nodepool_userdata
          - name: gentoo-17-0-systemd
            diskimage: gentoo-17-0-systemd
            min-ram: 512
            flavor-name: 'nodepool'
            console-log: True
            key-name: $NODEPOOL_KEY_NAME
            instance-properties:
              nodepool_devstack: testing
            userdata: |
              #cloud-config
              write_files:
              - content: |
                  testpassed
                path: /etc/testfile_nodepool_userdata

diskimages:
  - name: centos-7
    pause: $NODEPOOL_PAUSE_CENTOS_7_DIB
    rebuild-age: 86400
    elements:
      - centos-minimal
      - vm
      - simple-init
      - growroot
      - devuser
      - openssh-server
      - nodepool-setup
    env-vars:
      TMPDIR: $NODEPOOL_DIB_BASE_PATH/tmp
      DIB_CHECKSUM: '1'
      DIB_SHOW_IMAGE_USAGE: '1'
      DIB_IMAGE_CACHE: $NODEPOOL_DIB_BASE_PATH/cache
      DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
      $DIB_DISTRIBUTION_MIRROR_CENTOS
      $DIB_GET_PIP
      $DIB_GLEAN_INSTALLTYPE
      $DIB_GLEAN_REPOLOCATION
      $DIB_GLEAN_REPOREF
      DIB_SIMPLE_INIT_NETWORKMANAGER: '1'
  - name: debian-stretch
    pause: $NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB
    rebuild-age: 86400
    elements:
      - debian-minimal
      - vm
      - simple-init
      - growroot
      - devuser
      - openssh-server
      - nodepool-setup
    release: stretch
    env-vars:
      TMPDIR: $NODEPOOL_DIB_BASE_PATH/tmp
      DIB_CHECKSUM: '1'
      DIB_SHOW_IMAGE_USAGE: '1'
      DIB_IMAGE_CACHE: $NODEPOOL_DIB_BASE_PATH/cache
      DIB_APT_LOCAL_CACHE: '0'
      DIB_DISABLE_APT_CLEANUP: '1'
      DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
      DIB_DEBIAN_COMPONENTS: 'main'
      $DIB_DISTRIBUTION_MIRROR_DEBIAN
      $DIB_DEBOOTSTRAP_EXTRA_ARGS
      $DIB_GET_PIP
      $DIB_GLEAN_INSTALLTYPE
      $DIB_GLEAN_REPOLOCATION
      $DIB_GLEAN_REPOREF
  - name: debian-buster
    pause: $NODEPOOL_PAUSE_DEBIAN_BUSTER_DIB
    rebuild-age: 86400
    elements:
      - debian-minimal
      - vm
      - simple-init
      - growroot
      - devuser
      - openssh-server
      - nodepool-setup
    release: buster
    env-vars:
      TMPDIR: $NODEPOOL_DIB_BASE_PATH/tmp
      DIB_CHECKSUM: '1'
      DIB_SHOW_IMAGE_USAGE: '1'
      DIB_IMAGE_CACHE: $NODEPOOL_DIB_BASE_PATH/cache
      DIB_APT_LOCAL_CACHE: '0'
      DIB_DISABLE_APT_CLEANUP: '1'
      DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
      DIB_DEBIAN_COMPONENTS: 'main'
      $DIB_DISTRIBUTION_MIRROR_DEBIAN
      $DIB_DEBOOTSTRAP_EXTRA_ARGS
      $DIB_GET_PIP
      $DIB_GLEAN_INSTALLTYPE
      $DIB_GLEAN_REPOLOCATION
      $DIB_GLEAN_REPOREF
  - name: fedora-29
    pause: $NODEPOOL_PAUSE_FEDORA_29_DIB
    rebuild-age: 86400
    elements:
      - fedora-minimal
      - vm
      - simple-init
      - growroot
      - devuser
      - openssh-server
      - nodepool-setup
    release: 29
    env-vars:
      TMPDIR: $NODEPOOL_DIB_BASE_PATH/tmp
      DIB_CHECKSUM: '1'
      DIB_SHOW_IMAGE_USAGE: '1'
      DIB_IMAGE_CACHE: $NODEPOOL_DIB_BASE_PATH/cache
      DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
      $DIB_GET_PIP
      $DIB_GLEAN_INSTALLTYPE
      $DIB_GLEAN_REPOLOCATION
      $DIB_GLEAN_REPOREF
      DIB_SIMPLE_INIT_NETWORKMANAGER: '1'
  - name: ubuntu-bionic
    pause: $NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB
    rebuild-age: 86400
    elements:
      - ubuntu-minimal
      - vm
      - simple-init
      - growroot
      - devuser
      - openssh-server
      - nodepool-setup
    release: bionic
    env-vars:
      TMPDIR: $NODEPOOL_DIB_BASE_PATH/tmp
      DIB_CHECKSUM: '1'
      DIB_SHOW_IMAGE_USAGE: '1'
      DIB_IMAGE_CACHE: $NODEPOOL_DIB_BASE_PATH/cache
      DIB_APT_LOCAL_CACHE: '0'
      DIB_DISABLE_APT_CLEANUP: '1'
      DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
      DIB_DEBIAN_COMPONENTS: 'main,universe'
      $DIB_DISTRIBUTION_MIRROR_UBUNTU
      $DIB_DEBOOTSTRAP_EXTRA_ARGS
      $DIB_GET_PIP
      $DIB_GLEAN_INSTALLTYPE
      $DIB_GLEAN_REPOLOCATION
      $DIB_GLEAN_REPOREF
  - name: ubuntu-trusty
    pause: $NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB
    rebuild-age: 86400
    elements:
      - ubuntu-minimal
      - vm
      - simple-init
      - growroot
      - devuser
      - openssh-server
      - nodepool-setup
    release: trusty
    env-vars:
      TMPDIR: $NODEPOOL_DIB_BASE_PATH/tmp
      DIB_CHECKSUM: '1'
      DIB_SHOW_IMAGE_USAGE: '1'
      DIB_IMAGE_CACHE: $NODEPOOL_DIB_BASE_PATH/cache
      DIB_APT_LOCAL_CACHE: '0'
      DIB_DISABLE_APT_CLEANUP: '1'
      DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
      DIB_DEBIAN_COMPONENTS: 'main,universe'
      $DIB_DISTRIBUTION_MIRROR_UBUNTU
      $DIB_DEBOOTSTRAP_EXTRA_ARGS
      $DIB_GET_PIP
      $DIB_GLEAN_INSTALLTYPE
      $DIB_GLEAN_REPOLOCATION
      $DIB_GLEAN_REPOREF
  - name: ubuntu-xenial
    pause: $NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB
    rebuild-age: 86400
    elements:
      - ubuntu-minimal
      - vm
      - simple-init
      - growroot
      - devuser
      - openssh-server
      - nodepool-setup
    release: xenial
    env-vars:
      TMPDIR: $NODEPOOL_DIB_BASE_PATH/tmp
      DIB_CHECKSUM: '1'
      DIB_SHOW_IMAGE_USAGE: '1'
      DIB_IMAGE_CACHE: $NODEPOOL_DIB_BASE_PATH/cache
      DIB_APT_LOCAL_CACHE: '0'
      DIB_DISABLE_APT_CLEANUP: '1'
      DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
      DIB_DEBIAN_COMPONENTS: 'main,universe'
      $DIB_DISTRIBUTION_MIRROR_UBUNTU
      $DIB_DEBOOTSTRAP_EXTRA_ARGS
      $DIB_GET_PIP
      $DIB_GLEAN_INSTALLTYPE
      $DIB_GLEAN_REPOLOCATION
      $DIB_GLEAN_REPOREF
  - name: opensuse-423
    pause: $NODEPOOL_PAUSE_OPENSUSE_423_DIB
    rebuild-age: 86400
    elements:
      - opensuse-minimal
      - vm
      - simple-init
      - growroot
      - devuser
      - openssh-server
      - nodepool-setup
    release: '42.3'
    env-vars:
      TMPDIR: $NODEPOOL_DIB_BASE_PATH/tmp
      DIB_CHECKSUM: '1'
      DIB_SHOW_IMAGE_USAGE: '1'
      DIB_IMAGE_CACHE: $NODEPOOL_DIB_BASE_PATH/cache
      DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
      $DIB_GET_PIP
      $DIB_GLEAN_INSTALLTYPE
      $DIB_GLEAN_REPOLOCATION
      $DIB_GLEAN_REPOREF
  - name: opensuse-150
    pause: $NODEPOOL_PAUSE_OPENSUSE_150_DIB
    rebuild-age: 86400
    elements:
      - opensuse-minimal
      - vm
      - simple-init
      - growroot
      - devuser
      - openssh-server
      - nodepool-setup
    release: '15.0'
    env-vars:
      TMPDIR: $NODEPOOL_DIB_BASE_PATH/tmp
      DIB_CHECKSUM: '1'
      DIB_SHOW_IMAGE_USAGE: '1'
      DIB_IMAGE_CACHE: $NODEPOOL_DIB_BASE_PATH/cache
      DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
      $DIB_GET_PIP
      $DIB_GLEAN_INSTALLTYPE
      $DIB_GLEAN_REPOLOCATION
      $DIB_GLEAN_REPOREF
  - name: opensuse-tumbleweed
    pause: $NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB
    rebuild-age: 86400
    elements:
      - opensuse-minimal
      - vm
      - simple-init
      - growroot
      - devuser
      - openssh-server
      - nodepool-setup
    release: 'tumbleweed'
    env-vars:
      TMPDIR: $NODEPOOL_DIB_BASE_PATH/tmp
      DIB_CHECKSUM: '1'
      DIB_SHOW_IMAGE_USAGE: '1'
      DIB_IMAGE_CACHE: $NODEPOOL_DIB_BASE_PATH/cache
      DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
      $DIB_GET_PIP
      $DIB_GLEAN_INSTALLTYPE
      $DIB_GLEAN_REPOLOCATION
      $DIB_GLEAN_REPOREF
  - name: gentoo-17-0-systemd
    pause: $NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB
    rebuild-age: 86400
    elements:
      - gentoo
      - vm
      - simple-init
      - growroot
      - devuser
      - openssh-server
      - nodepool-setup
    env-vars:
      TMPDIR: $NODEPOOL_DIB_BASE_PATH/tmp
      DIB_CHECKSUM: '1'
      DIB_SHOW_IMAGE_USAGE: '1'
      DIB_IMAGE_CACHE: $NODEPOOL_DIB_BASE_PATH/cache
      DIB_DEV_USER_AUTHORIZED_KEYS: $NODEPOOL_PUBKEY
      $DIB_GET_PIP
      $DIB_GLEAN_INSTALLTYPE
      $DIB_GLEAN_REPOLOCATION
      $DIB_GLEAN_REPOREF
      GENTOO_PROFILE: 'default/linux/amd64/17.0/systemd'
EOF
    sudo mv /tmp/nodepool.yaml $NODEPOOL_CONFIG

    # Append an API cache section and IPv4-only client settings to the
    # clouds.yaml devstack wrote.
    cp /etc/openstack/clouds.yaml /tmp
    cat >>/tmp/clouds.yaml <<EOF
cache:
  max_age: 3600
  class: dogpile.cache.dbm
  arguments:
    filename: $HOME/.cache/openstack/shade.dbm
  expiration:
    floating-ip: 5
    server: 5
    port: 5
# TODO(pabelanger): Remove once glean fully supports IPv6.
client:
  force_ipv4: True
EOF
    sudo mv /tmp/clouds.yaml /etc/openstack/clouds.yaml
    mkdir -p $HOME/.cache/openstack/
}
# Move zookeeper's data directory onto a tmpfs to avoid slow-disk
# flakiness during the test run.  Reads dataDir from the distro's
# zoo.cfg and remounts it while the service is stopped.
function nodepool_zk_on_tmpfs {
    local datadir
    datadir=$(sed -n -e 's/^dataDir=//p' /etc/zookeeper/conf/zoo.cfg)
    sudo service zookeeper stop
    sudo mount -t tmpfs -o nodev,nosuid,size=500M none $datadir
    sudo service zookeeper start
}
# Create configs
# Setup custom flavor
# devstack "post-config" phase entry point: write everything nodepool
# needs before services start.
function configure_nodepool {
    # build a dedicated keypair for nodepool to use with guests
    nodepool_create_keypairs

    # write the nodepool config
    nodepool_write_config

    # write the elements
    nodepool_write_elements
}
# devstack "test-config" phase entry point: create flavors, open
# security groups, boot a decoy VM, then start the nodepool launcher
# and builder daemons (plus a fake statsd) against a tmpfs-backed
# zookeeper.
function start_nodepool {
    # build a custom flavor that's more friendly to nodepool; give
    # disks a little room to grow
    local available_flavors=$(nova flavor-list)
    if [[ ! ( $available_flavors =~ 'nodepool-512' ) ]]; then
        nova flavor-create nodepool-512 64 512 5 1
    fi
    if [[ ! ( $available_flavors =~ 'nodepool-1024' ) ]]; then
        nova flavor-create nodepool-1024 128 1024 5 1
    fi

    # build sec group rules to reach the nodes, we need to do this
    # this late because nova hasn't started until this phase.
    if [[ -z $(openstack security group rule list --protocol tcp default | grep '65535') ]]; then
        openstack --os-project-name demo --os-username demo security group rule create --ingress --protocol tcp --dst-port 1:65535 --remote-ip 0.0.0.0/0 default
        openstack --os-project-name demo --os-username demo security group rule create --ingress --protocol udp --dst-port 1:65535 --remote-ip 0.0.0.0/0 default
    fi

    # start an unmanaged vm that should be ignored by nodepool
    local cirros_image=$(openstack --os-project-name demo --os-username demo image list | grep cirros | awk '{print $4}' | head -n1)
    openstack --os-project-name demo --os-username demo server create --flavor cirros256 --image $cirros_image unmanaged-vm

    # create root keypair to use with glean for devstack cloud.
    nova --os-project-name demo --os-username demo \
        keypair-add --pub-key $NODEPOOL_PUBKEY $NODEPOOL_KEY_NAME

    export PATH=$NODEPOOL_INSTALL/bin:$PATH

    # run a fake statsd so we test stats sending paths
    export STATSD_HOST=localhost
    export STATSD_PORT=8125
    run_process statsd "/usr/bin/socat -u udp-recv:$STATSD_PORT -"

    # Restart nodepool's zk on a tmpfs
    nodepool_zk_on_tmpfs

    # Ensure our configuration is valid.
    $NODEPOOL_INSTALL/bin/nodepool -c $NODEPOOL_CONFIG config-validate

    run_process nodepool-launcher "$NODEPOOL_INSTALL/bin/nodepool-launcher -c $NODEPOOL_CONFIG -s $NODEPOOL_SECURE -l $NODEPOOL_LOGGING -d"
    run_process nodepool-builder "$NODEPOOL_INSTALL/bin/nodepool-builder -c $NODEPOOL_CONFIG -l $NODEPOOL_LOGGING -d"
    :
}
# devstack "unstack" phase entry point: stop the nodepool daemons and
# confirm nodepool left the unmanaged VM alone.
function shutdown_nodepool {
    # NOTE: the previous "stop_process nodepool" referred to a service
    # name that is never started (start_nodepool runs
    # nodepool-launcher and nodepool-builder), so nothing was actually
    # stopped.  Stop the real services.
    stop_process nodepool-launcher
    stop_process nodepool-builder
    # Verify that the unmanaged vm still exists
    openstack --os-project-name demo --os-username demo server show unmanaged-vm
    :
}
# devstack "clean" phase entry point; nothing to remove beyond what
# unstack already handled.
function cleanup_nodepool {
    :
}
# check for service enabled
# devstack plugin dispatch: devstack sources this file repeatedly with
# ($1, $2) naming the current phase; route each phase to the matching
# function above.
if is_service_enabled nodepool-launcher; then

    if [[ "$1" == "stack" && "$2" == "install" ]]; then
        # Perform installation of service source
        echo_summary "Installing nodepool"
        install_nodepool

    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
        # Configure after the other layer 1 and 2 services have been configured
        echo_summary "Configuring nodepool"
        configure_nodepool

    elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
        # Initialize and start the nodepool service
        echo_summary "Initializing nodepool"
        start_nodepool
    fi

    if [[ "$1" == "unstack" ]]; then
        # Shut down nodepool services
        # no-op
        shutdown_nodepool
    fi

    if [[ "$1" == "clean" ]]; then
        # Remove state and transient data
        # Remember clean.sh first calls unstack.sh
        # no-op
        cleanup_nodepool
    fi
fi

View File

@ -1,33 +0,0 @@
# Paths for the nodepool config files written by plugin.sh.
NODEPOOL_CONFIG=/etc/nodepool/nodepool.yaml
NODEPOOL_LOGGING=/etc/nodepool/logging.conf
NODEPOOL_SECURE=/etc/nodepool/secure.conf
# Base path for diskimage-builder work/cache/image directories.
NODEPOOL_DIB_BASE_PATH=/opt/dib

# Flags to control which images we build.
# NOTE(pabelanger): Be sure to also update tools/check_devstack_plugin.sh if you
# change the defaults.
NODEPOOL_PAUSE_CENTOS_7_DIB=${NODEPOOL_PAUSE_CENTOS_7_DIB:-true}
NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB=${NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB:-true}
NODEPOOL_PAUSE_DEBIAN_BUSTER_DIB=${NODEPOOL_PAUSE_DEBIAN_BUSTER_DIB:-true}
NODEPOOL_PAUSE_FEDORA_29_DIB=${NODEPOOL_PAUSE_FEDORA_29_DIB:-true}
NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB=${NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB:-true}
NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB=${NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB:-true}
NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB=${NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB:-true}
NODEPOOL_PAUSE_OPENSUSE_423_DIB=${NODEPOOL_PAUSE_OPENSUSE_423_DIB:-true}
NODEPOOL_PAUSE_OPENSUSE_150_DIB=${NODEPOOL_PAUSE_OPENSUSE_150_DIB:-true}
NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB=${NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB:-true}
NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB=${NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB:-true}

# Repo/ref overrides for from-git library installs (see plugin.sh).
DISKIMAGE_BUILDER_REPO_URL=${DISKIMAGE_BUILDER_REPO_URL:-https://opendev.org/openstack/diskimage-builder}
DISKIMAGE_BUILDER_REPO_REF=${DISKIMAGE_BUILDER_REPO_REF:-master}
GLEAN_REPO_URL=${GLEAN_REPO_URL:-https://opendev.org/opendev/glean}
GLEAN_REPO_REF=${GLEAN_REPO_REF:-master}
# NOTE(review): this URL points at sqlalchemy/dogpile, but the project
# repo is sqlalchemy/dogpile.cache (as used in the zuul
# required-projects) — confirm the clone URL is correct.
DOGPILE_CACHE_REPO_URL=${DOGPILE_CACHE_REPO_URL:-https://github.com/sqlalchemy/dogpile.git}
DOGPILE_CACHE_REPO_REF=${DOGPILE_CACHE_REPO_REF:-master}

# NOTE(review): geard does not appear to be used by plugin.sh —
# presumably a leftover from gearman-based nodepool; verify before
# relying on it.
enable_service geard
enable_service statsd
enable_service nodepool-launcher
enable_service nodepool-builder

View File

@ -1,276 +0,0 @@
#!/bin/bash -ex
# Functional check for the nodepool devstack plugin: wait for each
# unpaused image to build and boot, then verify the booted nodes.
# $1 is the directory nodepool listings are logged into on failure.

LOGDIR=$1

# Set to indicate an error return
RETURN=0
FAILURE_REASON=""

# Locations written by the devstack plugin; overridable for local runs.
NODEPOOL_INSTALL=${NODEPOOL_INSTALL:-/opt/stack/nodepool-venv}
NODEPOOL_CONFIG=${NODEPOOL_CONFIG:-/etc/nodepool/nodepool.yaml}
NODEPOOL_SECURE=${NODEPOOL_SECURE:-/etc/nodepool/secure.conf}
# Base nodepool CLI invocation used by every check below.
NODEPOOL="$NODEPOOL_INSTALL/bin/nodepool -c $NODEPOOL_CONFIG -s $NODEPOOL_SECURE"

# Source stackrc so that we get the variables about enabled images set
# from the devstack job. That's the ones we'll wait for below.
if [[ ! -f /opt/stack/devstack/stackrc ]]; then
    echo "Can not find enabled images from devstack run!"
    exit 1
else
    source /opt/stack/devstack/stackrc
fi

# Default every image to paused unless the job environment unpaused it
# (keep in sync with devstack/settings).
NODEPOOL_PAUSE_CENTOS_7_DIB=${NODEPOOL_PAUSE_CENTOS_7_DIB:-True}
NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB=${NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB:-True}
NODEPOOL_PAUSE_DEBIAN_BUSTER_DIB=${NODEPOOL_PAUSE_DEBIAN_BUSTER_DIB:-True}
NODEPOOL_PAUSE_FEDORA_29_DIB=${NODEPOOL_PAUSE_FEDORA_29_DIB:-True}
NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB=${NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB:-True}
NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB=${NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB:-True}
NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB=${NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB:-True}
NODEPOOL_PAUSE_OPENSUSE_423_DIB=${NODEPOOL_PAUSE_OPENSUSE_423_DIB:-True}
NODEPOOL_PAUSE_OPENSUSE_150_DIB=${NODEPOOL_PAUSE_OPENSUSE_150_DIB:-True}
NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB=${NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB:-True}
NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB=${NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB:-True}
# ssh into the ready node with label $1 (via /tmp/ssh_wrapper, which
# sshes as root through the stack user) and verify growroot resized
# the root partition and the config-drive metadata made it onboard.
# Sets RETURN/FAILURE_REASON on failure; also sets the global $node.
function sshintonode {
    name=$1
    state='ready'

    # Column 6 of `nodepool list` is the node's IP address.
    node=`$NODEPOOL list | grep $name | grep $state | cut -d '|' -f6 | tr -d ' '`
    /tmp/ssh_wrapper $node ls /

    # Check that the root partition grew on boot; it should be a 5GiB
    # partition minus some space for the boot partition.  However
    # empirical evidence suggests there is some modulo maths going on,
    # (possibly with alignment?) that means we can vary up to even
    # 64MiB.  Thus we choose an expected value that gives us enough
    # slop to avoid false matches, but still indicates we resized up.
    root_size=$(/tmp/ssh_wrapper $node -- lsblk -rbno SIZE /dev/vda1)
    expected_root_size=$(( 5000000000 ))
    if [[ $root_size -lt $expected_root_size ]]; then
        echo "*** Root device does not appear to have grown: $root_size"
        FAILURE_REASON="Root partition of $name does not appear to have grown: $root_size < $expected_root_size"
        RETURN=1
    fi

    # Check we saw metadata deployed to the config-drive
    /tmp/ssh_wrapper $node \
        "dd status=none if=/dev/sr0 | tr -cd '[:print:]' | grep -q nodepool_devstack"
    if [[ $? -ne 0 ]]; then
        echo "*** Failed to find metadata in config-drive"
        FAILURE_REASON="Failed to find meta-data in config-drive for $node"
        RETURN=1
    fi
}
# Verify the userdata configured in nodepool.yaml reached the booted
# server with label $1: decode nova's user_data field and diff it
# against the expected cloud-config.  The heredoc indentation below is
# significant — it must byte-match the userdata block written into
# nodepool.yaml by the devstack plugin.
function showserver {
    name=$1
    state='ready'

    # Column 5 of `nodepool list` is the provider server id.
    node_id=`$NODEPOOL list | grep $name | grep $state | cut -d '|' -f5 | tr -d ' '`
    EXPECTED=$(mktemp)
    RESULT=$(mktemp)
    source /opt/stack/devstack/openrc admin admin

    nova show $node_id | grep -Eo "user_data[ ]+.*|[ ]*$" | awk {'print $3'} |\
        base64 --decode > $RESULT
    cat <<EOF >$EXPECTED
#cloud-config
write_files:
- content: |
    testpassed
  path: /etc/testfile_nodepool_userdata
EOF
    diff $EXPECTED $RESULT
    if [[ $? -ne 0 ]]; then
        echo "*** Failed to find userdata on server!"
        # NOTE(review): $node here is the IP set by the preceding
        # sshintonode call, not this function's $name — confirm that
        # is intended.
        FAILURE_REASON="Failed to find userdata on server for $node"
        echo "Expected userdata:"
        cat $EXPECTED
        echo "Found userdata:"
        cat $RESULT
        RETURN=1
    fi
}
# Verify NetworkManager configured an interface on the ready node with
# label $1 (only meaningful for images built with
# DIB_SIMPLE_INIT_NETWORKMANAGER).  Sets RETURN/FAILURE_REASON on
# failure.
function checknm {
    name=$1
    state='ready'

    node=`$NODEPOOL list | grep $name | grep $state | cut -d '|' -f6 | tr -d ' '`
    nm_output=$(/tmp/ssh_wrapper $node -- nmcli c)

    # virtio device is eth0 on older, ens3 on newer
    if [[ ! ${nm_output} =~ (eth0|ens3) ]]; then
        echo "*** Failed to find interface in NetworkManager connections"
        /tmp/ssh_wrapper $node -- nmcli c
        /tmp/ssh_wrapper $node -- nmcli device
        FAILURE_REASON="Failed to find interface in NetworkManager connections"
        RETURN=1
    fi
}
# Poll until the diskimage named $1 shows up as ready in
# `nodepool image-list`, refreshing debug listings in $LOGDIR between
# polls.  Aborts the whole script if the build fails repeatedly
# (>= 4 build logs present for this image).
function waitforimage {
    local name=$1
    local state='ready'
    local builds

    while ! $NODEPOOL image-list | grep $name | grep $state; do
        $NODEPOOL image-list > ${LOGDIR}/nodepool-image-list.txt
        $NODEPOOL list --detail > ${LOGDIR}/nodepool-list.txt

        builds=$(ls -l /var/log/nodepool/builds/ | grep $name | wc -l)
        if [[ ${builds} -ge 4 ]]; then
            echo "*** Build of $name failed at least 3 times, aborting"
            exit 1
        fi
        sleep 10
    done
}
# Poll until an unlocked node with label $1 reaches the ready state,
# refreshing the debug listings in $LOGDIR between polls.
function waitfornode {
    name=$1
    state='ready'
    until $NODEPOOL list | grep $name | grep $state | grep "unlocked"; do
        $NODEPOOL image-list > ${LOGDIR}/nodepool-image-list.txt
        $NODEPOOL list --detail > ${LOGDIR}/nodepool-list.txt
        sleep 10
    done
}
# Run the full sequence of checks for one image label ($1): wait for
# the image to build, wait for a node to boot from it, then verify the
# node from the inside (ssh/growroot/config-drive) and outside
# (userdata).  Pass 'checknm' as $2 on distros that configure
# NetworkManager (built with DIB_SIMPLE_INIT_NETWORKMANAGER).
# This replaces eleven copy-pasted if-blocks with identical behavior.
function check_image {
    local image=$1
    local nm_check=${2:-}
    # check that image built
    waitforimage $image
    # check image was bootable
    waitfornode $image
    # check ssh for root user
    sshintonode $image
    if [[ $nm_check == 'checknm' ]]; then
        # networkmanager check
        checknm $image
    fi
    # userdata check
    showserver $image
}

if [ ${NODEPOOL_PAUSE_CENTOS_7_DIB,,} = 'false' ]; then
    check_image centos-7 checknm
fi

if [ ${NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB,,} = 'false' ]; then
    check_image debian-stretch
fi

if [ ${NODEPOOL_PAUSE_DEBIAN_BUSTER_DIB,,} = 'false' ]; then
    check_image debian-buster
fi

if [ ${NODEPOOL_PAUSE_FEDORA_29_DIB,,} = 'false' ]; then
    check_image fedora-29 checknm
fi

if [ ${NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB,,} = 'false' ]; then
    check_image ubuntu-bionic
fi

if [ ${NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB,,} = 'false' ]; then
    check_image ubuntu-trusty
fi

if [ ${NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB,,} = 'false' ]; then
    check_image ubuntu-xenial
fi

if [ ${NODEPOOL_PAUSE_OPENSUSE_423_DIB,,} = 'false' ]; then
    check_image opensuse-423
fi

if [ ${NODEPOOL_PAUSE_OPENSUSE_150_DIB,,} = 'false' ]; then
    check_image opensuse-150
fi

if [ ${NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB,,} = 'false' ]; then
    check_image opensuse-tumbleweed
fi

if [ ${NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB,,} = 'false' ]; then
    check_image gentoo-17-0-systemd
fi

set -o errexit
# Show the built nodes
$NODEPOOL list

# Try to delete the nodes that were just built
$NODEPOOL delete --now 0000000000

# show the deleted nodes (and their replacements may be building)
$NODEPOOL list

if [[ -n "${FAILURE_REASON}" ]]; then
    echo "${FAILURE_REASON}"
fi
exit $RETURN