Convert gate to file backend rather than partition

The existing gate partitioned a disk for use with Docker; depending
on the gate it would use the swap disk (RAX) or a spare disk (HP).
However, the new gates (Bluebox + OVH) have neither a spare disk nor
a swap disk. This leaves us with one choice: a file-based loop
device.

This patch creates a file at /swapfile to ensure we have swap, and a
file at /docker to provide a loop device backing Docker's storage.

Right now the /docker file is 10GB and the /swapfile is 4GB due to
disk size limitations in the gate across all servers and types. This
has proven to be enough space for all of our current tests.
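
For reference, a minimal sketch of the file-backed approach (this
mirrors the setup_disk helper added below, e.g. in
tools/setup_RedHat.sh, with the btrfs subvolume handling omitted):

    # Swap backed by a 4GB file instead of a swap partition
    sudo dd if=/dev/zero of=/swapfile bs=1M count=4096
    sudo chmod 0600 /swapfile
    sudo mkswap /swapfile
    sudo swapon /swapfile

    # Docker storage backed by a 10GB file attached to a loop device
    sudo dd if=/dev/zero of=/docker bs=1M count=10240
    sudo losetup -f /docker
    DEV=$(losetup -a | awk -F: '/\/docker/ {print $1}')
    sudo mkfs.btrfs -f ${DEV}
    sudo mount -o noatime ${DEV} /var/lib/docker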

Additionally, reduce the number of build threads the gate uses to 4
to prevent the lockups and one-hour timeouts we have been seeing
recently in the gate.
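
The gate passes this cap through the test harness (the BuildTest
change below adds '--threads', '4' to build_args). Roughly the same
thing from a shell, assuming a tools/build.py entry point that
accepts the same --threads flag, would be:

    # hypothetical manual run mirroring the gate's new thread cap
    python tools/build.py --debug --threads 4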

The scripts that set up the gate are moved to the tools directory
rather than the tests directory to match the structure of the other
projects.

Partially-Implements: blueprint functional-testing-gate

Change-Id: I3e370f2382b6df36103d8b2ceda9b21d9b4229d5
SamYaple 2016-01-12 20:52:00 +00:00
parent e00dc579c1
commit 6adb5c0aa5
10 changed files with 220 additions and 202 deletions


@@ -1,49 +0,0 @@
#!/bin/bash
set -o xtrace
set -o errexit
DEV=$1
# (SamYaple)TODO: Remove the path overriding
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
cat | sudo tee /etc/yum.repos.d/docker.repo << EOF
[docker]
name=Docker Main Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/7
enabled=1
gpgcheck=1
gpgkey=https://yum.dockerproject.org/gpg
EOF
sudo yum install -y libffi-devel openssl-devel docker-engine xfsprogs
# Only do FS optimization if we have a secondary disk
if [[ -b /dev/${DEV} ]]; then
# Setup backing disk for use with Docker. This is to ensure we use the ephemeral
# disk provided to the build instance. It ensures the correct disk and storage
# driver are used for Docker. It is recommended to use the thin provisioning
# driver. https://github.com/docker/docker/blob/master/man/docker.1.md
sudo parted /dev/${DEV} -s -- mklabel msdos mkpart pri 1 -1
# Figure out the path to the partitioned device
PARTDEV=$(ls "/dev/${DEV}"* | egrep "/dev/${DEV}p?1")
sudo pvcreate ${PARTDEV}
sudo vgcreate kolla01 ${PARTDEV}
sudo lvcreate -n thin01 -L 60G kolla01
sudo lvcreate -n thin01meta -L 2G kolla01
yes | sudo lvconvert --type thin-pool --poolmetadata kolla01/thin01meta kolla01/thin01
# Setup Docker
sudo sed -i -r 's,(ExecStart)=(.+),\1=/usr/bin/docker daemon --storage-driver devicemapper --storage-opt dm.fs=xfs --storage-opt dm.thinpooldev=kolla01-thin01 --storage-opt dm.use_deferred_removal=true,' /usr/lib/systemd/system/docker.service
sudo systemctl daemon-reload
fi
sudo systemctl start docker
sudo docker info
# disable ipv6 until we're sure routes to fedora mirrors work properly
sudo sh -c 'echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf'
sudo /usr/sbin/sysctl -p
echo "Completed $0."


@@ -1,123 +0,0 @@
#!/bin/bash
set -o xtrace
set -o errexit
# Just for mandre :)
if [[ ! -f /etc/sudoers.d/jenkins ]]; then
echo "jenkins ALL=(:docker) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/jenkins
fi
function dump_node_info {
# NOTE(SamYaple): function for debugging gate
set +o errexit
local OLD_PATH="${PATH}"
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
sudo parted -l
sudo mount
df -h
uname -a
cat /etc/*release*
PATH="${OLD_PATH}"
set -o errexit
}
function detect_disk {
# TODO(SamYaple): This check could be much better, but should work for now
if [[ $(hostname | grep rax) ]]; then
export DEV="xvde"
else
echo "Assuming this is a hpcloud box"
export DEV="vdb"
fi
}
function setup_config {
# generate the config
tox -e genconfig
# Copy configs
sudo cp -a etc/kolla /etc/
}
function detect_distro {
DISTRO=$(ansible all -i "localhost," -msetup -clocal | awk -F\" '/ansible_os_family/ {print $4}')
}
function setup_ssh {
# Generate a new keypair that Ansible will use
ssh-keygen -f /home/jenkins/.ssh/kolla -N ''
cat /home/jenkins/.ssh/kolla.pub >> /home/jenkins/.ssh/authorized_keys
# Push the public key around to all of the nodes
for ip in $(cat /etc/nodepool/sub_nodes_private); do
scp /home/jenkins/.ssh/kolla.pub ${ip}:/home/jenkins/.ssh/authorized_keys
# TODO(SamYaple): Remove this root key pushing once Kolla doesn't
# require root anymore.
ssh ${ip} -i /home/jenkins/.ssh/kolla 'sudo mkdir -p /root/.ssh; sudo cp /home/jenkins/.ssh/* /root/.ssh/'
done
# From now on use the new IdentityFile for connecting to other hosts
echo "IdentityFile /home/jenkins/.ssh/kolla" >> /home/jenkins/.ssh/config
}
function setup_inventory {
local counter=0
detect_distro
if [[ "${DISTRO}" == "Debian" ]]; then
ANSIBLE_CONNECTION_TYPE=ssh
else
ANSIBLE_CONNECTION_TYPE=local
fi
echo -e "127.0.0.1\tlocalhost" > /tmp/hosts
for ip in $(cat /etc/nodepool/{node_private,sub_nodes_private}); do
: $((counter++))
echo -e "${ip}\tnode${counter} $(ssh ${ip} hostname)" >> /tmp/hosts
echo "node${counter} ansible_connection=${ANSIBLE_CONNECTION_TYPE}" >> ${RAW_INVENTORY}
done
sudo chown root: /tmp/hosts
sudo chmod 644 /tmp/hosts
sudo mv /tmp/hosts /etc/hosts
}
function setup_ansible {
RAW_INVENTORY=/tmp/kolla/raw_inventory
mkdir /tmp/kolla
sudo -H pip install "ansible<2" "docker-py>=1.6.0"
setup_inventory
# Record the running state of the environment as seen by the setup module
ansible all -i ${RAW_INVENTORY} -m setup > /tmp/logs/ansible/initial-setup
}
function setup_node {
detect_disk
ansible-playbook -i ${RAW_INVENTORY} -edocker_dev=${DEV} tests/setup_nodes.yml
}
function setup_logging {
# This directory is the directory that is copied with the devstack-logs
# publisher. It must exist at /home/jenkins/workspace/<job-name>/logs
mkdir logs
# For ease of access we symlink that logs directory to a known path
ln -s $(pwd)/logs /tmp/logs
mkdir -p /tmp/logs/{ansible,build}
}
setup_logging
(dump_node_info 2>&1) > /tmp/logs/node_info_first
setup_ssh
setup_ansible
setup_node
setup_config
(dump_node_info 2>&1) > /tmp/logs/node_info_last

tests/setup_gate.sh Symbolic link

@@ -0,0 +1 @@
../tools/setup_gate.sh


@@ -33,7 +33,7 @@ class BuildTest(base.BaseTestCase):
super(BuildTest, self).setUp()
self.useFixture(log_fixture.SetLogLevel([__name__],
logging.logging.INFO))
self.build_args = [__name__, "--debug"]
self.build_args = [__name__, "--debug", '--threads', '4']
@testtools.skipUnless(os.environ.get('DOCKER_BUILD_TEST'),
'Skip the docker build test')

tools/dump_info.sh Executable file

@@ -0,0 +1,26 @@
#!/bin/bash
set -o xtrace
function dump_node_info {
# NOTE(SamYaple): function for debugging gate
set +o errexit
local OLD_PATH="${PATH}"
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
sudo parted -l
sudo mount
df -h
uname -a
cat /etc/*release*
cat /proc/meminfo
PATH="${OLD_PATH}"
set -o errexit
}
(dump_node_info 2>&1) > /tmp/logs/node_info_$(date +%s)


@@ -3,7 +3,30 @@
set -o xtrace
set -o errexit
DEV=$1
function setup_disk {
sudo swapoff -a
sudo dd if=/dev/zero of=/swapfile bs=1M count=4096
sudo chmod 0600 /swapfile
sudo mkswap /swapfile
sudo /sbin/swapon /swapfile
sudo dd if=/dev/zero of=/docker bs=1M count=10240
losetup -f /docker
DEV=$(losetup -a | awk -F: '/\/docker/ {print $1}')
# Format Disks and setup Docker to use BTRFS
sudo parted ${DEV} -s -- mklabel msdos
sudo rm -rf /var/lib/docker
sudo mkdir /var/lib/docker
# We want to snapshot the entire docker directory so we have to first create a
# subvolume and use that as the root for the docker directory.
sudo mkfs.btrfs -f ${DEV}
sudo mount ${DEV} /var/lib/docker
sudo btrfs subvolume create /var/lib/docker/docker
sudo umount /var/lib/docker
sudo mount -o noatime,subvol=docker ${DEV} /var/lib/docker
}
# (SamYaple)TODO: Remove the path overriding
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
@@ -14,28 +37,10 @@ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58
sudo apt-get update
sudo apt-get install -y --no-install-recommends docker-engine btrfs-tools
# Only do FS optimization if we have a secondary disk
if [[ -b /dev/${DEV} ]]; then
# The reason for using BTRFS is stability. There are numerous issues with the
# devicemapper backend on Ubuntu and AUFS is slow. BTRFS is very solid as a
# backend in my experience. I use it almost exclusively.
# Format Disks and setup Docker to use BTRFS
sudo umount /dev/${DEV} || true
sudo parted /dev/${DEV} -s -- mklabel msdos
sudo service docker stop
echo 'DOCKER_OPTS="-s btrfs"' | sudo tee /etc/default/docker
sudo rm -rf /var/lib/docker/*
# We want to snapshot the entire docker directory so we have to first create a
# subvolume and use that as the root for the docker directory.
sudo mkfs.btrfs -f /dev/${DEV}
sudo mount /dev/${DEV} /var/lib/docker
sudo btrfs subvolume create /var/lib/docker/docker
sudo umount /var/lib/docker
sudo mount -o noatime,compress=lzo,space_cache,subvol=docker /dev/${DEV} /var/lib/docker
sudo service docker start
fi
sudo service docker stop
setup_disk
echo 'DOCKER_OPTS="-s btrfs"' | sudo tee /etc/default/docker
sudo service docker start
sudo docker info

tools/setup_RedHat.sh Executable file

@@ -0,0 +1,58 @@
#!/bin/bash
set -o xtrace
set -o errexit
function setup_disk {
sudo swapoff -a
sudo dd if=/dev/zero of=/swapfile bs=1M count=4096
sudo chmod 0600 /swapfile
sudo mkswap /swapfile
sudo /sbin/swapon /swapfile
sudo dd if=/dev/zero of=/docker bs=1M count=10240
losetup -f /docker
DEV=$(losetup -a | awk -F: '/\/docker/ {print $1}')
# Format Disks and setup Docker to use BTRFS
sudo parted ${DEV} -s -- mklabel msdos
sudo rm -rf /var/lib/docker
sudo mkdir /var/lib/docker
# We want to snapshot the entire docker directory so we have to first create a
# subvolume and use that as the root for the docker directory.
sudo mkfs.btrfs -f ${DEV}
sudo mount ${DEV} /var/lib/docker
sudo btrfs subvolume create /var/lib/docker/docker
sudo umount /var/lib/docker
sudo mount -o noatime,subvol=docker ${DEV} /var/lib/docker
}
# (SamYaple)TODO: Remove the path overriding
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
cat | sudo tee /etc/yum.repos.d/docker.repo << EOF
[docker]
name=Docker Main Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/7
enabled=1
gpgcheck=1
gpgkey=https://yum.dockerproject.org/gpg
EOF
sudo yum install -y libffi-devel openssl-devel docker-engine btrfs-progs
setup_disk
# Setup Docker
sudo sed -i -r 's,(ExecStart)=(.+),\1=/usr/bin/docker daemon --storage-driver btrfs,' /usr/lib/systemd/system/docker.service
sudo systemctl daemon-reload
sudo systemctl start docker
sudo docker info
# disable ipv6 until we're sure routes to fedora mirrors work properly
sudo sh -c 'echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf'
sudo /usr/sbin/sysctl -p
echo "Completed $0."

tools/setup_gate.sh Executable file

@@ -0,0 +1,92 @@
#!/bin/bash
set -o xtrace
set -o errexit
# Just for mandre :)
if [[ ! -f /etc/sudoers.d/jenkins ]]; then
echo "jenkins ALL=(:docker) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/jenkins
fi
function setup_config {
# generate the config
tox -e genconfig
# Copy configs
sudo cp -a etc/kolla /etc/
}
function detect_distro {
DISTRO=$(ansible all -i "localhost," -msetup -clocal | awk -F\" '/ansible_os_family/ {print $4}')
}
function setup_ssh {
# Generate a new keypair that Ansible will use
ssh-keygen -f /home/jenkins/.ssh/kolla -N ''
cat /home/jenkins/.ssh/kolla.pub >> /home/jenkins/.ssh/authorized_keys
# Push the public key around to all of the nodes
for ip in $(cat /etc/nodepool/sub_nodes_private); do
scp /home/jenkins/.ssh/kolla.pub ${ip}:/home/jenkins/.ssh/authorized_keys
# TODO(SamYaple): Remove this root key pushing once Kolla doesn't
# require root anymore.
ssh ${ip} -i /home/jenkins/.ssh/kolla 'sudo mkdir -p /root/.ssh; sudo cp /home/jenkins/.ssh/* /root/.ssh/'
done
# From now on use the new IdentityFile for connecting to other hosts
echo "IdentityFile /home/jenkins/.ssh/kolla" >> /home/jenkins/.ssh/config
}
function setup_inventory {
local counter=0
detect_distro
if [[ "${DISTRO}" == "Debian" ]]; then
ANSIBLE_CONNECTION_TYPE=ssh
else
ANSIBLE_CONNECTION_TYPE=local
fi
echo -e "127.0.0.1\tlocalhost" > /tmp/hosts
for ip in $(cat /etc/nodepool/{node_private,sub_nodes_private}); do
: $((counter++))
echo -e "${ip}\tnode${counter} $(ssh ${ip} hostname)" >> /tmp/hosts
echo "node${counter} ansible_connection=${ANSIBLE_CONNECTION_TYPE}" >> ${RAW_INVENTORY}
done
sudo chown root: /tmp/hosts
sudo chmod 644 /tmp/hosts
sudo mv /tmp/hosts /etc/hosts
}
function setup_ansible {
RAW_INVENTORY=/tmp/kolla/raw_inventory
mkdir /tmp/kolla
sudo -H pip install "ansible<2" "docker-py>=1.6.0"
setup_inventory
# Record the running state of the environment as seen by the setup module
ansible all -i ${RAW_INVENTORY} -m setup > /tmp/logs/ansible/initial-setup
}
function setup_node {
ansible-playbook -i ${RAW_INVENTORY} tools/setup_nodes.yml
}
function setup_logging {
# This directory is the directory that is copied with the devstack-logs
# publisher. It must exist at /home/jenkins/workspace/<job-name>/logs
mkdir logs
# For ease of access we symlink that logs directory to a known path
ln -s $(pwd)/logs /tmp/logs
mkdir -p /tmp/logs/{ansible,build}
}
setup_logging
tools/dump_info.sh
setup_ssh
setup_ansible
setup_node
setup_config


@@ -25,4 +25,4 @@
path: /tmp/{{ inventory_hostname }}
- name: Run node setup
shell: sudo /tmp/setup.sh {{ docker_dev }}
shell: sudo /tmp/setup.sh

tox.ini

@@ -45,8 +45,9 @@ commands =
{toxinidir}/tools/run-bashate.sh
[testenv:setupenv]
whitelist_externals = bash
commands = bash -c tests/setup_gate.sh
commands =
{toxinidir}/tools/setup_gate.sh
{toxinidir}/tools/dump_info.sh
[testenv:build-centos-binary]
whitelist_externals = find
@@ -58,6 +59,7 @@ commands =
find . -type f -name "*.pyc" -delete
bash -c "if [ ! -d .testrepository ]; then testr init; fi"
sudo -E -g docker testr run test_build.BuildTestCentosBinary
{toxinidir}/tools/dump_info.sh
[testenv:build-centos-source]
whitelist_externals = find
@@ -69,6 +71,7 @@ commands =
find . -type f -name "*.pyc" -delete
bash -c "if [ ! -d .testrepository ]; then testr init; fi"
sudo -E -g docker testr run test_build.BuildTestCentosSource
{toxinidir}/tools/dump_info.sh
[testenv:build-ubuntu-source]
whitelist_externals = find
@@ -80,6 +83,7 @@ commands =
find . -type f -name "*.pyc" -delete
bash -c "if [ ! -d .testrepository ]; then testr init; fi"
sudo -E -g docker testr run test_build.BuildTestUbuntuSource
{toxinidir}/tools/dump_info.sh
[testenv:deploy-centos-binary]
whitelist_externals = find
@@ -91,7 +95,8 @@ commands =
find . -type f -name "*.pyc" -delete
bash -c "if [ ! -d .testrepository ]; then testr init; fi"
sudo -E -g docker testr run test_build.DeployTestCentosBinary
sudo tests/deploy_aio.sh centos binary
sudo {toxinidir}/tools/deploy_aio.sh centos binary
{toxinidir}/tools/dump_info.sh
[testenv:deploy-centos-source]
whitelist_externals = find
@@ -103,7 +108,8 @@ commands =
find . -type f -name "*.pyc" -delete
bash -c "if [ ! -d .testrepository ]; then testr init; fi"
sudo -E -g docker testr run test_build.DeployTestCentosSource
sudo tests/deploy_aio.sh centos source
sudo {toxinidir}/tools/deploy_aio.sh centos source
{toxinidir}/tools/dump_info.sh
[testenv:deploy-ubuntu-source]
whitelist_externals = find
@@ -115,7 +121,8 @@ commands =
find . -type f -name "*.pyc" -delete
bash -c "if [ ! -d .testrepository ]; then testr init; fi"
sudo -E -g docker testr run test_build.DeployTestUbuntuSource
sudo tests/deploy_aio.sh ubuntu source
sudo {toxinidir}/tools/deploy_aio.sh ubuntu source
{toxinidir}/tools/dump_info.sh
[testenv:deploy-multinode-ubuntu-source]
whitelist_externals = find
@@ -127,6 +134,7 @@ commands =
find . -type f -name "*.pyc" -delete
bash -c "if [ ! -d .testrepository ]; then testr init; fi"
sudo -E -g docker testr run test_build.BuildTestUbuntuSource
{toxinidir}/tools/dump_info.sh
[testenv:genconfig]
whitelist_externals = which