Add grenade support

Zuul v3 does not yet offer native grenade base jobs. Work around it by
creating a legacy grenade-devstack-octavia playbook.

Change-Id: Ib81557ee75ff2ea365ca8403dcfe6e6724657f75
changes/54/549654/37
Carlos Goncalves 5 years ago
parent b9a8309d50
commit 13eab152fb
  1. 128
      devstack/upgrade/resources.sh
  2. 11
      devstack/upgrade/settings
  3. 24
      devstack/upgrade/shutdown.sh
  4. 70
      devstack/upgrade/upgrade.sh
  5. 4
      devstack/upgrade/vm_user_data.sh
  6. 15
      playbooks/legacy/grenade-devstack-octavia/post.yaml
  7. 60
      playbooks/legacy/grenade-devstack-octavia/run.yaml
  8. 12
      zuul.d/jobs.yaml
  9. 13
      zuul.d/projects.yaml

@ -0,0 +1,128 @@
#!/bin/bash
#
# Grenade resource script for Octavia: creates a load balancer plus two
# backend VMs before the upgrade, verifies them across it, and destroys
# them afterwards. Grenade invokes this script with one of: create,
# verify, verify_noapi, destroy, force_destroy.

set -o errexit

# Grenade settings/helpers and DevStack credentials (admin user, demo project)
source $GRENADE_DIR/grenaderc
source $GRENADE_DIR/functions
source $TOP_DIR/openrc admin demo

set -o xtrace

# Locations and resource names; all overridable from the environment.
OCTAVIA_GRENADE_DIR=$(dirname $0)
INSTANCE_USER_DATA_FILE=$OCTAVIA_GRENADE_DIR/vm_user_data.sh
DEFAULT_INSTANCE_FLAVOR=${DEFAULT_INSTANCE_FLAVOR:-m1.tiny}
PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"}
PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"}
# Poll an Octavia resource until it reaches the desired statuses.
#
# $1: desired provisioning_status (e.g. ACTIVE)
# $2: desired operating_status (e.g. ONLINE)
# $3..n: "openstack loadbalancer ... show" command with arguments
#
# Dies if the resource enters provisioning_status ERROR, or if the desired
# statuses are not reached within $OCTAVIA_WAIT_TIMEOUT (default 600)
# seconds.
#
# BUG FIX: the original compared against hardcoded "ACTIVE"/"ONLINE" and
# ignored $1/$2 entirely; now the requested statuses are honored. The
# timeout implements the previous TODO.
function _wait_for_status {
    local desired_provisioning_status=$1
    local desired_operating_status=$2
    local timeout=${OCTAVIA_WAIT_TIMEOUT:-600}
    local waited=0
    while :
    do
        # "-f shell" prints provisioning_status="..." / operating_status="..."
        # assignments; eval pulls them into shell variables.
        eval $("${@:3}" -f shell -c provisioning_status -c operating_status)
        [[ $operating_status == "$desired_operating_status" && \
           $provisioning_status == "$desired_provisioning_status" ]] && break
        if [ $provisioning_status == "ERROR" ]; then
            die $LINENO "ERROR creating load balancer"
        fi
        if [ $waited -ge $timeout ]; then
            die $LINENO "Timed out waiting for status $1/$2"
        fi
        sleep 10
        waited=$((waited + 10))
    done
}
# Create the resources that must survive the upgrade: two backend web-server
# VMs and a load balancer (listener, pool, health monitor, two members)
# fronting them. Saves the security group rule id and the LB VIP address
# via grenade's resource_save for later phases.
function create {
    # TODO(cgoncalves): make create idempotent for resiliency in testing

    # NOTE(cgoncalves): OS_USERNAME=demo is set to overcome security group name collision
    sc_rule_id=$(OS_USERNAME=demo openstack security group rule create -f value -c id --protocol tcp --ingress --dst-port 80 default)
    resource_save octavia sc_rule_id $sc_rule_id

    # create VMs that will serve as the pool members
    vm1_ips=$(openstack server create -f value -c addresses --user-data $INSTANCE_USER_DATA_FILE --flavor $DEFAULT_INSTANCE_FLAVOR --image $DEFAULT_IMAGE_NAME --network $PRIVATE_NETWORK_NAME --wait vm1)
    vm2_ips=$(openstack server create -f value -c addresses --user-data $INSTANCE_USER_DATA_FILE --flavor $DEFAULT_INSTANCE_FLAVOR --image $DEFAULT_IMAGE_NAME --network $PRIVATE_NETWORK_NAME --wait vm2)

    # extract the IPv4 address from each server's "addresses" field
    vm1_ipv4=$(echo $vm1_ips | grep -oE '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+')
    vm2_ipv4=$(echo $vm2_ips | grep -oE '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+')

    # build the load balancer stack, waiting for each child object to go
    # ACTIVE/ONLINE before creating the next one
    openstack loadbalancer create --name lb1 --vip-subnet-id $PUBLIC_SUBNET_NAME
    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer show lb1
    openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 lb1
    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer listener show listener1
    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP
    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer pool show pool1
    openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type HTTP --url-path / --name hm1 pool1
    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer healthmonitor show hm1
    openstack loadbalancer member create --subnet-id $PRIVATE_SUBNET_NAME --address $vm1_ipv4 --protocol-port 80 pool1 --name member1
    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer member show pool1 member1
    openstack loadbalancer member create --subnet-id $PRIVATE_SUBNET_NAME --address $vm2_ipv4 --protocol-port 80 pool1 --name member2
    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer member show pool1 member2

    # remember the VIP so verify/verify_noapi can hit the data plane
    lb_vip_ip=$(openstack loadbalancer show -f value -c vip_address lb1)
    resource_save octavia lb_vip_ip $lb_vip_ip

    echo "Octavia create: SUCCESS"
}
# Verify both planes after the upgrade: the control plane (API reports every
# object ONLINE) and the data plane (the VIP answers with HTTP 200).
# Any failed grep/curl exits the script via errexit.
function verify {
    # verify control plane
    openstack loadbalancer show -f value -c operating_status lb1 | grep -q ONLINE
    openstack loadbalancer listener show -f value -c operating_status listener1 | grep -q ONLINE
    openstack loadbalancer pool show -f value -c operating_status pool1 | grep -q ONLINE
    openstack loadbalancer healthmonitor show -f value -c operating_status hm1 | grep -q ONLINE
    openstack loadbalancer member show -f value -c operating_status pool1 member1 | grep -q ONLINE
    openstack loadbalancer member show -f value -c operating_status pool1 member2 | grep -q ONLINE

    # verify data plane: fetch through the VIP saved by create() and check
    # the status line in the dumped response headers
    lb_vip_ip=$(resource_get octavia lb_vip_ip)
    curl --include -D lb.out $lb_vip_ip
    grep -q "^HTTP/1.1 200 OK" lb.out

    echo "Octavia verify: SUCCESS"
}
# Verify the data plane only — used while the Octavia API is down during
# the upgrade, so no control-plane calls are made here.
function verify_noapi {
    # verify data plane: traffic through the VIP must still succeed
    lb_vip_ip=$(resource_get octavia lb_vip_ip)
    curl --include -D lb.out $lb_vip_ip
    grep -q "^HTTP/1.1 200 OK" lb.out

    echo "Octavia verify_noapi: SUCCESS"
}
# Tear down everything create() made. Used by both "destroy" and
# "force_destroy" (the latter with errexit disabled).
function destroy {
    sc_rule_id=$(resource_get octavia sc_rule_id)
    # make destroy idempotent for resiliency in testing
    # (a failing "show" inside an && list does not trigger errexit, so
    # resources that are already gone are skipped silently)
    openstack loadbalancer show lb1 && openstack loadbalancer delete --cascade lb1
    openstack server show vm1 && openstack server delete vm1
    openstack server show vm2 && openstack server delete vm2
    openstack security group rule show $sc_rule_id && openstack security group rule delete $sc_rule_id

    echo "Octavia destroy: SUCCESS"
}
# Dispatcher: route the grenade phase name ($1) to the matching handler.
# Unknown phases fall through and do nothing, exactly like the original
# case statement without a default arm.
phase=$1
if [[ $phase == "create" ]]; then
    create
elif [[ $phase == "verify_noapi" ]]; then
    verify_noapi
elif [[ $phase == "verify" ]]; then
    verify
elif [[ $phase == "destroy" ]]; then
    destroy
elif [[ $phase == "force_destroy" ]]; then
    # best-effort teardown: keep going even if individual deletes fail
    set +o errexit
    destroy
fi

@ -0,0 +1,11 @@
# Grenade settings for the Octavia upgrade test.

# Let grenade manage the octavia project and preserve its database across
# the upgrade.
register_project_for_upgrade octavia
register_db_to_save octavia

# Enable the Octavia DevStack plugin and services on both sides of the
# upgrade: the base side deploys stable/queens, the target side deploys
# the code under test.
devstack_localrc base enable_plugin octavia https://git.openstack.org/openstack/octavia stable/queens
devstack_localrc target enable_plugin octavia https://git.openstack.org/openstack/octavia
devstack_localrc base enable_service o-api o-cw o-hm o-hk octavia
devstack_localrc target enable_service o-api o-cw o-hm o-hk octavia

# Skip DevStack smoke tests on both sides of the upgrade.
BASE_RUN_SMOKE=False
TARGET_RUN_SMOKE=False

@ -0,0 +1,24 @@
#!/bin/bash
#
# Grenade shutdown script: stops the Octavia services running from the base
# (pre-upgrade) DevStack and sanity-checks that they are actually down.

set -o errexit

source $GRENADE_DIR/grenaderc
source $GRENADE_DIR/functions

# We need base DevStack functions for this
source $BASE_DEVSTACK_DIR/functions
source $BASE_DEVSTACK_DIR/stackrc # needed for status directory
source $BASE_DEVSTACK_DIR/lib/tls
source $BASE_DEVSTACK_DIR/lib/apache
source $BASE_DEVSTACK_DIR/lib/neutron

# The Octavia plugin lives two directories above this script
# (devstack/upgrade/ -> devstack/); its plugin.sh provides octavia_stop.
OCTAVIA_DEVSTACK_DIR=$(dirname $(dirname $0))
source $OCTAVIA_DEVSTACK_DIR/settings
source $OCTAVIA_DEVSTACK_DIR/plugin.sh

set -o xtrace

octavia_stop

# sanity check that service is actually down
ensure_services_stopped o-api o-cw o-hk o-hm

@ -0,0 +1,70 @@
#!/usr/bin/env bash

# ``upgrade-octavia``
#
# Grenade upgrade script for Octavia: installs the target code, migrates
# the database, and restarts the services on the new side.

echo "*********************************************************************"
echo "Begin $0"
echo "*********************************************************************"

# Clean up any resources that may be in use
cleanup() {
    set +o errexit

    echo "********************************************************************"
    echo "ERROR: Abort $0"
    echo "********************************************************************"

    # Kill ourselves to signal any calling process
    trap 2; kill -2 $$
}

trap cleanup SIGHUP SIGINT SIGTERM

# Keep track of the grenade directory
RUN_DIR=$(cd $(dirname "$0") && pwd)

# Source params
source $GRENADE_DIR/grenaderc

# Import common functions
source $GRENADE_DIR/functions

# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit

# Upgrade octavia
# ============

# Get functions from current DevStack
source $TARGET_DEVSTACK_DIR/stackrc
source $TARGET_DEVSTACK_DIR/lib/apache
source $TARGET_DEVSTACK_DIR/lib/tls

# Pull in the Octavia plugin relative to this script's location
# (devstack/upgrade/upgrade.sh -> devstack/).
source $(dirname $(dirname $BASH_SOURCE))/settings
source $(dirname $(dirname $BASH_SOURCE))/plugin.sh

# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following allowing as the install occurs.
set -o xtrace

# Save current config files for posterity
[[ -d $SAVE_DIR/etc.octavia ]] || cp -pr $OCTAVIA_CONF_DIR $SAVE_DIR/etc.octavia

# Install the target octavia
octavia_install

# calls upgrade-octavia for specific release
upgrade_project octavia $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH

# Migrate the database
# BUG FIX: the original passed $LINO (undefined, expands empty) instead of
# $LINENO, so die() would have reported no line number.
octavia-db-manage upgrade head || die $LINENO "DB migration error"

octavia_start

# Don't succeed unless the services come up
ensure_services_started o-api o-cw o-hm o-hk

set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End $0"
echo "*********************************************************************"

@ -0,0 +1,4 @@
#!/bin/sh -v
# Cloud-init user data for the backend test VMs: answer every TCP
# connection on port 80 with a minimal HTTP 200 response whose body is the
# VM's hostname, forever (nc exits after each client, so loop).
Body=$(hostname)
Response="HTTP/1.1 200 OK\r\nContent-Length: ${#Body}\r\n\r\n$Body"
# BUG FIX: quote the expansion — unquoted $Response was subject to word
# splitting (whitespace collapse) and globbing before echo -e saw it.
while true ; do echo -e "$Response" | nc -llp 80; done

@ -0,0 +1,15 @@
# Post-run playbook for the legacy grenade job: pull log files from the
# node's workspace back to the Zuul executor's log root. The rsync filter
# keeps only /logs/** (plus the directories needed to reach it).
- hosts: primary
  tasks:

    - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
      synchronize:
        src: '{{ ansible_user_dir }}/workspace/'
        dest: '{{ zuul.executor.log_root }}'
        mode: pull
        copy_links: true
        verify_host: true
        rsync_opts:
          - --include=/logs/**
          - --include=*/
          - --exclude=*
          - --prune-empty-dirs

@ -0,0 +1,60 @@
# Run playbook for the legacy grenade job: clone devstack-gate with
# zuul-cloner, then drive a grenade "pullup" upgrade of Octavia through
# devstack-gate.
- hosts: all
  name: legacy-grenade-dsvm-octavia
  tasks:

    - name: Ensure legacy workspace directory
      file:
        path: '{{ ansible_user_dir }}/workspace'
        state: directory

    # Fetch devstack-gate into the workspace.
    - shell:
        cmd: |
          set -e
          set -x
          cat > clonemap.yaml << EOF
          clonemap:
            - name: openstack-infra/devstack-gate
              dest: devstack-gate
          EOF
          /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
              git://git.openstack.org \
              openstack-infra/devstack-gate
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    # Configure and run the devstack-gate grenade (pullup) job.
    - shell:
        cmd: |
          set -e
          set -x
          export PYTHONUNBUFFERED=true
          export PROJECTS="openstack-dev/grenade $PROJECTS"
          export PROJECTS="openstack/octavia $PROJECTS"
          export PROJECTS="openstack/octavia-tempest-plugin $PROJECTS"
          export PROJECTS="openstack/python-octaviaclient $PROJECTS"
          export DEVSTACK_PROJECT_FROM_GIT="python-octaviaclient $DEVSTACK_PROJECT_FROM_GIT"
          export GRENADE_PLUGINRC="enable_grenade_plugin octavia https://git.openstack.org/openstack/octavia"
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"export TEMPEST_PLUGINS='/opt/stack/new/octavia-tempest-plugin'"
          export DEVSTACK_GATE_TEMPEST=1
          export DEVSTACK_GATE_GRENADE=pullup
          export BRANCH_OVERRIDE=default
          if [ "$BRANCH_OVERRIDE" != "default" ] ; then
              export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
          fi

          # Add configuration values for enabling security features in local.conf
          function pre_test_hook {
              if [ -f /opt/stack/old/octavia-tempest-plugin/tools/pre_test_hook.sh ] ; then
                  . /opt/stack/old/octavia-tempest-plugin/tools/pre_test_hook.sh
              fi
          }
          export -f pre_test_hook

          cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
          ./safe-devstack-vm-gate-wrap.sh
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

@ -110,3 +110,15 @@
amphora_os: centos
amphora_image_size: 3
amphora_os_release: 7
# Legacy grenade upgrade job: deploys Octavia from the previous stable
# branch via devstack-gate/grenade, then upgrades to the change under test.
- job:
    name: octavia-grenade
    parent: octavia-legacy-dsvm-base
    run: playbooks/legacy/grenade-devstack-octavia/run.yaml
    post-run: playbooks/legacy/grenade-devstack-octavia/post.yaml
    required-projects:
      - openstack-dev/grenade
      - openstack-infra/devstack-gate
      - openstack/octavia
      - openstack/octavia-tempest-plugin
      - openstack/python-octaviaclient

@ -49,6 +49,19 @@
- octavia-v2-dsvm-py35-scenario:
voting: false
branches: ^(?!stable/ocata).*$
        # Non-voting grenade upgrade job; skipped on branches with no
        # previous release to upgrade from, and on docs/test-only changes.
        - octavia-grenade:
            branches: ^(?!stable/(ocata|queens)).*$
            voting: false
            irrelevant-files:
              - ^.*\.rst$
              - ^api-ref/.*$
              - ^doc/.*$
              - ^octavia/tests/unit/.*$
              - ^releasenotes/.*$
              - ^setup.cfg$
              - ^tools/.*$
              - ^(test-|)requirements.txt$
              - ^tox.ini$
gate:
queue: octavia
jobs:

Loading…
Cancel
Save