
zuul: Replace grenade and nova-grenade-multinode with grenade-multinode

If2608406776e0d5a06b726e65b55881e70562d18 dropped the single node
grenade job from the integrated-gate-compute template as it duplicated
the existing grenade-multinode job. However, it did not remove the
remaining single node grenade job still present in the Nova project.

This change replaces the dsvm-based nova-grenade-multinode job with a
zuulv3-native job based on grenade-multinode.

Various legacy playbooks and hook scripts are also removed as they are
no longer used. Note that this does result in a loss of ceph coverage
that should be restored as soon as a zuulv3-native ceph-based multinode
job is available.
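
As a rough illustration of that follow-up work, such a job could be
added back along the lines of the sketch below. This is hypothetical
only: the job, parent, and nodeset names are assumptions and would need
to match whatever devstack-plugin-ceph actually provides.

# Hypothetical sketch: a zuulv3-native multinode ceph job to restore the
# lost coverage. Job, parent, and nodeset names here are assumptions.
cat >> .zuul.yaml << 'EOF'
- job:
    name: nova-live-migration-ceph
    parent: devstack-plugin-ceph-tempest-py3  # assumed parent job name
    nodeset: openstack-two-node-focal         # assumed two-node nodeset
    vars:
      tox_envlist: all
      tempest_test_regex: ^tempest\.api\.compute\.admin\.(test_live_migration|test_migration)
EOF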

Conflicts:
    playbooks/legacy/nova-grenade-multinode/run.yaml

NOTE(melwitt): The conflict is because change
I51f0273e90ee39d644cf85a0bdb9d4f95de6d3c7 ([OVN] Explicitly set grenade
job to ML2/OVS) is not in Wallaby.

Change-Id: I02b2b851a74f24816d2f782a66d94de81ee527b0
(cherry picked from commit 91e53e4c2b)
Lee Yarwood authored 5 months ago; committed by melanie witt
Ref: changes/45/794345/1
Parent commit: c45bedd98d
9 changed files:

1. .zuul.yaml (32 lines changed)
2. gate/live_migration/hooks/ceph.sh (208 lines deleted)
3. gate/live_migration/hooks/nfs.sh (50 lines deleted)
4. gate/live_migration/hooks/run_tests.sh (72 lines deleted)
5. gate/live_migration/hooks/utils.sh (11 lines deleted)
6. playbooks/legacy/nova-grenade-multinode/post.yaml (15 lines deleted)
7. playbooks/legacy/nova-grenade-multinode/run.yaml (65 lines deleted)
8. playbooks/legacy/nova-live-migration/post.yaml (15 lines deleted)
9. playbooks/legacy/nova-live-migration/run.yaml (60 lines deleted)

.zuul.yaml (32 lines changed)

@@ -287,22 +287,24 @@
 - job:
     name: nova-grenade-multinode
-    parent: nova-dsvm-multinode-base
+    parent: grenade-multinode
     description: |
-      Multi-node grenade job which runs gate/live_migration/hooks tests under
-      python 3.
-      In other words, this tests live and cold migration and resize with
-      mixed-version compute services which is important for things like
-      rolling upgrade support.
+      Run a multinode grenade job and run the smoke, cold and live migration
+      tests with the controller upgraded and the compute on the older release.
       The former names for this job were "nova-grenade-live-migration" and
       "legacy-grenade-dsvm-neutron-multinode-live-migration".
-    run: playbooks/legacy/nova-grenade-multinode/run.yaml
-    post-run: playbooks/legacy/nova-grenade-multinode/post.yaml
-    required-projects:
-      - openstack/grenade
-      - openstack/devstack-gate
-      - openstack/nova
     irrelevant-files: *dsvm-irrelevant-files
+    vars:
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            compute-feature-enabled:
+              live_migration: true
+              volume_backed_live_migration: true
+              block_migration_for_live_migration: true
+              block_migrate_cinder_iscsi: true
+      tox_envlist: all
+      tempest_test_regex: ((tempest\.(api\.compute|scenario)\..*smoke.*)|(^tempest\.api\.compute\.admin\.(test_live_migration|test_migration)))

 - job:
     name: nova-multi-cell
@@ -454,7 +456,6 @@
             # so that we only run it on changes to networking and libvirt/vif
             # code; we don't need to run this on all changes.
             - ^(?!nova/network/.*)(?!nova/virt/libvirt/vif.py).*$
-        - nova-grenade-multinode
         - nova-live-migration
         - nova-lvm
         - nova-multi-cell
@@ -481,7 +482,7 @@
             - ^setup.cfg$
             - ^tools/.*$
             - ^tox.ini$
-        - grenade:
+        - nova-grenade-multinode:
             irrelevant-files: *policies-irrelevant-files
         - tempest-ipv6-only:
             irrelevant-files: *dsvm-irrelevant-files
@@ -495,7 +496,6 @@
             voting: false
     gate:
       jobs:
-        - nova-grenade-multinode
         - nova-live-migration
         - nova-tox-functional-py38
         - nova-multi-cell
@@ -510,7 +510,7 @@
             - ^(?!nova/network/.*)(?!nova/virt/libvirt/vif.py).*$
         - tempest-integrated-compute:
             irrelevant-files: *policies-irrelevant-files
-        - grenade:
+        - nova-grenade-multinode:
             irrelevant-files: *policies-irrelevant-files
         - tempest-ipv6-only:
             irrelevant-files: *dsvm-irrelevant-files
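
The tempest_test_regex above selects the compute API and scenario smoke
tests plus the admin migration tests. A quick local sanity check of what
it matches (assuming GNU grep with PCRE support; the test IDs below are
illustrative examples, not an exhaustive list):

# Check which illustrative tempest test names the job regex selects.
regex='((tempest\.(api\.compute|scenario)\..*smoke.*)|(^tempest\.api\.compute\.admin\.(test_live_migration|test_migration)))'
printf '%s\n' \
    'tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers[smoke]' \
    'tempest.api.compute.admin.test_live_migration.LiveMigrationTest.test_live_block_migration' \
    'tempest.api.compute.admin.test_migration.MigrationsAdminTest.test_resize_server_revert' \
    'tempest.api.identity.v3.test_tokens.TokensV3Test.test_create_token' \
    | grep -P "$regex"
# The first three names match (smoke plus migration tests); the last does not.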

gate/live_migration/hooks/ceph.sh (208 lines deleted)

@@ -1,208 +0,0 @@
#!/bin/bash

function prepare_ceph {
    git clone https://opendev.org/openstack/devstack-plugin-ceph /tmp/devstack-plugin-ceph
    source /tmp/devstack-plugin-ceph/devstack/settings
    source /tmp/devstack-plugin-ceph/devstack/lib/ceph
    install_ceph
    configure_ceph
    #install ceph-common package and additional python3 ceph libraries on compute nodes
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m raw -a "executable=/bin/bash
    USE_PYTHON3=${USE_PYTHON3:-True}
    source $BASE/new/devstack/functions
    source $BASE/new/devstack/functions-common
    git clone https://opendev.org/openstack/devstack-plugin-ceph /tmp/devstack-plugin-ceph
    source /tmp/devstack-plugin-ceph/devstack/lib/ceph
    install_ceph_remote
    "

    #copy ceph admin keyring to compute nodes
    sudo cp /etc/ceph/ceph.client.admin.keyring /tmp/ceph.client.admin.keyring
    sudo chown ${STACK_USER}:${STACK_USER} /tmp/ceph.client.admin.keyring
    sudo chmod 644 /tmp/ceph.client.admin.keyring
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.admin.keyring dest=/etc/ceph/ceph.client.admin.keyring owner=ceph group=ceph"
    sudo rm -f /tmp/ceph.client.admin.keyring
    #copy ceph.conf to compute nodes
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/etc/ceph/ceph.conf dest=/etc/ceph/ceph.conf owner=root group=root"

    start_ceph
}

function _ceph_configure_glance {
    GLANCE_API_CONF=${GLANCE_API_CONF:-/etc/glance/glance-api.conf}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} \
        mon "allow r" \
        osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | \
        sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring

    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=DEFAULT option=show_image_direct_url value=True"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=default_store value=rbd"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=stores value='file, http, rbd'"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=rbd_store_ceph_conf value=$CEPH_CONF_FILE"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=rbd_store_user value=$GLANCE_CEPH_USER"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=rbd_store_pool value=$GLANCE_CEPH_POOL"

    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi

    #copy glance keyring to compute only node
    sudo cp /etc/ceph/ceph.client.glance.keyring /tmp/ceph.client.glance.keyring
    sudo chown $STACK_USER:$STACK_USER /tmp/ceph.client.glance.keyring
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.glance.keyring dest=/etc/ceph/ceph.client.glance.keyring"
    sudo rm -f /tmp/ceph.client.glance.keyring
}

function configure_and_start_glance {
    _ceph_configure_glance

    echo 'check processes before glance-api stop'
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep glance-api"

    # restart glance
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl restart devstack@g-api"

    echo 'check processes after glance-api stop'
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep glance-api"
}

function _ceph_configure_nova {
    #setup ceph for nova, we don't reuse configure_ceph_nova - as we need to emulate case where cinder is not configured for ceph
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
    NOVA_CONF=${NOVA_CPU_CONF:-/etc/nova/nova.conf}
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=rbd_user value=${CINDER_CEPH_USER}"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=rbd_secret_uuid value=${CINDER_CEPH_UUID}"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=inject_key value=false"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=inject_partition value=-2"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=disk_cachemodes value='network=writeback'"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=images_type value=rbd"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=images_rbd_pool value=${NOVA_CEPH_POOL}"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=images_rbd_ceph_conf value=${CEPH_CONF_FILE}"

    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \
        mon "allow r" \
        osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \
        sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null
    sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring

    #copy cinder keyring to compute only node
    sudo cp /etc/ceph/ceph.client.cinder.keyring /tmp/ceph.client.cinder.keyring
    sudo chown stack:stack /tmp/ceph.client.cinder.keyring
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.cinder.keyring dest=/etc/ceph/ceph.client.cinder.keyring"
    sudo rm -f /tmp/ceph.client.cinder.keyring

    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}

function _wait_for_nova_compute_service_state {
    source $BASE/new/devstack/openrc admin admin
    local status=$1
    local attempt=1
    local max_attempts=24
    local attempt_sleep=5
    local computes_count=$(openstack compute service list | grep -c nova-compute)
    local computes_ready=$(openstack compute service list | grep nova-compute | grep $status | wc -l)

    echo "Waiting for $computes_count computes to report as $status"
    while [ "$computes_ready" -ne "$computes_count" ]; do
        if [ "$attempt" -eq "$max_attempts" ]; then
            echo "Failed waiting for computes to report as ${status}, ${computes_ready}/${computes_count} ${status} after ${max_attempts} attempts"
            exit 4
        fi
        echo "Waiting ${attempt_sleep} seconds for ${computes_count} computes to report as ${status}, ${computes_ready}/${computes_count} ${status} after ${attempt}/${max_attempts} attempts"
        sleep $attempt_sleep
        attempt=$((attempt+1))
        computes_ready=$(openstack compute service list | grep nova-compute | grep $status | wc -l)
    done
    echo "All computes are now reporting as ${status} after ${attempt} attempts"
}

function configure_and_start_nova {
    echo "Checking all n-cpu services"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "pgrep -u stack -a nova-compute"

    # stop nova-compute
    echo "Stopping all n-cpu services"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl stop devstack@n-cpu"

    # Wait for the service to be marked as down
    _wait_for_nova_compute_service_state "down"

    _ceph_configure_nova

    #import secret to libvirt
    _populate_libvirt_secret

    # start nova-compute
    echo "Starting all n-cpu services"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl start devstack@n-cpu"

    echo "Checking all n-cpu services"
    # test that they are all running again
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "pgrep -u stack -a nova-compute"

    # Wait for the service to be marked as up
    _wait_for_nova_compute_service_state "up"
}

function _ceph_configure_cinder {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi

    CINDER_CONF=${CINDER_CONF:-/etc/cinder/cinder.conf}
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=volume_backend_name value=ceph"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=volume_driver value=cinder.volume.drivers.rbd.RBDDriver"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_ceph_conf value=$CEPH_CONF_FILE"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_pool value=$CINDER_CEPH_POOL"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_user value=$CINDER_CEPH_USER"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_uuid value=$CINDER_CEPH_UUID"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_flatten_volume_from_snapshot value=False"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_max_clone_depth value=5"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=DEFAULT option=default_volume_type value=ceph"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=DEFAULT option=enabled_backends value=ceph"
}

function configure_and_start_cinder {
    _ceph_configure_cinder

    # restart cinder
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl restart devstack@c-vol"

    source $BASE/new/devstack/openrc
    export OS_USERNAME=admin
    export OS_PROJECT_NAME=admin
    lvm_type=$(cinder type-list | awk -F "|" 'NR==4{ print $2}')
    cinder type-delete $lvm_type
    openstack volume type create --os-volume-api-version 1 --property volume_backend_name="ceph" ceph
}

function _populate_libvirt_secret {
    cat > /tmp/secret.xml <<EOF
<secret ephemeral='no' private='no'>
   <uuid>${CINDER_CEPH_UUID}</uuid>
   <usage type='ceph'>
      <name>client.${CINDER_CEPH_USER} secret</name>
   </usage>
</secret>
EOF

    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/secret.xml dest=/tmp/secret.xml"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "virsh secret-define --file /tmp/secret.xml"
    local secret=$(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
    # TODO(tdurakov): remove this escaping as https://github.com/ansible/ansible/issues/13862 fixed
    secret=${secret//=/'\='}
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $secret"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m file -a "path=/tmp/secret.xml state=absent"
}

gate/live_migration/hooks/nfs.sh (50 lines deleted)

@@ -1,50 +0,0 @@
#!/bin/bash

function nfs_setup {
    if uses_debs; then
        module=apt
    elif is_fedora; then
        module=yum
    fi
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m $module \
        -a "name=nfs-common state=present"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m $module \
        -a "name=nfs-kernel-server state=present"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=/etc/idmapd.conf section=Mapping option=Nobody-User value=nova"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=/etc/idmapd.conf section=Mapping option=Nobody-Group value=nova"

    for SUBNODE in $SUBNODES ; do
        $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m lineinfile -a "dest=/etc/exports line='/opt/stack/data/nova/instances $SUBNODE(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash)'"
    done

    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "exportfs -a"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m service -a "name=nfs-kernel-server state=restarted"
    GetDistro
    if [[ ! ${DISTRO} =~ (xenial) ]]; then
        $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m service -a "name=idmapd state=restarted"
    fi
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p tcp --dport 111 -j ACCEPT"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p udp --dport 111 -j ACCEPT"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p tcp --dport 2049 -j ACCEPT"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p udp --dport 2049 -j ACCEPT"
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "mount -t nfs4 -o proto\=tcp,port\=2049 $primary_node:/ /opt/stack/data/nova/instances/"
}

function nfs_configure_tempest {
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$BASE/new/tempest/etc/tempest.conf section=compute-feature-enabled option=block_migration_for_live_migration value=False"
}

function nfs_verify_setup {
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m file -a "path=/opt/stack/data/nova/instances/test_file state=touch"
    if [ ! -e '/opt/stack/data/nova/instances/test_file' ]; then
        die $LINENO "NFS configuration failure"
    fi
}

function nfs_teardown {
    #teardown nfs shared storage
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "umount -t nfs4 /opt/stack/data/nova/instances/"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m service -a "name=nfs-kernel-server state=stopped"
}

gate/live_migration/hooks/run_tests.sh (72 lines deleted)

@@ -1,72 +0,0 @@
#!/bin/bash
# Live migration dedicated ci job will be responsible for testing different
# environments based on underlying storage, used for ephemerals.
# This hook allows injecting the logic of environment reconfiguration into
# the ci job. Base scenario for this would be:
#
# 1. test with all local storage (use default for volumes)
# 2. test with NFS for root + ephemeral disks
# 3. test with Ceph for root + ephemeral disks
# 4. test with Ceph for volumes and root + ephemeral disk

set -xe
cd $BASE/new/tempest

source $BASE/new/devstack/functions
source $BASE/new/devstack/functions-common
source $BASE/new/devstack/lib/nova
source $WORKSPACE/devstack-gate/functions.sh
source $BASE/new/nova/gate/live_migration/hooks/utils.sh
source $BASE/new/nova/gate/live_migration/hooks/nfs.sh
source $BASE/new/nova/gate/live_migration/hooks/ceph.sh
primary_node=$(cat /etc/nodepool/primary_node_private)
SUBNODES=$(cat /etc/nodepool/sub_nodes_private)
SERVICE_HOST=$primary_node
STACK_USER=${STACK_USER:-stack}

echo '1. test with all local storage (use default for volumes)'
echo 'NOTE: test_volume_backed_live_migration is skipped due to https://bugs.launchpad.net/nova/+bug/1524898'
echo 'NOTE: test_live_block_migration_paused is skipped due to https://bugs.launchpad.net/nova/+bug/1901739'
run_tempest "block migration test" "^.*test_live_migration(?!.*(test_volume_backed_live_migration|test_live_block_migration_paused))"

# TODO(mriedem): Run $BASE/new/nova/gate/test_evacuate.sh for local storage

#all tests below this line use shared storage, need to update tempest.conf
echo 'disabling block_migration in tempest'
$ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$BASE/new/tempest/etc/tempest.conf section=compute-feature-enabled option=block_migration_for_live_migration value=False"

echo '2. NFS testing is skipped due to setup failures with Ubuntu 16.04'
#echo '2. test with NFS for root + ephemeral disks'
#nfs_setup
#nfs_configure_tempest
#nfs_verify_setup
#run_tempest "NFS shared storage test" "live_migration"
#nfs_teardown

# The nova-grenade-multinode job also runs resize and cold migration tests
# so we check for a grenade-only variable.
if [[ -n "$GRENADE_NEW_BRANCH" ]]; then
    echo '3. test cold migration and resize'
    run_tempest "cold migration and resize test" "test_resize_server|test_cold_migration|test_revert_cold_migration"
else
    echo '3. cold migration and resize is skipped for non-grenade jobs'
fi

echo '4. test with Ceph for root + ephemeral disks'
# Discover and set variables for the OS version so the devstack-plugin-ceph
# scripts can find the correct repository to install the ceph packages.
GetOSVersion
USE_PYTHON3=${USE_PYTHON3:-True}
prepare_ceph
GLANCE_API_CONF=${GLANCE_API_CONF:-/etc/glance/glance-api.conf}
configure_and_start_glance

configure_and_start_nova
run_tempest "Ceph nova&glance test" "^.*test_live_migration(?!.*(test_volume_backed_live_migration))"

set +e
#echo '5. test with Ceph for volumes and root + ephemeral disk'
#configure_and_start_cinder
#run_tempest "Ceph nova&glance&cinder test" "live_migration"
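
The "block migration test" filter above uses a PCRE negative lookahead
to skip the two known-buggy tests while keeping the rest of the live
migration suite. Its effect can be checked with GNU grep -P (a sketch;
the test names are illustrative):

regex='^.*test_live_migration(?!.*(test_volume_backed_live_migration|test_live_block_migration_paused))'
printf '%s\n' \
    'tempest.api.compute.admin.test_live_migration.LiveMigrationTest.test_live_block_migration' \
    'tempest.api.compute.admin.test_live_migration.LiveMigrationTest.test_volume_backed_live_migration' \
    | grep -P "$regex"
# Only the first name survives the filter; the volume-backed test is skipped.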

gate/live_migration/hooks/utils.sh (11 lines deleted)

@@ -1,11 +0,0 @@
#!/bin/bash

function run_tempest {
    local message=$1
    local tempest_regex=$2

    sudo -H -u tempest tox -eall -- $tempest_regex --concurrency=$TEMPEST_CONCURRENCY
    exitcode=$?
    if [[ $exitcode -ne 0 ]]; then
        die $LINENO "$message failure"
    fi
}

playbooks/legacy/nova-grenade-multinode/post.yaml (15 lines deleted)

@@ -1,15 +0,0 @@
- hosts: primary
  tasks:

    - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
      synchronize:
        src: '{{ ansible_user_dir }}/workspace/'
        dest: '{{ zuul.executor.log_root }}'
        mode: pull
        copy_links: true
        verify_host: true
        rsync_opts:
          - --include=/logs/**
          - --include=*/
          - --exclude=*
          - --prune-empty-dirs

playbooks/legacy/nova-grenade-multinode/run.yaml (65 lines deleted)

@@ -1,65 +0,0 @@
- hosts: primary
  name: nova-grenade-multinode
  tasks:

    - name: Ensure legacy workspace directory
      file:
        path: '{{ ansible_user_dir }}/workspace'
        state: directory

    - shell:
        cmd: |
          set -e
          set -x
          cat > clonemap.yaml << EOF
          clonemap:
            - name: openstack/devstack-gate
              dest: devstack-gate
          EOF
          /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
              https://opendev.org \
              openstack/devstack-gate
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          set -e
          set -x
          export PROJECTS="openstack/grenade $PROJECTS"
          export PYTHONUNBUFFERED=true
          export DEVSTACK_GATE_CONFIGDRIVE=0
          export DEVSTACK_GATE_NEUTRON=1
          # NOTE(mriedem): Run tempest smoke tests specific to compute on the
          # new side of the grenade environment. The post-test hook script will
          # run non-smoke migration tests in a local/block and shared/ceph
          # setup. Note that grenade hard-codes "tox -esmoke" for tempest on
          # the old side so the regex is not applied there.
          export DEVSTACK_GATE_TEMPEST=1
          export DEVSTACK_GATE_TEMPEST_REGEX="tempest\.(api\.compute|scenario)\..*smoke.*"
          export DEVSTACK_GATE_GRENADE=pullup
          export DEVSTACK_GATE_USE_PYTHON3=True
          # By default grenade runs only smoke tests so we need to set
          # RUN_SMOKE to False in order to run live migration tests using
          # grenade
          export DEVSTACK_LOCAL_CONFIG="RUN_SMOKE=False"
          # LIVE_MIGRATE_BACK_AND_FORTH will tell Tempest to run a live
          # migration of the same instance to one compute node and then back
          # to the other, which is mostly only interesting for grenade since
          # we have mixed level computes.
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"LIVE_MIGRATE_BACK_AND_FORTH=True"
          export BRANCH_OVERRIDE=default
          export DEVSTACK_GATE_TOPOLOGY="multinode"
          if [ "$BRANCH_OVERRIDE" != "default" ] ; then
              export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
          fi
          function post_test_hook {
              /opt/stack/new/nova/gate/live_migration/hooks/run_tests.sh
          }
          export -f post_test_hook
          cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
          ./safe-devstack-vm-gate-wrap.sh
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'
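
For reference, LIVE_MIGRATE_BACK_AND_FORTH makes Tempest migrate the
same instance to the other compute node and back again, exercising both
the old and new compute in a mixed-version pair. A minimal sketch of
that behaviour with the openstack CLI of this era (run as admin; the
server name and the legacy "server migrate --live <host>" syntax are
assumptions, not something this change defines):

# Sketch: live migrate one instance to the other compute and back again.
server=migrate-me   # assumed pre-existing test instance
host_of() { openstack server show -f value -c OS-EXT-SRV-ATTR:host "$1"; }

src=$(host_of "$server")
# Pick any other enabled nova-compute host as the destination.
dst=$(openstack compute service list --service nova-compute -f value -c Host \
    | grep -v "^${src}$" | head -n1)

for target in "$dst" "$src"; do
    openstack server migrate --live "$target" "$server"
    # Wait for the migration to finish before the next hop.
    until [ "$(openstack server show -f value -c status "$server")" = "ACTIVE" ]; do
        sleep 5
    done
    echo "now on $(host_of "$server")"
done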

playbooks/legacy/nova-live-migration/post.yaml (15 lines deleted)

@@ -1,15 +0,0 @@
- hosts: primary
  tasks:

    - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
      synchronize:
        src: '{{ ansible_user_dir }}/workspace/'
        dest: '{{ zuul.executor.log_root }}'
        mode: pull
        copy_links: true
        verify_host: true
        rsync_opts:
          - --include=/logs/**
          - --include=*/
          - --exclude=*
          - --prune-empty-dirs

playbooks/legacy/nova-live-migration/run.yaml (60 lines deleted)

@@ -1,60 +0,0 @@
- hosts: primary
  name: nova-live-migration
  tasks:

    - name: Ensure legacy workspace directory
      file:
        path: '{{ ansible_user_dir }}/workspace'
        state: directory

    - shell:
        cmd: |
          set -e
          set -x
          cat > clonemap.yaml << EOF
          clonemap:
            - name: openstack/devstack-gate
              dest: devstack-gate
          EOF
          /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
              https://opendev.org \
              openstack/devstack-gate
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - name: Configure devstack
      shell:
        # Force config drive.
        cmd: |
          set -e
          set -x
          cat << 'EOF' >>"/tmp/dg-local.conf"
          [[local|localrc]]
          FORCE_CONFIG_DRIVE=True
          EOF
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          set -e
          set -x
          export PYTHONUNBUFFERED=true
          export DEVSTACK_GATE_CONFIGDRIVE=0
          export DEVSTACK_GATE_TEMPEST=1
          export DEVSTACK_GATE_TEMPEST_NOTESTS=1
          export DEVSTACK_GATE_TOPOLOGY="multinode"
          export DEVSTACK_GATE_USE_PYTHON3=True
          function post_test_hook {
              /opt/stack/new/nova/gate/live_migration/hooks/run_tests.sh
              $BASE/new/nova/gate/test_evacuate.sh
          }
          export -f post_test_hook
          cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
          ./safe-devstack-vm-gate-wrap.sh
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'
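
The post_test_hook mechanism used by both legacy playbooks is a
devstack-gate convention: the job exports a bash function and the copied
wrapper script invokes it once devstack is up and the main test run has
finished. A simplified sketch of the pattern (the guard shown here is a
generic bash idiom, not devstack-gate's exact code):

# The job side: define and export the hook.
function post_test_hook {
    /opt/stack/new/nova/gate/live_migration/hooks/run_tests.sh
}
export -f post_test_hook

# The wrapper side (simplified): run the hook only if the job defined one.
if declare -f post_test_hook > /dev/null; then
    post_test_hook
fi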