From 7581e99a057c3ec5390cd26bcda48a4785d8d476 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 1 Oct 2018 11:33:34 -0400 Subject: [PATCH] Enable volume multiattach tests in tempest-full/slow jobs The volume multiattach tests originally required special devstack configuration in Queens for the Ubuntu Cloud Archive, but that is no longer necessary. This change enables the volume multiattach tests in the tempest-full(-py3) and tempest-slow jobs so we can drop the nova-multiattach job, which is mostly redundant coverage of the tempest.api.compute.* tests, and reduce the total number of jobs we run against nova/cinder/tempest changes. Due to intermittent bug 1807723 when running the test_volume_swap_with_multiattach test with two compute services, that test is conditionally skipped if there is more than one compute. This is probably no great loss in test coverage for now given swapping multiattach volumes is likely rarely used (see bug 1775418). Related ML thread: http://lists.openstack.org/pipermail/openstack-dev/2018-October/135299.html Depends-On: https://review.openstack.org/634977 Change-Id: I522a15ba3dbfee5d8ef417e43288a12319abf6ff --- .zuul.yaml | 9 ++++----- .../enable-volume-multiattach-fd5e9bf0e96b56ce.yaml | 10 ++++++++++ tempest/api/compute/admin/test_volume_swap.py | 7 +++++++ 3 files changed, 21 insertions(+), 5 deletions(-) create mode 100644 releasenotes/notes/enable-volume-multiattach-fd5e9bf0e96b56ce.yaml diff --git a/.zuul.yaml b/.zuul.yaml index 8576455cb2..6eaa2a5387 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -99,6 +99,7 @@ tox_envlist: full devstack_localrc: ENABLE_FILE_INJECTION: true + ENABLE_VOLUME_MULTIATTACH: true - job: name: tempest-full-oslo-master @@ -161,6 +162,7 @@ devstack_localrc: USE_PYTHON3: true FORCE_CONFIG_DRIVE: true + ENABLE_VOLUME_MULTIATTACH: true devstack_services: s-account: false s-container: false @@ -276,6 +278,7 @@ tox_envlist: slow-serial devstack_localrc: CINDER_ENABLED_BACKENDS: 
lvm:lvmdriver-1,lvm:lvmdriver-2 + ENABLE_VOLUME_MULTIATTACH: true tempest_concurrency: 2 - job: @@ -490,7 +493,7 @@ - ^playbooks/ - ^roles/ - ^.zuul.yaml$ - - nova-multiattach: + - tempest-full-parallel: # Define list of irrelevant files to use everywhere else irrelevant-files: &tempest-irrelevant-files - ^(test-|)requirements.txt$ @@ -502,8 +505,6 @@ - ^tempest/hacking/.*$ - ^tempest/tests/.*$ - ^tools/.*$ - - tempest-full-parallel: - irrelevant-files: *tempest-irrelevant-files - tempest-full-py3: irrelevant-files: *tempest-irrelevant-files - tempest-full-py3-ipv6: @@ -579,8 +580,6 @@ irrelevant-files: *tempest-irrelevant-files gate: jobs: - - nova-multiattach: - irrelevant-files: *tempest-irrelevant-files - tempest-slow-py3: irrelevant-files: *tempest-irrelevant-files - neutron-grenade-multinode: diff --git a/releasenotes/notes/enable-volume-multiattach-fd5e9bf0e96b56ce.yaml b/releasenotes/notes/enable-volume-multiattach-fd5e9bf0e96b56ce.yaml new file mode 100644 index 0000000000..0959b22bc7 --- /dev/null +++ b/releasenotes/notes/enable-volume-multiattach-fd5e9bf0e96b56ce.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + The ``tempest-full``, ``tempest-full-py3`` and ``tempest-slow`` zuul v3 + job configurations now set ``ENABLE_VOLUME_MULTIATTACH: true`` in the + ``devstack_localrc`` variables section. If you have a plugin job + configuration that inherits from one of these jobs and the backend cinder + volume driver or nova compute driver does not support volume multiattach then + you should override and set this variable to + ``ENABLE_VOLUME_MULTIATTACH: false`` in your job configuration. diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py index 6b589392de..cc83c04590 100644 --- a/tempest/api/compute/admin/test_volume_swap.py +++ b/tempest/api/compute/admin/test_volume_swap.py @@ -148,6 +148,13 @@ class TestMultiAttachVolumeSwap(TestVolumeSwapBase): # so it's marked as such. 
@decorators.attr(type='slow') @decorators.idempotent_id('e8f8f9d1-d7b7-4cd2-8213-ab85ef697b6e') + # For some reason this test intermittently fails on teardown when there are + # multiple compute nodes and the servers are split across the computes. + # For now, just skip this test if there are multiple computes. + # Alternatively we could put the servers in an affinity group if there are + # multiple computes but that would just side-step the underlying bug. + @decorators.skip_because(bug='1807723', + condition=CONF.compute.min_compute_nodes > 1) @utils.services('volume') def test_volume_swap_with_multiattach(self): # Create two volumes.