From 40771bc155fe28977c805aed5f60b5d84b1ac2bc Mon Sep 17 00:00:00 2001
From: Matt Riedemann
Date: Thu, 31 Jan 2019 17:36:47 -0500
Subject: [PATCH] Fix race in test_volume_swap_server_with_error

Change I03c8e8225e51fd80580772752c0b292987e34218 added another
notification to the list checked in test_volume_swap_server_with_error,
and change Id587967ea4f9980c292492e2f659bf55fb037b28 then relied on
checking the last notification in the list (compute.exception). But per
the note in the test, that last notification may not yet have shown up
by the time we check the size of the notification list, which can
result in an IndexError.

This change fixes the race by just waiting for the compute.exception
notification rather than using the racy _wait_until_swap_volume_error,
which depended on the cinder fixture's swap_error variable (removed in
this change).

Change-Id: I8165fcd98e11b9155640559a36ae90a38f63c0dd
Closes-Bug: #1814177
(cherry picked from commit 03e4e3ce1357123f9f53a3901a17a64f160653a4)
---
 nova/tests/fixtures.py                         | 12 ++----------
 .../notification_sample_tests/test_instance.py | 15 ++-------------
 2 files changed, 4 insertions(+), 23 deletions(-)

diff --git a/nova/tests/fixtures.py b/nova/tests/fixtures.py
index d6bc38a2f5ad..7255355d2b26 100644
--- a/nova/tests/fixtures.py
+++ b/nova/tests/fixtures.py
@@ -1392,7 +1392,6 @@ class CinderFixture(fixtures.Fixture):
     def __init__(self, test):
         super(CinderFixture, self).__init__()
         self.test = test
-        self.swap_error = False
         self.swap_volume_instance_uuid = None
         self.swap_volume_instance_error_uuid = None
         self.reserved_volumes = list()
@@ -1501,11 +1500,6 @@ class CinderFixture(fixtures.Fixture):
             if volume_id in self.reserved_volumes:
                 self.reserved_volumes.remove(volume_id)
 
-            # Signaling that swap_volume has encountered the error
-            # from initialize_connection and is working on rolling back
-            # the reservation on SWAP_ERR_NEW_VOL.
-            self.swap_error = True
-
         def fake_attach(_self, context, volume_id, instance_uuid,
                         mountpoint, mode='rw'):
             # Check to see if the volume is already attached to any server.
@@ -1588,7 +1582,6 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
     def __init__(self, test):
         super(CinderFixtureNewAttachFlow, self).__init__()
         self.test = test
-        self.swap_error = False
         self.swap_volume_instance_uuid = None
         self.swap_volume_instance_error_uuid = None
         self.attachment_error_id = None
@@ -1732,9 +1725,6 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
             _, attachment, attachments = _find_attachment(attachment_id)
             attachments.remove(attachment)
 
-            if attachment_id == CinderFixtureNewAttachFlow.SWAP_ERR_ATTACH_ID:
-                self.swap_error = True
-
         def fake_attachment_update(_self, context, attachment_id, connector,
                                    mountpoint=None):
             # Ensure the attachment exists
@@ -1745,6 +1735,8 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
                                        {'foo': 'bar',
                                         'target_lun': '1'}}}
             if attachment_id == CinderFixtureNewAttachFlow.SWAP_ERR_ATTACH_ID:
+                # This intentionally triggers a TypeError for the
+                # instance.volume_swap.error versioned notification tests.
                 attachment_ref = {'connection_info': ()}
             return attachment_ref
 
diff --git a/nova/tests/functional/notification_sample_tests/test_instance.py b/nova/tests/functional/notification_sample_tests/test_instance.py
index 127fc928a3b3..4db2ef596975 100644
--- a/nova/tests/functional/notification_sample_tests/test_instance.py
+++ b/nova/tests/functional/notification_sample_tests/test_instance.py
@@ -334,13 +334,6 @@ class TestInstanceNotificationSample(
             time.sleep(0.5)
         self.fail('Volume swap operation failed.')
 
-    def _wait_until_swap_volume_error(self):
-        for i in range(50):
-            if self.cinder.swap_error:
-                return
-            time.sleep(0.5)
-        self.fail("Timed out waiting for volume swap error to occur.")
-
     def test_instance_action(self):
         # A single test case is used to test most of the instance action
         # notifications to avoid booting up an instance for every action
@@ -1397,13 +1390,9 @@ class TestInstanceNotificationSample(
         self._volume_swap_server(server, self.cinder.SWAP_ERR_OLD_VOL,
                                  self.cinder.SWAP_ERR_NEW_VOL)
-        self._wait_until_swap_volume_error()
+        self._wait_for_notification('compute.exception')
 
-        # Seven versioned notifications are generated. We only rely on the
-        # first six because _wait_until_swap_volume_error will return True
-        # after volume_api.unreserve is called on the cinder fixture, and that
-        # happens before the instance fault is handled in the compute manager
-        # which generates the last notification (compute.exception).
+        # Eight versioned notifications are generated.
         # 0. instance-create-start
         # 1. instance-create-end
         # 2. instance-update
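
Note (not part of the patch): the fix works because _wait_for_notification
blocks until the compute.exception versioned notification is actually
emitted, whereas the removed swap_error flag was set by the fixture before
the compute manager handled the instance fault, leaving a window where the
test could inspect the notification list too early. Below is a minimal
sketch of an event-driven wait in that style; NotificationWaiter and its
methods are hypothetical stand-ins for illustration only, not nova's real
test helpers.

import threading


class NotificationWaiter(object):
    """Hypothetical sketch of an event-driven notification wait.

    Illustrates why waiting for the notification itself cannot race
    the way polling the old CinderFixture.swap_error flag could.
    """

    def __init__(self):
        self._lock = threading.Lock()
        self._received = []   # event types seen so far
        self._waiters = {}    # event type -> threading.Event

    def notify(self, event_type):
        # Called by a fake notification driver for every versioned
        # notification the service under test emits.
        with self._lock:
            self._received.append(event_type)
            event = self._waiters.get(event_type)
        if event is not None:
            event.set()

    def wait_for_notification(self, event_type, timeout=10.0):
        # Returns once a notification of the given type has been
        # emitted. Unlike the removed swap_error flag, which was set
        # before the compute manager emitted compute.exception, this
        # cannot return before the notification actually exists.
        with self._lock:
            if event_type in self._received:
                return
            event = self._waiters.setdefault(event_type,
                                             threading.Event())
        if not event.wait(timeout):
            raise AssertionError(
                'Timed out waiting for %s' % event_type)

A test would call waiter.wait_for_notification('compute.exception') after
triggering the failing swap, mirroring the
self._wait_for_notification('compute.exception') call the patch adds.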