From 52d7b0d62600b24f516070a4cc5f4194d5cffc76 Mon Sep 17 00:00:00 2001
From: lkuchlan
Date: Mon, 7 Nov 2016 20:53:19 +0200
Subject: [PATCH] Add a generic "wait_for_volume_resource_status" function

This function waits for a volume resource to reach a given status.
The function is a common function for volume, snapshot and backup
resources.

Change-Id: I070a9f84b8b199df22765391482f7d69a5414db3
---
 tempest/api/compute/admin/test_volume_swap.py |  8 +-
 tempest/api/compute/base.py                   | 10 +--
 .../api/compute/servers/test_delete_server.py |  4 +-
 .../api/compute/volumes/test_attach_volume.py |  8 +-
 .../compute/volumes/test_volume_snapshots.py  |  6 +-
 .../api/compute/volumes/test_volumes_get.py   |  3 +-
 .../api/volume/admin/test_multi_backend.py    |  4 +-
 .../api/volume/admin/test_volume_quotas.py    |  2 +-
 tempest/api/volume/admin/test_volume_types.py |  8 +-
 .../api/volume/admin/test_volumes_backup.py   | 19 +++--
 .../volume/admin/v2/test_snapshot_manage.py   |  6 +-
 .../api/volume/admin/v2/test_volumes_list.py  |  4 +-
 tempest/api/volume/base.py                    | 20 ++--
 tempest/api/volume/test_volume_transfers.py   | 15 ++--
 tempest/api/volume/test_volumes_actions.py    | 21 +++--
 tempest/api/volume/test_volumes_backup.py     | 14 ++--
 tempest/api/volume/test_volumes_extend.py     |  4 +-
 tempest/api/volume/test_volumes_get.py        |  8 +-
 tempest/common/compute.py                     |  5 +-
 tempest/common/waiters.py                     | 78 ++++++-------
 tempest/exceptions.py                         |  9 +--
 tempest/scenario/manager.py                   | 17 ++--
 tempest/scenario/test_stamp_pattern.py        |  8 +-
 tempest/scenario/test_volume_boot_pattern.py  |  4 +-
 tempest/tests/common/test_waiters.py          |  2 +-
 25 files changed, 128 insertions(+), 159 deletions(-)

diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index 5f2444a0e2..d61efea230 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -60,10 +60,10 @@ class TestVolumeSwap(base.BaseV2ComputeAdminTest):
         # Swap volume from "volume1" to "volume2"
         self.servers_admin_client.update_attached_volume(
             server['id'], volume1['id'], volumeId=volume2['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume1['id'], 'available')
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume2['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume1['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume2['id'], 'in-use')
         self.addCleanup(self.servers_client.detach_volume,
                         server['id'], volume2['id'])
         # Verify "volume2" is attached to the server
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index c636894f80..5c7714e0f4 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -406,8 +406,8 @@ class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest,
             kwargs['imageRef'] = image_ref
         volume = cls.volumes_client.create_volume(**kwargs)['volume']
         cls.volumes.append(volume)
-        waiters.wait_for_volume_status(cls.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(cls.volumes_client,
+                                                volume['id'], 'available')
         return volume

     @classmethod
@@ -446,15 +446,15 @@ class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest,
         # On teardown detach the volume and wait for it to be available. This
         # is so we don't error out when trying to delete the volume during
         # teardown.
-        self.addCleanup(waiters.wait_for_volume_resource_status,
+        self.addCleanup(waiters.wait_for_volume_resource_status,
                         self.volumes_client, volume['id'], 'available')
         # Ignore 404s on detach in case the server is deleted or the volume
         # is already detached.
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.servers_client.detach_volume,
                         server['id'], volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'in-use')


 class BaseV2ComputeAdminTest(BaseV2ComputeTest):
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 83b2e1b22d..8ed55e0730 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -115,8 +115,8 @@ class DeleteServersTestJSON(base.BaseV2ComputeTest):

         self.client.delete_server(server['id'])
         waiters.wait_for_server_termination(self.client, server['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')


 class DeleteServersAdminTestJSON(base.BaseV2ComputeAdminTest):
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index cbe717852b..53049448e5 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -64,8 +64,8 @@ class AttachVolumeTestJSON(base.BaseV2ComputeTest):
     def _detach_volume(self, server_id, volume_id):
         try:
             self.servers_client.detach_volume(server_id, volume_id)
-            waiters.wait_for_volume_status(self.volumes_client,
-                                           volume_id, 'available')
+            waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                    volume_id, 'available')
         except lib_exc.NotFound:
             LOG.warning("Unable to detach volume %s from server %s "
                         "possibly it was already detached", volume_id,
@@ -78,8 +78,8 @@ class AttachVolumeTestJSON(base.BaseV2ComputeTest):
             kwargs.update({'device': '/dev/%s' % device})
         attachment = self.servers_client.attach_volume(
             server_id, **kwargs)['volumeAttachment']
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume_id, 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume_id, 'in-use')
         self.addCleanup(self._detach_volume, server_id,
                         volume_id)

diff --git a/tempest/api/compute/volumes/test_volume_snapshots.py b/tempest/api/compute/volumes/test_volume_snapshots.py
index 3d5d23bf67..4b068672cf 100644
--- a/tempest/api/compute/volumes/test_volume_snapshots.py
+++ b/tempest/api/compute/volumes/test_volume_snapshots.py
@@ -54,9 +54,9 @@ class VolumesSnapshotsTestJSON(base.BaseV2ComputeTest):
                                             display_name=s_name)['snapshot']

         def delete_snapshot(snapshot_id):
-            waiters.wait_for_snapshot_status(self.snapshots_client,
-                                             snapshot_id,
-                                             'available')
+            waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                    snapshot_id,
+                                                    'available')
             # Delete snapshot
             self.snapshots_client.delete_snapshot(snapshot_id)
             self.snapshots_client.wait_for_resource_deletion(snapshot_id)
diff --git a/tempest/api/compute/volumes/test_volumes_get.py b/tempest/api/compute/volumes/test_volumes_get.py
index 63c247e674..0eaa359553 100644
--- a/tempest/api/compute/volumes/test_volumes_get.py
+++ b/tempest/api/compute/volumes/test_volumes_get.py
@@ -57,7 +57,8 @@ class VolumesGetTestJSON(base.BaseV2ComputeTest):
         self.assertIsNotNone(volume['id'],
found.") # Wait for Volume status to become ACTIVE - waiters.wait_for_volume_status(self.client, volume['id'], 'available') + waiters.wait_for_volume_resource_status(self.client, volume['id'], + 'available') # GET Volume fetched_volume = self.client.show_volume(volume['id'])['volume'] # Verification of details of fetched Volume diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py index f68f19a9f1..0d30cb08be 100644 --- a/tempest/api/volume/admin/test_multi_backend.py +++ b/tempest/api/volume/admin/test_multi_backend.py @@ -74,8 +74,8 @@ class VolumeMultiBackendV2Test(base.BaseVolumeAdminTest): else: cls.volume_id_list_without_prefix.append( cls.volume['id']) - waiters.wait_for_volume_status(cls.admin_volume_client, - cls.volume['id'], 'available') + waiters.wait_for_volume_resource_status(cls.admin_volume_client, + cls.volume['id'], 'available') @classmethod def resource_cleanup(cls): diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py index 5a83ae3443..4d49f1e3e0 100644 --- a/tempest/api/volume/admin/test_volume_quotas.py +++ b/tempest/api/volume/admin/test_volume_quotas.py @@ -146,7 +146,7 @@ class BaseVolumeQuotasAdminV2TestJSON(base.BaseVolumeAdminTest): transfer_id, auth_key=auth_key)['transfer'] # Verify volume transferred is available - waiters.wait_for_volume_status( + waiters.wait_for_volume_resource_status( self.alt_client, volume['id'], 'available') # List of tenants quota usage post transfer diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py index 7938604463..205bd2dffe 100644 --- a/tempest/api/volume/admin/test_volume_types.py +++ b/tempest/api/volume/admin/test_volume_types.py @@ -58,14 +58,14 @@ class VolumeTypesV2Test(base.BaseVolumeAdminTest): "to the requested name") self.assertIsNotNone(volume['id'], "Field volume id is empty or not found.") - waiters.wait_for_volume_status(self.volumes_client, - volume['id'], 'available') + waiters.wait_for_volume_resource_status(self.volumes_client, + volume['id'], 'available') # Update volume with new volume_type self.volumes_client.retype_volume(volume['id'], new_type=volume_types[1]['id']) - waiters.wait_for_volume_status(self.volumes_client, - volume['id'], 'available') + waiters.wait_for_volume_resource_status(self.volumes_client, + volume['id'], 'available') # Get volume details and Verify fetched_volume = self.volumes_client.show_volume( diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py index 04d27ea087..13b7384303 100644 --- a/tempest/api/volume/admin/test_volumes_backup.py +++ b/tempest/api/volume/admin/test_volumes_backup.py @@ -94,8 +94,9 @@ class VolumesBackupsAdminV2Test(base.BaseVolumeAdminTest): self.addCleanup(self._delete_backup, new_id) self.assertIn("id", import_backup) self.assertEqual(new_id, import_backup['id']) - waiters.wait_for_backup_status(self.admin_backups_client, - import_backup['id'], 'available') + waiters.wait_for_volume_resource_status(self.admin_backups_client, + import_backup['id'], + 'available') # Verify Import Backup backups = self.admin_backups_client.list_backups( @@ -108,14 +109,16 @@ class VolumesBackupsAdminV2Test(base.BaseVolumeAdminTest): self.addCleanup(self.admin_volume_client.delete_volume, restore['volume_id']) self.assertEqual(backup['id'], restore['backup_id']) - waiters.wait_for_volume_status(self.admin_volume_client, - restore['volume_id'], 'available') + 
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
+                                                restore['volume_id'],
+                                                'available')

         # Verify if restored volume is there in volume list
         volumes = self.admin_volume_client.list_volumes()['volumes']
         self.assertIn(restore['volume_id'], [v['id'] for v in volumes])
-        waiters.wait_for_backup_status(self.admin_backups_client,
-                                       import_backup['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_backups_client,
+                                                import_backup['id'],
+                                                'available')

     @decorators.idempotent_id('47a35425-a891-4e13-961c-c45deea21e94')
     def test_volume_backup_reset_status(self):
@@ -131,8 +134,8 @@ class VolumesBackupsAdminV2Test(base.BaseVolumeAdminTest):
         # Reset backup status to error
         self.admin_backups_client.reset_backup_status(backup_id=backup['id'],
                                                       status="error")
-        waiters.wait_for_backup_status(self.admin_backups_client,
-                                       backup['id'], 'error')
+        waiters.wait_for_volume_resource_status(self.admin_backups_client,
+                                                backup['id'], 'error')


 class VolumesBackupsAdminV1Test(VolumesBackupsAdminV2Test):
diff --git a/tempest/api/volume/admin/v2/test_snapshot_manage.py b/tempest/api/volume/admin/v2/test_snapshot_manage.py
index 111492428e..eed7dd13c5 100644
--- a/tempest/api/volume/admin/v2/test_snapshot_manage.py
+++ b/tempest/api/volume/admin/v2/test_snapshot_manage.py
@@ -65,9 +65,9 @@ class SnapshotManageAdminV2Test(base.BaseVolumeAdminTest):
                         self.admin_snapshots_client, new_snapshot['id'])

         # Wait for the snapshot to be available after manage operation
-        waiters.wait_for_snapshot_status(self.admin_snapshots_client,
-                                         new_snapshot['id'],
-                                         'available')
+        waiters.wait_for_volume_resource_status(self.admin_snapshots_client,
+                                                new_snapshot['id'],
+                                                'available')

         # Verify the managed snapshot has the expected parent volume
         self.assertEqual(new_snapshot['volume_id'], volume['id'])
diff --git a/tempest/api/volume/admin/v2/test_volumes_list.py b/tempest/api/volume/admin/v2/test_volumes_list.py
index b0a37fbcaf..6bab37319f 100644
--- a/tempest/api/volume/admin/v2/test_volumes_list.py
+++ b/tempest/api/volume/admin/v2/test_volumes_list.py
@@ -45,8 +45,8 @@ class VolumesListAdminV2TestJSON(base.BaseVolumeAdminTest):
         # Create a volume in admin tenant
         adm_vol = self.admin_volume_client.create_volume(
             size=CONF.volume.volume_size)['volume']
-        waiters.wait_for_volume_status(self.admin_volume_client,
-                                       adm_vol['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
+                                                adm_vol['id'], 'available')
         self.addCleanup(self.admin_volume_client.delete_volume, adm_vol['id'])
         params = {'all_tenants': 1,
                   'project_id': self.volumes_client.tenant_id}
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 98e050e24a..f8c435f5b7 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -131,8 +131,8 @@ class BaseVolumeTest(tempest.test.BaseTestCase):

         volume = cls.volumes_client.create_volume(**kwargs)['volume']
         cls.volumes.append(volume)
-        waiters.wait_for_volume_status(cls.volumes_client, volume['id'],
-                                       wait_until)
+        waiters.wait_for_volume_resource_status(cls.volumes_client,
+                                                volume['id'], wait_until)
         return volume

     @classmethod
@@ -146,8 +146,8 @@ class BaseVolumeTest(tempest.test.BaseTestCase):
         snapshot = cls.snapshots_client.create_snapshot(
             volume_id=volume_id, **kwargs)['snapshot']
         cls.snapshots.append(snapshot)
-        waiters.wait_for_snapshot_status(cls.snapshots_client,
-                                         snapshot['id'], 'available')
+        waiters.wait_for_volume_resource_status(cls.snapshots_client,
+                                                snapshot['id'], 'available')
         return snapshot
     def create_backup(self, volume_id, backup_client=None, **kwargs):
@@ -158,8 +158,8 @@ class BaseVolumeTest(tempest.test.BaseTestCase):
         backup = backup_client.create_backup(
             volume_id=volume_id, **kwargs)['backup']
         self.addCleanup(backup_client.delete_backup, backup['id'])
-        waiters.wait_for_backup_status(backup_client, backup['id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(backup_client, backup['id'],
+                                                'available')
         return backup

     # NOTE(afazekas): these create_* and clean_* could be defined
@@ -182,10 +182,10 @@ class BaseVolumeTest(tempest.test.BaseTestCase):
         self.servers_client.attach_volume(
             server_id, volumeId=volume_id,
             device='/dev/%s' % CONF.compute.volume_device_name)
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume_id, 'in-use')
-        self.addCleanup(waiters.wait_for_volume_status, self.volumes_client,
-                        volume_id, 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume_id, 'in-use')
+        self.addCleanup(waiters.wait_for_volume_resource_status,
+                        self.volumes_client, volume_id, 'available')
         self.addCleanup(self.servers_client.detach_volume, server_id,
                         volume_id)

diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 547777033c..9f63b140cb 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -43,8 +43,8 @@ class VolumesV2TransfersTest(base.BaseVolumeTest):
             volume_id=volume['id'])['transfer']
         transfer_id = transfer['id']
         auth_key = transfer['auth_key']
-        waiters.wait_for_volume_status(self.client,
-                                       volume['id'], 'awaiting-transfer')
+        waiters.wait_for_volume_resource_status(
+            self.client, volume['id'], 'awaiting-transfer')

         # Get a volume transfer
         body = self.client.show_volume_transfer(transfer_id)['transfer']
@@ -58,8 +58,8 @@ class VolumesV2TransfersTest(base.BaseVolumeTest):
         # Accept a volume transfer by alt_tenant
         body = self.alt_client.accept_volume_transfer(
             transfer_id, auth_key=auth_key)['transfer']
-        waiters.wait_for_volume_status(self.alt_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.alt_client,
+                                                volume['id'], 'available')

     @decorators.idempotent_id('ab526943-b725-4c07-b875-8e8ef87a2c30')
     def test_create_list_delete_volume_transfer(self):
@@ -71,8 +71,8 @@ class VolumesV2TransfersTest(base.BaseVolumeTest):
         body = self.client.create_volume_transfer(
             volume_id=volume['id'])['transfer']
         transfer_id = body['id']
-        waiters.wait_for_volume_status(self.client,
-                                       volume['id'], 'awaiting-transfer')
+        waiters.wait_for_volume_resource_status(
+            self.client, volume['id'], 'awaiting-transfer')

         # List all volume transfers (looking for the one we created)
         body = self.client.list_volume_transfers()['transfers']
@@ -84,7 +84,8 @@ class VolumesV2TransfersTest(base.BaseVolumeTest):

         # Delete a volume transfer
         self.client.delete_volume_transfer(transfer_id)
-        waiters.wait_for_volume_status(self.client, volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(
+            self.client, volume['id'], 'available')


 class VolumesV1TransfersTest(VolumesV2TransfersTest):
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index c0cc74da8f..0a6901c14b 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -60,11 +60,11 @@ class VolumesV2ActionsTest(base.BaseVolumeTest):
             instance_uuid=server['id'],
             mountpoint='/dev/%s' % CONF.compute.volume_device_name)
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.client,
+                                                self.volume['id'], 'in-use')
         self.client.detach_volume(self.volume['id'])
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.client,
+                                                self.volume['id'], 'available')

     @decorators.idempotent_id('63e21b4c-0a0c-41f6-bfc3-7c2816815599')
     def test_volume_bootable(self):
@@ -91,11 +91,10 @@ class VolumesV2ActionsTest(base.BaseVolumeTest):
             instance_uuid=server['id'],
             mountpoint='/dev/%s' % CONF.compute.volume_device_name)
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'in-use')
-        self.addCleanup(waiters.wait_for_volume_status, self.client,
-                        self.volume['id'],
-                        'available')
+        waiters.wait_for_volume_resource_status(self.client, self.volume['id'],
+                                                'in-use')
+        self.addCleanup(waiters.wait_for_volume_resource_status, self.client,
+                        self.volume['id'], 'available')
         self.addCleanup(self.client.detach_volume, self.volume['id'])
         volume = self.client.show_volume(self.volume['id'])['volume']
         self.assertIn('attachments', volume)
@@ -124,8 +123,8 @@ class VolumesV2ActionsTest(base.BaseVolumeTest):
                         self.image_client.delete_image,
                         image_id)
         waiters.wait_for_image_status(self.image_client, image_id, 'active')
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.client,
+                                                self.volume['id'], 'available')

     @decorators.idempotent_id('92c4ef64-51b2-40c0-9f7e-4749fbaaba33')
     def test_reserve_unreserve_volume(self):
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index 939f1acf9c..e664ff70ca 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -40,11 +40,11 @@ class VolumesBackupsV2Test(base.BaseVolumeTest):
         self.addCleanup(self.volumes_client.delete_volume,
                         restored_volume['volume_id'])
         self.assertEqual(backup_id, restored_volume['backup_id'])
-        waiters.wait_for_backup_status(self.backups_client,
-                                       backup_id, 'available')
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       restored_volume['volume_id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(self.backups_client,
+                                                backup_id, 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                restored_volume['volume_id'],
+                                                'available')
         return restored_volume

     @decorators.idempotent_id('a66eb488-8ee1-47d4-8e9f-575a095728c6')
@@ -60,8 +60,8 @@ class VolumesBackupsV2Test(base.BaseVolumeTest):
                                        name=backup_name,
                                        description=description)
         self.assertEqual(backup_name, backup['name'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')

         # Get a given backup
         backup = self.backups_client.show_backup(backup['id'])['backup']
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index 2e1851ef57..79bce2c529 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -27,8 +27,8 @@ class VolumesV2ExtendTest(base.BaseVolumeTest):
         extend_size = int(volume['size']) + 1
         self.volumes_client.extend_volume(volume['id'],
                                           new_size=extend_size)
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
         volume = self.volumes_client.show_volume(volume['id'])['volume']
         self.assertEqual(int(volume['size']), extend_size)
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index d1a1c2fb71..a3e46a8545 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -41,8 +41,8 @@ class VolumesV2GetTest(base.BaseVolumeTest):
         volume = self.volumes_client.create_volume(**kwargs)['volume']
         self.assertIn('id', volume)
         self.addCleanup(self.delete_volume, self.volumes_client, volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client, volume['id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
         self.assertIn(name_field, volume)
         self.assertEqual(volume[name_field], v_name,
                          "The created volume name is not equal "
@@ -106,8 +106,8 @@ class VolumesV2GetTest(base.BaseVolumeTest):
         self.assertIn('id', new_volume)
         self.addCleanup(self.delete_volume, self.volumes_client,
                         new_volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       new_volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                new_volume['id'], 'available')

         params = {name_field: volume[name_field],
                   descrip_field: volume[descrip_field]}
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 55bc93e846..99da983615 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -124,8 +124,9 @@ def create_test_server(clients, validatable=False, validation_resources=None,
                   'imageRef': image_id,
                   'size': CONF.volume.volume_size}
         volume = volumes_client.create_volume(**params)
-        waiters.wait_for_volume_status(volumes_client,
-                                       volume['volume']['id'], 'available')
+        waiters.wait_for_volume_resource_status(volumes_client,
+                                                volume['volume']['id'],
+                                                'available')

         bd_map_v2 = [{
             'uuid': volume['volume']['id'],
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 15619f43be..3e5600cea3 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -10,7 +10,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-
+import re
 import time

 from oslo_log import log as logging
@@ -179,25 +179,33 @@ def wait_for_image_status(client, image_id, status):
         raise lib_exc.TimeoutException(message)


-def wait_for_volume_status(client, volume_id, status):
-    """Waits for a Volume to reach a given status."""
-    body = client.show_volume(volume_id)['volume']
-    volume_status = body['status']
+def wait_for_volume_resource_status(client, resource_id, status):
+    """Waits for a volume resource to reach a given status.
+
+    This function is a common function for volume, snapshot and backup
+    resources. The function extracts the name of the desired resource from
+    the client class name of the resource.
+ """ + resource_name = re.findall(r'(Volume|Snapshot|Backup)', + client.__class__.__name__)[0].lower() + show_resource = getattr(client, 'show_' + resource_name) + resource_status = show_resource(resource_id)[resource_name]['status'] start = int(time.time()) - while volume_status != status: + while resource_status != status: time.sleep(client.build_interval) - body = client.show_volume(volume_id)['volume'] - volume_status = body['status'] - if volume_status == 'error' and status != 'error': - raise exceptions.VolumeBuildErrorException(volume_id=volume_id) - if volume_status == 'error_restoring': - raise exceptions.VolumeRestoreErrorException(volume_id=volume_id) + resource_status = show_resource(resource_id)[ + '{}'.format(resource_name)]['status'] + if resource_status == 'error' and resource_status != status: + raise exceptions.VolumeResourceBuildErrorException( + resource_name=resource_name, resource_id=resource_id) + if resource_name == 'volume' and resource_status == 'error_restoring': + raise exceptions.VolumeRestoreErrorException(volume_id=resource_id) if int(time.time()) - start >= client.build_timeout: - message = ('Volume %s failed to reach %s status (current %s) ' + message = ('%s %s failed to reach %s status (current %s) ' 'within the required time (%s s).' % - (volume_id, status, volume_status, + (resource_name, resource_id, status, resource_status, client.build_timeout)) raise lib_exc.TimeoutException(message) @@ -221,48 +229,6 @@ def wait_for_volume_retype(client, volume_id, new_volume_type): raise lib_exc.TimeoutException(message) -def wait_for_snapshot_status(client, snapshot_id, status): - """Waits for a Snapshot to reach a given status.""" - body = client.show_snapshot(snapshot_id)['snapshot'] - snapshot_status = body['status'] - start = int(time.time()) - - while snapshot_status != status: - time.sleep(client.build_interval) - body = client.show_snapshot(snapshot_id)['snapshot'] - snapshot_status = body['status'] - if snapshot_status == 'error': - raise exceptions.SnapshotBuildErrorException( - snapshot_id=snapshot_id) - if int(time.time()) - start >= client.build_timeout: - message = ('Snapshot %s failed to reach %s status (current %s) ' - 'within the required time (%s s).' % - (snapshot_id, status, snapshot_status, - client.build_timeout)) - raise lib_exc.TimeoutException(message) - - -def wait_for_backup_status(client, backup_id, status): - """Waits for a Backup to reach a given status.""" - body = client.show_backup(backup_id)['backup'] - backup_status = body['status'] - start = int(time.time()) - - while backup_status != status: - time.sleep(client.build_interval) - body = client.show_backup(backup_id)['backup'] - backup_status = body['status'] - if backup_status == 'error' and backup_status != status: - raise lib_exc.VolumeBackupException(backup_id=backup_id) - - if int(time.time()) - start >= client.build_timeout: - message = ('Volume backup %s failed to reach %s status ' - '(current %s) within the required time (%s s).' % - (backup_id, status, backup_status, - client.build_timeout)) - raise lib_exc.TimeoutException(message) - - def wait_for_qos_operations(client, qos_id, operation, args=None): """Waits for a qos operations to be completed. 
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 45bbc11fb5..f48d7ac057 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -37,18 +37,15 @@ class AddImageException(exceptions.TempestException):
     message = "Image %(image_id)s failed to become ACTIVE in the allotted time"


-class VolumeBuildErrorException(exceptions.TempestException):
-    message = "Volume %(volume_id)s failed to build and is in ERROR status"
+class VolumeResourceBuildErrorException(exceptions.TempestException):
+    message = ("%(resource_name)s %(resource_id)s failed to build and is in "
+               "ERROR status")


 class VolumeRestoreErrorException(exceptions.TempestException):
     message = "Volume %(volume_id)s failed to restore and is in ERROR status"


-class SnapshotBuildErrorException(exceptions.TempestException):
-    message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
-
-
 class StackBuildErrorException(exceptions.TempestException):
     message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
                "due to '%(stack_status_reason)s'")
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 6014c8cfc0..e670216384 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -241,8 +241,8 @@ class ScenarioTest(tempest.test.BaseTestCase):
             self.assertEqual(name, volume['display_name'])
         else:
             self.assertEqual(name, volume['name'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
         # The volume retrieved on creation has a non-up-to-date status.
         # Retrieval after it becomes active ensures correct details.
         volume = self.volumes_client.show_volume(volume['id'])['volume']
@@ -481,8 +481,9 @@ class ScenarioTest(tempest.test.BaseTestCase):

         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.snapshots_client.delete_snapshot, snapshot_id)
-        waiters.wait_for_snapshot_status(self.snapshots_client,
-                                         snapshot_id, 'available')
+        waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                snapshot_id,
+                                                'available')
         image_name = snapshot_image['name']
         self.assertEqual(name, image_name)
         LOG.debug("Created snapshot image %s for server %s",
@@ -494,16 +495,16 @@ class ScenarioTest(tempest.test.BaseTestCase):
             server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
             % CONF.compute.volume_device_name)['volumeAttachment']
         self.assertEqual(volume_to_attach['id'], volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'in-use')

         # Return the updated volume after the attachment
         return self.volumes_client.show_volume(volume['id'])['volume']

     def nova_volume_detach(self, server, volume):
         self.servers_client.detach_volume(server['id'], volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')

         volume = self.volumes_client.show_volume(volume['id'])['volume']
         self.assertEqual('available', volume['status'])
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 8661217132..ef9664d41b 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -64,10 +64,10 @@ class TestStampPattern(manager.ScenarioTest):
         self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
                         snapshot['id'])
         self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
-        waiters.wait_for_snapshot_status(self.snapshots_client,
-                                         snapshot['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                snapshot['id'], 'available')
         if 'display_name' in snapshot:
             self.assertEqual(snapshot_name, snapshot['display_name'])
         else:
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 43dcf9668b..9c33b716a6 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -82,8 +82,8 @@ class TestVolumeBootPattern(manager.ScenarioTest):
         self.addCleanup(
             self.snapshots_client.wait_for_resource_deletion, snap['id'])
         self.addCleanup(self.snapshots_client.delete_snapshot, snap['id'])
-        waiters.wait_for_snapshot_status(self.snapshots_client,
-                                         snap['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                snap['id'], 'available')

         # NOTE(e0ne): Cinder API v2 uses name instead of display_name
         if 'display_name' in snap:
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 46f9526e21..c2f622caba 100644
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -66,7 +66,7 @@ class TestImageWaiters(base.TestCase):
         client.show_volume = mock_show
         volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
         self.assertRaises(exceptions.VolumeRestoreErrorException,
-                          waiters.wait_for_volume_status,
+                          waiters.wait_for_volume_resource_status,
                           client, volume_id, 'available')
         mock_show.assert_has_calls([mock.call(volume_id),
                                     mock.call(volume_id)])
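
Reviewer note, not part of the patch: a hedged usage sketch of the
consolidated waiter. Because the function only inspects the client's class
name, build_interval/build_timeout, and the show_<resource> method, a
duck-typed fake client is enough to exercise it. FakeBackupsClient and the
queued statuses below are invented for illustration, and the snippet assumes
tempest is importable.

    from tempest.common import waiters

    class FakeBackupsClient(object):
        # Duck-typed stand-in for a backups client; the class name must
        # contain "Backup" for the waiter's name-based dispatch to work.
        build_interval = 0
        build_timeout = 5

        def __init__(self, statuses):
            self._statuses = list(statuses)

        def show_backup(self, backup_id):
            # Step through the queued statuses, then repeat the last one.
            if len(self._statuses) > 1:
                return {'backup': {'status': self._statuses.pop(0)}}
            return {'backup': {'status': self._statuses[0]}}

    # Polls through 'creating' -> 'available' and returns without raising.
    # A terminal 'error' status would instead raise the new
    # VolumeResourceBuildErrorException, and exceeding build_timeout raises
    # lib_exc.TimeoutException.
    waiters.wait_for_volume_resource_status(
        FakeBackupsClient(['creating', 'available']), 'backup-id', 'available')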