From beca6c147ce67f7d7c417070ae921106625a2354 Mon Sep 17 00:00:00 2001 From: yogesh Date: Thu, 23 Feb 2017 14:50:18 -0500 Subject: [PATCH] Fix internal tempest tests Existing methods - wait_for_volume_status, wait_for_snapshot_status and wait_for_backup_status were combined into wait_for_volume_resource_status. This patch is intended to fix the issue occurred due to the above change. Change-Id: Iaf1fd19a3fdf50aec8aaddffdcbaf6d0466fbd35 Closes-bug:1667448 --- .../api/volume/test_consistencygroups.py | 32 +++++++++---------- .../tempest/api/volume/test_volume_backup.py | 23 +++++++------ .../tempest/api/volume/test_volume_unicode.py | 6 ++-- 3 files changed, 30 insertions(+), 31 deletions(-) diff --git a/cinder/tests/tempest/api/volume/test_consistencygroups.py b/cinder/tests/tempest/api/volume/test_consistencygroups.py index d11770de008..55115e4e3a0 100644 --- a/cinder/tests/tempest/api/volume/test_consistencygroups.py +++ b/cinder/tests/tempest/api/volume/test_consistencygroups.py @@ -88,8 +88,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest): # Create volume volume = self.admin_volume_client.create_volume(**params)['volume'] - waiters.wait_for_volume_status(self.admin_volume_client, - volume['id'], 'available') + waiters.wait_for_volume_resource_status(self.admin_volume_client, + volume['id'], 'available') self.consistencygroups_adm_client.wait_for_consistencygroup_status( cg['id'], 'available') self.assertEqual(cg_name, cg['name']) @@ -131,8 +131,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest): # Create volume volume = self.admin_volume_client.create_volume(**params)['volume'] - waiters.wait_for_volume_status(self.admin_volume_client, - volume['id'], 'available') + waiters.wait_for_volume_resource_status(self.admin_volume_client, + volume['id'], 'available') self.consistencygroups_adm_client.wait_for_consistencygroup_status( cg['id'], 'available') self.assertEqual(cg_name, cg['name']) @@ -147,8 +147,8 @@ class
ConsistencyGroupsV2Test(base.BaseVolumeAdminTest): detail=True)['snapshots'] for snap in snapshots: if volume['id'] == snap['volume_id']: - waiters.wait_for_snapshot_status(self.admin_snapshots_client, - snap['id'], 'available') + waiters.wait_for_volume_resource_status( + self.admin_snapshots_client, snap['id'], 'available') self.consistencygroups_adm_client.wait_for_cgsnapshot_status( cgsnapshot['id'], 'available') self.assertEqual(cgsnapshot_name, cgsnapshot['name']) @@ -191,8 +191,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest): # Create volume volume = self.admin_volume_client.create_volume(**params)['volume'] - waiters.wait_for_volume_status(self.admin_volume_client, - volume['id'], 'available') + waiters.wait_for_volume_resource_status(self.admin_volume_client, + volume['id'], 'available') self.consistencygroups_adm_client.wait_for_consistencygroup_status( cg['id'], 'available') self.assertEqual(cg_name, cg['name']) @@ -207,8 +207,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest): detail=True)['snapshots'] for snap in snapshots: if volume['id'] == snap['volume_id']: - waiters.wait_for_snapshot_status(self.admin_snapshots_client, - snap['id'], 'available') + waiters.wait_for_volume_resource_status( + self.admin_snapshots_client, snap['id'], 'available') self.consistencygroups_adm_client.wait_for_cgsnapshot_status( cgsnapshot['id'], 'available') self.assertEqual(cgsnapshot_name, cgsnapshot['name']) @@ -223,8 +223,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest): detail=True)['volumes'] for vol in vols: if vol['consistencygroup_id'] == cg2['id']: - waiters.wait_for_volume_status(self.admin_volume_client, - vol['id'], 'available') + waiters.wait_for_volume_resource_status( + self.admin_volume_client, vol['id'], 'available') self.consistencygroups_adm_client.wait_for_consistencygroup_status( cg2['id'], 'available') self.assertEqual(cg_name2, cg2['name']) @@ -257,8 +257,8 @@ class 
ConsistencyGroupsV2Test(base.BaseVolumeAdminTest): # Create volume volume = self.admin_volume_client.create_volume(**params)['volume'] - waiters.wait_for_volume_status(self.admin_volume_client, - volume['id'], 'available') + waiters.wait_for_volume_resource_status(self.admin_volume_client, + volume['id'], 'available') self.consistencygroups_adm_client.wait_for_consistencygroup_status( cg['id'], 'available') self.assertEqual(cg_name, cg['name']) @@ -273,8 +273,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest): detail=True)['volumes'] for vol in vols: if vol['consistencygroup_id'] == cg2['id']: - waiters.wait_for_volume_status(self.admin_volume_client, - vol['id'], 'available') + waiters.wait_for_volume_resource_status( + self.admin_volume_client, vol['id'], 'available') self.consistencygroups_adm_client.wait_for_consistencygroup_status( cg2['id'], 'available') self.assertEqual(cg_name2, cg2['name']) diff --git a/cinder/tests/tempest/api/volume/test_volume_backup.py b/cinder/tests/tempest/api/volume/test_volume_backup.py index dc4090b2d5f..72c32ff9b13 100644 --- a/cinder/tests/tempest/api/volume/test_volume_backup.py +++ b/cinder/tests/tempest/api/volume/test_volume_backup.py @@ -55,7 +55,7 @@ class VolumesBackupsTest(volume_base.BaseVolumeTest): # Get a given backup backup = self.backups_client.show_backup( backup['id'])['backup'] - waiters.wait_for_backup_status( + waiters.wait_for_volume_resource_status( self.backups_client, backup['id'], 'available') self.assertEqual(volume['id'], backup['volume_id']) @@ -78,17 +78,17 @@ class VolumesBackupsTest(volume_base.BaseVolumeTest): backup = self.backups_client.create_backup( volume_id=src_vol['id'])['backup'] self.addCleanup(self.backups_client.delete_backup, backup['id']) - waiters.wait_for_backup_status( + waiters.wait_for_volume_resource_status( self.backups_client, backup['id'], 'available') # Restore to existing volume restore = self.backups_client.restore_backup( backup_id=backup['id'], 
volume_id=src_vol['id'])['restore'] - waiters.wait_for_backup_status( + waiters.wait_for_volume_resource_status( self.backups_client, backup['id'], 'available') - waiters.wait_for_volume_status( + waiters.wait_for_volume_resource_status( self.volumes_client, src_vol['id'], 'available') self.assertEqual(src_vol['id'], restore['volume_id']) @@ -106,9 +106,8 @@ class VolumesBackupsTest(volume_base.BaseVolumeTest): # Create backup backup = self.backups_client.create_backup( volume_id=volume['id'])['backup'] - waiters.wait_for_backup_status(self.backups_client, - backup['id'], - 'available') + waiters.wait_for_volume_resource_status(self.backups_client, + backup['id'], 'available') # Create a server bd_map = [{'volume_id': volume['id'], 'delete_on_termination': '0'}] @@ -122,15 +121,15 @@ class VolumesBackupsTest(volume_base.BaseVolumeTest): # Delete VM self.servers_client.delete_server(server['id']) # Create incremental backup - waiters.wait_for_volume_status(self.volumes_client, volume['id'], - 'available') + waiters.wait_for_volume_resource_status(self.volumes_client, + volume['id'], 'available') backup_incr = self.backups_client.create_backup( volume_id=volume['id'], incremental=True)['backup'] - waiters.wait_for_backup_status(self.backups_client, - backup_incr['id'], - 'available') + waiters.wait_for_volume_resource_status(self.backups_client, + backup_incr['id'], + 'available') is_incremental = self.backups_client.show_backup( backup_incr['id'])['backup']['is_incremental'] diff --git a/cinder/tests/tempest/api/volume/test_volume_unicode.py b/cinder/tests/tempest/api/volume/test_volume_unicode.py index d2615d1981d..6d3cc4f4ada 100644 --- a/cinder/tests/tempest/api/volume/test_volume_unicode.py +++ b/cinder/tests/tempest/api/volume/test_volume_unicode.py @@ -45,9 +45,9 @@ class CinderUnicodeTest(volume_base.BaseVolumeTest): volume = cls.volumes_client.create_volume(**kwargs)['volume'] cls.volumes.append(volume) - waiters.wait_for_volume_status(cls.volumes_client, - 
volume['id'], - 'available') + waiters.wait_for_volume_resource_status(cls.volumes_client, + volume['id'], + 'available') return volume