Fix internal tempest tests
The existing waiter methods wait_for_volume_status, wait_for_snapshot_status
and wait_for_backup_status were combined into wait_for_volume_resource_status.
This patch fixes the test failures that occurred due to that change.

Change-Id: Iaf1fd19a3fdf50aec8aaddffdcbaf6d0466fbd35
Closes-Bug: #1667448
commit beca6c147c
parent bf20ff6c36
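For context, the hunks below all converge on a single call pattern: pass the
client that owns the resource (volume, snapshot or backup) to
waiters.wait_for_volume_resource_status together with the resource id and the
target status. The snippet below is a minimal sketch, not part of this commit;
it assumes a tempest-style test class whose base class already sets up
volumes_client, snapshots_client and backups_client, and the resource names
are illustrative.

# Minimal usage sketch (assumption: the base class provides the three clients).
from tempest.api.volume import base
from tempest.common import waiters


class WaiterUsageSketch(base.BaseVolumeTest):

    def test_wait_for_each_resource_type(self):
        # One helper now covers all three resource types; the client passed
        # in determines which resource is polled.
        volume = self.volumes_client.create_volume(size=1)['volume']
        waiters.wait_for_volume_resource_status(
            self.volumes_client, volume['id'], 'available')

        snapshot = self.snapshots_client.create_snapshot(
            volume_id=volume['id'])['snapshot']
        waiters.wait_for_volume_resource_status(
            self.snapshots_client, snapshot['id'], 'available')

        backup = self.backups_client.create_backup(
            volume_id=volume['id'])['backup']
        waiters.wait_for_volume_resource_status(
            self.backups_client, backup['id'], 'available')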
@@ -88,7 +88,7 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):
         # Create volume
         volume = self.admin_volume_client.create_volume(**params)['volume']
 
-        waiters.wait_for_volume_status(self.admin_volume_client,
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
                                        volume['id'], 'available')
         self.consistencygroups_adm_client.wait_for_consistencygroup_status(
             cg['id'], 'available')
@@ -131,7 +131,7 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):
 
         # Create volume
         volume = self.admin_volume_client.create_volume(**params)['volume']
-        waiters.wait_for_volume_status(self.admin_volume_client,
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
                                        volume['id'], 'available')
         self.consistencygroups_adm_client.wait_for_consistencygroup_status(
             cg['id'], 'available')
@@ -147,8 +147,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):
             detail=True)['snapshots']
         for snap in snapshots:
             if volume['id'] == snap['volume_id']:
-                waiters.wait_for_snapshot_status(self.admin_snapshots_client,
-                                                 snap['id'], 'available')
+                waiters.wait_for_volume_resource_status(
+                    self.admin_snapshots_client, snap['id'], 'available')
         self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
             cgsnapshot['id'], 'available')
         self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
@@ -191,7 +191,7 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):
 
         # Create volume
         volume = self.admin_volume_client.create_volume(**params)['volume']
-        waiters.wait_for_volume_status(self.admin_volume_client,
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
                                        volume['id'], 'available')
         self.consistencygroups_adm_client.wait_for_consistencygroup_status(
             cg['id'], 'available')
@@ -207,8 +207,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):
             detail=True)['snapshots']
         for snap in snapshots:
             if volume['id'] == snap['volume_id']:
-                waiters.wait_for_snapshot_status(self.admin_snapshots_client,
-                                                 snap['id'], 'available')
+                waiters.wait_for_volume_resource_status(
+                    self.admin_snapshots_client, snap['id'], 'available')
         self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
             cgsnapshot['id'], 'available')
         self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
@@ -223,8 +223,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):
             detail=True)['volumes']
         for vol in vols:
             if vol['consistencygroup_id'] == cg2['id']:
-                waiters.wait_for_volume_status(self.admin_volume_client,
-                                               vol['id'], 'available')
+                waiters.wait_for_volume_resource_status(
+                    self.admin_volume_client, vol['id'], 'available')
         self.consistencygroups_adm_client.wait_for_consistencygroup_status(
             cg2['id'], 'available')
         self.assertEqual(cg_name2, cg2['name'])
@@ -257,7 +257,7 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):
 
         # Create volume
         volume = self.admin_volume_client.create_volume(**params)['volume']
-        waiters.wait_for_volume_status(self.admin_volume_client,
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
                                        volume['id'], 'available')
         self.consistencygroups_adm_client.wait_for_consistencygroup_status(
             cg['id'], 'available')
@@ -273,8 +273,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):
             detail=True)['volumes']
         for vol in vols:
             if vol['consistencygroup_id'] == cg2['id']:
-                waiters.wait_for_volume_status(self.admin_volume_client,
-                                               vol['id'], 'available')
+                waiters.wait_for_volume_resource_status(
+                    self.admin_volume_client, vol['id'], 'available')
         self.consistencygroups_adm_client.wait_for_consistencygroup_status(
             cg2['id'], 'available')
         self.assertEqual(cg_name2, cg2['name'])
@@ -55,7 +55,7 @@ class VolumesBackupsTest(volume_base.BaseVolumeTest):
         # Get a given backup
         backup = self.backups_client.show_backup(
             backup['id'])['backup']
-        waiters.wait_for_backup_status(
+        waiters.wait_for_volume_resource_status(
             self.backups_client,
             backup['id'], 'available')
         self.assertEqual(volume['id'], backup['volume_id'])
@@ -78,17 +78,17 @@ class VolumesBackupsTest(volume_base.BaseVolumeTest):
         backup = self.backups_client.create_backup(
             volume_id=src_vol['id'])['backup']
         self.addCleanup(self.backups_client.delete_backup, backup['id'])
-        waiters.wait_for_backup_status(
+        waiters.wait_for_volume_resource_status(
             self.backups_client,
             backup['id'], 'available')
         # Restore to existing volume
         restore = self.backups_client.restore_backup(
             backup_id=backup['id'],
             volume_id=src_vol['id'])['restore']
-        waiters.wait_for_backup_status(
+        waiters.wait_for_volume_resource_status(
             self.backups_client,
             backup['id'], 'available')
-        waiters.wait_for_volume_status(
+        waiters.wait_for_volume_resource_status(
             self.volumes_client,
             src_vol['id'], 'available')
         self.assertEqual(src_vol['id'], restore['volume_id'])
@@ -106,9 +106,8 @@ class VolumesBackupsTest(volume_base.BaseVolumeTest):
         # Create backup
         backup = self.backups_client.create_backup(
             volume_id=volume['id'])['backup']
-        waiters.wait_for_backup_status(self.backups_client,
-                                       backup['id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(self.backups_client,
+                                                backup['id'], 'available')
         # Create a server
         bd_map = [{'volume_id': volume['id'],
                    'delete_on_termination': '0'}]
@@ -122,13 +121,13 @@ class VolumesBackupsTest(volume_base.BaseVolumeTest):
         # Delete VM
         self.servers_client.delete_server(server['id'])
         # Create incremental backup
-        waiters.wait_for_volume_status(self.volumes_client, volume['id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
         backup_incr = self.backups_client.create_backup(
             volume_id=volume['id'],
             incremental=True)['backup']
 
-        waiters.wait_for_backup_status(self.backups_client,
+        waiters.wait_for_volume_resource_status(self.backups_client,
                                        backup_incr['id'],
                                        'available')
 
@@ -45,7 +45,7 @@ class CinderUnicodeTest(volume_base.BaseVolumeTest):
         volume = cls.volumes_client.create_volume(**kwargs)['volume']
         cls.volumes.append(volume)
 
-        waiters.wait_for_volume_status(cls.volumes_client,
+        waiters.wait_for_volume_resource_status(cls.volumes_client,
                                        volume['id'],
                                        'available')
 