Fix internal tempest tests
The existing waiter methods wait_for_volume_status, wait_for_snapshot_status
and wait_for_backup_status were combined into a single method,
wait_for_volume_resource_status. This patch fixes the internal Tempest tests
that broke because of that change.

Change-Id: Iaf1fd19a3fdf50aec8aaddffdcbaf6d0466fbd35
Closes-Bug: #1667448
parent bf20ff6c36
commit beca6c147c
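For context: in the consolidated waiter API this patch adopts, the resource to poll is implied by the client object passed in, so the three old calls collapse into one. The fragment below (call sites only, lifted from the hunks that follow, not a standalone script; client and variable names are the ones used in the diff) shows the shape of the change:

    # Before: one waiter per resource type
    waiters.wait_for_volume_status(self.admin_volume_client, volume['id'], 'available')
    waiters.wait_for_snapshot_status(self.admin_snapshots_client, snap['id'], 'available')
    waiters.wait_for_backup_status(self.backups_client, backup['id'], 'available')

    # After: a single waiter; the client argument selects volume, snapshot or backup
    waiters.wait_for_volume_resource_status(self.admin_volume_client, volume['id'], 'available')
    waiters.wait_for_volume_resource_status(self.admin_snapshots_client, snap['id'], 'available')
    waiters.wait_for_volume_resource_status(self.backups_client, backup['id'], 'available')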
@@ -88,8 +88,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):

         # Create volume
         volume = self.admin_volume_client.create_volume(**params)['volume']
-        waiters.wait_for_volume_status(self.admin_volume_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
+                                                volume['id'], 'available')
         self.consistencygroups_adm_client.wait_for_consistencygroup_status(
             cg['id'], 'available')
         self.assertEqual(cg_name, cg['name'])
@@ -131,8 +131,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):

         # Create volume
         volume = self.admin_volume_client.create_volume(**params)['volume']
-        waiters.wait_for_volume_status(self.admin_volume_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
+                                                volume['id'], 'available')
         self.consistencygroups_adm_client.wait_for_consistencygroup_status(
             cg['id'], 'available')
         self.assertEqual(cg_name, cg['name'])
@@ -147,8 +147,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):
             detail=True)['snapshots']
         for snap in snapshots:
             if volume['id'] == snap['volume_id']:
-                waiters.wait_for_snapshot_status(self.admin_snapshots_client,
-                                                 snap['id'], 'available')
+                waiters.wait_for_volume_resource_status(
+                    self.admin_snapshots_client, snap['id'], 'available')
         self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
             cgsnapshot['id'], 'available')
         self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
@@ -191,8 +191,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):

         # Create volume
         volume = self.admin_volume_client.create_volume(**params)['volume']
-        waiters.wait_for_volume_status(self.admin_volume_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
+                                                volume['id'], 'available')
         self.consistencygroups_adm_client.wait_for_consistencygroup_status(
             cg['id'], 'available')
         self.assertEqual(cg_name, cg['name'])
@@ -207,8 +207,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):
             detail=True)['snapshots']
         for snap in snapshots:
             if volume['id'] == snap['volume_id']:
-                waiters.wait_for_snapshot_status(self.admin_snapshots_client,
-                                                 snap['id'], 'available')
+                waiters.wait_for_volume_resource_status(
+                    self.admin_snapshots_client, snap['id'], 'available')
         self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
             cgsnapshot['id'], 'available')
         self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
@@ -223,8 +223,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):
             detail=True)['volumes']
         for vol in vols:
             if vol['consistencygroup_id'] == cg2['id']:
-                waiters.wait_for_volume_status(self.admin_volume_client,
-                                               vol['id'], 'available')
+                waiters.wait_for_volume_resource_status(
+                    self.admin_volume_client, vol['id'], 'available')
         self.consistencygroups_adm_client.wait_for_consistencygroup_status(
             cg2['id'], 'available')
         self.assertEqual(cg_name2, cg2['name'])
@@ -257,8 +257,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):

         # Create volume
         volume = self.admin_volume_client.create_volume(**params)['volume']
-        waiters.wait_for_volume_status(self.admin_volume_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
+                                                volume['id'], 'available')
         self.consistencygroups_adm_client.wait_for_consistencygroup_status(
             cg['id'], 'available')
         self.assertEqual(cg_name, cg['name'])
@@ -273,8 +273,8 @@ class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):
             detail=True)['volumes']
         for vol in vols:
             if vol['consistencygroup_id'] == cg2['id']:
-                waiters.wait_for_volume_status(self.admin_volume_client,
-                                               vol['id'], 'available')
+                waiters.wait_for_volume_resource_status(
+                    self.admin_volume_client, vol['id'], 'available')
         self.consistencygroups_adm_client.wait_for_consistencygroup_status(
             cg2['id'], 'available')
         self.assertEqual(cg_name2, cg2['name'])
@@ -55,7 +55,7 @@ class VolumesBackupsTest(volume_base.BaseVolumeTest):
         # Get a given backup
         backup = self.backups_client.show_backup(
             backup['id'])['backup']
-        waiters.wait_for_backup_status(
+        waiters.wait_for_volume_resource_status(
             self.backups_client,
             backup['id'], 'available')
         self.assertEqual(volume['id'], backup['volume_id'])
@@ -78,17 +78,17 @@ class VolumesBackupsTest(volume_base.BaseVolumeTest):
         backup = self.backups_client.create_backup(
             volume_id=src_vol['id'])['backup']
         self.addCleanup(self.backups_client.delete_backup, backup['id'])
-        waiters.wait_for_backup_status(
+        waiters.wait_for_volume_resource_status(
             self.backups_client,
             backup['id'], 'available')
         # Restore to existing volume
         restore = self.backups_client.restore_backup(
             backup_id=backup['id'],
             volume_id=src_vol['id'])['restore']
-        waiters.wait_for_backup_status(
+        waiters.wait_for_volume_resource_status(
             self.backups_client,
             backup['id'], 'available')
-        waiters.wait_for_volume_status(
+        waiters.wait_for_volume_resource_status(
             self.volumes_client,
             src_vol['id'], 'available')
         self.assertEqual(src_vol['id'], restore['volume_id'])
@@ -106,9 +106,8 @@ class VolumesBackupsTest(volume_base.BaseVolumeTest):
         # Create backup
         backup = self.backups_client.create_backup(
             volume_id=volume['id'])['backup']
-        waiters.wait_for_backup_status(self.backups_client,
-                                       backup['id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(self.backups_client,
+                                                backup['id'], 'available')
         # Create a server
         bd_map = [{'volume_id': volume['id'],
                    'delete_on_termination': '0'}]
@@ -122,15 +121,15 @@ class VolumesBackupsTest(volume_base.BaseVolumeTest):
         # Delete VM
         self.servers_client.delete_server(server['id'])
         # Create incremental backup
-        waiters.wait_for_volume_status(self.volumes_client, volume['id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
         backup_incr = self.backups_client.create_backup(
             volume_id=volume['id'],
             incremental=True)['backup']

-        waiters.wait_for_backup_status(self.backups_client,
-                                       backup_incr['id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(self.backups_client,
+                                                backup_incr['id'],
+                                                'available')

         is_incremental = self.backups_client.show_backup(
             backup_incr['id'])['backup']['is_incremental']
@@ -45,9 +45,9 @@ class CinderUnicodeTest(volume_base.BaseVolumeTest):
         volume = cls.volumes_client.create_volume(**kwargs)['volume']
         cls.volumes.append(volume)

-        waiters.wait_for_volume_status(cls.volumes_client,
-                                       volume['id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(cls.volumes_client,
+                                                volume['id'],
+                                                'available')

         return volume
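For reference, a resource-agnostic waiter along the lines the new call sites imply can be sketched as below. This is a minimal, self-contained illustration, not the actual tempest.common.waiters implementation: the resource_type attribute, the _FakeBackupsClient stand-in, the timeout/interval parameters, and the RuntimeError handling are all assumptions made for the sketch. What it demonstrates is how one waiter can serve volumes, snapshots, and backups because the client that is passed in determines which resource gets polled.

    import time


    def wait_for_volume_resource_status(client, resource_id, status,
                                        timeout=60, interval=1):
        """Poll a volume-like resource until it reaches the requested status.

        Assumes the client exposes a ``resource_type`` attribute ('volume',
        'snapshot' or 'backup') and a matching ``show_<resource_type>`` method.
        """
        show = getattr(client, 'show_%s' % client.resource_type)
        start = time.time()
        while time.time() - start < timeout:
            body = show(resource_id)[client.resource_type]
            if body['status'] == status:
                return body
            if body['status'] == 'error':
                raise RuntimeError('%s %s went to error state' %
                                   (client.resource_type, resource_id))
            time.sleep(interval)
        raise RuntimeError('%s %s did not reach %s within %ss' %
                           (client.resource_type, resource_id, status, timeout))


    class _FakeBackupsClient(object):
        """Tiny stand-in client used only to exercise the sketch."""
        resource_type = 'backup'

        def __init__(self):
            self._calls = 0

        def show_backup(self, backup_id):
            # Report 'creating' on the first poll, 'available' afterwards.
            self._calls += 1
            status = 'available' if self._calls > 1 else 'creating'
            return {'backup': {'id': backup_id, 'status': status}}


    if __name__ == '__main__':
        backup = wait_for_volume_resource_status(_FakeBackupsClient(), 'b-1',
                                                 'available', timeout=5)
        print(backup)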