Optimize resource creation and status waiting in scenario tests

In many scenario tests, resources from the same or from different
services are created serially, which is acceptable to some extent
because most resource creation is an async operation. But when we
also wait for resource status serially, for example for a volume to
reach the 'available' state and then for a server to become
'SSHABLE', it adds a lot of time to the test execution.

This commit makes resource creation and the status waiting parallel
operations, so that while we wait for the volume to reach the
'available' state we also create the server and, at the same time,
wait for the server to become ACTIVE or SSHABLE.
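
The pattern applied in the changed tests is roughly the following
(a minimal sketch inside a ScenarioTest-based test; the helper and
waiter names are the ones already used in the diff below):

    # Before: each step blocked until its resource was ready.
    #   volume = self.create_volume()                  # waits for 'available'
    #   server = self.create_server(wait_until='SSHABLE')

    # After: start the volume creation without waiting, do the slow
    # server build, and only then wait for the volume status.
    volume = self.create_volume(wait_until=None)
    server = self.create_server(wait_until='SSHABLE')
    waiters.wait_for_volume_resource_status(self.volumes_client,
                                            volume['id'], 'available')
    # The volume object returned at creation time carries a stale
    # status, so re-fetch it once it has become 'available'.
    volume = self.volumes_client.show_volume(volume['id'])['volume']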

Related-Bug: #2004780
Change-Id: I30f2a44d94dc1e94aefeefecd4dc4e25bdda7a72
Ghanshyam Mann 2023-07-21 14:09:40 -05:00
parent 98d402f3c3
commit 51c0f9a353
5 changed files with 73 additions and 14 deletions


@@ -1659,7 +1659,8 @@ class EncryptionScenarioTest(ScenarioTest):

     def create_encrypted_volume(self, encryption_provider, volume_type,
                                 key_size=256, cipher='aes-xts-plain64',
-                                control_location='front-end'):
+                                control_location='front-end',
+                                wait_until='available'):
         """Creates an encrypted volume"""
         volume_type = self.create_volume_type(name=volume_type)
         self.create_encryption_type(type_id=volume_type['id'],
@@ -1667,7 +1668,8 @@ class EncryptionScenarioTest(ScenarioTest):
                                     key_size=key_size,
                                     cipher=cipher,
                                     control_location=control_location)
-        return self.create_volume(volume_type=volume_type['name'])
+        return self.create_volume(volume_type=volume_type['name'],
+                                  wait_until=wait_until)


 class ObjectStorageScenarioTest(ScenarioTest):


@@ -16,6 +16,7 @@
 import testtools

 from tempest.common import utils
+from tempest.common import waiters
 from tempest import config
 from tempest.lib import decorators
 from tempest.scenario import manager
@@ -56,9 +57,16 @@ class TestEncryptedCinderVolumes(manager.EncryptionScenarioTest):
     @utils.services('compute', 'volume', 'image')
     def test_encrypted_cinder_volumes_luks(self):
         """LUKs v1 decrypts volume through libvirt."""
-        server = self.launch_instance()
         volume = self.create_encrypted_volume('luks',
-                                              volume_type='luks')
+                                              volume_type='luks',
+                                              wait_until=None)
+        server = self.launch_instance()
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        volume = self.volumes_client.show_volume(volume['id'])['volume']
         self.attach_detach_volume(server, volume)

     @decorators.idempotent_id('7abec0a3-61a0-42a5-9e36-ad3138fb38b4')
@@ -68,16 +76,30 @@ class TestEncryptedCinderVolumes(manager.EncryptionScenarioTest):
     @utils.services('compute', 'volume', 'image')
     def test_encrypted_cinder_volumes_luksv2(self):
         """LUKs v2 decrypts volume through os-brick."""
-        server = self.launch_instance()
         volume = self.create_encrypted_volume('luks2',
-                                              volume_type='luksv2')
+                                              volume_type='luksv2',
+                                              wait_until=None)
+        server = self.launch_instance()
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        volume = self.volumes_client.show_volume(volume['id'])['volume']
         self.attach_detach_volume(server, volume)

     @decorators.idempotent_id('cbc752ed-b716-4717-910f-956cce965722')
     @decorators.attr(type='slow')
     @utils.services('compute', 'volume', 'image')
     def test_encrypted_cinder_volumes_cryptsetup(self):
-        server = self.launch_instance()
         volume = self.create_encrypted_volume('plain',
-                                              volume_type='cryptsetup')
+                                              volume_type='cryptsetup',
+                                              wait_until=None)
+        server = self.launch_instance()
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        volume = self.volumes_client.show_volume(volume['id'])['volume']
         self.attach_detach_volume(server, volume)


@@ -14,6 +14,7 @@
 # under the License.

 from tempest.common import utils
+from tempest.common import waiters
 from tempest import config
 from tempest.lib import decorators
 from tempest.lib import exceptions
@@ -61,6 +62,7 @@ class TestServerMultinode(manager.ScenarioTest):
         # threshold (so that things don't get crazy if you have 1000
         # compute nodes but set min to 3).
         servers = []
+        host_server_ids = {}

         for host in hosts[:CONF.compute.min_compute_nodes]:
             # by getting to active state here, this means this has
@@ -68,12 +70,18 @@
             # in order to use the availability_zone:host scheduler hint,
             # admin client is need here.
             inst = self.create_server(
+                wait_until=None,
                 clients=self.os_admin,
                 availability_zone='%(zone)s:%(host_name)s' % host)
+            host_server_ids[host['host_name']] = inst['id']
+
+        for host_name, server_id in host_server_ids.items():
+            waiters.wait_for_server_status(self.os_admin.servers_client,
+                                           server_id, 'ACTIVE')
             server = self.os_admin.servers_client.show_server(
-                inst['id'])['server']
+                server_id)['server']
             # ensure server is located on the requested host
-            self.assertEqual(host['host_name'], server['OS-EXT-SRV-ATTR:host'])
+            self.assertEqual(host_name, server['OS-EXT-SRV-ATTR:host'])
             servers.append(server)

         # make sure we really have the number of servers we think we should


@@ -69,11 +69,18 @@ class TestServerVolumeAttachmentScenario(BaseAttachmentTest):
     @utils.services('compute', 'volume', 'image', 'network')
     def test_server_detach_rules(self):
         """Test that various methods of detaching a volume honors the rules"""
+        volume = self.create_volume(wait_until=None)
+        volume2 = self.create_volume(wait_until=None)
         server = self.create_server(wait_until='SSHABLE')
         servers = self.servers_client.list_servers()['servers']
         self.assertIn(server['id'], [x['id'] for x in servers])

-        volume = self.create_volume()
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        volume = self.volumes_client.show_volume(volume['id'])['volume']

         volume = self.nova_volume_attach(server, volume)
         self.addCleanup(self.nova_volume_detach, server, volume)
@@ -143,7 +150,12 @@ class TestServerVolumeAttachmentScenario(BaseAttachmentTest):
             volume['id'], connector=None, attachment_id=att_id)

         # Test user call to detach with mismatch is rejected
-        volume2 = self.create_volume()
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume2['id'], 'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        volume2 = self.volumes_client.show_volume(volume2['id'])['volume']
         volume2 = self.nova_volume_attach(server, volume2)
         att_id2 = volume2['attachments'][0]['attachment_id']
         self.assertRaises(


@@ -16,6 +16,7 @@
 import testtools

 from tempest.common import utils
+from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
@@ -84,7 +85,7 @@ class TestStampPattern(manager.ScenarioTest):
         security_group = self.create_security_group()

         # boot an instance and create a timestamp file in it
-        volume = self.create_volume()
+        volume = self.create_volume(wait_until=None)
         server = self.create_server(
             key_name=keypair['name'],
             security_groups=[{'name': security_group['name']}])
@@ -97,6 +98,12 @@ class TestStampPattern(manager.ScenarioTest):
             ip_for_server, private_key=keypair['private_key'],
             server=server)
         disks_list_before_attach = linux_client.list_disks()
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        volume = self.volumes_client.show_volume(volume['id'])['volume']
         self.nova_volume_attach(server, volume)
         volume_device_name = self._attached_volume_name(
             disks_list_before_attach, ip_for_server, keypair['private_key'])
@@ -115,7 +122,7 @@ class TestStampPattern(manager.ScenarioTest):

         # create second volume from the snapshot(volume2)
         volume_from_snapshot = self.create_volume(
-            snapshot_id=volume_snapshot['id'])
+            snapshot_id=volume_snapshot['id'], wait_until=None)

         # boot second instance from the snapshot(instance2)
         server_from_snapshot = self.create_server(
@@ -135,6 +142,14 @@ class TestStampPattern(manager.ScenarioTest):
         disks_list_before_attach = linux_client.list_disks()

         # attach volume2 to instance2
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume_from_snapshot['id'],
+                                                'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        volume_from_snapshot = self.volumes_client.show_volume(
+            volume_from_snapshot['id'])['volume']
         self.nova_volume_attach(server_from_snapshot, volume_from_snapshot)
         volume_device_name = self._attached_volume_name(
             disks_list_before_attach, ip_for_snapshot, keypair['private_key'])