Import from tempest stable manager

This patch removes the local copy of the tempest scenario manager
and instead imports the manager directly from tempest, where it
was marked stable in the 27.0.0 release.

Change-Id: I403d5bc2eef464e6071e466aeab8e040b9055568
commit 638f2308a1
parent 961bab0212
Author: Rajat Dhasmana
Date:   2021-05-12 06:23:45 -04:00

4 changed files with 156 additions and 1156 deletions
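For context on what "stable manager" means here: starting with tempest 27.0.0,
tempest.scenario.manager is part of tempest's stable interface, so plugin tests
can inherit its helpers instead of carrying their own copies. A minimal sketch
of the pattern this patch moves to (the class name is hypothetical; the helper
calls mirror the hunks below):

# Sketch only -- not part of the patch. Illustrates inheriting helpers from
# tempest's stable scenario manager (tempest >= 27.0.0).
from tempest.scenario import manager


class ExampleScenarioTest(manager.ScenarioTest):  # hypothetical test class

    def setUp(self):
        super(ExampleScenarioTest, self).setUp()
        # These helpers come from the stable manager, so the plugin no
        # longer needs to keep its own copies of them.
        self.keypair = self.create_keypair()
        self.security_group = self.create_security_group()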

[File diff suppressed because it is too large.]


@@ -14,125 +14,17 @@
 # under the License.

 from tempest.common import utils
-from tempest import config
-from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
-from tempest.lib import exceptions as lib_exc

 from cinder_tempest_plugin.scenario import manager

-CONF = config.CONF
-

 class SnapshotDataIntegrityTests(manager.ScenarioTest):

     def setUp(self):
         super(SnapshotDataIntegrityTests, self).setUp()
         self.keypair = self.create_keypair()
-        self.security_group = self._create_security_group()
-
-    def _attached_volume_name(
-            self, disks_list_before_attach, ip_address, private_key):
-        ssh = self.get_remote_client(ip_address, private_key=private_key)
-
-        def _wait_for_volume_available_on_system():
-            disks_list_after_attach = ssh.list_disks()
-            return len(disks_list_after_attach) > len(disks_list_before_attach)
-
-        if not test_utils.call_until_true(_wait_for_volume_available_on_system,
-                                          CONF.compute.build_timeout,
-                                          CONF.compute.build_interval):
-            raise lib_exc.TimeoutException
-
-        disks_list_after_attach = ssh.list_disks()
-        volume_name = [item for item in disks_list_after_attach
-                       if item not in disks_list_before_attach][0]
-        return volume_name
-
-    def _get_file_md5(self, ip_address, filename, dev_name=None,
-                      mount_path='/mnt', private_key=None, server=None):
-
-        ssh_client = self.get_remote_client(ip_address,
-                                            private_key=private_key,
-                                            server=server)
-        if dev_name is not None:
-            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
-                                                               mount_path))
-
-        md5_sum = ssh_client.exec_command(
-            'sudo md5sum %s/%s|cut -c 1-32' % (mount_path, filename))
-        if dev_name is not None:
-            ssh_client.exec_command('sudo umount %s' % mount_path)
-        return md5_sum
-
-    def _count_files(self, ip_address, dev_name=None, mount_path='/mnt',
-                     private_key=None, server=None):
-        ssh_client = self.get_remote_client(ip_address,
-                                            private_key=private_key,
-                                            server=server)
-        if dev_name is not None:
-            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
-                                                               mount_path))
-        count = ssh_client.exec_command('sudo ls -l %s | wc -l' % mount_path)
-        if dev_name is not None:
-            ssh_client.exec_command('sudo umount %s' % mount_path)
-        # We subtract 2 from the count since `wc -l` also includes the count
-        # of new line character and while creating the filesystem, a
-        # lost+found folder is also created
-        return int(count) - 2
-
-    def _make_fs(self, ip_address, private_key, server, dev_name, fs='ext4'):
-        ssh_client = self.get_remote_client(ip_address,
-                                            private_key=private_key,
-                                            server=server)
-
-        ssh_client.make_fs(dev_name, fs=fs)
-
-    def create_md5_new_file(self, ip_address, filename, dev_name=None,
-                            mount_path='/mnt', private_key=None, server=None):
-        ssh_client = self.get_remote_client(ip_address,
-                                            private_key=private_key,
-                                            server=server)
-
-        if dev_name is not None:
-            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
-                                                               mount_path))
-        ssh_client.exec_command(
-            'sudo dd bs=1024 count=100 if=/dev/urandom of=/%s/%s' %
-            (mount_path, filename))
-        md5 = ssh_client.exec_command(
-            'sudo md5sum -b %s/%s|cut -c 1-32' % (mount_path, filename))
-        ssh_client.exec_command('sudo sync')
-        if dev_name is not None:
-            ssh_client.exec_command('sudo umount %s' % mount_path)
-        return md5
-
-    def get_md5_from_file(self, instance, instance_ip, filename,
-                          dev_name=None):
-
-        md5_sum = self._get_file_md5(instance_ip, filename=filename,
-                                     dev_name=dev_name,
-                                     private_key=self.keypair['private_key'],
-                                     server=instance)
-        count = self._count_files(instance_ip, dev_name=dev_name,
-                                  private_key=self.keypair['private_key'],
-                                  server=instance)
-        return count, md5_sum
-
-    def _attach_and_get_volume_device_name(self, server, volume, instance_ip,
-                                           private_key):
-        ssh_client = self.get_remote_client(
-            instance_ip, private_key=private_key,
-            server=server)
-        # List disks before volume attachment
-        disks_list_before_attach = ssh_client.list_disks()
-        # Attach volume
-        volume = self.nova_volume_attach(server, volume)
-        # Find the difference between disks before and after attachment that
-        # gives us the volume device name
-        volume_device_name = self._attached_volume_name(
-            disks_list_before_attach, instance_ip, private_key)
-        return volume_device_name
+        self.security_group = self.create_security_group()

     @decorators.idempotent_id('ff10644e-5a70-4a9f-9801-8204bb81fb61')
     @utils.services('compute', 'volume', 'image', 'network')
@@ -164,7 +56,7 @@ class SnapshotDataIntegrityTests(manager.ScenarioTest):
         instance_ip = self.get_server_ip(server)

         # Attach volume to instance and find it's device name (eg: /dev/vdb)
-        volume_device_name = self._attach_and_get_volume_device_name(
+        volume_device_name, __ = self._attach_and_get_volume_device_name(
             server, volume, instance_ip, self.keypair['private_key'])

         # Create filesystem on the volume
@@ -204,7 +96,7 @@ class SnapshotDataIntegrityTests(manager.ScenarioTest):
         # Create volume from snapshot, attach it to instance and check file
         # and contents for snap1
         volume_snap_1 = self.create_volume(snapshot_id=snapshot1['id'])
-        volume_device_name = self._attach_and_get_volume_device_name(
+        volume_device_name, __ = self._attach_and_get_volume_device_name(
             server, volume_snap_1, instance_ip, self.keypair['private_key'])
         count_snap_1, md5_file_1 = self.get_md5_from_file(
             server, instance_ip, 'file1', dev_name=volume_device_name)
@@ -217,7 +109,7 @@ class SnapshotDataIntegrityTests(manager.ScenarioTest):
         # Create volume from snapshot, attach it to instance and check file
         # and contents for snap2
         volume_snap_2 = self.create_volume(snapshot_id=snapshot2['id'])
-        volume_device_name = self._attach_and_get_volume_device_name(
+        volume_device_name, __ = self._attach_and_get_volume_device_name(
             server, volume_snap_2, instance_ip, self.keypair['private_key'])
         count_snap_2, md5_file_2 = self.get_md5_from_file(
             server, instance_ip, 'file2', dev_name=volume_device_name)
@@ -230,7 +122,7 @@ class SnapshotDataIntegrityTests(manager.ScenarioTest):
         # Create volume from snapshot, attach it to instance and check file
         # and contents for snap3
         volume_snap_3 = self.create_volume(snapshot_id=snapshot3['id'])
-        volume_device_name = self._attach_and_get_volume_device_name(
+        volume_device_name, __ = self._attach_and_get_volume_device_name(
             server, volume_snap_3, instance_ip, self.keypair['private_key'])
         count_snap_3, md5_file_3 = self.get_md5_from_file(
             server, instance_ip, 'file3', dev_name=volume_device_name)
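Note the change repeated in the hunks above: the stable tempest helper
_attach_and_get_volume_device_name() returns a tuple rather than a bare device
name, so each call site now unpacks the first element and discards the rest. A
sketch of the resulting call pattern inside a test method (the wrapper method
and its name are hypothetical; the two helper calls come from the diff):

# Sketch only -- not part of the patch.
def _verify_file_on_volume(self, server, volume, instance_ip, filename):
    # The stable helper returns a tuple; only the device name is used here.
    dev_name, __ = self._attach_and_get_volume_device_name(
        server, volume, instance_ip, self.keypair['private_key'])
    # File count and md5 checksum, via the shared manager helper.
    return self.get_md5_from_file(
        server, instance_ip, filename, dev_name=dev_name)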


@@ -16,7 +16,7 @@ from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators

-from cinder_tempest_plugin.scenario import manager
+from tempest.scenario import manager

 CONF = config.CONF
@@ -130,7 +130,7 @@ class TestEncryptedCinderVolumes(manager.EncryptionScenarioTest,
         """

         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()

         volume = self.create_encrypted_volume_from_image('luks')


@@ -6,4 +6,4 @@ pbr!=2.1.0,>=2.0.0 # Apache-2.0
 oslo.config>=5.1.0 # Apache-2.0
 six>=1.10.0 # MIT
 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
-tempest>=17.1.0 # Apache-2.0
+tempest>=27.0.0 # Apache-2.0
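The version floor raised above is what makes the direct imports safe, since the
stable manager only exists in tempest 27.0.0 and later. A quick environment
check before running the plugin's scenario tests (a sketch, not part of the
patch; uses the standard library on Python 3.8+):

# Sketch only: confirm the installed tempest satisfies the new lower bound.
from importlib.metadata import version

major = int(version("tempest").split(".")[0])
assert major >= 27, "cinder-tempest-plugin now requires tempest>=27.0.0"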