Fix share server migration tests

This patch provides small fixes to the share server migration tests:
the config option is renamed from "run_share_servers_migration_tests"
to "run_share_server_migration_tests", hard-coded NFS access rule IPs
are replaced with randomly generated ones, helper methods and status
variables get clearer names, and several negative tests are corrected
to exercise the intended error conditions.
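
For CI jobs and deployments that enable these tests, the renamed option
is consumed from the [share] options group (illustrative tempest
configuration snippet; only the option names come from this change, the
rest of the file is assumed):

    [share]
    multitenancy_enabled = True
    run_share_server_migration_tests = True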

Change-Id: I4dcd677d53d56aad544d9a7e88b8160b704c241d
debeltrami
2020-09-15 21:06:04 +00:00
committed by Andre Beltrami
parent 3b68c8174d
commit b3089a7ee3
4 changed files with 158 additions and 100 deletions

View File

@@ -262,7 +262,7 @@ ShareGroup = [
help="Defines whether to run tests that create share from "
"snapshots in another pool or az. Enable this "
"option if the used driver supports it."),
cfg.BoolOpt("run_share_servers_migration_tests",
cfg.BoolOpt("run_share_server_migration_tests",
default=False,
help="Defines whether to run share servers migration tests. "
"Enable this option if the used driver supports it."),

View File

@@ -36,9 +36,9 @@ class MigrationShareServerBase(base.BaseSharesAdminTest):
raise cls.skipException('%s tests are disabled.' % cls.protocol)
if not CONF.share.multitenancy_enabled:
raise cls.skipException('Multitenancy tests are disabled.')
if not CONF.share.run_share_servers_migration_tests:
if not CONF.share.run_share_server_migration_tests:
raise cls.skipException(
'Share servers migration tests are disabled.')
'Share server migration tests are disabled.')
utils.check_skip_if_microversion_lt('2.57')
@classmethod
@@ -58,6 +58,10 @@ class MigrationShareServerBase(base.BaseSharesAdminTest):
# create share type (generic)
cls.share_type = cls._create_share_type()
# create two non-routable IPs to be used in NFS access rules
cls.access_rules_ip_rw = utils.rand_ip()
cls.access_rules_ip_ro = utils.rand_ip()
def _setup_migration(self, share):
"""Initial share server migration setup."""
@@ -66,11 +70,12 @@ class MigrationShareServerBase(base.BaseSharesAdminTest):
# (andrer) Verify if have at least one backend compatible with
# the specified share server.
dest_host, compatible = self._choose_matching_backend_for_share_server(
server_id)
dest_host, compatible = (
self._choose_compatible_backend_for_share_server(server_id))
snapshot = False
if compatible['supported_capabilities']['preserve_snapshots']:
if (compatible['supported_capabilities']['preserve_snapshots'] and
share['snapshot_support']):
snapshot = self.create_snapshot_wait_for_active(
share['id'], cleanup_in_class=False)['id']
@@ -85,35 +90,26 @@ class MigrationShareServerBase(base.BaseSharesAdminTest):
# (andrer) Create the access rules, considering NFS and CIFS
# protocols.
access_rules = self._get_access_rule_data_for_protocols()
self.shares_v2_client.create_access_rule(
share['id'], access_type=access_rules[0].get('access_type'),
access_to=access_rules[0].get('access_to'),
access_level=access_rules[0].get('access_level'))
for rule in access_rules:
self.shares_v2_client.create_access_rule(
share['id'], access_type=rule.get('access_type'),
access_to=rule.get('access_to'),
access_level=rule.get('access_level')
)
self.shares_v2_client.wait_for_share_status(
share['id'], constants.RULE_STATE_ACTIVE,
status_attr='access_rules_status')
if self.protocol == 'nfs':
self.shares_v2_client.create_access_rule(
share['id'], access_type=access_rules[1].get('access_type'),
access_to=access_rules[1].get('access_to'),
access_level=access_rules[1].get('access_level'))
self.shares_v2_client.wait_for_share_status(
share['id'], constants.RULE_STATE_ACTIVE,
status_attr='access_rules_status')
share = self.shares_v2_client.get_share(share['id'])
return share, server_id, dest_host, snapshot
def _validate_instances_states(self, share, instance_status,
snapshot_id):
def _validate_state_of_resources(self, share, expected_status,
snapshot_id):
"""Validates the share and snapshot status."""
statuses = ((instance_status,)
if not isinstance(instance_status, (tuple, list, set))
else instance_status)
statuses = ((expected_status,)
if not isinstance(expected_status, (tuple, list, set))
else expected_status)
share = self.shares_v2_client.get_share(share['id'])
self.assertIn(share['status'], statuses)
@@ -123,9 +119,8 @@ class MigrationShareServerBase(base.BaseSharesAdminTest):
self.assertIn(snapshot['status'], statuses)
def _validate_share_server_migration_complete(
self, share, dest_host, src_server_id, dest_server_id,
snapshot_id=None, share_network_id=None,
version=CONF.share.max_api_microversion):
self, share, dest_host, dest_server_id, snapshot_id=None,
share_network_id=None, version=CONF.share.max_api_microversion):
"""Validates the share server migration complete. """
# Check the export locations
@@ -165,12 +160,12 @@ class MigrationShareServerBase(base.BaseSharesAdminTest):
elif self.protocol == 'nfs':
expected_rules = [{
'state': constants.RULE_STATE_ACTIVE,
'access_to': '50.50.50.50',
'access_to': self.access_rules_ip_rw,
'access_type': 'ip',
'access_level': 'rw',
}, {
'state': constants.RULE_STATE_ACTIVE,
'access_to': '51.51.51.51',
'access_to': self.access_rules_ip_ro,
'access_type': 'ip',
'access_level': 'ro',
}]
@@ -186,7 +181,7 @@ class MigrationShareServerBase(base.BaseSharesAdminTest):
self.assertEqual(len(expected_rules), len(filtered_rules))
@classmethod
def _choose_matching_backend_for_share_server(self, server_id):
def _choose_compatible_backend_for_share_server(self, server_id):
"""Choose a compatible host for the share server migration."""
for backend in self.backends:
# This try is necessary since if you try migrate the share server
@@ -218,10 +213,11 @@ class MigrationShareServerBase(base.BaseSharesAdminTest):
return backend, compatibility
raise self.skipException(
"Not found incompatible host for the share server migration.")
"None of the hosts available are incompatible to perform a"
" negative share server migration test.")
def _get_share_server_destination_for_migration(self, src_server_id):
"""Find the destination share server choosed for the migration."""
"""Find the destination share server chosen for the migration."""
params = {'source_share_server_id': src_server_id,
'status': constants.STATUS_SERVER_MIGRATING_TO}
dest_server = self.admin_shares_v2_client.list_share_servers(
@@ -235,11 +231,11 @@ class MigrationShareServerBase(base.BaseSharesAdminTest):
if self.protocol == 'nfs':
return [{
'access_type': 'ip',
'access_to': '50.50.50.50',
'access_to': self.access_rules_ip_rw,
'access_level': 'rw',
}, {
'access_type': 'ip',
'access_to': '51.51.51.51',
'access_to': self.access_rules_ip_ro,
'access_level': 'ro',
}]
elif self.protocol == 'cifs':
@@ -279,9 +275,9 @@ class ShareServerMigrationBasicNFS(MigrationShareServerBase):
self.shares_v2_client.share_server_migration_start(
src_server_id, dest_host, preserve_snapshots=preserve_snapshots)
task_state = constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE
expected_state = constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE
self.shares_v2_client.wait_for_share_server_status(
src_server_id, task_state, status_attr='task_state')
src_server_id, expected_state, status_attr='task_state')
# Get for the destination share server.
dest_server_id = self._get_share_server_destination_for_migration(
src_server_id)
@@ -292,19 +288,19 @@ class ShareServerMigrationBasicNFS(MigrationShareServerBase):
# Validate the share instances status.
share_status = constants.STATUS_SERVER_MIGRATING
self._validate_instances_states(share, share_status, snapshot_id)
self._validate_state_of_resources(share, share_status, snapshot_id)
# Cancel the share server migration.
self.shares_v2_client.share_server_migration_cancel(src_server_id)
# Wait for the migration cancelled status.
task_state = constants.TASK_STATE_MIGRATION_CANCELLED
expected_state = constants.TASK_STATE_MIGRATION_CANCELLED
self.shares_v2_client.wait_for_share_server_status(
src_server_id, task_state, status_attr='task_state')
src_server_id, expected_state, status_attr='task_state')
# After the cancel operation, we need to validate again the resources.
share_status = constants.STATUS_AVAILABLE
self._validate_instances_states(share, share_status, snapshot_id)
expected_status = constants.STATUS_AVAILABLE
self._validate_state_of_resources(share, expected_status, snapshot_id)
@decorators.idempotent_id('99e439a8-a716-4205-bf5b-af50128cb908')
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@@ -342,9 +338,9 @@ class ShareServerMigrationBasicNFS(MigrationShareServerBase):
new_share_network_id=dest_share_network_id,
preserve_snapshots=preserve_snapshots)
task_state = constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE
expected_state = constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE
self.shares_v2_client.wait_for_share_server_status(
src_server_id, task_state, status_attr='task_state')
src_server_id, expected_state, status_attr='task_state')
# Get for the destination share server.
dest_server_id = self._get_share_server_destination_for_migration(
src_server_id)
@@ -354,24 +350,32 @@ class ShareServerMigrationBasicNFS(MigrationShareServerBase):
self.assertEqual(dest_share_network_id,
dest_server['share_network_id'])
share_status = constants.STATUS_SERVER_MIGRATING
self._validate_instances_states(share, share_status, snapshot_id)
expected_status = constants.STATUS_SERVER_MIGRATING
self._validate_state_of_resources(share, expected_status, snapshot_id)
# Share server migration complete.
self.shares_v2_client.share_server_migration_complete(src_server_id)
# It's necessary wait for the migration success state and
# active status.
task_state = [constants.TASK_STATE_MIGRATION_SUCCESS,
constants.SERVER_STATE_INACTIVE]
# It's necessary to wait for the destination server to reach active status.
expected_status = constants.SERVER_STATE_ACTIVE
self.shares_v2_client.wait_for_share_server_status(
src_server_id, task_state, status_attr='task_state')
dest_server_id, expected_status)
# Check if the source server went to inactive status if it exists.
try:
src_server = self.shares_v2_client.show_share_server(src_server_id)
except exceptions.NotFound:
src_server = None
if src_server:
self.assertEqual(
src_server['status'], constants.SERVER_STATE_INACTIVE)
# Validate the share server migration complete.
share = self.shares_v2_client.get_share(share['id'])
self._validate_share_server_migration_complete(
share, dest_host, src_server_id, dest_server_id,
snapshot_id=snapshot_id, share_network_id=dest_share_network_id)
share, dest_host, dest_server_id, snapshot_id=snapshot_id,
share_network_id=dest_share_network_id)
@decorators.idempotent_id('52e154eb-2d39-45af-b5c1-49ea569ab804')
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@@ -385,8 +389,9 @@ class ShareServerMigrationBasicNFS(MigrationShareServerBase):
# Find a backend compatible or not for the share server
# check compatibility operation.
if compatible:
dest_host, result = self._choose_matching_backend_for_share_server(
server_id=share['share_server_id'])
dest_host, result = (
self._choose_compatible_backend_for_share_server(
server_id=share['share_server_id']))
self.assertTrue(result['compatible'])
self.assertEqual(result['requested_capabilities']['host'],
dest_host)
@@ -394,10 +399,6 @@ class ShareServerMigrationBasicNFS(MigrationShareServerBase):
dest_host, result = (
self._choose_incompatible_backend_for_share_server(
server_id=share['share_server_id']))
if dest_host is None:
raise self.skipException(
"Not found any compatible destination host for the share "
"server %s." % share['share_server_id'])
self.assertFalse(result['compatible'])
self.assertEqual(result['requested_capabilities'].get('host'),
dest_host)

View File

@@ -29,26 +29,26 @@ CONF = config.CONF
class MigrationShareServerNegative(
test_share_servers_migration.MigrationShareServerBase):
protocool = None
protocol = None
@classmethod
def _setup_migration(self, cleanup_in_class=True):
def _setup_migration(cls, cleanup_in_class=True):
"""Setup migration for negative tests."""
extra_specs = {
'driver_handles_share_servers': CONF.share.multitenancy_enabled}
if CONF.share.capability_snapshot_support:
extra_specs['snapshot_support'] = True
share_type = self.create_share_type(
share_type = cls.create_share_type(
name=data_utils.rand_name("tempest-share-type"),
extra_specs=extra_specs,
cleanup_in_class=cleanup_in_class)
share = self.create_share(share_protocol=self.protocol,
share_type_id=share_type['share_type']['id'],
cleanup_in_class=cleanup_in_class)
share = self.shares_v2_client.get_share(share['id'])
share = cls.create_share(share_protocol=cls.protocol,
share_type_id=share_type['share_type']['id'],
cleanup_in_class=cleanup_in_class)
share = cls.shares_v2_client.get_share(share['id'])
share_server_id = share['share_server_id']
dest_host, compatible = self._choose_matching_backend_for_share_server(
share_server_id)
dest_host, compatible = (
cls._choose_compatible_backend_for_share_server(share_server_id))
return share, share_server_id, dest_host
@@ -60,16 +60,11 @@ class ShareServerMigrationInvalidParametersNFS(MigrationShareServerNegative):
@classmethod
def resource_setup(cls):
super(ShareServerMigrationInvalidParametersNFS, cls).resource_setup()
cls.share = cls.create_share(
share_protocol=cls.protocol,
share_type_id=cls.share_type['id'])
cls.share = cls.shares_v2_client.get_share(cls.share['id'])
cls.share_server_id = cls.share['share_server_id']
cls.fake_server_id = 'fake_server_id'
cls.fake_host = 'fake_host@fake_backend'
@decorators.idempotent_id('1be6ec2a-3118-4033-9cdb-ea6d199d97f4')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_share_server_invalid_server_migration_check(self):
"""Not found share server in migration check."""
self.assertRaises(lib_exc.NotFound,
@@ -78,7 +73,7 @@ class ShareServerMigrationInvalidParametersNFS(MigrationShareServerNegative):
self.fake_host)
@decorators.idempotent_id('2aeffcfa-4e68-40e4-8a75-03b017503501')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_share_server_invalid_server_migration_cancel(self):
"""Not found share server in migration cancel."""
self.assertRaises(lib_exc.NotFound,
@@ -86,7 +81,7 @@ class ShareServerMigrationInvalidParametersNFS(MigrationShareServerNegative):
self.fake_server_id)
@decorators.idempotent_id('52d23980-80e7-40de-8dba-1bb1382ef995')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_share_server_invalid_server_migration_start(self):
"""Not found share server in migration start."""
self.assertRaises(lib_exc.NotFound,
@@ -95,7 +90,7 @@ class ShareServerMigrationInvalidParametersNFS(MigrationShareServerNegative):
self.fake_host)
@decorators.idempotent_id('47795631-eb50-424b-9fac-d2ee832cd01c')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_share_server_invalid_server_migration_get_progress(self):
"""Not found share server in migration get progress."""
self.assertRaises(
@@ -104,9 +99,9 @@ class ShareServerMigrationInvalidParametersNFS(MigrationShareServerNegative):
self.fake_server_id)
@decorators.idempotent_id('3b464298-a4e4-417b-92d6-acfbd30ac45b')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_share_server_invalid_server_migration_complete(self):
"""Not found share server in migration """
"""Not found share server in migration complete."""
self.assertRaises(
lib_exc.NotFound,
self.shares_v2_client.share_server_migration_complete,
@@ -115,21 +110,65 @@ class ShareServerMigrationInvalidParametersNFS(MigrationShareServerNegative):
@decorators.idempotent_id('2d25cf84-0b5c-4a9f-ae20-9bec09bb6914')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_share_server_invalid_host_migration_start(self):
"""Not found share server in migration start."""
"""Invalid host in migration start."""
share = self.create_share(
share_protocol=self.protocol,
share_type_id=self.share_type['id'])
share = self.shares_v2_client.get_share(share['id'])
share_server_id = share['share_server_id']
self.assertRaises(lib_exc.NotFound,
self.shares_v2_client.share_server_migration_start,
self.share_server_id,
share_server_id,
self.fake_host)
@decorators.idempotent_id('e7e2c19c-a0ed-41ab-b666-b2beae4a690c')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_share_server_invalid_host_migration_check(self):
"""Not found share server in migration check."""
"""Invalid host in migration check."""
share = self.create_share(
share_protocol=self.protocol,
share_type_id=self.share_type['id'])
share = self.shares_v2_client.get_share(share['id'])
share_server_id = share['share_server_id']
self.assertRaises(lib_exc.NotFound,
self.shares_v2_client.share_server_migration_check,
self.share_server_id,
share_server_id,
self.fake_host)
@decorators.idempotent_id('f0d7a055-3b46-4d2b-9b96-1d719bd323e8')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_share_server_invalid_share_network_migration_start(self):
"""Invalid host in migration start."""
share = self.create_share(
share_protocol=self.protocol,
share_type_id=self.share_type['id'])
share = self.shares_v2_client.get_share(share['id'])
share_server_id = share['share_server_id']
dest_host, _ = self._choose_compatible_backend_for_share_server(
share_server_id)
self.assertRaises(lib_exc.BadRequest,
self.shares_v2_client.share_server_migration_start,
share_server_id,
dest_host,
new_share_network_id='fake_share_net_id')
@decorators.idempotent_id('2617e714-7a8e-49a4-8109-beab3ea6527f')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_share_server_invalid_share_network_migration_check(self):
"""Invalid host in migration check."""
share = self.create_share(
share_protocol=self.protocol,
share_type_id=self.share_type['id'])
share = self.shares_v2_client.get_share(share['id'])
share_server_id = share['share_server_id']
dest_host, _ = self._choose_compatible_backend_for_share_server(
share_server_id)
self.assertRaises(lib_exc.BadRequest,
self.shares_v2_client.share_server_migration_check,
share_server_id,
dest_host,
new_share_network_id='fake_share_net_id')
class ShareServerErrorStatusOperationNFS(MigrationShareServerNegative):
protocol = "nfs"
@@ -142,9 +181,10 @@ class ShareServerErrorStatusOperationNFS(MigrationShareServerNegative):
share_type_id=cls.share_type['id'])
cls.share = cls.shares_v2_client.get_share(cls.share['id'])
cls.share_server_id = cls.share['share_server_id']
cls.dest_host, _ = cls._choose_compatible_backend_for_share_server(
cls.share_server_id)
cls.shares_v2_client.share_server_reset_state(
cls.share_server_id, status=constants.STATUS_ERROR)
cls.fake_host = 'fake_host@fake_backend'
@decorators.idempotent_id('1f8d75c1-aa3c-465a-b2dd-9ad33933944f')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@@ -153,7 +193,7 @@ class ShareServerErrorStatusOperationNFS(MigrationShareServerNegative):
self.assertRaises(lib_exc.Conflict,
self.shares_v2_client.share_server_migration_check,
self.share_server_id,
self.fake_host)
self.dest_host)
@decorators.idempotent_id('c256c5f5-b4d1-47b7-a1f4-af21f19ce600')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@@ -162,7 +202,7 @@ class ShareServerErrorStatusOperationNFS(MigrationShareServerNegative):
self.assertRaises(lib_exc.Conflict,
self.shares_v2_client.share_server_migration_start,
self.share_server_id,
self.fake_host)
self.dest_host)
@decorators.idempotent_id('d2830fe4-8d13-40d2-b987-18d414bb6196')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@@ -171,8 +211,7 @@ class ShareServerErrorStatusOperationNFS(MigrationShareServerNegative):
self.assertRaises(
lib_exc.BadRequest,
self.shares_v2_client.share_server_migration_get_progress,
self.share_server_id,
self.fake_host)
self.share_server_id)
@decorators.idempotent_id('245f39d7-bcbc-4711-afd7-651a5535a880')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@@ -180,8 +219,7 @@ class ShareServerErrorStatusOperationNFS(MigrationShareServerNegative):
"""Share server migration cancel invalid operation."""
self.assertRaises(lib_exc.BadRequest,
self.shares_v2_client.share_server_migration_cancel,
self.share_server_id,
self.fake_host)
self.share_server_id)
@decorators.idempotent_id('3db45440-2c70-4fa4-b5eb-75e3cb0204f8')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@@ -190,8 +228,7 @@ class ShareServerErrorStatusOperationNFS(MigrationShareServerNegative):
self.assertRaises(
lib_exc.BadRequest,
self.shares_v2_client.share_server_migration_complete,
self.share_server_id,
self.fake_host)
self.share_server_id)
class ShareServerMigrationStartNegativesNFS(MigrationShareServerNegative):
@@ -258,14 +295,34 @@ class ShareServerMigrationStartInvalidStatesNFS(MigrationShareServerNegative):
"""Try server migration start with invalid network."""
share, share_server_id, dest_host = self._setup_migration(
cleanup_in_class=False)
share_network = self.create_share_network(cleanup_in_class=False)
azs = self.get_availability_zones()
if len(azs) < 2:
raise self.skipException(
"Could not find the necessary azs. At least two azs are "
"needed to run this test.")
# In this test we'll attempt to start a migration to a share
# network that isn't available in the destination back end's
# availability zone.
dest_host_az = self.get_availability_zones(backends=[dest_host])
if dest_host_az[0] != share['availability_zone']:
share_network_az = share['availability_zone']
else:
for az in azs:
if az != dest_host_az[0]:
share_network_az = az
break
share_network = self.create_share_network(
client=self.shares_v2_client, cleanup_in_class=False,
availability_zone=share_network_az)
self.assertRaises(
lib_exc.ServerFault,
lib_exc.Conflict,
self.shares_v2_client.share_server_migration_start,
share_server_id,
dest_host,
new_share_network_id=share_network)
new_share_network_id=share_network['id'])
@decorators.idempotent_id('11374277-efcf-4992-ad94-c8f4a393d41b')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@@ -273,7 +330,7 @@ class ShareServerMigrationStartInvalidStatesNFS(MigrationShareServerNegative):
"""Try server migration start with invalid share state."""
share, share_server_id, dest_host = self._setup_migration(
cleanup_in_class=False)
self.shares_v2_client.reset_state(share['id'])
self.shares_v2_client.reset_state(share['id'], status='error')
self.assertRaises(
lib_exc.Conflict,
@@ -303,7 +360,7 @@ class ShareServerMigrationStartInvalidStatesNFS(MigrationShareServerNegative):
cleanup_in_class=False)
share = self.shares_v2_client.get_share(share['id'])
share_server_id = share['share_server_id']
dest_host, _ = self._choose_matching_backend_for_share_server(
dest_host, _ = self._choose_compatible_backend_for_share_server(
share_server_id)
self.create_share_replica(
share['id'],

View File

@@ -243,7 +243,7 @@
multitenancy_enabled: true
backend_names: LONDON,PARIS
multi_backend: true
run_share_servers_migration_tests: true
run_share_server_migration_tests: true
- job:
name: manila-tempest-plugin-generic
@@ -485,7 +485,7 @@
run_mount_snapshot_tests: true
run_replication_tests: true
run_revert_to_snapshot_tests: true
run_share_servers_migration_tests: true
run_share_server_migration_tests: true
- job:
name: manila-tempest-plugin-glusterfs-native