Improve test coverage for share migration
- Added tests to validate the migration functionality for a replicated share.
- Added tests to validate the extend and shrink functionality on a migrated share.
- Added tests to cover the preserve-snapshots functionality on a migrated share.
- The current waiter logic in the dummy driver's migration_continue method causes every migration test to take over 4 minutes to complete, so the method was updated to cut the overall time in half and avoid possible timeouts.

Partially-Implements: bp ocata-migration-improvements
Change-Id: Ia7c7eb7b48b464aa670073e8c0795369bd972678
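As a rough illustration (not part of this change), here is a minimal sketch of the waiter pattern the last bullet refers to, assuming the driver reports completion once simulated progress reaches 100%; all names below are hypothetical:

# Hypothetical sketch of a progress-based waiter, for illustration only.
migration_progress = {}

def migration_continue(share_id, step=50):
    # Each call advances the simulated copy. With step=50 the loop finishes in
    # 2 calls; the previous step of 25 needed 4 calls and roughly twice the time.
    migration_progress.setdefault(share_id, 0)
    migration_progress[share_id] += step
    return migration_progress[share_id] >= 100  # True once the fake copy is "done"

calls = 0
done = False
while not done:
    done = migration_continue("share-1")
    calls += 1
print(calls)  # 2 with step=50, 4 with step=25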
parent c54367035d
commit 301abe6257
@@ -77,6 +77,7 @@ RUN_MANILA_REPLICATION_TESTS=${RUN_MANILA_REPLICATION_TESTS:-False}
RUN_MANILA_HOST_ASSISTED_MIGRATION_TESTS=${RUN_MANILA_HOST_ASSISTED_MIGRATION_TESTS:-False}
RUN_MANILA_DRIVER_ASSISTED_MIGRATION_TESTS=${RUN_MANILA_DRIVER_ASSISTED_MIGRATION_TESTS:-False}
RUN_MANILA_MOUNT_SNAPSHOT_TESTS=${RUN_MANILA_MOUNT_SNAPSHOT_TESTS:-False}
RUN_MANILA_MIGRATION_WITH_PRESERVE_SNAPSHOTS_TESTS=${RUN_MANILA_MIGRATION_WITH_PRESERVE_SNAPSHOTS_TESTS:-False}

MANILA_CONF=${MANILA_CONF:-/etc/manila/manila.conf}

@@ -216,6 +217,7 @@ elif [[ "$DRIVER" == "dummy" ]]; then
RUN_MANILA_DRIVER_ASSISTED_MIGRATION_TESTS=True
RUN_MANILA_REVERT_TO_SNAPSHOT_TESTS=True
RUN_MANILA_MOUNT_SNAPSHOT_TESTS=True
RUN_MANILA_MIGRATION_WITH_PRESERVE_SNAPSHOTS_TESTS=True
iniset $TEMPEST_CONFIG share enable_ip_rules_for_protocols 'nfs'
iniset $TEMPEST_CONFIG share enable_user_rules_for_protocols 'cifs'
iniset $TEMPEST_CONFIG share enable_cert_rules_for_protocols ''
@@ -270,6 +272,7 @@ iniset $TEMPEST_CONFIG share run_replication_tests $RUN_MANILA_REPLICATION_TESTS
# Enable migration tests
iniset $TEMPEST_CONFIG share run_host_assisted_migration_tests $RUN_MANILA_HOST_ASSISTED_MIGRATION_TESTS
iniset $TEMPEST_CONFIG share run_driver_assisted_migration_tests $RUN_MANILA_DRIVER_ASSISTED_MIGRATION_TESTS
iniset $TEMPEST_CONFIG share run_migration_with_preserve_snapshots_tests $RUN_MANILA_MIGRATION_WITH_PRESERVE_SNAPSHOTS_TESTS

# Enable mountable snapshots tests
iniset $TEMPEST_CONFIG share run_mount_snapshot_tests $RUN_MANILA_MOUNT_SNAPSHOT_TESTS
@@ -78,7 +78,7 @@ dummy_opts = [
"update_replicated_snapshot": "1.17",

"migration_start": 1.01,
"migration_continue": 1.02, # it will be called 4 times
"migration_continue": 1.02, # it will be called 2 times
"migration_complete": 1.03,
"migration_cancel": 1.04,
"migration_get_progress": 1.05,
@@ -548,7 +548,7 @@ class DummyDriver(driver.ShareDriver):
if source_share["id"] not in self.migration_progress:
self.migration_progress[source_share["id"]] = 0

self.migration_progress[source_share["id"]] += 25
self.migration_progress[source_share["id"]] += 50

LOG.debug(
"Migration of dummy share with ID '%s' is continuing, %s." %
@@ -14,6 +14,8 @@
STATUS_ERROR = 'error'
STATUS_AVAILABLE = 'available'
STATUS_ERROR_DELETING = 'error_deleting'
STATUS_MIGRATING = 'migrating'

TEMPEST_MANILA_PREFIX = 'tempest-manila'

# Replication
@@ -192,6 +192,10 @@ ShareGroup = [
deprecated_name="run_migration_tests",
default=False,
help="Enable or disable driver-assisted migration tests."),
cfg.BoolOpt("run_migration_with_preserve_snapshots_tests",
default=False,
help="Enable or disable migration with "
"preserve_snapshots tests set to True."),
cfg.BoolOpt("run_manage_unmanage_tests",
default=False,
help="Defines whether to run manage/unmanage tests or not. "
@@ -493,6 +493,15 @@ class SharesV2Client(shares_client.SharesClient):
self.expected_success(200, resp.status)
return self._parse_resp(body)

def list_snapshots_for_share(self, share_id, detailed=False,
version=LATEST_MICROVERSION):
"""Get list of snapshots for given share."""
uri = ('snapshots/detail?share_id=%s' % share_id
if detailed else 'snapshots?share_id=%s' % share_id)
resp, body = self.get(uri, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)

def list_snapshots_with_detail(self, params=None,
version=LATEST_MICROVERSION):
"""Get detailed list of share snapshots w/o filters."""
@@ -17,6 +17,7 @@
import ddt
from tempest import config
from tempest.lib.common.utils import data_utils
import testtools
from testtools import testcase as tc

from manila_tempest_tests.common import constants
@@ -63,6 +64,11 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
if not (CONF.share.run_host_assisted_migration_tests or
CONF.share.run_driver_assisted_migration_tests):
raise cls.skipException("Share migration tests are disabled.")
cls.pools = cls.shares_v2_client.list_pools(detail=True)['pools']

if len(cls.pools) < 2:
raise cls.skipException("At least two different pool entries are "
"needed to run share migration tests.")

cls.new_type = cls.create_share_type(
name=data_utils.rand_name('new_share_type_for_migration'),
@@ -82,7 +88,10 @@ class MigrationNFSTest(base.BaseSharesAdminTest):

self._check_migration_enabled(force_host_assisted)

share, dest_pool = self._setup_migration()
share = self.create_share(self.protocol)
share = self.shares_v2_client.get_share(share['id'])

share, dest_pool = self._setup_migration(share)

task_state = (constants.TASK_STATE_DATA_COPYING_COMPLETED
if force_host_assisted
@@ -119,10 +128,6 @@ class MigrationNFSTest(base.BaseSharesAdminTest):

self._check_migration_enabled(force_host_assisted)

share, dest_pool = self._setup_migration(opposite=True)

old_share_network_id = share['share_network_id']

# If currently configured is DHSS=False,
# then we need it for DHSS=True
if not CONF.share.multitenancy_enabled:
@@ -136,6 +141,12 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
else:
new_share_network_id = None

share = self.create_share(self.protocol)
share = self.shares_v2_client.get_share(share['id'])

share, dest_pool = self._setup_migration(share, opposite=True)

old_share_network_id = share['share_network_id']
old_share_type_id = share['share_type']
new_share_type_id = self.new_type_opposite['share_type']['id']

@@ -179,22 +190,15 @@ class MigrationNFSTest(base.BaseSharesAdminTest):

self._check_migration_enabled(force_host_assisted)

share, dest_pool = self._setup_migration()
share = self.create_share(self.protocol)
share = self.shares_v2_client.get_share(share['id'])

task_state = (constants.TASK_STATE_DATA_COPYING_COMPLETED
if force_host_assisted
else constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)
share, dest_pool = self._setup_migration(share)

old_share_network_id = share['share_network_id']

if CONF.share.multitenancy_enabled:
new_share_network_id = self._create_secondary_share_network(
old_share_network_id)
else:
new_share_network_id = None

old_share_type_id = share['share_type']
new_share_type_id = self.new_type['share_type']['id']
task_state, new_share_network_id, new_share_type_id = (
self._get_migration_data(share, force_host_assisted))

share = self.migrate_share(
share['id'], dest_pool,
@@ -224,18 +228,111 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
dest_pool, share, constants.TASK_STATE_MIGRATION_SUCCESS,
complete=True, share_network_id=new_share_network_id,
share_type_id=new_share_type_id)
self._cleanup_share(share)

def _setup_migration(self, opposite=False):
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(CONF.share.run_extend_tests,
'Extend share tests are disabled.')
@ddt.data(True, False)
def test_extend_on_migrated_share(self, force_host_assisted):
self._test_resize_post_migration(force_host_assisted, resize='extend')

pools = self.shares_v2_client.list_pools(detail=True)['pools']
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(CONF.share.run_shrink_tests,
'Shrink share tests are disabled.')
@ddt.data(True, False)
def test_shrink_on_migrated_share(self, force_host_assisted):
self._test_resize_post_migration(force_host_assisted, resize='shrink')

if len(pools) < 2:
raise self.skipException("At least two different pool entries are "
"needed to run share migration tests.")
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(CONF.share.run_snapshot_tests,
'Snapshot tests are disabled.')
@testtools.skipUnless(CONF.share.run_driver_assisted_migration_tests,
'Driver-assisted migration tests are disabled.')
@testtools.skipUnless(
CONF.share.run_migration_with_preserve_snapshots_tests,
'Migration with preserve snapshots tests are disabled.')
def test_migrating_share_with_snapshot(self):
ss_type, __ = self._create_share_type_for_snapshot_capability()

share = self.create_share(self.protocol, cleanup_in_class=False)
share = self.shares_v2_client.get_share(share['id'])

share, dest_pool = self._setup_migration(share)
snapshot1 = self.create_snapshot_wait_for_active(
share['id'], cleanup_in_class=False)
snapshot2 = self.create_snapshot_wait_for_active(
share['id'], cleanup_in_class=False)

task_state, new_share_network_id, __ = self._get_migration_data(share)

share = self.migrate_share(
share['id'], dest_pool,
wait_for_status=task_state,
new_share_type_id=ss_type['share_type']['id'],
new_share_network_id=new_share_network_id, preserve_snapshots=True)

share = self.migration_complete(share['id'], dest_pool)

self._validate_snapshot(share, snapshot1, snapshot2)

@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(CONF.share.run_snapshot_tests,
'Snapshot tests are disabled.')
@testtools.skipUnless(CONF.share.run_driver_assisted_migration_tests,
'Driver-assisted migration tests are disabled.')
@testtools.skipUnless(
CONF.share.run_migration_with_preserve_snapshots_tests,
'Migration with preserve snapshots tests are disabled.')
def test_migration_cancel_share_with_snapshot(self):
share = self.create_share(self.protocol)
share = self.shares_v2_client.get_share(share['id'])

share, dest_pool = self._setup_migration(share)
snapshot1 = self.create_snapshot_wait_for_active(share['id'])
snapshot2 = self.create_snapshot_wait_for_active(share['id'])

task_state, new_share_network_id, new_share_type_id = (
self._get_migration_data(share))

share = self.migrate_share(
share['id'], dest_pool,
wait_for_status=task_state, new_share_type_id=new_share_type_id,
new_share_network_id=new_share_network_id, preserve_snapshots=True)

share = self.migration_cancel(share['id'], dest_pool)
self._validate_snapshot(share, snapshot1, snapshot2)

@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(CONF.share.run_snapshot_tests,
'Snapshot tests are disabled.')
@ddt.data(True, False)
def test_migrate_share_to_snapshot_capability_share_type(
self, force_host_assisted):
# Verify that share with no snapshot support type can be migrated
# to new share type which supports the snapshot
self._validate_share_migration_with_different_snapshot_capability_type(
force_host_assisted, True)

@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(CONF.share.run_snapshot_tests,
'Snapshot tests are disabled.')
@ddt.data(True, False)
def test_migrate_share_to_no_snapshot_capability_share_type(
self, force_host_assisted):
# Verify that share with snapshot support type can be migrated
# to new share type which doesn't support the snapshot
self._validate_share_migration_with_different_snapshot_capability_type(
force_host_assisted, False)

def _setup_migration(self, share, opposite=False):

old_exports = self.shares_v2_client.list_share_export_locations(
share['id'])
self.assertNotEmpty(old_exports)
@@ -262,7 +359,7 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
else:
dest_type = self.new_type['share_type']

dest_pool = utils.choose_matching_backend(share, pools, dest_type)
dest_pool = utils.choose_matching_backend(share, self.pools, dest_type)

if opposite:
if not dest_pool:
@@ -274,6 +371,7 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
self.assertIsNotNone(dest_pool.get('name'))

dest_pool = dest_pool['name']
share = self.shares_v2_client.get_share(share['id'])

return share, dest_pool

@@ -325,10 +423,6 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
self.assertIn(r, filtered_rules)
self.assertEqual(len(expected_rules), len(filtered_rules))

self.shares_v2_client.delete_share(share['id'])
self.shares_v2_client.wait_for_resource_deletion(
share_id=share['id'])

# Share not migrated yet
else:
self.assertNotEqual(dest_pool, share['host'])
@@ -355,3 +449,144 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
neutron_subnet_id=old_share_network['neutron_subnet_id'])

return new_share_network['id']

def _test_resize_post_migration(self, force_host_assisted, resize):
self._check_migration_enabled(force_host_assisted)
new_size = CONF.share.share_size + 1
share = self.create_share(self.protocol, size=new_size)
share = self.shares_v2_client.get_share(share['id'])

share, dest_pool = self._setup_migration(share)

task_state, new_share_network_id, new_share_type_id = (
self._get_migration_data(share, force_host_assisted))

share = self.migrate_share(
share['id'], dest_pool,
force_host_assisted_migration=force_host_assisted,
wait_for_status=task_state, new_share_type_id=new_share_type_id,
new_share_network_id=new_share_network_id)

share = self.migration_complete(share['id'], dest_pool)
if resize == 'extend':
new_size = CONF.share.share_size + 2
self.shares_v2_client.extend_share(share['id'], new_size)
self.shares_v2_client.wait_for_share_status(
share['id'], constants.STATUS_AVAILABLE)
share = self.shares_v2_client.get_share(share["id"])
self.assertEqual(new_size, int(share["size"]))
else:
new_size = CONF.share.share_size
self.shares_v2_client.shrink_share(share['id'], new_size)
self.shares_v2_client.wait_for_share_status(
share['id'], constants.STATUS_AVAILABLE)
share = self.shares_v2_client.get_share(share["id"])
self.assertEqual(new_size, int(share["size"]))

self._cleanup_share(share)

def _get_migration_data(self, share, force_host_assisted=False):
task_state = (constants.TASK_STATE_DATA_COPYING_COMPLETED
if force_host_assisted
else constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)

old_share_network_id = share['share_network_id']

if CONF.share.multitenancy_enabled:
new_share_network_id = self._create_secondary_share_network(
old_share_network_id)

else:
new_share_network_id = None

new_share_type_id = self.new_type['share_type']['id']
return task_state, new_share_network_id, new_share_type_id

def _validate_snapshot(self, share, snapshot1, snapshot2):
snapshot_list = self.shares_v2_client.list_snapshots_for_share(
share['id'])
msg = "Share %s has no snapshot." % share['id']
# Verify that snapshot list is not empty
self.assertNotEmpty(snapshot_list, msg)
snapshot_id_list = [snap['id'] for snap in snapshot_list]

# verify that after migration original snapshots are retained
self.assertIn(snapshot1['id'], snapshot_id_list)
self.assertIn(snapshot2['id'], snapshot_id_list)
# Verify that a share can be created from a snapshot after migration
snapshot1_share = self.create_share(
self.protocol, size=share['size'], snapshot_id=snapshot1['id'],
share_network_id=share['share_network_id'])
self.assertEqual(snapshot1['id'], snapshot1_share['snapshot_id'])
self._cleanup_share(share)

def _validate_share_migration_with_different_snapshot_capability_type(
self, force_host_assisted, snapshot_capable):

self._check_migration_enabled(force_host_assisted)
ss_type, no_ss_type = self._create_share_type_for_snapshot_capability()

if snapshot_capable:
share_type = ss_type['share_type']
share_type_id = no_ss_type['share_type']['id']
new_share_type_id = ss_type['share_type']['id']
else:
share_type = no_ss_type['share_type']
share_type_id = ss_type['share_type']['id']
new_share_type_id = no_ss_type['share_type']['id']

share = self.create_share(
self.protocol, share_type_id=share_type_id)
share = self.shares_v2_client.get_share(share['id'])

if snapshot_capable:
self.assertEqual(False, share['snapshot_support'])
else:
# Verify that share has snapshot support capability
self.assertTrue(share['snapshot_support'])

dest_pool = utils.choose_matching_backend(share, self.pools,
share_type)
task_state, new_share_network_id, __ = (
self._get_migration_data(share, force_host_assisted))
share = self.migrate_share(
share['id'], dest_pool['name'],
force_host_assisted_migration=force_host_assisted,
wait_for_status=task_state,
new_share_type_id=new_share_type_id,
new_share_network_id=new_share_network_id)
share = self.migration_complete(share['id'], dest_pool)

if snapshot_capable:
# Verify that migrated share does have snapshot support capability
self.assertTrue(share['snapshot_support'])
else:
# Verify that migrated share don't have snapshot support capability
self.assertEqual(False, share['snapshot_support'])

self._cleanup_share(share)

def _create_share_type_for_snapshot_capability(self):
# Share type with snapshot support
st_name = data_utils.rand_name(
'snapshot_capable_share_type_for_migration')
extra_specs = self.add_extra_specs_to_dict({"snapshot_support": True})
ss_type = self.create_share_type(st_name, extra_specs=extra_specs)

# New share type with no snapshot support capability
# to which a share will be migrated
new_st_name = data_utils.rand_name(
'snapshot_noncapable_share_type_for_migration')
extra_specs = {
"driver_handles_share_servers": CONF.share.multitenancy_enabled
}
no_ss_type = self.create_share_type(new_st_name,
extra_specs=extra_specs)
return ss_type, no_ss_type

def _cleanup_share(self, share):
resource = {"type": "share", "id": share["id"],
"client": self.shares_v2_client}
# NOTE(Yogi1): Share needs to be cleaned up explicitly at the end of
# test otherwise, newly created share_network will not get cleaned up.
self.method_resources.insert(0, resource)
@@ -54,7 +54,8 @@ class MigrationNegativeTest(base.BaseSharesAdminTest):
raise cls.skipException("At least two different pool entries "
"are needed to run share migration tests.")

cls.share = cls.create_share(cls.protocol)
cls.share = cls.create_share(cls.protocol,
size=CONF.share.share_size+1)
cls.share = cls.shares_client.get_share(cls.share['id'])

cls.default_type = cls.shares_v2_client.list_share_types(
@@ -130,8 +131,9 @@ class MigrationNegativeTest(base.BaseSharesAdminTest):
lib_exc.Conflict, self.shares_v2_client.migrate_share,
self.share['id'], self.dest_pool,
force_host_assisted_migration=True)
self.shares_client.delete_snapshot(snap['id'])
self.shares_client.wait_for_resource_deletion(snapshot_id=snap["id"])
self.shares_v2_client.delete_snapshot(snap['id'])
self.shares_v2_client.wait_for_resource_deletion(snapshot_id=snap[
"id"])

@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.29")
@@ -258,3 +260,67 @@ class MigrationNegativeTest(base.BaseSharesAdminTest):
self.share['id'], self.dest_pool,
new_share_type_id=new_type_opposite['share_type']['id'],
new_share_network_id=new_share_network_id)

@testtools.skipUnless(CONF.share.run_driver_assisted_migration_tests,
"Driver-assisted migration tests are disabled.")
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.29")
def test_create_snapshot_during_share_migration(self):
self._test_share_actions_during_share_migration('create_snapshot', [])

@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.29")
@ddt.data(('extend_share', [CONF.share.share_size + 2]),
('shrink_share', [CONF.share.share_size]))
@ddt.unpack
def test_share_resize_during_share_migration(self, method_name, *args):
self._test_share_actions_during_share_migration(method_name, *args)

def skip_if_tests_are_disabled(self, method_name):
property_to_evaluate = {
'extend_share': CONF.share.run_extend_tests,
'shrink_share': CONF.share.run_shrink_tests,
'create_snapshot': CONF.share.run_snapshot_tests,
}
if not property_to_evaluate[method_name]:
raise self.skipException(method_name + 'tests are disabled.')

@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.29")
def test_add_access_rule_during_migration(self):
access_type = "ip"
access_to = "50.50.50.50"
self.shares_v2_client.reset_state(self.share['id'],
constants.STATUS_MIGRATING)
self.shares_v2_client.reset_task_state(
self.share['id'],
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)
self.assertRaises(
lib_exc.BadRequest,
self.shares_v2_client.create_access_rule,
self.share['id'], access_type, access_to)
# Revert the migration state by cancelling the migration
self.shares_v2_client.reset_state(self.share['id'],
constants.STATUS_AVAILABLE)
self.shares_v2_client.reset_task_state(
self.share['id'],
constants.TASK_STATE_MIGRATION_CANCELLED)

def _test_share_actions_during_share_migration(self, method_name, *args):
self.skip_if_tests_are_disabled(method_name)
# Verify various share operations during share migration
self.shares_v2_client.reset_state(self.share['id'],
constants.STATUS_MIGRATING)
self.shares_v2_client.reset_task_state(
self.share['id'],
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)

self.assertRaises(
lib_exc.BadRequest, getattr(self.shares_v2_client, method_name),
self.share['id'], *args)
# Revert the migration state by cancelling the migration
self.shares_v2_client.reset_state(self.share['id'],
constants.STATUS_AVAILABLE)
self.shares_v2_client.reset_task_state(
self.share['id'],
constants.TASK_STATE_MIGRATION_CANCELLED)
@@ -175,6 +175,27 @@ class ReplicationNegativeTest(base.BaseSharesMixedTest):
self.admin_client.create_access_rule,
self.share1["id"], access_type, access_to, 'ro')

@testtools.skipUnless(CONF.share.run_host_assisted_migration_tests or
CONF.share.run_driver_assisted_migration_tests,
"Share migration tests are disabled.")
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.29")
def test_migration_of_replicated_share(self):
pools = self.admin_client.list_pools(detail=True)['pools']
hosts = [p['name'] for p in pools]
self.create_share_replica(self.share1["id"], self.replica_zone,
cleanup_in_class=False)
share_host = self.share1['host']

for host in hosts:
if host != share_host:
dest_host = host
break

self.assertRaises(
lib_exc.Conflict, self.admin_client.migrate_share,
self.share1['id'], dest_host)

@testtools.skipUnless(CONF.share.run_replication_tests,
'Replication tests are disabled.')