Add tests to ensure snapshots across replicas

Related-Bug: #1546303
Depends-On: Ia4cd2a36e31418e7a3d1c218080caa632755fe16
Depends-On: Id318a4adc0faf64a4bef57252aa2f0d9083b82b1
Change-Id: I269225b976efe13b2cbb9e0648d541a063df70e5
commit 7bf4f0262e (parent 547807b464)
@@ -77,6 +77,10 @@ elif [[ "$DRIVER" == "lvm" ]]; then
 elif [[ "$DRIVER" == "zfsonlinux" ]]; then
     echo "SHARE_DRIVER=manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" >> $localrc_path
     echo "RUN_MANILA_REPLICATION_TESTS=True" >> $localrc_path
+    # Set the replica_state_update_interval to 60 seconds to make
+    # replication tests run faster. The default is 300, which is greater than
+    # the build timeout for ZFS on the gate.
+    echo "MANILA_REPLICA_STATE_UPDATE_INTERVAL=60" >> $localrc_path
     echo "MANILA_ZFSONLINUX_USE_SSH=True" >> $localrc_path
 fi
 
@@ -187,6 +187,9 @@ function configure_manila {
 
     iniset $MANILA_CONF DEFAULT lvm_share_volume_group $SHARE_GROUP
 
+    # Set the replica_state_update_interval
+    iniset $MANILA_CONF DEFAULT replica_state_update_interval $MANILA_REPLICA_STATE_UPDATE_INTERVAL
+
     if is_service_enabled neutron; then
         configure_auth_token_middleware $MANILA_CONF neutron $MANILA_AUTH_CACHE_DIR neutron
     fi
@@ -141,6 +141,9 @@ SMB_CONF=${SMB_CONF:-/etc/samba/smb.conf}
 SMB_PRIVATE_DIR=${SMB_PRIVATE_DIR:-/var/lib/samba/private}
 CONFIGURE_BACKING_FILE=${CONFIGURE_BACKING_FILE:-"True"}
 
+# Options for replication
+MANILA_REPLICA_STATE_UPDATE_INTERVAL=${MANILA_REPLICA_STATE_UPDATE_INTERVAL:-300}
+
 # Options for configuration of ZFSonLinux driver
 # 'MANILA_ZFSONLINUX_ZPOOL_SIZE' defines size of each zpool. That value
 # will be used for creation of sparse files.
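The new devstack default (300 seconds) mirrors the driver-side replica_state_update_interval option that the plugin hunk above writes into manila.conf. As a rough illustration of how such a periodic-interval option is consumed, here is a minimal oslo.config sketch; the option name matches the iniset call above, but the help text and registration site are assumptions, not copied from manila:

    from oslo_config import cfg

    # Hedged sketch: an IntOpt along these lines backs the
    # 'replica_state_update_interval' setting written by devstack.
    opts = [
        cfg.IntOpt('replica_state_update_interval',
                   default=300,
                   help='Interval (in seconds) between periodic '
                        'replica_state updates (assumed wording).'),
    ]

    CONF = cfg.CONF
    CONF.register_opts(opts)

    if __name__ == '__main__':
        CONF([])  # parse an empty command line
        print(CONF.replica_state_update_interval)  # 300 unless overridden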
@@ -468,7 +468,7 @@ class BaseSharesTest(test.BaseTestCase):
                                         description=None, force=False,
                                         client=None, cleanup_in_class=True):
         if client is None:
-            client = cls.shares_client
+            client = cls.shares_v2_client
         if description is None:
             description = "Tempest's snapshot"
         snapshot = client.create_snapshot(share_id, name, description, force)
@@ -521,6 +521,16 @@ class BaseSharesTest(test.BaseTestCase):
                  service['state'] == 'up']
         return zones
 
+    def get_pools_for_replication_domain(self):
+        # Get the list of pools for the replication domain
+        pools = self.admin_client.list_pools(detail=True)['pools']
+        instance_host = self.shares[0]['host']
+        host_pool = [p for p in pools if p['name'] == instance_host][0]
+        rep_domain = host_pool['capabilities']['replication_domain']
+        pools_in_rep_domain = [p for p in pools if p['capabilities'][
+            'replication_domain'] == rep_domain]
+        return rep_domain, pools_in_rep_domain
+
     @classmethod
     def create_share_replica(cls, share_id, availability_zone, client=None,
                              cleanup_in_class=False, cleanup=True):
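The helper hoisted into the base class above pairs a share instance's host with every scheduler pool that advertises the same replication_domain capability. A self-contained sketch of that filtering logic, run against fabricated pool data (the real payload comes from admin_client.list_pools(detail=True)):

    # Fabricated pools, shaped like the scheduler pools API response.
    pools = [
        {'name': 'ubuntu@zfsonlinux#alpha',
         'capabilities': {'replication_domain': 'ZFS1'}},
        {'name': 'ubuntu@zfsonlinux#beta',
         'capabilities': {'replication_domain': 'ZFS1'}},
        {'name': 'ubuntu@lvm#gamma',
         'capabilities': {'replication_domain': None}},
    ]

    instance_host = 'ubuntu@zfsonlinux#alpha'
    host_pool = [p for p in pools if p['name'] == instance_host][0]
    rep_domain = host_pool['capabilities']['replication_domain']
    pools_in_rep_domain = [
        p for p in pools
        if p['capabilities']['replication_domain'] == rep_domain]
    assert len(pools_in_rep_domain) == 2  # alpha and beta share a domain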
@@ -545,8 +555,11 @@ class BaseSharesTest(test.BaseTestCase):
     @classmethod
     def delete_share_replica(cls, replica_id, client=None):
         client = client or cls.shares_v2_client
-        client.delete_share_replica(replica_id)
-        client.wait_for_resource_deletion(replica_id=replica_id)
+        try:
+            client.delete_share_replica(replica_id)
+            client.wait_for_resource_deletion(replica_id=replica_id)
+        except exceptions.NotFound:
+            pass
 
     @classmethod
     def promote_share_replica(cls, replica_id, client=None):
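Wrapping the delete in a NotFound guard makes delete_share_replica idempotent: the new snapshot tests delete the original replica explicitly while also registering the same deletion via addCleanup, and the second attempt must not fail the test. A generic sketch of the pattern (the names here are illustrative, not manila's):

    class NotFound(Exception):
        """Stand-in for the tempest client's NotFound exception."""

    def delete_idempotently(delete_fn, resource_id):
        # Treat 'already gone' as success so repeated cleanup is harmless.
        try:
            delete_fn(resource_id)
        except NotFound:
            pass

    store = {'r1'}

    def delete_fn(rid):
        if rid not in store:
            raise NotFound(rid)
        store.remove(rid)

    delete_idempotently(delete_fn, 'r1')  # deletes the resource
    delete_idempotently(delete_fn, 'r1')  # no-op instead of an error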
@@ -634,6 +647,19 @@ class BaseSharesTest(test.BaseTestCase):
                     ic["method"]()
                     ic["deleted"] = True
 
+    @classmethod
+    def clear_share_replicas(cls, share_id, client=None):
+        client = client or cls.shares_v2_client
+        share_replicas = client.list_share_replicas(
+            share_id=share_id)
+
+        for replica in share_replicas:
+            try:
+                cls.delete_share_replica(replica['id'])
+            except exceptions.BadRequest:
+                # Ignore the exception due to deletion of last active replica
+                pass
+
     @classmethod
     def clear_resources(cls, resources=None):
         """Deletes resources, that were created in test suites.
@@ -658,6 +684,7 @@ class BaseSharesTest(test.BaseTestCase):
             client = res["client"]
             with handle_cleanup_exceptions():
                 if res["type"] is "share":
+                    cls.clear_share_replicas(res_id)
                     cg_id = res.get('consistency_group_id')
                     if cg_id:
                         params = {'consistency_group_id': cg_id}
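clear_share_replicas tolerates BadRequest because the API refuses to delete the last 'active' replica of a share; that replica goes away with the share itself. A toy simulation of why the loop still converges (the exception class and replica list are fabricated here):

    class BadRequest(Exception):
        """Stand-in for the tempest client's BadRequest exception."""

    replicas = [{'id': 'r1', 'replica_state': 'active'},
                {'id': 'r2', 'replica_state': 'in_sync'}]

    def delete_replica(replica):
        # Mimic the API rule: the sole active replica cannot be deleted.
        actives = [r for r in replicas if r['replica_state'] == 'active']
        if replica['replica_state'] == 'active' and len(actives) == 1:
            raise BadRequest(replica['id'])
        replicas.remove(replica)

    for replica in list(replicas):
        try:
            delete_replica(replica)
        except BadRequest:
            pass  # the last active replica is removed with the share

    assert [r['id'] for r in replicas] == ['r1']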
@@ -108,16 +108,6 @@ class ReplicationTest(base.BaseSharesTest):
         return [replica for replica in replica_list
                 if replica['replica_state'] == r_state]
 
-    def _get_pools_for_replication_domain(self):
-        # Get the list of pools for the replication domain
-        pools = self.admin_client.list_pools(detail=True)['pools']
-        instance_host = self.shares[0]['host']
-        host_pool = [p for p in pools if p['name'] == instance_host][0]
-        rep_domain = host_pool['capabilities']['replication_domain']
-        pools_in_rep_domain = [p for p in pools if p['capabilities'][
-            'replication_domain'] == rep_domain]
-        return rep_domain, pools_in_rep_domain
-
     def _verify_config_and_set_access_rule_data(self):
         """Verify the access rule configuration is enabled for NFS.
 
@@ -157,9 +147,10 @@ class ReplicationTest(base.BaseSharesTest):
         # Create the replica
         self._verify_create_replica()
 
-        # Verify access rule transitions to 'active' state.
-        self.shares_v2_client.wait_for_access_rule_status(
-            self.shares[0]["id"], rule["id"], constants.RULE_STATE_ACTIVE)
+        # Verify access_rules_status transitions to 'active' state.
+        self.shares_v2_client.wait_for_share_status(
+            self.shares[0]["id"], constants.RULE_STATE_ACTIVE,
+            status_attr='access_rules_status')
 
         # Delete rule and wait for deletion
         self.shares_v2_client.delete_access_rule(self.shares[0]["id"],
@@ -174,17 +165,19 @@ class ReplicationTest(base.BaseSharesTest):
         share_replica = self._verify_create_replica()
 
         # Add access rule
-        rule = self.shares_v2_client.create_access_rule(
+        self.shares_v2_client.create_access_rule(
             self.shares[0]["id"], access_type, access_to, 'ro')
-        self.shares_v2_client.wait_for_access_rule_status(
-            self.shares[0]["id"], rule["id"], constants.RULE_STATE_ACTIVE)
+
+        self.shares_v2_client.wait_for_share_status(
+            self.shares[0]["id"], constants.RULE_STATE_ACTIVE,
+            status_attr='access_rules_status')
 
         # Delete the replica
         self.delete_share_replica(share_replica["id"])
 
     @test.attr(type=["gate", ])
     def test_add_multiple_share_replicas(self):
-        rep_domain, pools = self._get_pools_for_replication_domain()
+        rep_domain, pools = self.get_pools_for_replication_domain()
         if len(pools) < 3:
             msg = ("Replication domain %(domain)s has only %(count)s pools. "
                    "Need at least 3 pools to run this test." %
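Both access-rule hunks switch from polling an individual rule's state to polling the share-level access_rules_status attribute. The tempest clients' wait helpers all follow the same poll-until-status shape; a generic, self-contained sketch (the interval and timeout values are arbitrary):

    import time

    def wait_for_status(get_status, expected, timeout=300, interval=5):
        # Poll get_status() until it returns `expected` or time runs out.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if get_status() == expected:
                return
            time.sleep(interval)
        raise AssertionError('status never became %r' % expected)

    # Usage against a fabricated status source:
    states = iter(['out_of_sync', 'out_of_sync', 'active'])
    wait_for_status(lambda: next(states), 'active', timeout=1, interval=0)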
manila_tempest_tests/tests/api/test_replication_snapshots.py (new file, 199 lines)
@@ -0,0 +1,199 @@
+# Copyright 2016 Yogesh Kshirsagar
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest import test
+import testtools
+
+from manila_tempest_tests import clients_share as clients
+from manila_tempest_tests.common import constants
+from manila_tempest_tests import share_exceptions
+from manila_tempest_tests.tests.api import base
+
+CONF = config.CONF
+_MIN_SUPPORTED_MICROVERSION = '2.11'
+
+
+@testtools.skipUnless(CONF.share.run_replication_tests,
+                      'Replication tests are disabled.')
+@testtools.skipUnless(CONF.share.run_snapshot_tests,
+                      'Snapshot tests are disabled.')
+@base.skip_if_microversion_lt(_MIN_SUPPORTED_MICROVERSION)
+class ReplicationSnapshotTest(base.BaseSharesTest):
+
+    @classmethod
+    def resource_setup(cls):
+        super(ReplicationSnapshotTest, cls).resource_setup()
+        # Create share_type
+        name = data_utils.rand_name(constants.TEMPEST_MANILA_PREFIX)
+        cls.admin_client = clients.AdminManager().shares_v2_client
+        cls.replication_type = CONF.share.backend_replication_type
+
+        if cls.replication_type not in constants.REPLICATION_TYPE_CHOICES:
+            raise share_exceptions.ShareReplicationTypeException(
+                replication_type=cls.replication_type
+            )
+        cls.zones = cls.get_availability_zones(client=cls.admin_client)
+        cls.share_zone = cls.zones[0]
+        cls.replica_zone = cls.zones[-1]
+
+        cls.extra_specs = cls.add_required_extra_specs_to_dict(
+            {"replication_type": cls.replication_type})
+        share_type = cls.create_share_type(
+            name,
+            extra_specs=cls.extra_specs,
+            client=cls.admin_client)
+        cls.share_type = share_type["share_type"]
+        # Create share with the above share_type
+        cls.creation_data = {'kwargs': {
+            'share_type_id': cls.share_type['id'],
+            'availability_zone': cls.share_zone,
+        }}
+
+    @test.attr(type=["gate", ])
+    def test_snapshot_after_share_replica(self):
+        """Test a snapshot of a replicated share.
+
+        Create the replica first and then create a snapshot.
+        Verify that the snapshot is properly created under the replica by
+        creating a share from that snapshot.
+        """
+        share = self.create_share(share_type_id=self.share_type['id'],
+                                  availability_zone=self.share_zone)
+        original_replica = self.shares_v2_client.list_share_replicas(
+            share["id"])[0]
+
+        share_replica = self.create_share_replica(share["id"],
+                                                  self.replica_zone,
+                                                  cleanup=False)
+        self.addCleanup(self.delete_share_replica, original_replica['id'])
+        self.shares_v2_client.wait_for_share_replica_status(
+            share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
+            status_attr='replica_state')
+
+        snapshot = self.create_snapshot_wait_for_active(share["id"])
+        self.promote_share_replica(share_replica['id'])
+        self.delete_share_replica(original_replica['id'])
+        self.create_share(snapshot_id=snapshot['id'])
+
+    @test.attr(type=["gate", ])
+    def test_snapshot_before_share_replica(self):
+        """Test a snapshot of a replicated share.
+
+        Create a snapshot before creating a share replica for the same
+        share.
+        Verify the snapshot by creating a share from the snapshot.
+        """
+        share = self.create_share(share_type_id=self.share_type['id'],
+                                  availability_zone=self.share_zone)
+        snapshot = self.create_snapshot_wait_for_active(share["id"])
+
+        original_replica = self.shares_v2_client.list_share_replicas(
+            share["id"])[0]
+        share_replica = self.create_share_replica(share["id"],
+                                                  self.replica_zone,
+                                                  cleanup=False)
+        self.addCleanup(self.delete_share_replica, original_replica['id'])
+        self.shares_v2_client.wait_for_share_replica_status(
+            share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
+            status_attr='replica_state')
+
+        # Wait for the snapshot to become available
+        self.shares_v2_client.wait_for_snapshot_status(
+            snapshot['id'], "available")
+
+        self.promote_share_replica(share_replica['id'])
+        self.delete_share_replica(original_replica['id'])
+        self.create_share(snapshot_id=snapshot['id'])
+
+    @test.attr(type=["gate", ])
+    def test_snapshot_before_and_after_share_replica(self):
+        """Test snapshots of a replicated share.
+
+        Verify that snapshots can be created both before and after the
+        share replica is created.
+        Verify the snapshots by creating shares from them.
+        """
+        share = self.create_share(share_type_id=self.share_type['id'],
+                                  availability_zone=self.share_zone)
+        snapshot1 = self.create_snapshot_wait_for_active(share["id"])
+
+        original_replica = self.shares_v2_client.list_share_replicas(
+            share["id"])[0]
+
+        share_replica = self.create_share_replica(share["id"],
+                                                  self.replica_zone,
+                                                  cleanup=False)
+        self.addCleanup(self.delete_share_replica, original_replica['id'])
+        self.shares_v2_client.wait_for_share_replica_status(
+            share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
+            status_attr='replica_state')
+
+        snapshot2 = self.create_snapshot_wait_for_active(share["id"])
+
+        # Wait for snapshot1 to become available
+        self.shares_v2_client.wait_for_snapshot_status(
+            snapshot1['id'], "available")
+
+        self.promote_share_replica(share_replica['id'])
+        # Remove the original active replica to ensure that the snapshots
+        # are still usable from the promoted replica.
+        self.delete_share_replica(original_replica['id'])
+
+        self.create_share(snapshot_id=snapshot1['id'])
+        self.create_share(snapshot_id=snapshot2['id'])
+
+    @test.attr(type=["gate", ])
+    def test_delete_snapshot_after_adding_replica(self):
+        """Verify snapshot deletion.
+
+        Ensure that deleting the original snapshot also deletes the
+        snapshot from the replica.
+        """
+        share = self.create_share(share_type_id=self.share_type['id'],
+                                  availability_zone=self.share_zone)
+        share_replica = self.create_share_replica(share["id"],
+                                                  self.replica_zone)
+        self.shares_v2_client.wait_for_share_replica_status(
+            share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
+            status_attr='replica_state')
+        snapshot = self.create_snapshot_wait_for_active(share["id"])
+        self.shares_v2_client.delete_snapshot(snapshot['id'])
+        self.shares_v2_client.wait_for_resource_deletion(
+            snapshot_id=snapshot["id"])
+
+    @test.attr(type=["gate", ])
+    def test_create_replica_from_snapshot_share(self):
+        """Test a replica of a share that was created from a snapshot."""
+
+        share = self.create_share(share_type_id=self.share_type['id'],
+                                  availability_zone=self.share_zone)
+        orig_snapshot = self.create_snapshot_wait_for_active(share["id"])
+        snap_share = self.create_share(snapshot_id=orig_snapshot['id'])
+        original_replica = self.shares_v2_client.list_share_replicas(
+            snap_share["id"])[0]
+        share_replica = self.create_share_replica(snap_share["id"],
+                                                  self.replica_zone,
+                                                  cleanup=False)
+        self.addCleanup(self.delete_share_replica, original_replica['id'])
+        self.shares_v2_client.wait_for_share_replica_status(
+            share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
+            status_attr='replica_state')
+        self.promote_share_replica(share_replica['id'])
+        # Delete the demoted replica so the promoted replica can be
+        # cleaned up during test cleanup
+        self.delete_share_replica(original_replica['id'])
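All five new tests gate on microversion 2.11, where share replication entered the v2 API. A check like skip_if_microversion_lt has to compare versions numerically, since a naive string comparison gets '2.9' versus '2.11' wrong; a sketch of the numeric check (the real helper lives elsewhere in manila_tempest_tests, so the function name here is illustrative):

    def microversion_lt(version, minimum):
        # Compare dotted microversions numerically, not lexically.
        def as_tuple(v):
            return tuple(int(part) for part in v.split('.'))
        return as_tuple(version) < as_tuple(minimum)

    assert microversion_lt('2.9', '2.11')        # numerically 9 < 11: skip
    assert not microversion_lt('2.11', '2.11')   # equal: run
    assert '2.9' > '2.11'                        # lexical comparison misleads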