[NetApp] Add readable replication type support

Implement the `readable` replication type for the NetApp driver.
The driver keeps supporting the `dr` type as well, so the driver's
reported replication type becomes a list containing both.

Replicas of the readable style are mounted, have their export created
and their QoS applied. When promoting, the original active replica does
not need to be unmounted; the user only loses write access.

The update access interface now applies rules to non-active replicas
that are readable.
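
For orientation, a minimal sketch (plain Python; the names are illustrative
and not part of the driver) of how the share-type extra spec drives the
readable-specific steps implemented in the diffs below:

    # Hypothetical, simplified summary of the behaviour added below: which
    # extra steps apply to a replica, keyed off the share-type extra spec.
    REPLICATION_TYPE_READABLE = 'readable'

    def replica_steps(extra_specs):
        """Return which readable-specific steps the driver performs."""
        is_readable = (extra_specs.get('replication_type') ==
                       REPLICATION_TYPE_READABLE)
        return {
            'mount_destination': is_readable,    # create_snapmirror(mount=True)
            'create_export': is_readable,        # _create_export(replica=True)
            'apply_qos': is_readable,            # _allocate_container(set_qos=True)
            'apply_access_rules': is_readable,   # helper.update_access(...)
            'unmount_on_promote': not is_readable,
        }

    # Example: a share type carrying replication_type=readable
    assert replica_steps({'replication_type': 'readable'})['create_export']
    assert not replica_steps({'replication_type': 'dr'})['create_export']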

Implements: bp netapp-readable-replica
Change-Id: Icc74eaecc75c3064715f91bebb994e93c0053663
Signed-off-by: Felipe Rodrigues <felipefuty01@gmail.com>
Felipe Rodrigues 2021-07-30 02:58:00 -03:00 committed by Nahim Alves de Souza
parent d955928947
commit 556c361558
14 changed files with 779 additions and 202 deletions

View File

@ -3279,7 +3279,12 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
@na_utils.trace
def remove_cifs_share(self, share_name):
self.send_request('cifs-share-delete', {'share-name': share_name})
try:
self.send_request('cifs-share-delete', {'share-name': share_name})
except netapp_api.NaApiError as e:
if e.code == netapp_api.EOBJECTNOTFOUND:
return
raise
@na_utils.trace
def add_nfs_export_rule(self, policy_name, client_match, readonly,

View File

@ -160,11 +160,12 @@ class DataMotionSession(object):
'last-transfer-end-timestamp'])
return snapmirrors
def create_snapmirror(self, source_share_obj, dest_share_obj):
def create_snapmirror(self, source_share_obj, dest_share_obj, mount=False):
"""Sets up a SnapMirror relationship between two volumes.
1. Create SnapMirror relationship
2. Initialize data transfer asynchronously
1. Create SnapMirror relationship.
2. Initialize data transfer asynchronously.
3. Mount destination volume if requested.
"""
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
@ -188,6 +189,13 @@ class DataMotionSession(object):
dest_vserver,
dest_volume_name)
# 3. Mount the destination volume and create a junction path
if mount:
replica_config = get_backend_configuration(dest_backend)
self.wait_for_mount_replica(
dest_client, dest_volume_name,
timeout=replica_config.netapp_mount_replica_timeout)
def delete_snapmirror(self, source_share_obj, dest_share_obj,
release=True):
"""Ensures all information about a SnapMirror relationship is removed.
@ -733,3 +741,38 @@ class DataMotionSession(object):
msg = _("Unable to release the snapmirror from source vserver %s. "
"Retries exhausted. Aborting") % source_vserver
raise exception.NetAppException(message=msg)
def wait_for_mount_replica(self, vserver_client, share_name, timeout=300):
"""Mount a replica share that is waiting for snapmirror initialize."""
interval = 10
retries = (timeout // interval or 1)
@utils.retry(exception.ShareBusyException, interval=interval,
retries=retries, backoff_rate=1)
def try_mount_volume():
try:
vserver_client.mount_volume(share_name)
except netapp_api.NaApiError as e:
undergoing_snap_init = 'snapmirror initialize'
msg_args = {'name': share_name}
if (e.code == netapp_api.EAPIERROR and
undergoing_snap_init in e.message):
msg = _('The share %(name)s is undergoing a snapmirror '
'initialize. Will retry the operation.') % msg_args
LOG.warning(msg)
raise exception.ShareBusyException(reason=msg)
else:
msg = _("Unable to perform mount operation for the share "
"%(name)s. Caught an unexpected error. Not "
"retrying.") % msg_args
raise exception.NetAppException(message=msg)
try:
try_mount_volume()
except exception.ShareBusyException:
msg_args = {'name': share_name}
msg = _("Unable to perform mount operation for the share %(name)s "
"because a snapmirror initialize operation is still in "
"progress. Retries exhausted. Not retrying.") % msg_args
raise exception.NetAppException(message=msg)
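As a quick sanity check of the retry budget above (plain Python, using the
helper's default values; backoff_rate=1 keeps the interval constant):
    timeout = 300                       # default argument of wait_for_mount_replica
    interval = 10                       # fixed polling interval used above
    retries = timeout // interval or 1
    assert retries == 30                # the mount is retried for roughly five minutes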

View File

@ -379,7 +379,8 @@ class NetAppCmodeFileStorageLibrary(object):
}
if self.configuration.replication_domain:
data['replication_type'] = 'dr'
data['replication_type'] = [constants.REPLICATION_TYPE_DR,
constants.REPLICATION_TYPE_READABLE]
data['replication_domain'] = self.configuration.replication_domain
return data
@ -642,7 +643,7 @@ class NetAppCmodeFileStorageLibrary(object):
# 2. Create a replica in destination host
self._allocate_container(
dest_share, dest_vserver, dest_vserver_client,
replica=True)
replica=True, set_qos=False)
# 3. Initialize snapmirror relationship with cloned share.
src_share_instance['replica_state'] = (
constants.REPLICA_STATE_ACTIVE)
@ -792,7 +793,8 @@ class NetAppCmodeFileStorageLibrary(object):
share,
[], # access_rules
[], # snapshot list
share_server)
share_server,
replication=False)
if replica_state in [None, constants.STATUS_ERROR]:
msg = _("Destination share has failed on replicating data "
"from source share.")
@ -864,7 +866,8 @@ class NetAppCmodeFileStorageLibrary(object):
@na_utils.trace
def _allocate_container(self, share, vserver, vserver_client,
replica=False, create_fpolicy=True):
replica=False, create_fpolicy=True,
set_qos=True):
"""Create new share on aggregate."""
share_name = self._get_backend_share_name(share['id'])
@ -875,7 +878,7 @@ class NetAppCmodeFileStorageLibrary(object):
raise exception.InvalidHost(reason=msg)
provisioning_options = self._get_provisioning_options_for_share(
share, vserver, vserver_client=vserver_client, replica=replica)
share, vserver, vserver_client=vserver_client, set_qos=set_qos)
if replica:
# If this volume is intended to be a replication destination,
@ -1101,7 +1104,7 @@ class NetAppCmodeFileStorageLibrary(object):
@na_utils.trace
def _get_provisioning_options_for_share(
self, share, vserver, vserver_client=None, replica=False):
self, share, vserver, vserver_client=None, set_qos=True):
"""Return provisioning options from a share.
Starting with a share, this method gets the extra specs, rationalizes
@ -1117,7 +1120,7 @@ class NetAppCmodeFileStorageLibrary(object):
self.validate_provisioning_options_for_share(provisioning_options,
extra_specs=extra_specs,
qos_specs=qos_specs)
if qos_specs and not replica:
if qos_specs and set_qos:
qos_policy_group = self._create_qos_policy_group(
share, vserver, qos_specs, vserver_client)
provisioning_options['qos_policy_group'] = qos_policy_group
@ -1264,7 +1267,7 @@ class NetAppCmodeFileStorageLibrary(object):
@na_utils.trace
def _delete_share(self, share, vserver, vserver_client,
remove_export=True):
remove_export=True, remove_qos=True):
share_name = self._get_backend_share_name(share['id'])
# Share doesn't need to exist to be assigned to a fpolicy scope
self._delete_fpolicy_for_share(share, vserver, vserver_client)
@ -1273,10 +1276,11 @@ class NetAppCmodeFileStorageLibrary(object):
if remove_export:
self._remove_export(share, vserver_client)
self._deallocate_container(share_name, vserver_client)
qos_policy_for_share = self._get_backend_qos_policy_group_name(
share['id'])
vserver_client.mark_qos_policy_group_for_deletion(
qos_policy_for_share)
if remove_qos:
qos_policy_for_share = self._get_backend_qos_policy_group_name(
share['id'])
vserver_client.mark_qos_policy_group_for_deletion(
qos_policy_for_share)
else:
LOG.info("Share %s does not exist.", share['id'])
@ -1306,7 +1310,7 @@ class NetAppCmodeFileStorageLibrary(object):
@na_utils.trace
def _create_export(self, share, share_server, vserver, vserver_client,
clear_current_export_policy=True,
ensure_share_already_exists=False):
ensure_share_already_exists=False, replica=False):
"""Creates NAS storage."""
helper = self._get_helper(share)
helper.set_client(vserver_client)
@ -1329,7 +1333,8 @@ class NetAppCmodeFileStorageLibrary(object):
callback = helper.create_share(
share, share_name,
clear_current_export_policy=clear_current_export_policy,
ensure_share_already_exists=ensure_share_already_exists)
ensure_share_already_exists=ensure_share_already_exists,
replica=replica)
# Generate export locations using addresses, metadata and callback
export_locations = [
@ -1907,11 +1912,13 @@ class NetAppCmodeFileStorageLibrary(object):
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
"""Updates access rules for a share."""
# NOTE(ameade): We do not need to add export rules to a non-active
# replica as it will fail.
# NOTE(felipe_rodrigues): do not add export rules to a non-active
# replica of the DR type, since it might not have its policy yet.
replica_state = share.get('replica_state')
if (replica_state is not None and
replica_state != constants.REPLICA_STATE_ACTIVE):
replica_state != constants.REPLICA_STATE_ACTIVE and
not self._is_readable_replica(share)):
return
try:
vserver, vserver_client = self._get_vserver(
@ -2022,17 +2029,35 @@ class NetAppCmodeFileStorageLibrary(object):
vserver_client = data_motion.get_client_for_backend(
dest_backend, vserver_name=vserver)
is_readable = self._is_readable_replica(new_replica)
self._allocate_container(new_replica, vserver, vserver_client,
replica=True, create_fpolicy=False)
replica=True, create_fpolicy=False,
set_qos=is_readable)
# 2. Setup SnapMirror
dm_session.create_snapmirror(active_replica, new_replica)
# 2. Set up SnapMirror, mounting the replica if it is the 'readable' type
dm_session.create_snapmirror(active_replica, new_replica,
mount=is_readable)
# 3. Create export location
model_update = {
'export_locations': [],
'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
'access_rules_status': constants.STATUS_ACTIVE,
}
if is_readable:
model_update['export_locations'] = self._create_export(
new_replica, share_server, vserver, vserver_client,
replica=True)
if access_rules:
helper = self._get_helper(new_replica)
helper.set_client(vserver_client)
share_name = self._get_backend_share_name(new_replica['id'])
try:
helper.update_access(new_replica, share_name, access_rules)
except Exception:
model_update['access_rules_status'] = (
constants.SHARE_INSTANCE_RULES_ERROR)
return model_update
@ -2054,14 +2079,15 @@ class NetAppCmodeFileStorageLibrary(object):
dm_session.delete_snapmirror(replica, other_replica)
# 2. Delete share
is_readable = self._is_readable_replica(replica)
vserver_client = data_motion.get_client_for_backend(
dest_backend, vserver_name=vserver)
share_name = self._get_backend_share_name(replica['id'])
if self._share_exists(share_name, vserver_client):
self._deallocate_container(share_name, vserver_client)
self._delete_share(replica, vserver, vserver_client,
remove_export=is_readable, remove_qos=is_readable)
def update_replica_state(self, context, replica_list, replica,
access_rules, share_snapshots, share_server=None):
access_rules, share_snapshots, share_server=None,
replication=True):
"""Returns the status of the given replica on this backend."""
active_replica = self.find_active_replica(replica_list)
@ -2088,10 +2114,12 @@ class NetAppCmodeFileStorageLibrary(object):
replica['id'])
return constants.STATUS_ERROR
is_readable = replication and self._is_readable_replica(replica)
if not snapmirrors:
if replica['status'] != constants.STATUS_CREATING:
try:
dm_session.create_snapmirror(active_replica, replica)
dm_session.create_snapmirror(active_replica, replica,
mount=is_readable)
except netapp_api.NaApiError:
LOG.exception("Could not create snapmirror for "
"replica %s.", replica['id'])
@ -2153,6 +2181,11 @@ class NetAppCmodeFileStorageLibrary(object):
SnapMirror source volume) and the replica. Also attempts setting up
SnapMirror relationships between the other replicas and the new
SnapMirror source volume ('active' instance).
For the DR style, promotion creates the QoS policy and export policy
for the new active replica, while for 'readable' those specs are only
updated and nothing is unmounted.
:param context: Request Context
:param replica_list: List of replicas, including the 'active' instance
:param replica: Replica to promote to SnapMirror source
@ -2186,43 +2219,46 @@ class NetAppCmodeFileStorageLibrary(object):
# Change the source replica for all destinations to the new
# active replica.
is_dr = not self._is_readable_replica(replica)
for r in replica_list:
if r['id'] != replica['id']:
r = self._safe_change_replica_source(dm_session, r,
orig_active_replica,
replica,
replica_list)
replica, replica_list,
is_dr, access_rules,
share_server=share_server)
new_replica_list.append(r)
orig_active_vserver = dm_session.get_vserver_from_share(
orig_active_replica)
if is_dr:
# NOTE(felipe_rodrigues): non active DR replica does not have the
# export location set, so during replica deletion the driver cannot
# delete the ONTAP export. Clean it up when it becomes non-active.
orig_active_vserver = dm_session.get_vserver_from_share(
orig_active_replica)
orig_active_replica_backend = (
share_utils.extract_host(orig_active_replica['host'],
level='backend_name'))
orig_active_replica_name = self._get_backend_share_name(
orig_active_replica['id'])
orig_active_vserver_client = data_motion.get_client_for_backend(
orig_active_replica_backend, vserver_name=orig_active_vserver)
orig_active_replica_helper = self._get_helper(orig_active_replica)
orig_active_replica_helper.set_client(orig_active_vserver_client)
try:
orig_active_replica_helper.cleanup_demoted_replica(
orig_active_replica, orig_active_replica_name)
except exception.StorageCommunicationException:
LOG.exception(
"Could not cleanup the original active replica export %s.",
orig_active_replica['id'])
# Cleanup the original active share if necessary
orig_active_replica_backend = (
share_utils.extract_host(orig_active_replica['host'],
level='backend_name'))
orig_active_replica_name = self._get_backend_share_name(
orig_active_replica['id'])
orig_active_vserver_client = data_motion.get_client_for_backend(
orig_active_replica_backend, vserver_name=orig_active_vserver)
orig_active_replica_helper = self._get_helper(orig_active_replica)
orig_active_replica_helper.set_client(orig_active_vserver_client)
try:
orig_active_replica_helper.cleanup_demoted_replica(
orig_active_replica, orig_active_replica_name)
except exception.StorageCommunicationException:
LOG.exception("Could not cleanup the original active replica %s.",
orig_active_replica['id'])
# Unmount the original active replica.
self._unmount_orig_active_replica(orig_active_replica,
orig_active_vserver)
self._unmount_orig_active_replica(orig_active_replica,
orig_active_vserver)
self._handle_qos_on_replication_change(dm_session,
new_active_replica,
orig_active_replica,
is_dr,
share_server=share_server)
return new_replica_list
@ -2247,16 +2283,22 @@ class NetAppCmodeFileStorageLibrary(object):
orig_active_replica['id'])
def _handle_qos_on_replication_change(self, dm_session, new_active_replica,
orig_active_replica,
orig_active_replica, is_dr,
share_server=None):
# QoS operations: Remove and purge QoS policy on old active replica
# if any and create a new policy on the destination if necessary.
"""Handle QoS change while promoting a replica."""
# QoS is only available for cluster credentials.
if not self._have_cluster_creds:
return
extra_specs = share_types.get_extra_specs_from_share(
orig_active_replica)
qos_specs = self._get_normalized_qos_specs(extra_specs)
if qos_specs and self._have_cluster_creds:
if is_dr and qos_specs:
dm_session.remove_qos_on_old_active_replica(orig_active_replica)
if qos_specs:
# Check if a QoS policy already exists for the promoted replica,
# if it does, modify it as necessary, else create it:
try:
@ -2293,6 +2335,7 @@ class NetAppCmodeFileStorageLibrary(object):
For promotion, the existing SnapMirror relationship must be broken
and access rules have to be granted to the broken off replica to
use it as an independent share.
:param context: Request Context
:param dm_session: Data motion object for SnapMirror operations
:param orig_active_replica: Original SnapMirror source
@ -2340,15 +2383,21 @@ class NetAppCmodeFileStorageLibrary(object):
def _safe_change_replica_source(self, dm_session, replica,
orig_source_replica,
new_source_replica, replica_list):
new_source_replica, replica_list,
is_dr, access_rules,
share_server=None):
"""Attempts to change the SnapMirror source to new source.
If the attempt fails, 'replica_state' is set to 'error'.
:param dm_session: Data motion object for SnapMirror operations
:param replica: Replica that requires a change of source
:param orig_source_replica: Original SnapMirror source volume
:param new_source_replica: New SnapMirror source volume
:return: Updated replica
:param dm_session: Data motion object for SnapMirror operations.
:param replica: Replica that requires a change of source.
:param orig_source_replica: Original SnapMirror source volume.
:param new_source_replica: New SnapMirror source volume.
:param is_dr: whether the replication type is 'dr'; if not, it is 'readable'.
:param access_rules: share access rules to be applied.
:param share_server: share server.
:return: Updated replica.
"""
try:
dm_session.change_snapmirror_source(replica,
@ -2358,22 +2407,70 @@ class NetAppCmodeFileStorageLibrary(object):
except exception.StorageCommunicationException:
replica['status'] = constants.STATUS_ERROR
replica['replica_state'] = constants.STATUS_ERROR
replica['export_locations'] = []
if is_dr:
replica['export_locations'] = []
msg = ("Failed to change replica (%s) to a SnapMirror "
"destination. Replica backend is unreachable.")
LOG.exception(msg, replica['id'])
return replica
except netapp_api.NaApiError:
replica['status'] = constants.STATUS_ERROR
replica['replica_state'] = constants.STATUS_ERROR
replica['export_locations'] = []
if is_dr:
replica['export_locations'] = []
msg = ("Failed to change replica (%s) to a SnapMirror "
"destination.")
LOG.exception(msg, replica['id'])
return replica
replica['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC
replica['export_locations'] = []
replica['status'] = constants.STATUS_AVAILABLE
if is_dr:
replica['export_locations'] = []
return replica
# NOTE(felipe_rodrigues): a readable replica might be in an error
# state without being mounted or exported. Retry to recover it.
replica_volume_name, replica_vserver, replica_backend = (
dm_session.get_backend_info_for_share(replica))
replica_client = data_motion.get_client_for_backend(
replica_backend, vserver_name=replica_vserver)
try:
replica_config = data_motion.get_backend_configuration(
replica_backend)
dm_session.wait_for_mount_replica(
replica_client, replica_volume_name,
timeout=replica_config.netapp_mount_replica_timeout)
except netapp_api.NaApiError:
replica['status'] = constants.STATUS_ERROR
replica['replica_state'] = constants.STATUS_ERROR
msg = "Failed to mount readable replica (%s)."
LOG.exception(msg, replica['id'])
return replica
try:
replica['export_locations'] = self._create_export(
replica, share_server, replica_vserver, replica_client,
replica=True)
except netapp_api.NaApiError:
replica['status'] = constants.STATUS_ERROR
replica['replica_state'] = constants.STATUS_ERROR
msg = "Failed to create export for readable replica (%s)."
LOG.exception(msg, replica['id'])
return replica
helper = self._get_helper(replica)
helper.set_client(replica_client)
try:
helper.update_access(
replica, replica_volume_name, access_rules)
except Exception:
replica['access_rules_status'] = (
constants.SHARE_INSTANCE_RULES_ERROR)
else:
replica['access_rules_status'] = constants.STATUS_ACTIVE
return replica
@ -2525,6 +2622,12 @@ class NetAppCmodeFileStorageLibrary(object):
if e.code != netapp_api.EOBJECTNOTFOUND:
raise
def _is_readable_replica(self, replica):
"""Check the replica type to find out if the replica is readable."""
extra_specs = share_types.get_extra_specs_from_share(replica)
return (extra_specs.get('replication_type') ==
constants.REPLICATION_TYPE_READABLE)
def _check_destination_vserver_for_vol_move(self, source_share,
source_vserver,
dest_share_server):

View File

@ -52,8 +52,26 @@ class NetAppBaseHelper(object):
"""Returns whether an access rule specifies read-only access."""
return access_level == constants.ACCESS_LEVEL_RO
@staticmethod
def _get_share_export_location(share):
"""Returns the export location of the share.
The share may contain only the list of export locations, depending on
the entity provided by the manager.
"""
export_location = share.get('export_location')
if not export_location:
export_location_list = share.get('export_locations')
if (isinstance(export_location_list, list) and
len(export_location_list) > 0):
export_location = export_location_list[0]['path']
return export_location
@abc.abstractmethod
def create_share(self, share, share_name):
def create_share(self, share, share_name,
clear_current_export_policy=True,
ensure_share_already_exists=False, replica=False):
"""Creates NAS share."""
@abc.abstractmethod

View File

@ -30,20 +30,32 @@ class NetAppCmodeCIFSHelper(base.NetAppBaseHelper):
@na_utils.trace
def create_share(self, share, share_name,
clear_current_export_policy=True,
ensure_share_already_exists=False):
"""Creates CIFS share on Data ONTAP Vserver."""
if not ensure_share_already_exists:
self._client.create_cifs_share(share_name)
elif not self._client.cifs_share_exists(share_name):
ensure_share_already_exists=False, replica=False):
"""Creates CIFS share if does not exist on Data ONTAP Vserver.
The new CIFS share has Everyone access, so it removes all access after
creating.
:param share: share entity.
:param share_name: share name that must be the CIFS share name.
:param clear_current_export_policy: ignored, NFS only.
:param ensure_share_already_exists: ensures that CIFS share exists.
:param replica: whether this is a replica volume (DP type).
"""
cifs_exist = self._client.cifs_share_exists(share_name)
if ensure_share_already_exists and not cifs_exist:
msg = _("The expected CIFS share %(share_name)s was not found.")
msg_args = {'share_name': share_name}
raise exception.NetAppException(msg % msg_args)
if clear_current_export_policy:
elif not cifs_exist:
self._client.create_cifs_share(share_name)
self._client.remove_cifs_share_access(share_name, 'Everyone')
# Ensure 'ntfs' security style
self._client.set_volume_security_style(share_name,
security_style='ntfs')
# Ensure 'ntfs' security style for RW volume. DP volumes cannot set it.
if not replica:
self._client.set_volume_security_style(share_name,
security_style='ntfs')
# Return a callback that may be used for generating export paths
# for this share.
@ -159,10 +171,10 @@ class NetAppCmodeCIFSHelper(base.NetAppBaseHelper):
_, share_name = self._get_export_location(share)
return share_name
@staticmethod
def _get_export_location(share):
@na_utils.trace
def _get_export_location(self, share):
"""Returns host ip and share name for a given CIFS share."""
export_location = share['export_location'] or '\\\\\\'
export_location = self._get_share_export_location(share) or '\\\\\\'
regex = r'^(?:\\\\|//)(?P<host_ip>.*)(?:\\|/)(?P<share_name>.*)$'
match = re.match(regex, export_location)
if match:
@ -172,10 +184,5 @@ class NetAppCmodeCIFSHelper(base.NetAppBaseHelper):
@na_utils.trace
def cleanup_demoted_replica(self, share, share_name):
"""Cleans up some things regarding a demoted replica."""
# NOTE(carloss): This is necessary due to bug 1879368. If we do not
# remove this CIFS share, in case the demoted replica is promoted
# back, the promotion will fail due to a duplicated entry for the
# share, since a create share request is sent to the backend every
# time a promotion occurs.
"""Cleans up CIFS share for a demoted replica."""
self._client.remove_cifs_share(share_name)

View File

@ -42,11 +42,23 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
@na_utils.trace
def create_share(self, share, share_name,
clear_current_export_policy=True,
ensure_share_already_exists=False):
"""Creates NFS share."""
# TODO(dviroel): Ensure that nfs share already exists if
# ensure_share_already_exists is True. Although, no conflicts are
# expected here since there is no create share operation being made.
ensure_share_already_exists=False, replica=False):
"""Ensures the share export policy is set correctly.
The export policy must have the same name as the share. If it matches,
nothing is done. Otherwise, there are two possible scenarios:
1. the policy is 'default': a new export policy is created.
2. the policy has any other name: the assigned policy is renamed to
   match the share name.
:param share: share entity.
:param share_name: share name that must be the export policy name.
:param clear_current_export_policy: set the policy to 'default' before
the check.
:param ensure_share_already_exists: ignored, CIFS only.
:param replica: whether this is a replica volume (DP type).
"""
if clear_current_export_policy:
self._client.clear_nfs_export_policy_for_volume(share_name)
self._ensure_export_policy(share, share_name)
@ -139,7 +151,7 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
@na_utils.trace
def get_target(self, share):
"""Returns ID of target OnTap device based on export location."""
"""Returns ID of target ONTAP device based on export location."""
return self._get_export_location(share)[0]
@na_utils.trace
@ -149,10 +161,10 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
volume = self._client.get_volume_at_junction_path(volume_junction_path)
return volume.get('name') if volume else None
@staticmethod
def _get_export_location(share):
@na_utils.trace
def _get_export_location(self, share):
"""Returns IP address and export location of an NFS share."""
export_location = share['export_location'] or ':'
export_location = self._get_share_export_location(share) or ':'
result = export_location.rsplit(':', 1)
if len(result) != 2:
return ['', '']
@ -205,4 +217,6 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
@na_utils.trace
def cleanup_demoted_replica(self, share, share_name):
"""Cleans up export NFS policy for a demoted replica."""
self.delete_share(share, share_name)
return

View File

@ -210,6 +210,11 @@ netapp_data_motion_opts = [
help='The maximum time in seconds that a share server '
'migration waits for a vserver to change its internal '
'states.'),
cfg.IntOpt('netapp_mount_replica_timeout',
min=0,
default=3600, # One Hour
help='The maximum time in seconds to wait for mounting '
'a replica.'),
]
CONF = cfg.CONF
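A hedged example of setting the new option in a NetApp backend stanza of
manila.conf; the section name and the 1800-second value are illustrative,
not defaults:
    [netapp_backend]
    share_backend_name = netapp_backend
    share_driver = manila.share.drivers.netapp.common.NetAppDriver
    netapp_mount_replica_timeout = 1800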

View File

@ -5015,6 +5015,18 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_has_calls([
mock.call('cifs-share-delete', cifs_share_delete_args)])
def test_remove_cifs_share_not_found(self):
self.mock_object(self.client,
'send_request',
self._mock_api_error(code=netapp_api.EOBJECTNOTFOUND))
self.client.remove_cifs_share(fake.SHARE_NAME)
cifs_share_args = {'share-name': fake.SHARE_NAME}
self.client.send_request.assert_has_calls([
mock.call('cifs-share-delete', cifs_share_args)])
def test_add_nfs_export_rule(self):
mock_get_nfs_export_rule_indices = self.mock_object(

View File

@ -199,13 +199,19 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
fake.BACKEND_NAME, vserver_name=fake.VSERVER1
)
def test_create_snapmirror(self):
@ddt.data(True, False)
def test_create_snapmirror_mount(self, mount):
mock_dest_client = mock.Mock()
self.mock_object(data_motion, 'get_client_for_backend',
mock.Mock(return_value=mock_dest_client))
self.mock_object(self.dm_session, 'wait_for_mount_replica')
mock_backend_config = na_fakes.create_configuration()
mock_backend_config.netapp_mount_replica_timeout = 30
self.mock_object(data_motion, 'get_backend_configuration',
mock.Mock(return_value=mock_backend_config))
self.dm_session.create_snapmirror(self.fake_src_share,
self.fake_dest_share)
self.fake_dest_share, mount=mount)
mock_dest_client.create_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
@ -215,6 +221,11 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name
)
if mount:
self.dm_session.wait_for_mount_replica.assert_called_once_with(
mock_dest_client, self.fake_dest_vol_name, timeout=30)
else:
self.dm_session.wait_for_mount_replica.assert_not_called()
def test_create_snapmirror_svm(self):
mock_dest_client = mock.Mock()
@ -1051,3 +1062,48 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
src_mock_client.release_snapmirror_svm.assert_called_once_with(
fake.VSERVER1, fake.VSERVER2
)
def test_wait_for_mount_replica(self):
mock_client = mock.Mock()
self.mock_object(time, 'sleep')
mock_warning_log = self.mock_object(data_motion.LOG, 'warning')
self.dm_session.wait_for_mount_replica(
mock_client, fake.SHARE_NAME)
mock_client.mount_volume.assert_called_once_with(fake.SHARE_NAME)
self.assertEqual(0, mock_warning_log.call_count)
def test_wait_for_mount_replica_timeout(self):
mock_client = mock.Mock()
self.mock_object(time, 'sleep')
mock_warning_log = self.mock_object(data_motion.LOG, 'warning')
undergoing_snapmirror = (
'The volume is undergoing a snapmirror initialize.')
na_api_error = netapp_api.NaApiError(code=netapp_api.EAPIERROR,
message=undergoing_snapmirror)
mock_client.mount_volume.side_effect = na_api_error
self.assertRaises(exception.NetAppException,
self.dm_session.wait_for_mount_replica,
mock_client, fake.SHARE_NAME, timeout=30)
self.assertEqual(3, mock_client.mount_volume.call_count)
self.assertEqual(3, mock_warning_log.call_count)
def test_wait_for_mount_replica_api_not_found(self):
mock_client = mock.Mock()
self.mock_object(time, 'sleep')
mock_warning_log = self.mock_object(data_motion.LOG, 'warning')
na_api_error = netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND)
mock_client.mount_volume.side_effect = na_api_error
self.assertRaises(exception.NetAppException,
self.dm_session.wait_for_mount_replica,
mock_client, fake.SHARE_NAME, timeout=30)
mock_client.mount_volume.assert_called_once_with(fake.SHARE_NAME)
mock_warning_log.assert_not_called()

View File

@ -475,7 +475,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
'driver_version': '1.0',
'netapp_storage_family': 'ontap_cluster',
'storage_protocol': 'NFS_CIFS',
'replication_type': 'dr',
'replication_type': ['dr', 'readable'],
'replication_domain': 'fake_domain',
'pools': fake.POOLS,
'share_group_stats': {'consistent_snapshot_support': 'host'},
@ -832,7 +832,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.src_vserver_client, split=False, create_fpolicy=False)
self.mock_allocate_container.assert_called_once_with(
self.fake_share, fake.VSERVER2,
self.dest_vserver_client, replica=True)
self.dest_vserver_client, replica=True, set_qos=False)
self.mock_dm_create_snapmirror.assert_called_once()
self.temp_src_share['replica_state'] = (
constants.REPLICA_STATE_ACTIVE)
@ -1214,7 +1214,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.mock_get_vserver.assert_called_once_with(fake.SHARE_SERVER)
self.mock_update_rep_state.assert_called_once_with(
None, [self.fake_src_share], fake.SHARE, [], [], fake.SHARE_SERVER)
None, [self.fake_src_share], fake.SHARE, [], [], fake.SHARE_SERVER,
replication=False
)
if replica_state == constants.REPLICA_STATE_IN_SYNC:
self.mock_update_snapmirror.assert_called_once_with(
self.fake_src_share, fake.SHARE)
@ -1291,7 +1293,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock_get_provisioning_opts.assert_called_once_with(
fake.SHARE_INSTANCE, fake.VSERVER1, vserver_client=vserver_client,
replica=False)
set_qos=True)
vserver_client.create_volume.assert_called_once_with(
fake.POOL_NAME, fake.SHARE_NAME, fake.SHARE['size'],
@ -1333,7 +1335,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock_get_provisioning_opts.assert_called_once_with(
fake.SHARE_INSTANCE, fake.VSERVER1, vserver_client=vserver_client,
replica=True)
set_qos=True)
vserver_client.create_volume.assert_called_once_with(
fake.POOL_NAME, fake.SHARE_NAME, fake.SHARE['size'],
@ -1423,12 +1425,12 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.SHARE_INSTANCE, fake.INVALID_EXTRA_SPEC_COMBO,
list(self.library.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP))
@ddt.data({'extra_specs': fake.EXTRA_SPEC, 'is_replica': False},
{'extra_specs': fake.EXTRA_SPEC_WITH_QOS, 'is_replica': True},
{'extra_specs': fake.EXTRA_SPEC, 'is_replica': False},
{'extra_specs': fake.EXTRA_SPEC_WITH_QOS, 'is_replica': True})
@ddt.data({'extra_specs': fake.EXTRA_SPEC, 'set_qos': True},
{'extra_specs': fake.EXTRA_SPEC_WITH_QOS, 'set_qos': False},
{'extra_specs': fake.EXTRA_SPEC, 'set_qos': True},
{'extra_specs': fake.EXTRA_SPEC_WITH_QOS, 'set_qos': False})
@ddt.unpack
def test_get_provisioning_options_for_share(self, extra_specs, is_replica):
def test_get_provisioning_options_for_share(self, extra_specs, set_qos):
qos = True if fake.QOS_EXTRA_SPEC in extra_specs else False
vserver_client = mock.Mock()
@ -1453,9 +1455,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
result = self.library._get_provisioning_options_for_share(
fake.SHARE_INSTANCE, fake.VSERVER1, vserver_client=vserver_client,
replica=is_replica)
set_qos=set_qos)
if qos and is_replica:
if qos and not set_qos:
expected_provisioning_opts = fake.PROVISIONING_OPTIONS
self.assertFalse(mock_create_qos_policy_group.called)
else:
@ -1496,7 +1498,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library._get_provisioning_options_for_share,
fake.SHARE_INSTANCE, fake.VSERVER1,
vserver_client=vserver_client,
replica=False)
set_qos=True)
mock_get_extra_specs_from_share.assert_called_once_with(
fake.SHARE_INSTANCE)
@ -1813,6 +1815,41 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
.assert_called_once_with(qos_policy_name))
self.assertEqual(0, lib_base.LOG.info.call_count)
def test__delete_share_no_remove_qos_and_export(self):
vserver_client = mock.Mock()
mock_share_exists = self.mock_object(self.library,
'_share_exists',
mock.Mock(return_value=True))
mock_remove_export = self.mock_object(self.library, '_remove_export')
mock_deallocate_container = self.mock_object(self.library,
'_deallocate_container')
mock_delete_policy = self.mock_object(self.library,
'_delete_fpolicy_for_share')
mock_get_backend_qos = self.mock_object(
self.library, '_get_backend_qos_policy_group_name')
mock_get_share_name = self.mock_object(
self.library, '_get_backend_share_name',
mock.Mock(return_value=fake.SHARE_NAME))
self.library._delete_share(fake.SHARE,
fake.VSERVER1,
vserver_client,
remove_export=False,
remove_qos=False)
mock_get_share_name.assert_called_once_with(fake.SHARE_ID)
mock_delete_policy.assert_called_once_with(fake.SHARE, fake.VSERVER1,
vserver_client)
mock_share_exists.assert_called_once_with(fake.SHARE_NAME,
vserver_client)
mock_deallocate_container.assert_called_once_with(fake.SHARE_NAME,
vserver_client)
mock_remove_export.assert_not_called()
mock_get_backend_qos.assert_not_called()
vserver_client.mark_qos_policy_group_for_deletion.assert_not_called()
@ddt.data(exception.InvalidInput(reason='fake_reason'),
exception.VserverNotSpecified(),
exception.VserverNotFound(vserver='fake_vserver'))
@ -1907,7 +1944,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.SHARE, fake.SHARE_SERVER, fake.LIFS)
protocol_helper.create_share.assert_called_once_with(
fake.SHARE, fake.SHARE_NAME, clear_current_export_policy=True,
ensure_share_already_exists=False)
ensure_share_already_exists=False, replica=False)
def test_create_export_lifs_not_found(self):
@ -3019,6 +3056,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.mock_object(self.library,
'_get_helper',
mock.Mock(return_value=protocol_helper))
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=True))
mock_share_exists = self.mock_object(self.library,
'_share_exists',
mock.Mock(return_value=True))
@ -3051,6 +3091,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.mock_object(self.library,
'_get_helper',
mock.Mock(return_value=protocol_helper))
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=True))
mock_share_exists = self.mock_object(self.library, '_share_exists')
self.library.update_access(self.context,
@ -3077,6 +3120,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.mock_object(self.library,
'_get_helper',
mock.Mock(return_value=protocol_helper))
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=True))
mock_share_exists = self.mock_object(self.library,
'_share_exists',
mock.Mock(return_value=False))
@ -3098,12 +3144,15 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertFalse(protocol_helper.update_access.called)
def test_update_access_to_active_replica(self):
fake_share = copy.deepcopy(fake.SHARE)
fake_share['replica_state'] = constants.REPLICA_STATE_ACTIVE
fake_share_copy = copy.deepcopy(fake.SHARE)
fake_share_copy['replica_state'] = constants.REPLICA_STATE_ACTIVE
vserver_client = mock.Mock()
mock_get_vserver = self.mock_object(
self.library, '_get_vserver',
mock.Mock(return_value=(fake.VSERVER1, vserver_client)))
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=True))
protocol_helper = mock.Mock()
protocol_helper.update_access.return_value = None
self.mock_object(self.library,
@ -3114,7 +3163,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=True))
self.library.update_access(self.context,
fake_share,
fake_share_copy,
[fake.SHARE_ACCESS],
[],
[],
@ -3128,16 +3177,39 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
protocol_helper.update_access.assert_called_once_with(
fake.SHARE, fake.SHARE_NAME, [fake.SHARE_ACCESS])
def test_update_access_to_in_sync_replica(self):
fake_share = copy.deepcopy(fake.SHARE)
fake_share['replica_state'] = constants.REPLICA_STATE_IN_SYNC
@ddt.data(True, False)
def test_update_access_to_in_sync_replica(self, is_readable):
fake_share_copy = copy.deepcopy(fake.SHARE)
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=is_readable))
fake_share_copy['replica_state'] = constants.REPLICA_STATE_IN_SYNC
vserver_client = mock.Mock()
mock_get_vserver = self.mock_object(
self.library, '_get_vserver',
mock.Mock(return_value=(fake.VSERVER1, vserver_client)))
protocol_helper = mock.Mock()
protocol_helper.update_access.return_value = None
self.mock_object(self.library,
'_get_helper',
mock.Mock(return_value=protocol_helper))
self.mock_object(self.library, '_share_exists',
mock.Mock(return_value=True))
self.library.update_access(self.context,
fake_share,
fake_share_copy,
[fake.SHARE_ACCESS],
[],
[],
share_server=fake.SHARE_SERVER)
if is_readable:
mock_get_vserver.assert_called_once_with(
share_server=fake.SHARE_SERVER)
else:
mock_get_vserver.assert_not_called()
def test_setup_server(self):
self.assertRaises(NotImplementedError,
self.library.setup_server,
@ -3257,30 +3329,67 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertDictEqual({}, ssc_stats)
self.assertFalse(self.library._client.get_aggregate_raid_types.called)
def test_create_replica(self):
@ddt.data(
{'is_readable': True, 'rules_status': constants.STATUS_ACTIVE},
{'is_readable': True, 'rules_status': (
constants.SHARE_INSTANCE_RULES_ERROR)},
{'is_readable': False, 'rules_status': constants.STATUS_ACTIVE})
@ddt.unpack
def test_create_replica(self, is_readable, rules_status):
vserver_client = mock.Mock()
self.mock_object(self.library,
'_allocate_container')
mock_dm_session = mock.Mock()
self.mock_object(data_motion, "DataMotionSession",
mock.Mock(return_value=mock_dm_session))
self.mock_object(data_motion, 'get_client_for_backend')
self.mock_object(data_motion, 'get_client_for_backend',
mock.Mock(return_value=vserver_client))
self.mock_object(mock_dm_session, 'get_vserver_from_share',
mock.Mock(return_value=fake.VSERVER1))
self.mock_object(self.library, '_get_backend_share_name',
mock.Mock(return_value=fake.SHARE_NAME))
mock_is_readable = self.mock_object(
self.library, '_is_readable_replica',
mock.Mock(return_value=is_readable))
mock_create_export = self.mock_object(
self.library, '_create_export', mock.Mock(return_value=[]))
protocol_helper = mock.Mock()
if rules_status == constants.STATUS_ACTIVE:
protocol_helper.update_access.return_value = None
else:
protocol_helper.update_access.side_effect = (
netapp_api.NaApiError(code=0))
mock_get_helper = self.mock_object(
self.library, '_get_helper',
mock.Mock(return_value=protocol_helper))
expected_model_update = {
'export_locations': [],
'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
'access_rules_status': constants.STATUS_ACTIVE,
'access_rules_status': rules_status,
}
model_update = self.library.create_replica(
None, [fake.SHARE], fake.SHARE, [], [],
None, [fake.SHARE], fake.SHARE, [fake.SHARE_ACCESS], [],
share_server=None)
self.assertDictEqual(expected_model_update, model_update)
mock_dm_session.create_snapmirror.assert_called_once_with(
fake.SHARE, fake.SHARE)
fake.SHARE, fake.SHARE, mount=is_readable)
data_motion.get_client_for_backend.assert_called_once_with(
fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
mock_is_readable.assert_called_once_with(fake.SHARE)
if is_readable:
mock_create_export.assert_called_once_with(
fake.SHARE, None, fake.VSERVER1, vserver_client, replica=True)
mock_get_helper.assert_called_once_with(fake.SHARE)
protocol_helper.update_access.assert_called_once_with(
fake.SHARE, fake.SHARE_NAME, [fake.SHARE_ACCESS])
else:
mock_create_export.assert_not_called()
mock_get_helper.assert_not_called()
protocol_helper.update_access.assert_not_called()
def test_create_replica_with_share_server(self):
self.mock_object(self.library,
@ -3292,7 +3401,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.mock_object(data_motion, 'get_client_for_backend')
self.mock_object(mock_dm_session, 'get_vserver_from_share',
mock.Mock(return_value=fake.VSERVER1))
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
expected_model_update = {
'export_locations': [],
'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
@ -3305,7 +3416,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertDictEqual(expected_model_update, model_update)
mock_dm_session.create_snapmirror.assert_called_once_with(
fake.SHARE, fake.SHARE)
fake.SHARE, fake.SHARE, mount=False)
data_motion.get_client_for_backend.assert_called_once_with(
fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
@ -3321,17 +3432,17 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
replica_list = [active_replica, replica_1, replica_2]
self.mock_object(self.library,
'_deallocate_container',
'_delete_share',
mock.Mock())
self.mock_object(self.library,
'_share_exists',
mock.Mock(return_value=False))
mock_dm_session = mock.Mock()
self.mock_object(data_motion, "DataMotionSession",
mock.Mock(return_value=mock_dm_session))
self.mock_object(data_motion, 'get_client_for_backend')
self.mock_object(mock_dm_session, 'get_vserver_from_share',
mock.Mock(return_value=fake.VSERVER1))
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
result = self.library.delete_replica(None,
replica_list,
@ -3359,11 +3470,11 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
replica_list = [active_replica, replica]
self.mock_object(self.library,
'_deallocate_container',
mock.Mock())
self.mock_object(self.library,
'_share_exists',
'_is_readable_replica',
mock.Mock(return_value=False))
self.mock_object(self.library,
'_delete_share',
mock.Mock())
mock_dm_session = mock.Mock()
self.mock_object(data_motion, "DataMotionSession",
mock.Mock(return_value=mock_dm_session))
@ -3384,44 +3495,6 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
data_motion.get_client_for_backend.assert_called_once_with(
fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
def test_delete_replica_share_absent_on_backend(self):
active_replica = fake_replica(
replica_state=constants.REPLICA_STATE_ACTIVE)
replica = fake_replica(replica_state=constants.REPLICA_STATE_IN_SYNC,
host=fake.MANILA_HOST_NAME)
replica_list = [active_replica, replica]
self.mock_object(self.library,
'_deallocate_container',
mock.Mock())
self.mock_object(self.library,
'_share_exists',
mock.Mock(return_value=False))
mock_dm_session = mock.Mock()
self.mock_object(data_motion,
"DataMotionSession",
mock.Mock(return_value=mock_dm_session))
self.mock_object(data_motion, 'get_client_for_backend')
self.mock_object(mock_dm_session,
'get_vserver_from_share',
mock.Mock(return_value=fake.VSERVER1))
result = self.library.delete_replica(None,
replica_list,
replica,
[],
share_server=None)
self.assertIsNone(result)
self.assertFalse(self.library._deallocate_container.called)
mock_dm_session.delete_snapmirror.assert_has_calls([
mock.call(active_replica, replica),
mock.call(replica, active_replica)],
any_order=True)
data_motion.get_client_for_backend.assert_called_with(
fake.BACKEND_NAME, vserver_name=mock.ANY)
self.assertEqual(1, data_motion.get_client_for_backend.call_count)
def test_update_replica_state_no_snapmirror_share_creating(self):
vserver_client = mock.Mock()
self.mock_object(vserver_client, 'volume_exists',
@ -3431,7 +3504,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=(fake.VSERVER1,
vserver_client)))
self.mock_dm_session.get_snapmirrors = mock.Mock(return_value=[])
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
replica = copy.deepcopy(fake.SHARE)
replica['status'] = constants.STATUS_CREATING
@ -3470,6 +3545,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=(fake.VSERVER1,
vserver_client)))
self.mock_dm_session.get_snapmirrors = mock.Mock(return_value=[])
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
self.mock_dm_session.create_snapmirror.side_effect = (
netapp_api.NaApiError(code=0))
@ -3492,7 +3570,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=(fake.VSERVER1,
vserver_client)))
self.mock_dm_session.get_snapmirrors = mock.Mock(return_value=[])
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
replica = copy.deepcopy(fake.SHARE)
replica['status'] = status
@ -3520,6 +3600,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
vserver_client)))
self.mock_dm_session.get_snapmirrors = mock.Mock(
return_value=[fake_snapmirror])
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
result = self.library.update_replica_state(None, [fake.SHARE],
fake.SHARE, None, [],
@ -3548,6 +3631,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
vserver_client)))
self.mock_dm_session.get_snapmirrors = mock.Mock(
return_value=[fake_snapmirror])
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
result = self.library.update_replica_state(None, [fake.SHARE],
fake.SHARE, None, [],
@ -3589,6 +3675,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
vserver_client)))
self.mock_dm_session.get_snapmirrors = mock.Mock(
return_value=[fake_snapmirror])
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
vserver_client.resync_snapmirror_vol.side_effect = (
netapp_api.NaApiError)
@ -3617,6 +3706,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
vserver_client)))
self.mock_dm_session.get_snapmirrors = mock.Mock(
return_value=[fake_snapmirror])
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
result = self.library.update_replica_state(None, [fake.SHARE],
fake.SHARE, None, [],
@ -3639,6 +3731,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
vserver_client)))
self.mock_dm_session.get_snapmirrors = mock.Mock(
return_value=[fake_snapmirror])
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
result = self.library.update_replica_state(None, [fake.SHARE],
fake.SHARE, None, [],
@ -3678,6 +3773,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
vserver_client)))
self.mock_dm_session.get_snapmirrors = mock.Mock(
return_value=[fake_snapmirror])
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
result = self.library.update_replica_state(None, [fake.SHARE],
fake.SHARE, None, snapshots,
@ -3703,6 +3801,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
vserver_client)))
self.mock_dm_session.get_snapmirrors = mock.Mock(
return_value=[fake_snapmirror])
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
result = self.library.update_replica_state(None, [fake.SHARE],
fake.SHARE, None, snapshots,
@ -3710,7 +3811,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, result)
def test_promote_replica(self):
@ddt.data(True, False)
def test_promote_replica(self, is_readable):
self.mock_object(self.library,
'_get_vserver',
mock.Mock(return_value=(fake.VSERVER1,
@ -3729,7 +3831,20 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=mock_dm_session))
self.mock_object(mock_dm_session, 'get_vserver_from_share',
mock.Mock(return_value=fake.VSERVER1))
self.mock_object(mock_dm_session, 'get_backend_info_for_share',
mock.Mock(return_value=(fake.SHARE_NAME,
fake.VSERVER1,
fake.BACKEND_NAME)))
mock_client = mock.Mock()
self.mock_object(data_motion, "get_client_for_backend",
mock.Mock(return_value=mock_client))
mock_backend_config = fake.get_config_cmode()
self.mock_object(data_motion, 'get_backend_configuration',
mock.Mock(return_value=mock_backend_config))
self.mock_object(self.client, 'cleanup_demoted_replica')
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=is_readable))
replicas = self.library.promote_replica(
None, [self.fake_replica, self.fake_replica_2],
@ -3752,11 +3867,18 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
actual_replica_2['export_locations'])
self.assertEqual(constants.STATUS_ACTIVE,
actual_replica_2['access_rules_status'])
self.library._unmount_orig_active_replica.assert_called_once_with(
self.fake_replica, fake.VSERVER1)
if is_readable:
self.library._unmount_orig_active_replica.assert_not_called()
protocol_helper.cleanup_demoted_replica.assert_not_called()
self.assertEqual('fake_export_location',
actual_replica_1['export_locations'])
else:
self.library._unmount_orig_active_replica.assert_called_once_with(
self.fake_replica, fake.VSERVER1)
protocol_helper.cleanup_demoted_replica.assert_called_once_with(
self.fake_replica, fake.SHARE['name'])
self.assertEqual([], actual_replica_1['export_locations'])
self.library._handle_qos_on_replication_change.assert_called_once()
protocol_helper.cleanup_demoted_replica.assert_called_once_with(
self.fake_replica, fake.SHARE['name'])
def test_promote_replica_cleanup_demoted_storage_error(self):
self.mock_object(self.library,
@ -3767,6 +3889,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.mock_object(self.library,
'_get_helper',
mock.Mock(return_value=protocol_helper))
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
self.mock_object(self.library, '_create_export',
mock.Mock(return_value='fake_export_location'))
self.mock_object(self.library, '_unmount_orig_active_replica')
@ -3847,6 +3972,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=mock_dm_session))
self.mock_object(mock_dm_session, 'get_vserver_from_share',
mock.Mock(return_value=fake.VSERVER1))
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
replicas = self.library.promote_replica(
None, [self.fake_replica, self.fake_replica_2, fake_replica_3],
@ -3897,6 +4025,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=mock_dm_session))
self.mock_object(mock_dm_session, 'get_vserver_from_share',
mock.Mock(return_value=fake.VSERVER1))
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
replicas = self.library.promote_replica(
None, [self.fake_replica, self.fake_replica_2],
@ -3928,6 +4059,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
@ddt.data({'extra_specs': {'netapp:snapshot_policy': 'none'},
'have_cluster_creds': True},
{'extra_specs': {'netapp:snapshot_policy': 'none'},
'have_cluster_creds': True},
# Test case 2 is not a possible input
{'extra_specs': {'qos': True, 'netapp:maxiops': '3000'},
'have_cluster_creds': False})
@ -3943,11 +4076,16 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
retval = self.library._handle_qos_on_replication_change(
self.mock_dm_session, self.fake_replica_2, self.fake_replica,
share_server=fake.SHARE_SERVER)
True, share_server=fake.SHARE_SERVER)
self.assertIsNone(retval)
lib_base.LOG.exception.assert_not_called()
lib_base.LOG.info.assert_not_called()
if have_cluster_creds:
share_types.get_extra_specs_from_share.assert_called_once_with(
self.fake_replica)
else:
share_types.get_extra_specs_from_share.assert_not_called()
def test_handle_qos_on_replication_change_exception(self):
self.library._have_cluster_creds = True
@ -3965,7 +4103,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(side_effect=netapp_api.NaApiError))
retval = self.library._handle_qos_on_replication_change(
self.mock_dm_session, self.fake_replica_2, self.fake_replica,
self.mock_dm_session, self.fake_replica_2, self.fake_replica, True,
share_server=fake.SHARE_SERVER)
self.assertIsNone(retval)
@ -3975,7 +4113,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
lib_base.LOG.info.assert_not_called()
vserver_client.set_qos_policy_group_for_volume.assert_not_called()
def test_handle_qos_on_replication_change_modify_existing_policy(self):
@ddt.data(True, False)
def test_handle_qos_on_replication_change_modify_existing_policy(self,
is_dr):
self.library._have_cluster_creds = True
extra_specs = {'qos': True, fake.QOS_EXTRA_SPEC: '3000'}
vserver_client = mock.Mock()
@ -3994,9 +4134,15 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
retval = self.library._handle_qos_on_replication_change(
self.mock_dm_session, self.fake_replica_2, self.fake_replica,
share_server=fake.SHARE_SERVER)
is_dr, share_server=fake.SHARE_SERVER)
self.assertIsNone(retval)
if is_dr:
(self.mock_dm_session.remove_qos_on_old_active_replica.
assert_called_once_with(self.fake_replica))
else:
(self.mock_dm_session.remove_qos_on_old_active_replica.
assert_not_called())
self.library._client.qos_policy_group_modify.assert_called_once_with(
'qos_' + volume_name_on_backend, '3000iops')
vserver_client.set_qos_policy_group_for_volume.assert_called_once_with(
@ -4021,7 +4167,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.mock_object(self.library, '_create_qos_policy_group')
retval = self.library._handle_qos_on_replication_change(
self.mock_dm_session, self.fake_replica_2, self.fake_replica,
self.mock_dm_session, self.fake_replica_2, self.fake_replica, True,
share_server=fake.SHARE_SERVER)
self.assertIsNone(retval)
@ -4103,6 +4249,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=fake_helper))
self.mock_object(self.library, '_create_export',
mock.Mock(return_value='fake_export_location'))
self.mock_object(self.library,
'_is_readable_replica',
mock.Mock(return_value=False))
replicas = self.library.promote_replica(
None, [self.fake_replica, self.fake_replica_2],
@ -4204,18 +4353,100 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertEqual(constants.STATUS_ACTIVE,
replica['access_rules_status'])
def test_safe_change_replica_source(self):
@ddt.data(True, False)
def test_safe_change_replica_source(self, is_dr):
fake_replica_3 = copy.deepcopy(self.fake_replica_2)
fake_replica_3['id'] = fake.SHARE_ID3
fake_replica_3['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC
protocol_helper = mock.Mock()
self.mock_object(self.library,
'_get_helper',
mock.Mock(return_value=protocol_helper))
self.mock_object(self.library, '_create_export',
mock.Mock(return_value='fake_export_location'))
self.mock_object(self.library, '_unmount_orig_active_replica')
self.mock_object(self.library, '_handle_qos_on_replication_change')
mock_dm_session = mock.Mock()
mock_dm_session.wait_for_mount_replica.return_value = None
self.mock_object(mock_dm_session, 'get_backend_info_for_share',
mock.Mock(return_value=(fake.SHARE_NAME,
fake.VSERVER1,
fake.BACKEND_NAME)))
mock_client = mock.Mock()
self.mock_object(data_motion, "get_client_for_backend",
mock.Mock(return_value=mock_client))
mock_backend_config = fake.get_config_cmode()
mock_backend_config.netapp_mount_replica_timeout = 30
self.mock_object(data_motion, 'get_backend_configuration',
mock.Mock(return_value=mock_backend_config))
replica = self.library._safe_change_replica_source(
self.mock_dm_session, self.fake_replica, self.fake_replica_2,
mock_dm_session, self.fake_replica, self.fake_replica_2,
fake_replica_3, [self.fake_replica, self.fake_replica_2,
fake_replica_3]
fake_replica_3], is_dr, [fake.SHARE_ACCESS]
)
self.assertEqual([], replica['export_locations'])
self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC,
replica['replica_state'])
if is_dr:
self.assertEqual([], replica['export_locations'])
mock_dm_session.wait_for_mount_replica.assert_not_called()
else:
self.assertEqual('fake_export_location',
replica['export_locations'])
mock_dm_session.wait_for_mount_replica.assert_called_once_with(
mock_client, fake.SHARE_NAME, timeout=30)
@ddt.data({'fail_create_export': False, 'fail_mount': True},
{'fail_create_export': True, 'fail_mount': False})
@ddt.unpack
def test_safe_change_replica_source_fail_recover_readable(
self, fail_create_export, fail_mount):
fake_replica_3 = copy.deepcopy(self.fake_replica_2)
fake_replica_3['id'] = fake.SHARE_ID3
fake_replica_3['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC
protocol_helper = mock.Mock()
self.mock_object(self.library,
'_get_helper',
mock.Mock(return_value=protocol_helper))
if fail_create_export:
self.mock_object(self.library, '_create_export',
mock.Mock(side_effect=netapp_api.NaApiError()))
else:
self.mock_object(self.library, '_create_export',
mock.Mock(return_value='fake_export_location'))
self.mock_object(self.library, '_unmount_orig_active_replica')
self.mock_object(self.library, '_handle_qos_on_replication_change')
mock_dm_session = mock.Mock()
if fail_mount:
mock_dm_session.wait_for_mount_replica.side_effect = (
netapp_api.NaApiError())
else:
mock_dm_session.wait_for_mount_replica.return_value = None
self.mock_object(mock_dm_session, 'get_backend_info_for_share',
mock.Mock(return_value=(fake.SHARE_NAME,
fake.VSERVER1,
fake.BACKEND_NAME)))
mock_client = mock.Mock()
self.mock_object(data_motion, "get_client_for_backend",
mock.Mock(return_value=mock_client))
mock_backend_config = fake.get_config_cmode()
mock_backend_config.netapp_mount_replica_timeout = 30
self.mock_object(data_motion, 'get_backend_configuration',
mock.Mock(return_value=mock_backend_config))
replica = self.library._safe_change_replica_source(
mock_dm_session, self.fake_replica, self.fake_replica_2,
fake_replica_3, [self.fake_replica, self.fake_replica_2,
fake_replica_3], False, [fake.SHARE_ACCESS]
)
self.assertEqual(constants.STATUS_ERROR,
replica['replica_state'])
self.assertEqual(constants.STATUS_ERROR,
replica['status'])
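For orientation, the two tests above pin down how the promotion path is expected to treat a readable replica versus a dr replica once the snapmirror source changes: dr replicas stay unexported, readable replicas are mounted and re-exported, and a mount or export failure marks the replica as errored. The following is a minimal sketch distilled only from what the mocks assert; the helper name, argument handling, and call ordering are assumptions, not the driver's actual code.

# Hypothetical sketch of the readable-vs-dr branch exercised by the tests
# above; the real _safe_change_replica_source does more snapmirror work.
from manila.common import constants
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion


def _finish_change_replica_source(library, dm_session, replica,
                                  is_dr, share_server=None):
    replica['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC
    if is_dr:
        # dr replicas stay unmounted and unexported until promoted.
        replica['export_locations'] = []
        return replica

    # readable replicas are mounted and exported right away, so clients
    # keep (read-only) access through the new snapmirror destination.
    share_name, vserver, backend = (
        dm_session.get_backend_info_for_share(replica))
    client = data_motion.get_client_for_backend(backend,
                                                vserver_name=vserver)
    config = data_motion.get_backend_configuration(backend)
    try:
        dm_session.wait_for_mount_replica(
            client, share_name,
            timeout=config.netapp_mount_replica_timeout)
        replica['export_locations'] = library._create_export(
            replica, share_server, vserver, client)
    except netapp_api.NaApiError:
        # Mount or export failed: flag the replica so it can be retried.
        replica['replica_state'] = constants.STATUS_ERROR
        replica['status'] = constants.STATUS_ERROR
    return replica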
def test_safe_change_replica_source_destination_unreachable(self):
self.mock_dm_session.change_snapmirror_source.side_effect = (
@ -4228,7 +4459,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
replica = self.library._safe_change_replica_source(
self.mock_dm_session, self.fake_replica, self.fake_replica_2,
fake_replica_3, [self.fake_replica, self.fake_replica_2,
fake_replica_3]
fake_replica_3], True, [],
)
self.assertEqual([], replica['export_locations'])
self.assertEqual(constants.STATUS_ERROR,
@ -4247,7 +4478,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
replica = self.library._safe_change_replica_source(
self.mock_dm_session, self.fake_replica, self.fake_replica_2,
fake_replica_3, [self.fake_replica, self.fake_replica_2,
fake_replica_3]
fake_replica_3], True, []
)
self.assertEqual([], replica['export_locations'])
self.assertEqual(constants.STATUS_ERROR,
@ -5021,6 +5252,29 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.call(self.fake_replica, fake_replica_3)],
any_order=True)
@ddt.data(
{'replication_type': constants.REPLICATION_TYPE_READABLE,
'is_readable': True},
{'replication_type': constants.REPLICATION_TYPE_DR,
'is_readable': False},
{'replication_type': constants.REPLICATION_TYPE_WRITABLE,
'is_readable': False},
{'replication_type': None,
'is_readable': False})
@ddt.unpack
def test__is_readable_replica(self, replication_type, is_readable):
extra_specs = {}
if replication_type:
extra_specs['replication_type'] = replication_type
mock_get_extra_spec = self.mock_object(
share_types, 'get_extra_specs_from_share',
mock.Mock(return_value=extra_specs))
result = self.library._is_readable_replica(fake.SHARE)
self.assertEqual(is_readable, result)
mock_get_extra_spec.assert_called_once_with(fake.SHARE)
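The test above suggests the readable/dr decision reduces to a share-type extra-spec lookup. A minimal sketch under that assumption, reusing the same share_types call the test mocks:

# Hypothetical sketch of the extra-spec check exercised above.
from manila.common import constants
from manila.share import share_types


def _is_readable_replica(share):
    extra_specs = share_types.get_extra_specs_from_share(share)
    # Only the 'readable' replication style keeps replicas exported.
    return (extra_specs.get('replication_type') ==
            constants.REPLICATION_TYPE_READABLE)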
def test_migration_check_compatibility_no_cluster_credentials(self):
self.library._have_cluster_creds = False
self.mock_object(data_motion, 'get_backend_configuration')

View File

@ -45,3 +45,23 @@ class NetAppNASHelperBaseTestCase(test.TestCase):
result = helper._is_readonly(level)
self.assertEqual(readonly, result)
@ddt.data(
{'share': {'export_location': 'fake_export'},
'expected_export': 'fake_export'},
{'share': {'export_locations': [{'path': 'fake_export'}]},
'expected_export': 'fake_export'},
{'share': {'export_locations': 'error_type'},
'expected_export': None},
{'share': {'export_locations': []},
'expected_export': None},
{'share': {},
'expected_export': None})
@ddt.unpack
def test__get_share_export_location(self, share, expected_export):
helper = nfs_cmode.NetAppCmodeNFSHelper()
result = helper._get_share_export_location(share)
self.assertEqual(expected_export, result)
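The parametrized cases above describe a small accessor that tolerates both share-dict layouts and returns None for anything else. A minimal sketch, assuming nothing beyond what the cases assert:

# Hypothetical sketch of the base-helper accessor covered above.
def _get_share_export_location(share):
    # Some share dicts carry a single 'export_location' string.
    export_location = share.get('export_location')
    if export_location:
        return export_location
    # Others carry an 'export_locations' list of dicts; take the first path.
    export_locations = share.get('export_locations')
    if isinstance(export_locations, list) and export_locations:
        return export_locations[0].get('path')
    return None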

View File

@ -40,14 +40,16 @@ class NetAppClusteredCIFSHelperTestCase(test.TestCase):
self.helper = cifs_cmode.NetAppCmodeCIFSHelper()
self.helper.set_client(self.mock_client)
@ddt.data({'clear_export_policy': True, 'ensure_share_exists': False},
{'clear_export_policy': False, 'ensure_share_exists': True})
@ddt.data({'replica': True, 'cifs_exist': False},
{'replica': False, 'cifs_exist': True})
@ddt.unpack
def test_create_share(self, clear_export_policy, ensure_share_exists):
def test_create_share(self, replica, cifs_exist):
self.mock_client.cifs_share_exists.return_value = cifs_exist
result = self.helper.create_share(
fake.CIFS_SHARE, fake.SHARE_NAME,
clear_current_export_policy=clear_export_policy,
ensure_share_already_exists=ensure_share_exists)
replica=replica)
export_addresses = [fake.SHARE_ADDRESS_1, fake.SHARE_ADDRESS_2]
export_paths = [result(address) for address in export_addresses]
@ -56,19 +58,32 @@ class NetAppClusteredCIFSHelperTestCase(test.TestCase):
r'\\%s\%s' % (fake.SHARE_ADDRESS_2, fake.SHARE_NAME),
]
self.assertEqual(expected_paths, export_paths)
if ensure_share_exists:
self.mock_client.cifs_share_exists.assert_called_once_with(
fake.SHARE_NAME)
self.mock_client.cifs_share_exists.assert_called_once_with(
fake.SHARE_NAME)
if cifs_exist:
self.mock_client.create_cifs_share.assert_not_called()
self.mock_client.remove_cifs_share.assert_not_called()
else:
self.mock_client.create_cifs_share.assert_called_once_with(
fake.SHARE_NAME)
self.mock_client.cifs_share_exists.assert_not_called()
if clear_export_policy:
self.mock_client.remove_cifs_share_access.assert_called_once_with(
fake.SHARE_NAME, 'Everyone')
self.mock_client.set_volume_security_style.assert_called_once_with(
fake.SHARE_NAME, security_style='ntfs')
if replica:
self.mock_client.set_volume_security_style.assert_not_called()
else:
self.mock_client.set_volume_security_style.assert_called_once_with(
fake.SHARE_NAME, security_style='ntfs')
def test_create_share_ensure_not_exist_error(self):
self.mock_client.cifs_share_exists.return_value = False
self.assertRaises(exception.NetAppException,
self.helper.create_share,
fake.CIFS_SHARE, fake.SHARE_NAME,
ensure_share_already_exists=True)
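Taken together, the two tests above pin down the expected CIFS-helper behaviour for replicas: the CIFS share is only created when it does not already exist, a missing share is an error when its existence was required, and the NTFS security style is skipped for replicas. A minimal sketch under those assumptions (the real helper's parameter handling and messages may differ):

# Hypothetical sketch of the CIFS create_share behaviour asserted above.
from manila import exception


def create_share(client, share_name, clear_current_export_policy=True,
                 ensure_share_already_exists=False, replica=False):
    cifs_exists = client.cifs_share_exists(share_name)
    if ensure_share_already_exists and not cifs_exists:
        raise exception.NetAppException(
            'Expected CIFS share %s was not found.' % share_name)
    if not cifs_exists:
        client.create_cifs_share(share_name)
        if clear_current_export_policy:
            # A freshly created CIFS share grants Everyone access.
            client.remove_cifs_share_access(share_name, 'Everyone')
    if not replica:
        # Replica destinations skip the NTFS security style change.
        client.set_volume_security_style(share_name, security_style='ntfs')
    # Build the UNC path for each export address.
    return lambda export_address: r'\\%s\%s' % (export_address, share_name)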
def test_delete_share(self):
@ -221,11 +236,14 @@ class NetAppClusteredCIFSHelperTestCase(test.TestCase):
share = fake.CIFS_SHARE.copy()
share['export_location'] = location
self.mock_object(self.helper, '_get_share_export_location',
mock.Mock(return_value=location))
result_ip, result_share_name = self.helper._get_export_location(share)
self.assertEqual(ip, result_ip)
self.assertEqual(share_name, result_share_name)
self.helper._get_share_export_location.assert_called_once_with(share)
def test_cleanup_demoted_replica(self):
self.helper.cleanup_demoted_replica(fake.CIFS_SHARE, fake.SHARE_NAME)

View File

@ -163,6 +163,10 @@ class NetAppClusteredNFSHelperTestCase(test.TestCase):
def test_get_export_location(self):
export = fake.NFS_SHARE['export_location']
self.mock_object(self.helper, '_get_share_export_location',
mock.Mock(return_value=export))
host_ip, export_path = self.helper._get_export_location(
fake.NFS_SHARE)
self.assertEqual(fake.SHARE_ADDRESS_1, host_ip)
@ -173,11 +177,15 @@ class NetAppClusteredNFSHelperTestCase(test.TestCase):
fake_share = fake.NFS_SHARE.copy()
fake_share['export_location'] = export
self.mock_object(self.helper, '_get_share_export_location',
mock.Mock(return_value=export))
host_ip, export_path = self.helper._get_export_location(fake_share)
self.assertEqual('', host_ip)
self.assertEqual('', export_path)
self.helper._get_share_export_location.assert_called_once_with(
fake_share)
def test_get_temp_export_policy_name(self):
@ -233,3 +241,11 @@ class NetAppClusteredNFSHelperTestCase(test.TestCase):
result = self.helper._get_auth_methods()
self.assertEqual(security_flavors, result)
def test_cleanup_demoted_replica(self):
self.mock_object(self.helper, 'delete_share')
self.helper.cleanup_demoted_replica(fake.NFS_SHARE, fake.SHARE_NAME)
self.helper.delete_share.assert_called_once_with(fake.NFS_SHARE,
fake.SHARE_NAME)
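The cleanup_demoted_replica tests (the CIFS one earlier in this change and the NFS one above) imply a very small hook: the NFS helper drops the demoted replica's export, while the CIFS helper leaves its share in place. A minimal sketch, with class names marked as illustrative:

# Hypothetical sketches of the demotion cleanup hooks implied by the tests;
# the real helpers inherit from the driver's NAS helper base class.
class NFSHelperSketch(object):
    def delete_share(self, share, share_name):
        """Stand-in for the real export removal."""

    def cleanup_demoted_replica(self, share, share_name):
        # Remove the NFS export of the demoted replica.
        self.delete_share(share, share_name)


class CIFSHelperSketch(object):
    def cleanup_demoted_replica(self, share, share_name):
        # The CIFS share is kept on the demoted replica; nothing to do.
        pass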

View File

@ -0,0 +1,6 @@
---
features:
- |
NetApp ONTAP driver: added support for the `readable` replication type.
The driver continues to support the `dr` type as well.