diff --git a/manila/opts.py b/manila/opts.py
index 17ef814a09..41003afaa9 100644
--- a/manila/opts.py
+++ b/manila/opts.py
@@ -130,6 +130,7 @@ _global_opt_lists = [
manila.share.drivers.netapp.options.netapp_transport_opts,
manila.share.drivers.netapp.options.netapp_basicauth_opts,
manila.share.drivers.netapp.options.netapp_provisioning_opts,
+ manila.share.drivers.netapp.options.netapp_replication_opts,
manila.share.drivers.quobyte.quobyte.quobyte_manila_share_opts,
manila.share.drivers.service_instance.common_opts,
manila.share.drivers.service_instance.no_share_servers_handling_mode_opts,
diff --git a/manila/share/drivers/netapp/dataontap/client/api.py b/manila/share/drivers/netapp/dataontap/client/api.py
index caa8bcdfc2..d926fdd5ab 100644
--- a/manila/share/drivers/netapp/dataontap/client/api.py
+++ b/manila/share/drivers/netapp/dataontap/client/api.py
@@ -43,6 +43,12 @@ EVOL_NOT_MOUNTED = '14716'
ESIS_CLONE_NOT_LICENSED = '14956'
EOBJECTNOTFOUND = '15661'
E_VIFMGR_PORT_ALREADY_ASSIGNED_TO_BROADCAST_DOMAIN = '18605'
+ERELATION_EXISTS = '17122'
+ENOTRANSFER_IN_PROGRESS = '17130'
+ETRANSFER_IN_PROGRESS = '17137'
+EANOTHER_OP_ACTIVE = '17131'
+ERELATION_NOT_QUIESCED = '17127'
+ESOURCE_IS_DIFFERENT = '17105'
class NaServer(object):
diff --git a/manila/share/drivers/netapp/dataontap/client/client_base.py b/manila/share/drivers/netapp/dataontap/client/client_base.py
index 4982a694b5..f469cece01 100644
--- a/manila/share/drivers/netapp/dataontap/client/client_base.py
+++ b/manila/share/drivers/netapp/dataontap/client/client_base.py
@@ -47,10 +47,35 @@ class NetAppBaseClient(object):
minor = result.get_child_content('minor-version')
return major, minor
+ @na_utils.trace
+ def get_system_version(self):
+ """Gets the current Data ONTAP version."""
+
+ result = self.send_request('system-get-version')
+
+ version_tuple = result.get_child_by_name(
+ 'version-tuple') or netapp_api.NaElement('none')
+ system_version_tuple = version_tuple.get_child_by_name(
+ 'system-version-tuple') or netapp_api.NaElement('none')
+
+ version = {}
+ version['version'] = result.get_child_content('version')
+ version['version-tuple'] = (
+ system_version_tuple.get_child_content('generation'),
+ system_version_tuple.get_child_content('major'),
+ system_version_tuple.get_child_content('minor'))
+
+ return version
+
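
For context, a minimal sketch of how a caller might consume the dict returned by get_system_version(); the helper name and the 8.2 comparison are illustrative only, not part of this patch:

    def supports_snapmirror_v2(client):
        # 'client' is assumed to be an already-initialized NetApp client
        # exposing the get_system_version() method added above.
        version = client.get_system_version()
        generation, major = version['version-tuple'][:2]
        # The tuple members are strings, so convert before comparing.
        return (int(generation), int(major)) >= (8, 2)
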
def _init_features(self):
"""Set up the repository of available Data ONTAP features."""
self.features = Features()
+ def _strip_xml_namespace(self, string):
+ if string.startswith('{') and '}' in string:
+ return string.split('}', 1)[1]
+ return string
+
def send_request(self, api_name, api_args=None, enable_tunneling=True):
"""Sends request to Ontapi."""
request = netapp_api.NaElement(api_name)
diff --git a/manila/share/drivers/netapp/dataontap/client/client_cmode.py b/manila/share/drivers/netapp/dataontap/client/client_cmode.py
index 62ad14d818..418f77afe1 100644
--- a/manila/share/drivers/netapp/dataontap/client/client_cmode.py
+++ b/manila/share/drivers/netapp/dataontap/client/client_cmode.py
@@ -56,11 +56,14 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
super(NetAppCmodeClient, self)._init_features()
ontapi_version = self.get_ontapi_version(cached=True)
+ ontapi_1_20 = ontapi_version >= (1, 20)
ontapi_1_30 = ontapi_version >= (1, 30)
+ self.features.add_feature('SNAPMIRROR_V2', supported=ontapi_1_20)
self.features.add_feature('BROADCAST_DOMAINS', supported=ontapi_1_30)
self.features.add_feature('IPSPACES', supported=ontapi_1_30)
self.features.add_feature('SUBNETS', supported=ontapi_1_30)
+ self.features.add_feature('CLUSTER_PEER_POLICY', supported=ontapi_1_30)
def _invoke_vserver_api(self, na_element, vserver):
server = copy.copy(self.connection)
@@ -1128,15 +1131,17 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
thin_provisioned=False, snapshot_policy=None,
language=None, dedup_enabled=False,
compression_enabled=False, max_files=None,
- snapshot_reserve=None):
+ snapshot_reserve=None, volume_type='rw'):
"""Creates a volume."""
api_args = {
'containing-aggr-name': aggregate_name,
'size': six.text_type(size_gb) + 'g',
'volume': volume_name,
- 'junction-path': '/%s' % volume_name,
+ 'volume-type': volume_type,
}
+ if volume_type != 'dp':
+ api_args['junction-path'] = '/%s' % volume_name
if thin_provisioned:
api_args['space-reserve'] = 'none'
if snapshot_policy is not None:
@@ -2190,3 +2195,494 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
return False
else:
raise e
+
+ @na_utils.trace
+ def create_cluster_peer(self, addresses, username=None, password=None,
+ passphrase=None):
+ """Creates a cluster peer relationship."""
+
+ api_args = {
+ 'peer-addresses': [
+ {'remote-inet-address': address} for address in addresses
+ ],
+ }
+ if username:
+ api_args['user-name'] = username
+ if password:
+ api_args['password'] = password
+ if passphrase:
+ api_args['passphrase'] = passphrase
+
+ self.send_request('cluster-peer-create', api_args)
+
+ @na_utils.trace
+ def get_cluster_peers(self, remote_cluster_name=None):
+ """Gets one or more cluster peer relationships."""
+
+ api_args = {'max-records': 1000}
+ if remote_cluster_name:
+ api_args['query'] = {
+ 'cluster-peer-info': {
+ 'remote-cluster-name': remote_cluster_name,
+ }
+ }
+
+ result = self.send_request('cluster-peer-get-iter', api_args)
+ if not self._has_records(result):
+ return []
+
+ cluster_peers = []
+
+ for cluster_peer_info in result.get_child_by_name(
+ 'attributes-list').get_children():
+
+ cluster_peer = {
+ 'active-addresses': [],
+ 'peer-addresses': []
+ }
+
+ active_addresses = cluster_peer_info.get_child_by_name(
+ 'active-addresses') or netapp_api.NaElement('none')
+ for address in active_addresses.get_children():
+ cluster_peer['active-addresses'].append(address.get_content())
+
+ peer_addresses = cluster_peer_info.get_child_by_name(
+ 'peer-addresses') or netapp_api.NaElement('none')
+ for address in peer_addresses.get_children():
+ cluster_peer['peer-addresses'].append(address.get_content())
+
+ cluster_peer['availability'] = cluster_peer_info.get_child_content(
+ 'availability')
+ cluster_peer['cluster-name'] = cluster_peer_info.get_child_content(
+ 'cluster-name')
+ cluster_peer['cluster-uuid'] = cluster_peer_info.get_child_content(
+ 'cluster-uuid')
+ cluster_peer['remote-cluster-name'] = (
+ cluster_peer_info.get_child_content('remote-cluster-name'))
+ cluster_peer['serial-number'] = (
+ cluster_peer_info.get_child_content('serial-number'))
+ cluster_peer['timeout'] = cluster_peer_info.get_child_content(
+ 'timeout')
+
+ cluster_peers.append(cluster_peer)
+
+ return cluster_peers
+
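
As an illustration only (the client instances, addresses, and cluster names below are hypothetical), cluster peering is symmetric: the relationship must be created from both clusters with the same passphrase before get_cluster_peers() reports it as available:

    def ensure_cluster_peered(local_client, remote_client,
                              local_addresses, remote_addresses,
                              local_cluster_name, remote_cluster_name,
                              passphrase):
        # Create the peer relationship from each side if it is missing.
        if not local_client.get_cluster_peers(
                remote_cluster_name=remote_cluster_name):
            local_client.create_cluster_peer(remote_addresses,
                                             passphrase=passphrase)
        if not remote_client.get_cluster_peers(
                remote_cluster_name=local_cluster_name):
            remote_client.create_cluster_peer(local_addresses,
                                              passphrase=passphrase)
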
+ @na_utils.trace
+ def delete_cluster_peer(self, cluster_name):
+ """Deletes a cluster peer relationship."""
+
+ api_args = {'cluster-name': cluster_name}
+ self.send_request('cluster-peer-delete', api_args)
+
+ @na_utils.trace
+ def get_cluster_peer_policy(self):
+ """Gets the cluster peering policy configuration."""
+
+ if not self.features.CLUSTER_PEER_POLICY:
+ return {}
+
+ result = self.send_request('cluster-peer-policy-get')
+
+ attributes = result.get_child_by_name(
+ 'attributes') or netapp_api.NaElement('none')
+ cluster_peer_policy = attributes.get_child_by_name(
+ 'cluster-peer-policy') or netapp_api.NaElement('none')
+
+ policy = {
+ 'is-unauthenticated-access-permitted':
+ cluster_peer_policy.get_child_content(
+ 'is-unauthenticated-access-permitted'),
+ 'passphrase-minimum-length':
+ cluster_peer_policy.get_child_content(
+ 'passphrase-minimum-length'),
+ }
+
+ if policy['is-unauthenticated-access-permitted'] is not None:
+ policy['is-unauthenticated-access-permitted'] = (
+ strutils.bool_from_string(
+ policy['is-unauthenticated-access-permitted']))
+ if policy['passphrase-minimum-length'] is not None:
+ policy['passphrase-minimum-length'] = int(
+ policy['passphrase-minimum-length'])
+
+ return policy
+
+ @na_utils.trace
+ def set_cluster_peer_policy(self, is_unauthenticated_access_permitted=None,
+ passphrase_minimum_length=None):
+ """Modifies the cluster peering policy configuration."""
+
+ if not self.features.CLUSTER_PEER_POLICY:
+ return
+
+ if (is_unauthenticated_access_permitted is None and
+ passphrase_minimum_length is None):
+ return
+
+ api_args = {}
+ if is_unauthenticated_access_permitted is not None:
+ api_args['is-unauthenticated-access-permitted'] = (
+ 'true' if strutils.bool_from_string(
+ is_unauthenticated_access_permitted) else 'false')
+ if passphrase_minimum_length is not None:
+ api_args['passphrase-minlength'] = six.text_type(
+ passphrase_minimum_length)
+
+ self.send_request('cluster-peer-policy-modify', api_args)
+
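
As a hedged example (whether passphrase-less peering is acceptable is deployment-specific), the two policy calls above can be combined to relax the requirement in a lab environment:

    def allow_unauthenticated_peering(client):
        # 'client' is assumed to be a cluster-scoped NetAppCmodeClient.
        client.set_cluster_peer_policy(
            is_unauthenticated_access_permitted=True)
        # Returns the policy as parsed by get_cluster_peer_policy(), e.g.
        # {'is-unauthenticated-access-permitted': True,
        #  'passphrase-minimum-length': 8}
        return client.get_cluster_peer_policy()
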
+ @na_utils.trace
+ def create_vserver_peer(self, vserver_name, peer_vserver_name):
+ """Creates a Vserver peer relationship for SnapMirrors."""
+ api_args = {
+ 'vserver': vserver_name,
+ 'peer-vserver': peer_vserver_name,
+ 'applications': [
+ {'vserver-peer-application': 'snapmirror'},
+ ],
+ }
+ self.send_request('vserver-peer-create', api_args)
+
+ @na_utils.trace
+ def delete_vserver_peer(self, vserver_name, peer_vserver_name):
+ """Deletes a Vserver peer relationship."""
+
+ api_args = {'vserver': vserver_name, 'peer-vserver': peer_vserver_name}
+ self.send_request('vserver-peer-delete', api_args)
+
+ @na_utils.trace
+ def accept_vserver_peer(self, vserver_name, peer_vserver_name):
+ """Accepts a pending Vserver peer relationship."""
+
+ api_args = {'vserver': vserver_name, 'peer-vserver': peer_vserver_name}
+ self.send_request('vserver-peer-accept', api_args)
+
+ @na_utils.trace
+ def get_vserver_peers(self, vserver_name=None, peer_vserver_name=None):
+ """Gets one or more Vserver peer relationships."""
+
+ api_args = None
+ if vserver_name or peer_vserver_name:
+ api_args = {'query': {'vserver-peer-info': {}}}
+ if vserver_name:
+ api_args['query']['vserver-peer-info']['vserver'] = (
+ vserver_name)
+ if peer_vserver_name:
+ api_args['query']['vserver-peer-info']['peer-vserver'] = (
+ peer_vserver_name)
+ api_args['max-records'] = 1000
+
+ result = self.send_request('vserver-peer-get-iter', api_args)
+ if not self._has_records(result):
+ return []
+
+ vserver_peers = []
+
+ for vserver_peer_info in result.get_child_by_name(
+ 'attributes-list').get_children():
+
+ vserver_peer = {
+ 'vserver': vserver_peer_info.get_child_content('vserver'),
+ 'peer-vserver':
+ vserver_peer_info.get_child_content('peer-vserver'),
+ 'peer-state':
+ vserver_peer_info.get_child_content('peer-state'),
+ 'peer-cluster':
+ vserver_peer_info.get_child_content('peer-cluster'),
+ }
+ vserver_peers.append(vserver_peer)
+
+ return vserver_peers
+
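
A rough usage sketch (names hypothetical) of how a peer request created here surfaces through get_vserver_peers(); for inter-cluster peering the request stays 'pending' until accept_vserver_peer() is issued from the peer cluster:

    def request_vserver_peering(client, vserver, peer_vserver):
        # 'client' is assumed to be a cluster-scoped NetAppCmodeClient.
        client.create_vserver_peer(vserver, peer_vserver)
        peers = client.get_vserver_peers(vserver_name=vserver,
                                         peer_vserver_name=peer_vserver)
        return peers[0]['peer-state'] if peers else None
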
+ def _ensure_snapmirror_v2(self):
+ """Verify support for SnapMirror control plane v2."""
+ if not self.features.SNAPMIRROR_V2:
+ msg = _('SnapMirror features require Data ONTAP 8.2 or later.')
+ raise exception.NetAppException(msg)
+
+ @na_utils.trace
+ def create_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume,
+ schedule=None, policy=None,
+ relationship_type='data_protection'):
+ """Creates a SnapMirror relationship (cDOT 8.2 or later only)."""
+ self._ensure_snapmirror_v2()
+
+ api_args = {
+ 'source-volume': source_volume,
+ 'source-vserver': source_vserver,
+ 'destination-volume': destination_volume,
+ 'destination-vserver': destination_vserver,
+ 'relationship-type': relationship_type,
+ }
+ if schedule:
+ api_args['schedule'] = schedule
+ if policy:
+ api_args['policy'] = policy
+
+ try:
+ self.send_request('snapmirror-create', api_args)
+ except netapp_api.NaApiError as e:
+ if e.code != netapp_api.ERELATION_EXISTS:
+ raise
+
+ @na_utils.trace
+ def initialize_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume,
+ source_snapshot=None, transfer_priority=None):
+ """Initializes a SnapMirror relationship (cDOT 8.2 or later only)."""
+ self._ensure_snapmirror_v2()
+
+ api_args = {
+ 'source-volume': source_volume,
+ 'source-vserver': source_vserver,
+ 'destination-volume': destination_volume,
+ 'destination-vserver': destination_vserver,
+ }
+ if source_snapshot:
+ api_args['source-snapshot'] = source_snapshot
+ if transfer_priority:
+ api_args['transfer-priority'] = transfer_priority
+
+ result = self.send_request('snapmirror-initialize', api_args)
+
+ result_info = {}
+ result_info['operation-id'] = result.get_child_content(
+ 'result-operation-id')
+ result_info['status'] = result.get_child_content('result-status')
+ result_info['jobid'] = result.get_child_content('result-jobid')
+ result_info['error-code'] = result.get_child_content(
+ 'result-error-code')
+ result_info['error-message'] = result.get_child_content(
+ 'result-error-message')
+
+ return result_info
+
+ @na_utils.trace
+ def release_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume,
+ relationship_info_only=False):
+ """Removes a SnapMirror relationship on the source endpoint."""
+ self._ensure_snapmirror_v2()
+
+ api_args = {
+ 'query': {
+ 'snapmirror-destination-info': {
+ 'source-volume': source_volume,
+ 'source-vserver': source_vserver,
+ 'destination-volume': destination_volume,
+ 'destination-vserver': destination_vserver,
+ 'relationship-info-only': ('true' if relationship_info_only
+ else 'false'),
+ }
+ }
+ }
+ self.send_request('snapmirror-release-iter', api_args)
+
+ @na_utils.trace
+ def quiesce_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume):
+ """Disables future transfers to a SnapMirror destination."""
+ self._ensure_snapmirror_v2()
+
+ api_args = {
+ 'source-volume': source_volume,
+ 'source-vserver': source_vserver,
+ 'destination-volume': destination_volume,
+ 'destination-vserver': destination_vserver,
+ }
+ self.send_request('snapmirror-quiesce', api_args)
+
+ @na_utils.trace
+ def abort_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume,
+ clear_checkpoint=False):
+ """Stops ongoing transfers for a SnapMirror relationship."""
+ self._ensure_snapmirror_v2()
+
+ api_args = {
+ 'source-volume': source_volume,
+ 'source-vserver': source_vserver,
+ 'destination-volume': destination_volume,
+ 'destination-vserver': destination_vserver,
+ 'clear-checkpoint': 'true' if clear_checkpoint else 'false',
+ }
+ try:
+ self.send_request('snapmirror-abort', api_args)
+ except netapp_api.NaApiError as e:
+ if e.code != netapp_api.ENOTRANSFER_IN_PROGRESS:
+ raise
+
+ @na_utils.trace
+ def break_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume):
+ """Breaks a data protection SnapMirror relationship."""
+ self._ensure_snapmirror_v2()
+
+ api_args = {
+ 'source-volume': source_volume,
+ 'source-vserver': source_vserver,
+ 'destination-volume': destination_volume,
+ 'destination-vserver': destination_vserver,
+ }
+ self.send_request('snapmirror-break', api_args)
+
+ @na_utils.trace
+ def modify_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume,
+ schedule=None, policy=None, tries=None,
+ max_transfer_rate=None):
+ """Modifies a SnapMirror relationship."""
+ self._ensure_snapmirror_v2()
+
+ api_args = {
+ 'source-volume': source_volume,
+ 'source-vserver': source_vserver,
+ 'destination-volume': destination_volume,
+ 'destination-vserver': destination_vserver,
+ }
+ if schedule:
+ api_args['schedule'] = schedule
+ if policy:
+ api_args['policy'] = policy
+ if tries is not None:
+ api_args['tries'] = tries
+ if max_transfer_rate is not None:
+ api_args['max-transfer-rate'] = max_transfer_rate
+
+ self.send_request('snapmirror-modify', api_args)
+
+ @na_utils.trace
+ def delete_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume):
+ """Destroys a SnapMirror relationship."""
+ self._ensure_snapmirror_v2()
+
+ api_args = {
+ 'query': {
+ 'snapmirror-info': {
+ 'source-volume': source_volume,
+ 'source-vserver': source_vserver,
+ 'destination-volume': destination_volume,
+ 'destination-vserver': destination_vserver,
+ }
+ }
+ }
+ self.send_request('snapmirror-destroy-iter', api_args)
+
+ @na_utils.trace
+ def update_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume):
+ """Schedules a snapmirror update."""
+ self._ensure_snapmirror_v2()
+
+ api_args = {
+ 'source-volume': source_volume,
+ 'source-vserver': source_vserver,
+ 'destination-volume': destination_volume,
+ 'destination-vserver': destination_vserver,
+ }
+ try:
+ self.send_request('snapmirror-update', api_args)
+ except netapp_api.NaApiError as e:
+ if (e.code != netapp_api.ETRANSFER_IN_PROGRESS and
+ e.code != netapp_api.EANOTHER_OP_ACTIVE):
+ raise
+
+ @na_utils.trace
+ def resume_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume):
+ """Resume a SnapMirror relationship if it is quiesced."""
+ self._ensure_snapmirror_v2()
+
+ api_args = {
+ 'source-volume': source_volume,
+ 'source-vserver': source_vserver,
+ 'destination-volume': destination_volume,
+ 'destination-vserver': destination_vserver,
+ }
+ try:
+ self.send_request('snapmirror-resume', api_args)
+ except netapp_api.NaApiError as e:
+ if e.code != netapp_api.ERELATION_NOT_QUIESCED:
+ raise
+
+ @na_utils.trace
+ def resync_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume):
+ """Resync a SnapMirror relationship."""
+ self._ensure_snapmirror_v2()
+
+ api_args = {
+ 'source-volume': source_volume,
+ 'source-vserver': source_vserver,
+ 'destination-volume': destination_volume,
+ 'destination-vserver': destination_vserver,
+ }
+ self.send_request('snapmirror-resync', api_args)
+
+ @na_utils.trace
+ def _get_snapmirrors(self, source_vserver=None, source_volume=None,
+ destination_vserver=None, destination_volume=None,
+ desired_attributes=None):
+
+ query = None
+ if (source_vserver or source_volume or destination_vserver or
+ destination_volume):
+ query = {'snapmirror-info': {}}
+ if source_volume:
+ query['snapmirror-info']['source-volume'] = source_volume
+ if destination_volume:
+ query['snapmirror-info']['destination-volume'] = (
+ destination_volume)
+ if source_vserver:
+ query['snapmirror-info']['source-vserver'] = source_vserver
+ if destination_vserver:
+ query['snapmirror-info']['destination-vserver'] = (
+ destination_vserver)
+
+ api_args = {}
+ if query:
+ api_args['query'] = query
+ if desired_attributes:
+ api_args['desired-attributes'] = desired_attributes
+
+ result = self.send_request('snapmirror-get-iter', api_args)
+ if not self._has_records(result):
+ return []
+ else:
+ return result.get_child_by_name('attributes-list').get_children()
+
+ @na_utils.trace
+ def get_snapmirrors(self, source_vserver, source_volume,
+ destination_vserver, destination_volume,
+ desired_attributes=None):
+ """Gets one or more SnapMirror relationships.
+
+ Either the source or destination info may be omitted.
+ Desired attributes should be a flat list of attribute names.
+ """
+ self._ensure_snapmirror_v2()
+
+ if desired_attributes is not None:
+ desired_attributes = {
+ 'snapmirror-info': {attr: None for attr in desired_attributes},
+ }
+
+ result = self._get_snapmirrors(
+ source_vserver=source_vserver,
+ source_volume=source_volume,
+ destination_vserver=destination_vserver,
+ destination_volume=destination_volume,
+ desired_attributes=desired_attributes)
+
+ snapmirrors = []
+
+ for snapmirror_info in result:
+ snapmirror = {}
+ for child in snapmirror_info.get_children():
+ name = self._strip_xml_namespace(child.get_name())
+ snapmirror[name] = child.get_content()
+ snapmirrors.append(snapmirror)
+
+ return snapmirrors
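
For illustration (the vserver and volume names are hypothetical), a destination-side client can use get_snapmirrors() with a small desired_attributes list to check mirror health cheaply:

    def mirror_is_healthy_and_idle(dest_client):
        snapmirrors = dest_client.get_snapmirrors(
            'src_svm', 'src_share_volume', 'dest_svm', 'dest_share_volume',
            desired_attributes=['relationship-status', 'mirror-state'])
        if not snapmirrors:
            return False
        snapmirror = snapmirrors[0]
        return (snapmirror.get('mirror-state') == 'snapmirrored' and
                snapmirror.get('relationship-status') == 'idle')
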
diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py b/manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py
new file mode 100644
index 0000000000..231bd674aa
--- /dev/null
+++ b/manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py
@@ -0,0 +1,372 @@
+# Copyright (c) 2016 Alex Meade. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+NetApp Data ONTAP data motion library.
+
+This library handles transferring data from a source to a destination as
+efficiently as possible given the locations of the source and the
+destination. This includes cloning, SnapMirror, and copy offload as
+improvements over brute-force data transfer.
+"""
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import excutils
+
+from manila import exception
+from manila.i18n import _, _LI, _LW
+from manila.share import configuration
+from manila.share import driver
+from manila.share.drivers.netapp.dataontap.client import api as netapp_api
+from manila.share.drivers.netapp.dataontap.client import client_cmode
+from manila.share.drivers.netapp import options as na_opts
+from manila.share.drivers.netapp import utils as na_utils
+from manila.share import utils as share_utils
+from manila import utils
+
+
+LOG = log.getLogger(__name__)
+CONF = cfg.CONF
+
+
+def get_backend_configuration(backend_name):
+ for section in CONF.list_all_sections():
+ config = configuration.Configuration(driver.share_opts,
+ config_group=section)
+ config.append_config_values(na_opts.netapp_cluster_opts)
+ config.append_config_values(na_opts.netapp_connection_opts)
+ config.append_config_values(na_opts.netapp_basicauth_opts)
+ config.append_config_values(na_opts.netapp_transport_opts)
+ config.append_config_values(na_opts.netapp_support_opts)
+ config.append_config_values(na_opts.netapp_provisioning_opts)
+ config.append_config_values(na_opts.netapp_replication_opts)
+ if (config.share_backend_name and
+ config.share_backend_name.lower() == backend_name.lower()):
+ return config
+ msg = _LW("Could not find backend %s in configuration.")
+    LOG.warning(msg, backend_name)
+
+
+def get_client_for_backend(backend_name, vserver_name=None):
+ config = get_backend_configuration(backend_name)
+ client = client_cmode.NetAppCmodeClient(
+ transport_type=config.netapp_transport_type,
+ username=config.netapp_login,
+ password=config.netapp_password,
+ hostname=config.netapp_server_hostname,
+ port=config.netapp_server_port,
+ vserver=vserver_name or config.netapp_vserver,
+ trace=na_utils.TRACE_API)
+
+ return client
+
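
A usage sketch (the backend section and vserver names are hypothetical): callers look clients up by share_backend_name rather than by oslo.config group name:

    # Cluster-scoped client for the backend named 'cdot_backend_1' in
    # manila.conf, plus a vserver-scoped client for one of its SVMs.
    cluster_client = get_client_for_backend('cdot_backend_1')
    vserver_client = get_client_for_backend('cdot_backend_1',
                                            vserver_name='manila_svm_1')
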
+
+class DataMotionSession(object):
+
+ def _get_backend_volume_name(self, config, share_obj):
+ """Return the calculated backend name of the share.
+
+ Uses the netapp_volume_name_template configuration value for the
+ backend to calculate the volume name on the array for the share.
+ """
+ volume_name = config.netapp_volume_name_template % {
+ 'share_id': share_obj['id'].replace('-', '_')}
+ return volume_name
+
+ def get_vserver_from_share(self, share_obj):
+ share_server = share_obj.get('share_server')
+ if share_server:
+ backend_details = share_server.get('backend_details')
+ if backend_details:
+ return backend_details.get('vserver_name')
+
+ def get_backend_info_for_share(self, share_obj):
+ backend_name = share_utils.extract_host(
+ share_obj['host'], level='backend_name')
+
+ config = get_backend_configuration(backend_name)
+ vserver = (self.get_vserver_from_share(share_obj) or
+ config.netapp_vserver)
+ volume_name = self._get_backend_volume_name(
+ config, share_obj)
+
+ return volume_name, vserver, backend_name
+
+ def get_snapmirrors(self, source_share_obj, dest_share_obj):
+ dest_volume_name, dest_vserver, dest_backend = (
+ self.get_backend_info_for_share(dest_share_obj))
+ dest_client = get_client_for_backend(dest_backend,
+ vserver_name=dest_vserver)
+
+ src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
+ source_share_obj)
+
+ snapmirrors = dest_client.get_snapmirrors(
+ src_vserver, src_volume_name,
+ dest_vserver, dest_volume_name,
+ desired_attributes=['relationship-status',
+ 'mirror-state',
+ 'source-vserver',
+ 'source-volume',
+ 'last-transfer-end-timestamp'])
+ return snapmirrors
+
+ def create_snapmirror(self, source_share_obj, dest_share_obj):
+ """Sets up a SnapMirror relationship between two volumes.
+
+ 1. Create SnapMirror relationship
+ 2. Initialize data transfer asynchronously
+ """
+ dest_volume_name, dest_vserver, dest_backend = (
+ self.get_backend_info_for_share(dest_share_obj))
+ dest_client = get_client_for_backend(dest_backend,
+ vserver_name=dest_vserver)
+
+ src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
+ source_share_obj)
+
+ # 1. Create SnapMirror relationship
+ # TODO(ameade): Change the schedule from hourly to a config value
+ dest_client.create_snapmirror(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name,
+ schedule='hourly')
+
+ # 2. Initialize async transfer of the initial data
+ dest_client.initialize_snapmirror(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
+
+ def delete_snapmirror(self, source_share_obj, dest_share_obj,
+ release=True):
+ """Ensures all information about a SnapMirror relationship is removed.
+
+ 1. Abort snapmirror
+ 2. Delete the snapmirror
+ 3. Release snapmirror to cleanup snapmirror metadata and snapshots
+ """
+ dest_volume_name, dest_vserver, dest_backend = (
+ self.get_backend_info_for_share(dest_share_obj))
+ dest_client = get_client_for_backend(dest_backend,
+ vserver_name=dest_vserver)
+
+ src_volume_name, src_vserver, src_backend = (
+ self.get_backend_info_for_share(source_share_obj))
+
+ # 1. Abort any ongoing transfers
+ try:
+ dest_client.abort_snapmirror(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name,
+ clear_checkpoint=False)
+        except netapp_api.NaApiError:
+            # The SnapMirror relationship may already be gone; there is
+            # nothing to abort.
+            pass
+
+ # 2. Delete SnapMirror Relationship and cleanup destination snapshots
+ try:
+ dest_client.delete_snapmirror(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
+ except netapp_api.NaApiError as e:
+ with excutils.save_and_reraise_exception() as exc_context:
+ if (e.code == netapp_api.EOBJECTNOTFOUND or
+ e.code == netapp_api.ESOURCE_IS_DIFFERENT or
+ "(entry doesn't exist)" in e.message):
+ LOG.info(_LI('No snapmirror relationship to delete'))
+ exc_context.reraise = False
+
+ if release:
+ # If the source is unreachable, do not perform the release
+ try:
+ src_client = get_client_for_backend(src_backend,
+ vserver_name=src_vserver)
+ except Exception:
+ src_client = None
+ # 3. Cleanup SnapMirror relationship on source
+ try:
+ if src_client:
+ src_client.release_snapmirror(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
+ except netapp_api.NaApiError as e:
+ with excutils.save_and_reraise_exception() as exc_context:
+ if (e.code == netapp_api.EOBJECTNOTFOUND or
+ e.code == netapp_api.ESOURCE_IS_DIFFERENT or
+ "(entry doesn't exist)" in e.message):
+ # Handle the case where the snapmirror is already
+ # cleaned up
+ exc_context.reraise = False
+
+ def update_snapmirror(self, source_share_obj, dest_share_obj):
+ """Schedule a snapmirror update to happen on the backend."""
+ dest_volume_name, dest_vserver, dest_backend = (
+ self.get_backend_info_for_share(dest_share_obj))
+ dest_client = get_client_for_backend(dest_backend,
+ vserver_name=dest_vserver)
+
+ src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
+ source_share_obj)
+
+ # Update SnapMirror
+ dest_client.update_snapmirror(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
+
+ def quiesce_then_abort(self, source_share_obj, dest_share_obj):
+ dest_volume_name, dest_vserver, dest_backend = (
+ self.get_backend_info_for_share(dest_share_obj))
+ dest_client = get_client_for_backend(dest_backend,
+ vserver_name=dest_vserver)
+
+ src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
+ source_share_obj)
+
+ # 1. Attempt to quiesce, then abort
+ dest_client.quiesce_snapmirror(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
+
+ config = get_backend_configuration(share_utils.extract_host(
+ source_share_obj['host'], level='backend_name'))
+        retries = int(config.netapp_snapmirror_quiesce_timeout / 5)
+
+ @utils.retry(exception.ReplicationException, interval=5,
+ retries=retries, backoff_rate=1)
+ def wait_for_quiesced():
+ snapmirror = dest_client.get_snapmirrors(
+ src_vserver, src_volume_name, dest_vserver,
+ dest_volume_name, desired_attributes=['relationship-status',
+ 'mirror-state']
+ )[0]
+ if snapmirror.get('relationship-status') != 'quiesced':
+ raise exception.ReplicationException(
+                    reason=_("SnapMirror relationship is not quiesced."))
+
+ try:
+ wait_for_quiesced()
+ except exception.ReplicationException:
+ dest_client.abort_snapmirror(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name,
+ clear_checkpoint=False)
+
+ def break_snapmirror(self, source_share_obj, dest_share_obj):
+ """Breaks SnapMirror relationship.
+
+ 1. Quiesce any ongoing snapmirror transfers
+ 2. Wait until snapmirror finishes transfers and enters quiesced state
+ 3. Break snapmirror
+ 4. Mount the destination volume so it is exported as a share
+ """
+ dest_volume_name, dest_vserver, dest_backend = (
+ self.get_backend_info_for_share(dest_share_obj))
+ dest_client = get_client_for_backend(dest_backend,
+ vserver_name=dest_vserver)
+
+ src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
+ source_share_obj)
+
+ # 1. Attempt to quiesce, then abort
+ self.quiesce_then_abort(source_share_obj, dest_share_obj)
+
+ # 2. Break SnapMirror
+ dest_client.break_snapmirror(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
+
+ # 3. Mount the destination volume and create a junction path
+ dest_client.mount_volume(dest_volume_name)
+
+ def resync_snapmirror(self, source_share_obj, dest_share_obj):
+        """Resync a SnapMirror relationship."""
+ dest_volume_name, dest_vserver, dest_backend = (
+ self.get_backend_info_for_share(dest_share_obj))
+ dest_client = get_client_for_backend(dest_backend,
+ vserver_name=dest_vserver)
+
+ src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
+ source_share_obj)
+
+ dest_client.resync_snapmirror(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
+
+ def resume_snapmirror(self, source_share_obj, dest_share_obj):
+ """Resume SnapMirror relationship from a quiesced state."""
+ dest_volume_name, dest_vserver, dest_backend = (
+ self.get_backend_info_for_share(dest_share_obj))
+ dest_client = get_client_for_backend(dest_backend,
+ vserver_name=dest_vserver)
+
+ src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
+ source_share_obj)
+
+ dest_client.resume_snapmirror(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
+
+ def change_snapmirror_source(self, replica,
+ orig_source_replica,
+ new_source_replica, replica_list):
+ """Creates SnapMirror relationship from the new source to destination.
+
+ 1. Delete all snapmirrors involving the replica, but maintain
+ snapmirror metadata and snapshots for efficiency
+ 2. Ensure a new source -> replica snapmirror exists
+ 3. Resync new source -> replica snapmirror relationship
+ """
+
+ replica_volume_name, replica_vserver, replica_backend = (
+ self.get_backend_info_for_share(replica))
+ replica_client = get_client_for_backend(replica_backend,
+ vserver_name=replica_vserver)
+
+ new_src_volume_name, new_src_vserver, __ = (
+ self.get_backend_info_for_share(new_source_replica))
+
+ # 1. delete
+ for other_replica in replica_list:
+ if other_replica['id'] == replica['id']:
+ continue
+
+ # We need to delete ALL snapmirror relationships
+ # involving this replica but do not remove snapmirror metadata
+ # so that the new snapmirror relationship is efficient.
+ self.delete_snapmirror(other_replica, replica, release=False)
+ self.delete_snapmirror(replica, other_replica, release=False)
+
+ # 2. create
+ # TODO(ameade): Update the schedule if needed.
+ replica_client.create_snapmirror(new_src_vserver,
+ new_src_volume_name,
+ replica_vserver,
+ replica_volume_name,
+ schedule='hourly')
+ # 3. resync
+ replica_client.resync_snapmirror(new_src_vserver,
+ new_src_volume_name,
+ replica_vserver,
+ replica_volume_name)
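
A rough end-to-end sketch of how the session methods compose (the share dicts are assumed to carry 'id', 'host' and, optionally, 'share_server', as supplied by the driver; this is illustrative, not part of the patch):

    def replicate_then_fail_over(active_share, replica_share):
        session = DataMotionSession()
        # Create the relationship and kick off the baseline transfer.
        session.create_snapmirror(active_share, replica_share)
        # Push an incremental update (normally done periodically).
        session.update_snapmirror(active_share, replica_share)
        # On failover, quiesce/abort and make the replica writable.
        session.break_snapmirror(active_share, replica_share)
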
diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py b/manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py
index 8749e399ab..6c6f01d01b 100644
--- a/manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py
+++ b/manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py
@@ -118,3 +118,18 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
def _teardown_server(self, server_details, **kwargs):
self.library.teardown_server(server_details, **kwargs)
+
+ def create_replica(self, context, replica_list, replica, access_rules,
+ **kwargs):
+ raise NotImplementedError()
+
+ def delete_replica(self, context, replica_list, replica, **kwargs):
+ raise NotImplementedError()
+
+ def promote_replica(self, context, replica_list, replica, access_rules,
+ share_server=None):
+ raise NotImplementedError()
+
+ def update_replica_state(self, context, replica_list, replica,
+ access_rules, share_server=None):
+ raise NotImplementedError()
diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py b/manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py
index cfb3ff1414..c38dff965d 100644
--- a/manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py
+++ b/manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py
@@ -117,3 +117,25 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
def _teardown_server(self, server_details, **kwargs):
self.library.teardown_server(server_details, **kwargs)
+
+ def create_replica(self, context, replica_list, replica, access_rules,
+ **kwargs):
+ return self.library.create_replica(context, replica_list, replica,
+ access_rules, **kwargs)
+
+ def delete_replica(self, context, replica_list, replica, **kwargs):
+ self.library.delete_replica(context, replica_list, replica, **kwargs)
+
+ def promote_replica(self, context, replica_list, replica, access_rules,
+ share_server=None):
+ return self.library.promote_replica(context, replica_list, replica,
+ access_rules,
+ share_server=share_server)
+
+ def update_replica_state(self, context, replica_list, replica,
+ access_rules, share_server=None):
+ return self.library.update_replica_state(context,
+ replica_list,
+ replica,
+ access_rules,
+ share_server=share_server)
diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py
index 80da7a5f0f..e8da4985bb 100644
--- a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py
+++ b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py
@@ -24,14 +24,19 @@ import math
import socket
import time
+from oslo_config import cfg
from oslo_log import log
from oslo_service import loopingcall
+from oslo_utils import timeutils
from oslo_utils import units
import six
+from manila.common import constants
from manila import exception
from manila.i18n import _, _LE, _LI, _LW
+from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_cmode
+from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
from manila.share.drivers.netapp.dataontap.protocols import cifs_cmode
from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode
from manila.share.drivers.netapp import options as na_opts
@@ -40,6 +45,7 @@ from manila.share import share_types
from manila.share import utils as share_utils
LOG = log.getLogger(__name__)
+CONF = cfg.CONF
class NetAppCmodeFileStorageLibrary(object):
@@ -84,6 +90,8 @@ class NetAppCmodeFileStorageLibrary(object):
self.configuration.append_config_values(na_opts.netapp_cluster_opts)
self.configuration.append_config_values(
na_opts.netapp_provisioning_opts)
+ self.configuration.append_config_values(
+ na_opts.netapp_replication_opts)
self._licenses = []
self._client = None
@@ -177,16 +185,16 @@ class NetAppCmodeFileStorageLibrary(object):
housekeeping_periodic_task.start(
interval=self.HOUSEKEEPING_INTERVAL_SECONDS, initial_delay=0)
- def _get_valid_share_name(self, share_id):
+ def _get_backend_share_name(self, share_id):
"""Get share name according to share name template."""
return self.configuration.netapp_volume_name_template % {
'share_id': share_id.replace('-', '_')}
- def _get_valid_snapshot_name(self, snapshot_id):
+ def _get_backend_snapshot_name(self, snapshot_id):
"""Get snapshot name according to snapshot name template."""
return 'share_snapshot_' + snapshot_id.replace('-', '_')
- def _get_valid_cg_snapshot_name(self, snapshot_id):
+ def _get_backend_cg_snapshot_name(self, snapshot_id):
"""Get snapshot name according to snapshot name template."""
return 'share_cg_snapshot_' + snapshot_id.replace('-', '_')
@@ -222,6 +230,12 @@ class NetAppCmodeFileStorageLibrary(object):
'consistency_group_support': 'host',
'pools': self._get_pools(),
}
+
+ if (self.configuration.replication_domain and
+ not self.configuration.driver_handles_share_servers):
+ data['replication_type'] = 'dr'
+ data['replication_domain'] = self.configuration.replication_domain
+
return data
@na_utils.trace
@@ -345,7 +359,7 @@ class NetAppCmodeFileStorageLibrary(object):
if pool:
return pool
- share_name = self._get_valid_share_name(share['id'])
+ share_name = self._get_backend_share_name(share['id'])
return self._client.get_aggregate_for_volume(share_name)
@na_utils.trace
@@ -366,9 +380,9 @@ class NetAppCmodeFileStorageLibrary(object):
vserver_client)
@na_utils.trace
- def _allocate_container(self, share, vserver_client):
+ def _allocate_container(self, share, vserver_client, replica=False):
"""Create new share on aggregate."""
- share_name = self._get_valid_share_name(share['id'])
+ share_name = self._get_backend_share_name(share['id'])
# Get Data ONTAP aggregate name as pool name.
pool_name = share_utils.extract_host(share['host'], level='pool')
@@ -380,6 +394,10 @@ class NetAppCmodeFileStorageLibrary(object):
extra_specs = self._remap_standard_boolean_extra_specs(extra_specs)
self._check_extra_specs_validity(share, extra_specs)
provisioning_options = self._get_provisioning_options(extra_specs)
+ if replica:
+ # If this volume is intended to be a replication destination,
+ # create it as the 'data-protection' type
+ provisioning_options['volume_type'] = 'dp'
LOG.debug('Creating share %(share)s on pool %(pool)s with '
'provisioning options %(options)s',
@@ -541,10 +559,10 @@ class NetAppCmodeFileStorageLibrary(object):
@na_utils.trace
def _allocate_container_from_snapshot(
self, share, snapshot, vserver_client,
- snapshot_name_func=_get_valid_snapshot_name):
+ snapshot_name_func=_get_backend_snapshot_name):
"""Clones existing share."""
- share_name = self._get_valid_share_name(share['id'])
- parent_share_name = self._get_valid_share_name(snapshot['share_id'])
+ share_name = self._get_backend_share_name(share['id'])
+ parent_share_name = self._get_backend_share_name(snapshot['share_id'])
parent_snapshot_name = snapshot_name_func(self, snapshot['id'])
LOG.debug('Creating share from snapshot %s', snapshot['id'])
@@ -571,7 +589,7 @@ class NetAppCmodeFileStorageLibrary(object):
{'share': share['id'], 'error': error})
return
- share_name = self._get_valid_share_name(share['id'])
+ share_name = self._get_backend_share_name(share['id'])
if self._share_exists(share_name, vserver_client):
self._remove_export(share, vserver_client)
self._deallocate_container(share_name, vserver_client)
@@ -590,7 +608,7 @@ class NetAppCmodeFileStorageLibrary(object):
"""Creates NAS storage."""
helper = self._get_helper(share)
helper.set_client(vserver_client)
- share_name = self._get_valid_share_name(share['id'])
+ share_name = self._get_backend_share_name(share['id'])
interfaces = vserver_client.get_network_interfaces(
protocols=[share['share_proto']])
@@ -683,7 +701,7 @@ class NetAppCmodeFileStorageLibrary(object):
"""Deletes NAS storage."""
helper = self._get_helper(share)
helper.set_client(vserver_client)
- share_name = self._get_valid_share_name(share['id'])
+ share_name = self._get_backend_share_name(share['id'])
target = helper.get_target(share)
# Share may be in error state, so there's no share and target.
if target:
@@ -693,8 +711,8 @@ class NetAppCmodeFileStorageLibrary(object):
def create_snapshot(self, context, snapshot, share_server=None):
"""Creates a snapshot of a share."""
vserver, vserver_client = self._get_vserver(share_server=share_server)
- share_name = self._get_valid_share_name(snapshot['share_id'])
- snapshot_name = self._get_valid_snapshot_name(snapshot['id'])
+ share_name = self._get_backend_share_name(snapshot['share_id'])
+ snapshot_name = self._get_backend_snapshot_name(snapshot['id'])
LOG.debug('Creating snapshot %s', snapshot_name)
vserver_client.create_snapshot(share_name, snapshot_name)
@@ -713,8 +731,8 @@ class NetAppCmodeFileStorageLibrary(object):
{'snap': snapshot['id'], 'error': error})
return
- share_name = self._get_valid_share_name(snapshot['share_id'])
- snapshot_name = self._get_valid_snapshot_name(snapshot['id'])
+ share_name = self._get_backend_share_name(snapshot['share_id'])
+ snapshot_name = self._get_backend_snapshot_name(snapshot['id'])
try:
self._handle_busy_snapshot(vserver_client, share_name,
@@ -785,7 +803,7 @@ class NetAppCmodeFileStorageLibrary(object):
msg_args = {'export': share['export_location']}
raise exception.ManageInvalidShare(reason=msg % msg_args)
- share_name = self._get_valid_share_name(share['id'])
+ share_name = self._get_backend_share_name(share['id'])
aggregate_name = share_utils.extract_host(share['host'], level='pool')
# Get existing volume info
@@ -886,7 +904,7 @@ class NetAppCmodeFileStorageLibrary(object):
self._allocate_container_from_snapshot(
clone['share'], clone['snapshot'], vserver_client,
- NetAppCmodeFileStorageLibrary._get_valid_cg_snapshot_name)
+ NetAppCmodeFileStorageLibrary._get_backend_cg_snapshot_name)
export_locations = self._create_export(clone['share'],
share_server,
@@ -955,9 +973,9 @@ class NetAppCmodeFileStorageLibrary(object):
"""Creates a consistency group snapshot."""
vserver, vserver_client = self._get_vserver(share_server=share_server)
- share_names = [self._get_valid_share_name(member['share_id'])
+ share_names = [self._get_backend_share_name(member['share_id'])
for member in snap_dict.get('cgsnapshot_members', [])]
- snapshot_name = self._get_valid_cg_snapshot_name(snap_dict['id'])
+ snapshot_name = self._get_backend_cg_snapshot_name(snap_dict['id'])
if share_names:
LOG.debug('Creating CG snapshot %s.', snapshot_name)
@@ -980,9 +998,9 @@ class NetAppCmodeFileStorageLibrary(object):
{'snap': snap_dict['id'], 'error': error})
return None, None
- share_names = [self._get_valid_share_name(member['share_id'])
+ share_names = [self._get_backend_share_name(member['share_id'])
for member in snap_dict.get('cgsnapshot_members', [])]
- snapshot_name = self._get_valid_cg_snapshot_name(snap_dict['id'])
+ snapshot_name = self._get_backend_cg_snapshot_name(snap_dict['id'])
for share_name in share_names:
try:
@@ -1004,7 +1022,7 @@ class NetAppCmodeFileStorageLibrary(object):
def extend_share(self, share, new_size, share_server=None):
"""Extends size of existing share."""
vserver, vserver_client = self._get_vserver(share_server=share_server)
- share_name = self._get_valid_share_name(share['id'])
+ share_name = self._get_backend_share_name(share['id'])
LOG.debug('Extending share %(name)s to %(size)s GB.',
{'name': share_name, 'size': new_size})
vserver_client.set_volume_size(share_name, new_size)
@@ -1013,7 +1031,7 @@ class NetAppCmodeFileStorageLibrary(object):
def shrink_share(self, share, new_size, share_server=None):
"""Shrinks size of existing share."""
vserver, vserver_client = self._get_vserver(share_server=share_server)
- share_name = self._get_valid_share_name(share['id'])
+ share_name = self._get_backend_share_name(share['id'])
LOG.debug('Shrinking share %(name)s to %(size)s GB.',
{'name': share_name, 'size': new_size})
vserver_client.set_volume_size(share_name, new_size)
@@ -1022,6 +1040,12 @@ class NetAppCmodeFileStorageLibrary(object):
def update_access(self, context, share, access_rules, add_rules=None,
delete_rules=None, share_server=None):
"""Updates access rules for a share."""
+        # NOTE(ameade): Export rules cannot be applied to a non-active
+        # replica (a data protection volume), so skip the update here.
+ replica_state = share.get('replica_state')
+ if (replica_state is not None and
+ replica_state != constants.REPLICA_STATE_ACTIVE):
+ return
try:
vserver, vserver_client = self._get_vserver(
share_server=share_server)
@@ -1034,7 +1058,7 @@ class NetAppCmodeFileStorageLibrary(object):
{'share': share['id'], 'error': error})
return
- share_name = self._get_valid_share_name(share['id'])
+ share_name = self._get_backend_share_name(share['id'])
if self._share_exists(share_name, vserver_client):
helper = self._get_helper(share)
helper.set_client(vserver_client)
@@ -1096,3 +1120,237 @@ class NetAppCmodeFileStorageLibrary(object):
disk_types = self._client.get_aggregate_disk_types(aggregate_names)
for aggregate_name, disk_type in disk_types.items():
ssc_stats[aggregate_name]['netapp_disk_type'] = disk_type
+
+ def _find_active_replica(self, replica_list):
+ # NOTE(ameade): Find current active replica. There can only be one
+ # active replica (SnapMirror source volume) at a time in cDOT.
+ for r in replica_list:
+ if r['replica_state'] == constants.REPLICA_STATE_ACTIVE:
+ return r
+
+ def create_replica(self, context, replica_list, new_replica,
+ access_rules=None, share_server=None):
+ """Creates the new replica on this backend and sets up SnapMirror."""
+ active_replica = self._find_active_replica(replica_list)
+ dm_session = data_motion.DataMotionSession()
+
+ # 1. Create the destination share
+ dest_backend = share_utils.extract_host(new_replica['host'],
+ level='backend_name')
+
+ vserver = (dm_session.get_vserver_from_share(new_replica) or
+ self.configuration.netapp_vserver)
+
+ vserver_client = data_motion.get_client_for_backend(
+ dest_backend, vserver_name=vserver)
+
+ self._allocate_container(new_replica, vserver_client, replica=True)
+
+ # 2. Setup SnapMirror
+ dm_session.create_snapmirror(active_replica, new_replica)
+
+ model_update = {
+ 'export_locations': [],
+ 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
+ 'access_rules_status': constants.STATUS_ACTIVE,
+ }
+
+ return model_update
+
+ def delete_replica(self, context, replica_list, replica,
+ share_server=None):
+ """Removes the replica on this backend and destroys SnapMirror."""
+ dm_session = data_motion.DataMotionSession()
+ # 1. Remove SnapMirror
+ dest_backend = share_utils.extract_host(replica['host'],
+ level='backend_name')
+ vserver = (dm_session.get_vserver_from_share(replica) or
+ self.configuration.netapp_vserver)
+
+ # Ensure that all potential snapmirror relationships and their metadata
+ # involving the replica are destroyed.
+ for other_replica in replica_list:
+ dm_session.delete_snapmirror(other_replica, replica)
+ dm_session.delete_snapmirror(replica, other_replica)
+
+ # 2. Delete share
+ vserver_client = data_motion.get_client_for_backend(
+ dest_backend, vserver_name=vserver)
+ share_name = self._get_backend_share_name(replica['id'])
+ self._deallocate_container(share_name, vserver_client)
+
+ def update_replica_state(self, context, replica_list, replica,
+ access_rules, share_server=None):
+ """Returns the status of the given replica on this backend."""
+ active_replica = self._find_active_replica(replica_list)
+
+ share_name = self._get_backend_share_name(replica['id'])
+ vserver, vserver_client = self._get_vserver(share_server=share_server)
+ dm_session = data_motion.DataMotionSession()
+ try:
+ snapmirrors = dm_session.get_snapmirrors(active_replica, replica)
+ except netapp_api.NaApiError:
+ LOG.exception(_LE("Could not get snapmirrors for replica %s."),
+ replica['id'])
+ return constants.STATUS_ERROR
+
+ if not snapmirrors:
+ if replica['status'] != constants.STATUS_CREATING:
+ try:
+ dm_session.create_snapmirror(active_replica, replica)
+ except netapp_api.NaApiError:
+ LOG.exception(_LE("Could not create snapmirror for "
+ "replica %s."), replica['id'])
+ return constants.STATUS_ERROR
+ return constants.REPLICA_STATE_OUT_OF_SYNC
+
+ snapmirror = snapmirrors[0]
+
+ if (snapmirror.get('mirror-state') != 'snapmirrored' and
+ snapmirror.get('relationship-status') == 'transferring'):
+ return constants.REPLICA_STATE_OUT_OF_SYNC
+
+ if snapmirror.get('mirror-state') != 'snapmirrored':
+ try:
+ vserver_client.resume_snapmirror(snapmirror['source-vserver'],
+ snapmirror['source-volume'],
+ vserver,
+ share_name)
+ vserver_client.resync_snapmirror(snapmirror['source-vserver'],
+ snapmirror['source-volume'],
+ vserver,
+ share_name)
+ return constants.REPLICA_STATE_OUT_OF_SYNC
+ except netapp_api.NaApiError:
+ LOG.exception(_LE("Could not resync snapmirror."))
+ return constants.STATUS_ERROR
+
+ last_update_timestamp = float(
+ snapmirror.get('last-transfer-end-timestamp', 0))
+ # TODO(ameade): Have a configurable RPO for replicas, for now it is
+ # one hour.
+ if (last_update_timestamp and
+ (timeutils.is_older_than(
+ timeutils.iso8601_from_timestamp(last_update_timestamp),
+ 3600))):
+ return constants.REPLICA_STATE_OUT_OF_SYNC
+
+ return constants.REPLICA_STATE_IN_SYNC
+
+ def promote_replica(self, context, replica_list, replica, access_rules,
+ share_server=None):
+ """Switch SnapMirror relationships and allow r/w ops on replica.
+
+        Creates a DataMotion session and switches the direction of the
+        SnapMirror relationship between the currently 'active' instance
+        (the SnapMirror source volume) and the replica. Also attempts to
+        set up SnapMirror relationships between the other replicas and the
+        new SnapMirror source volume ('active' instance).
+ :param context: Request Context
+ :param replica_list: List of replicas, including the 'active' instance
+ :param replica: Replica to promote to SnapMirror source
+ :param access_rules: Access rules to apply to the replica
+ :param share_server: ShareServer class instance of replica
+ :return: Updated replica_list
+ """
+ orig_active_replica = self._find_active_replica(replica_list)
+
+ dm_session = data_motion.DataMotionSession()
+
+ new_replica_list = []
+
+ # Setup the new active replica
+ new_active_replica = (
+ self._convert_destination_replica_to_independent(
+ context, dm_session, orig_active_replica, replica,
+ access_rules, share_server=share_server))
+ new_replica_list.append(new_active_replica)
+
+ # Change the source replica for all destinations to the new
+ # active replica.
+ for r in replica_list:
+ if r['id'] != replica['id']:
+ r = self._safe_change_replica_source(dm_session, r,
+ orig_active_replica,
+ replica,
+ replica_list)
+ new_replica_list.append(r)
+
+ return new_replica_list
+
+ def _convert_destination_replica_to_independent(
+ self, context, dm_session, orig_active_replica, replica,
+ access_rules, share_server=None):
+ """Breaks SnapMirror and allows r/w ops on the destination replica.
+
+        For promotion, the existing SnapMirror relationship must be broken
+        and access rules must be granted to the broken-off replica so it
+        can be used as an independent share.
+ :param context: Request Context
+ :param dm_session: Data motion object for SnapMirror operations
+ :param orig_active_replica: Original SnapMirror source
+ :param replica: Replica to promote to SnapMirror source
+ :param access_rules: Access rules to apply to the replica
+ :param share_server: ShareServer class instance of replica
+ :return: Updated replica
+ """
+ vserver, vserver_client = self._get_vserver(share_server=share_server)
+ share_name = self._get_backend_share_name(replica['id'])
+
+ try:
+ # 1. Start an update to try to get a last minute transfer before we
+ # quiesce and break
+ dm_session.update_snapmirror(orig_active_replica, replica)
+ except netapp_api.NaApiError:
+ # Ignore any errors since the current source replica may be
+ # unreachable
+ pass
+ # 2. Break SnapMirror
+ dm_session.break_snapmirror(orig_active_replica, replica)
+
+ # 3. Setup access rules
+ new_active_replica = copy.deepcopy(replica)
+ helper = self._get_helper(replica)
+ helper.set_client(vserver_client)
+ try:
+ helper.update_access(replica, share_name, access_rules)
+ except Exception:
+ new_active_replica['access_rules_status'] = (
+ constants.STATUS_OUT_OF_SYNC)
+ else:
+ new_active_replica['access_rules_status'] = constants.STATUS_ACTIVE
+
+ new_active_replica['export_locations'] = self._create_export(
+ new_active_replica, share_server, vserver, vserver_client)
+ new_active_replica['replica_state'] = constants.REPLICA_STATE_ACTIVE
+ return new_active_replica
+
+ def _safe_change_replica_source(self, dm_session, replica,
+ orig_source_replica,
+ new_source_replica, replica_list):
+ """Attempts to change the SnapMirror source to new source.
+
+ If the attempt fails, 'replica_state' is set to 'error'.
+ :param dm_session: Data motion object for SnapMirror operations
+ :param replica: Replica that requires a change of source
+ :param orig_source_replica: Original SnapMirror source volume
+        :param new_source_replica: New SnapMirror source volume
+        :param replica_list: List of all replicas for the share
+ :return: Updated replica
+ """
+ try:
+ dm_session.change_snapmirror_source(replica,
+ orig_source_replica,
+ new_source_replica,
+ replica_list)
+ except Exception:
+ replica['replica_state'] = constants.STATUS_ERROR
+ replica['export_locations'] = []
+            msg = _LE("Failed to change replica (%s) to a SnapMirror "
+                      "destination.")
+            LOG.exception(msg, replica['id'])
+ return replica
+
+ replica['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC
+ replica['export_locations'] = []
+
+ return replica
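
Illustrative outcome only (ids are hypothetical): after promote_replica(), the promoted instance reports 'active' and carries new export locations and an access_rules_status, while every other replica is re-pointed at the new source and reports 'out_of_sync' (or 'error' if re-pointing failed) until its next successful transfer:

    # Hypothetical shape of the replica updates returned by promote_replica().
    updated_replica_list = [
        {'id': 'replica-2', 'replica_state': 'active'},
        {'id': 'replica-1', 'replica_state': 'out_of_sync',
         'export_locations': []},
    ]
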
diff --git a/manila/share/drivers/netapp/options.py b/manila/share/drivers/netapp/options.py
index 4a77e71aa8..24b04a2d23 100644
--- a/manila/share/drivers/netapp/options.py
+++ b/manila/share/drivers/netapp/options.py
@@ -107,6 +107,14 @@ netapp_support_opts = [
'trace info is written to the debug logs. Values '
'include method and api.')), ]
+netapp_replication_opts = [
+ cfg.IntOpt('netapp_snapmirror_quiesce_timeout',
+ min=0,
+ default=3600, # One Hour
+ help='The maximum time in seconds to wait for existing '
+ 'snapmirror transfers to complete before aborting when '
+ 'promoting a replica.'), ]
+
CONF = cfg.CONF
CONF.register_opts(netapp_proxy_opts)
CONF.register_opts(netapp_connection_opts)
@@ -114,3 +122,4 @@ CONF.register_opts(netapp_transport_opts)
CONF.register_opts(netapp_basicauth_opts)
CONF.register_opts(netapp_provisioning_opts)
CONF.register_opts(netapp_support_opts)
+CONF.register_opts(netapp_replication_opts)
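
For reference, a hypothetical manila.conf fragment wiring two backends into the same replication domain so the new option takes effect (all section names and values other than the option names themselves are illustrative):

    [cdot_backend_1]
    share_backend_name = cdot_backend_1
    driver_handles_share_servers = False
    replication_domain = replication_domain_1
    netapp_snapmirror_quiesce_timeout = 1800

    [cdot_backend_2]
    share_backend_name = cdot_backend_2
    driver_handles_share_servers = False
    replication_domain = replication_domain_1
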
diff --git a/manila/tests/share/drivers/netapp/dataontap/client/fakes.py b/manila/tests/share/drivers/netapp/dataontap/client/fakes.py
index 9874fcfaec..8a19a317a5 100644
--- a/manila/tests/share/drivers/netapp/dataontap/client/fakes.py
+++ b/manila/tests/share/drivers/netapp/dataontap/client/fakes.py
@@ -23,6 +23,11 @@ CONNECTION_INFO = {
'password': 'passw0rd'
}
+CLUSTER_NAME = 'fake_cluster'
+REMOTE_CLUSTER_NAME = 'fake_cluster_2'
+CLUSTER_ADDRESS_1 = 'fake_cluster_address'
+CLUSTER_ADDRESS_2 = 'fake_cluster_address_2'
+VERSION = 'NetApp Release 8.2.1 Cluster-Mode: Fri Mar 21 14:25:07 PDT 2014'
NODE_NAME = 'fake_node'
VSERVER_NAME = 'fake_vserver'
VSERVER_NAME_2 = 'fake_vserver_2'
@@ -68,6 +73,11 @@ LIF_NAME = LIF_NAME_TEMPLATE % {'net_allocation_id': NET_ALLOCATION_ID}
IPSPACE_NAME = 'fake_ipspace'
BROADCAST_DOMAIN = 'fake_domain'
MTU = 9000
+SM_SOURCE_VSERVER = 'fake_source_vserver'
+SM_SOURCE_VOLUME = 'fake_source_volume'
+SM_DEST_VSERVER = 'fake_destination_vserver'
+SM_DEST_VOLUME = 'fake_destination_volume'
+
IPSPACES = [{
'uuid': 'fake_uuid',
@@ -203,6 +213,21 @@ ONTAPI_VERSION_RESPONSE = etree.XML("""
""")
+SYSTEM_GET_VERSION_RESPONSE = etree.XML("""
+  <results status="passed">
+    <build-timestamp>1395426307</build-timestamp>
+    <is-clustered>true</is-clustered>
+    <version>%(version)s</version>
+    <version-tuple>
+      <system-version-tuple>
+        <generation>8</generation>
+        <major>2</major>
+        <minor>1</minor>
+      </system-version-tuple>
+    </version-tuple>
+  </results>
+""" % {'version': VERSION})
+
LICENSE_V2_LIST_INFO_RESPONSE = etree.XML("""
@@ -1568,3 +1593,122 @@ SIS_GET_ITER_RESPONSE = etree.XML("""
'vserver': VSERVER_NAME,
'volume': SHARE_NAME,
})
+
+CLUSTER_PEER_GET_ITER_RESPONSE = etree.XML("""
+  <results status="passed">
+    <attributes-list>
+      <cluster-peer-info>
+        <active-addresses>
+          <remote-inet-address>%(addr1)s</remote-inet-address>
+          <remote-inet-address>%(addr2)s</remote-inet-address>
+        </active-addresses>
+        <availability>available</availability>
+        <cluster-name>%(cluster)s</cluster-name>
+        <cluster-uuid>fake_uuid</cluster-uuid>
+        <peer-addresses>
+          <remote-inet-address>%(addr1)s</remote-inet-address>
+        </peer-addresses>
+        <remote-cluster-name>%(remote_cluster)s</remote-cluster-name>
+        <serial-number>fake_serial_number</serial-number>
+        <timeout>60</timeout>
+      </cluster-peer-info>
+    </attributes-list>
+    <num-records>1</num-records>
+  </results>
+""" % {
+    'addr1': CLUSTER_ADDRESS_1,
+    'addr2': CLUSTER_ADDRESS_2,
+    'cluster': CLUSTER_NAME,
+    'remote_cluster': REMOTE_CLUSTER_NAME,
+})
+
+CLUSTER_PEER_POLICY_GET_RESPONSE = etree.XML("""
+  <results status="passed">
+    <attributes>
+      <cluster-peer-policy>
+        <is-unauthenticated-access-permitted>false
+        </is-unauthenticated-access-permitted>
+        <passphrase-minimum-length>8</passphrase-minimum-length>
+      </cluster-peer-policy>
+    </attributes>
+  </results>
+""")
+
+VSERVER_PEER_GET_ITER_RESPONSE = etree.XML("""
+  <results status="passed">
+    <attributes-list>
+      <vserver-peer-info>
+        <applications>
+          <vserver-peer-application>snapmirror</vserver-peer-application>
+        </applications>
+        <peer-cluster>%(cluster)s</peer-cluster>
+        <peer-state>peered</peer-state>
+        <peer-vserver>%(vserver2)s</peer-vserver>
+        <vserver>%(vserver1)s</vserver>
+      </vserver-peer-info>
+    </attributes-list>
+    <num-records>2</num-records>
+  </results>
+""" % {
+ 'cluster': CLUSTER_NAME,
+ 'vserver1': VSERVER_NAME,
+ 'vserver2': VSERVER_NAME_2
+})
+
+SNAPMIRROR_GET_ITER_RESPONSE = etree.XML("""
+  <results status="passed">
+    <attributes-list>
+      <snapmirror-info>
+        <destination-volume>fake_destination_volume</destination-volume>
+        <destination-volume-node>fake_destination_node</destination-volume-node>
+        <destination-vserver>fake_destination_vserver</destination-vserver>
+        <exported-snapshot>fake_snapshot</exported-snapshot>
+        <exported-snapshot-timestamp>1442701782</exported-snapshot-timestamp>
+        <is-constituent>false</is-constituent>
+        <is-healthy>true</is-healthy>
+        <lag-time>2187</lag-time>
+        <last-transfer-duration>109</last-transfer-duration>
+        <last-transfer-end-timestamp>1442701890</last-transfer-end-timestamp>
+        <last-transfer-from>test:manila</last-transfer-from>
+        <last-transfer-size>1171456</last-transfer-size>
+        <last-transfer-type>initialize</last-transfer-type>
+        <max-transfer-rate>0</max-transfer-rate>
+        <mirror-state>snapmirrored</mirror-state>
+        <newest-snapshot>fake_snapshot</newest-snapshot>
+        <newest-snapshot-timestamp>1442701782</newest-snapshot-timestamp>
+        <policy>DPDefault</policy>
+        <relationship-control-plane>v2</relationship-control-plane>
+        <relationship-id>ea8bfcc6-5f1d-11e5-8446-123478563412</relationship-id>
+        <relationship-status>idle</relationship-status>
+        <relationship-type>data_protection</relationship-type>
+        <schedule>daily</schedule>
+        <source-volume>fake_source_volume</source-volume>
+        <source-vserver>fake_source_vserver</source-vserver>
+        <vserver>fake_destination_vserver</vserver>
+      </snapmirror-info>
+    </attributes-list>
+    <num-records>1</num-records>
+  </results>
+""")
+
+SNAPMIRROR_GET_ITER_FILTERED_RESPONSE = etree.XML("""
+  <results status="passed">
+    <attributes-list>
+      <snapmirror-info>
+        <destination-vserver>fake_destination_vserver</destination-vserver>
+        <destination-volume>fake_destination_volume</destination-volume>
+        <is-healthy>true</is-healthy>
+        <mirror-state>snapmirrored</mirror-state>
+        <schedule>daily</schedule>
+        <source-vserver>fake_source_vserver</source-vserver>
+        <source-volume>fake_source_volume</source-volume>
+      </snapmirror-info>
+    </attributes-list>
+    <num-records>1</num-records>
+  </results>
+""")
+
+SNAPMIRROR_INITIALIZE_RESULT = etree.XML("""
+  <results status="passed">
+    <result-status>succeeded</result-status>
+  </results>
+""")
diff --git a/manila/tests/share/drivers/netapp/dataontap/client/test_client_base.py b/manila/tests/share/drivers/netapp/dataontap/client/test_client_base.py
index 3ed8fbc9e6..04a89276ef 100644
--- a/manila/tests/share/drivers/netapp/dataontap/client/test_client_base.py
+++ b/manila/tests/share/drivers/netapp/dataontap/client/test_client_base.py
@@ -23,6 +23,7 @@ from manila import test
from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake
+@ddt.ddt
class NetAppBaseClientTestCase(test.TestCase):
def setUp(self):
@@ -57,12 +58,29 @@ class NetAppBaseClientTestCase(test.TestCase):
self.assertEqual(1, major)
self.assertEqual(20, minor)
+ def test_get_system_version(self):
+ version_response = netapp_api.NaElement(
+ fake.SYSTEM_GET_VERSION_RESPONSE)
+ self.connection.invoke_successfully.return_value = version_response
+
+ result = self.client.get_system_version()
+
+ self.assertEqual(fake.VERSION, result['version'])
+ self.assertEqual(('8', '2', '1'), result['version-tuple'])
+
def test_init_features(self):
self.client._init_features()
self.assertSetEqual(set(), self.client.features.defined_features)
+ @ddt.data('tag_name', '{http://www.netapp.com/filer/admin}tag_name')
+ def test_strip_xml_namespace(self, element):
+
+ result = self.client._strip_xml_namespace(element)
+
+ self.assertEqual('tag_name', result)
+
def test_send_request(self):
element = netapp_api.NaElement('fake-api')
diff --git a/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py b/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py
index 07f3ce0a72..bea3d369ab 100644
--- a/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py
+++ b/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py
@@ -2039,6 +2039,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
'containing-aggr-name': fake.SHARE_AGGREGATE_NAME,
'size': '100g',
'volume': fake.SHARE_NAME,
+ 'volume-type': 'rw',
'junction-path': '/%s' % fake.SHARE_NAME,
}
@@ -2065,6 +2066,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
'junction-path': '/%s' % fake.SHARE_NAME,
'space-reserve': 'none',
'language-code': 'en-US',
+ 'volume-type': 'rw',
'snapshot-policy': 'default',
'percentage-snapshot-reserve': '15',
}
@@ -3901,3 +3903,756 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.assertRaises(netapp_api.NaApiError,
self.client.check_for_cluster_credentials)
+
+ def test_create_cluster_peer(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.create_cluster_peer(['fake_address_1', 'fake_address_2'],
+ 'fake_user', 'fake_password',
+ 'fake_passphrase')
+
+ cluster_peer_create_args = {
+ 'peer-addresses': [
+ {'remote-inet-address': 'fake_address_1'},
+ {'remote-inet-address': 'fake_address_2'},
+ ],
+ 'user-name': 'fake_user',
+ 'password': 'fake_password',
+ 'passphrase': 'fake_passphrase',
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('cluster-peer-create', cluster_peer_create_args)])
+
+ def test_get_cluster_peers(self):
+
+ api_response = netapp_api.NaElement(
+ fake.CLUSTER_PEER_GET_ITER_RESPONSE)
+ self.mock_object(self.client,
+ 'send_request',
+ mock.Mock(return_value=api_response))
+
+ result = self.client.get_cluster_peers()
+
+ cluster_peer_get_iter_args = {'max-records': 1000}
+ self.client.send_request.assert_has_calls([
+ mock.call('cluster-peer-get-iter', cluster_peer_get_iter_args)])
+
+ expected = [{
+ 'active-addresses': [
+ fake.CLUSTER_ADDRESS_1,
+ fake.CLUSTER_ADDRESS_2
+ ],
+ 'availability': 'available',
+ 'cluster-name': fake.CLUSTER_NAME,
+ 'cluster-uuid': 'fake_uuid',
+ 'peer-addresses': [fake.CLUSTER_ADDRESS_1],
+ 'remote-cluster-name': fake.REMOTE_CLUSTER_NAME,
+ 'serial-number': 'fake_serial_number',
+ 'timeout': '60',
+ }]
+
+ self.assertEqual(expected, result)
+
+ def test_get_cluster_peers_single(self):
+
+ api_response = netapp_api.NaElement(
+ fake.CLUSTER_PEER_GET_ITER_RESPONSE)
+ self.mock_object(self.client,
+ 'send_request',
+ mock.Mock(return_value=api_response))
+
+ self.client.get_cluster_peers(remote_cluster_name=fake.CLUSTER_NAME)
+
+ cluster_peer_get_iter_args = {
+ 'query': {
+ 'cluster-peer-info': {
+ 'remote-cluster-name': fake.CLUSTER_NAME,
+ }
+ },
+ 'max-records': 1000,
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('cluster-peer-get-iter', cluster_peer_get_iter_args)])
+
+ def test_get_cluster_peers_not_found(self):
+
+ api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
+ self.mock_object(self.client,
+ 'send_request',
+ mock.Mock(return_value=api_response))
+
+ result = self.client.get_cluster_peers(
+ remote_cluster_name=fake.CLUSTER_NAME)
+
+ self.assertEqual([], result)
+ self.assertTrue(self.client.send_request.called)
+
+ def test_delete_cluster_peer(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.delete_cluster_peer(fake.CLUSTER_NAME)
+
+ cluster_peer_delete_args = {'cluster-name': fake.CLUSTER_NAME}
+ self.client.send_request.assert_has_calls([
+ mock.call('cluster-peer-delete', cluster_peer_delete_args)])
+
+ def test_get_cluster_peer_policy(self):
+
+ self.client.features.add_feature('CLUSTER_PEER_POLICY')
+
+ api_response = netapp_api.NaElement(
+ fake.CLUSTER_PEER_POLICY_GET_RESPONSE)
+ self.mock_object(self.client,
+ 'send_request',
+ mock.Mock(return_value=api_response))
+
+ result = self.client.get_cluster_peer_policy()
+
+ expected = {
+ 'is-unauthenticated-access-permitted': False,
+ 'passphrase-minimum-length': 8
+ }
+ self.assertEqual(expected, result)
+ self.assertTrue(self.client.send_request.called)
+
+ def test_get_cluster_peer_policy_not_supported(self):
+
+ result = self.client.get_cluster_peer_policy()
+
+ self.assertEqual({}, result)
+
+ def test_set_cluster_peer_policy_not_supported(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.set_cluster_peer_policy()
+
+ self.assertFalse(self.client.send_request.called)
+
+ def test_set_cluster_peer_policy_no_arguments(self):
+
+ self.client.features.add_feature('CLUSTER_PEER_POLICY')
+ self.mock_object(self.client, 'send_request')
+
+ self.client.set_cluster_peer_policy()
+
+ self.assertFalse(self.client.send_request.called)
+
+ def test_set_cluster_peer_policy(self):
+
+ self.client.features.add_feature('CLUSTER_PEER_POLICY')
+ self.mock_object(self.client, 'send_request')
+
+ self.client.set_cluster_peer_policy(
+ is_unauthenticated_access_permitted=True,
+ passphrase_minimum_length=12)
+
+ cluster_peer_policy_modify_args = {
+ 'is-unauthenticated-access-permitted': 'true',
+ 'passphrase-minlength': '12',
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('cluster-peer-policy-modify',
+ cluster_peer_policy_modify_args)])
+
+ def test_create_vserver_peer(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.create_vserver_peer('fake_vserver', 'fake_vserver_peer')
+
+ vserver_peer_create_args = {
+ 'vserver': 'fake_vserver',
+ 'peer-vserver': 'fake_vserver_peer',
+ 'applications': [
+ {'vserver-peer-application': 'snapmirror'},
+ ],
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('vserver-peer-create', vserver_peer_create_args)])
+
+ def test_delete_vserver_peer(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.delete_vserver_peer('fake_vserver', 'fake_vserver_peer')
+
+ vserver_peer_delete_args = {
+ 'vserver': 'fake_vserver',
+ 'peer-vserver': 'fake_vserver_peer',
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('vserver-peer-delete', vserver_peer_delete_args)])
+
+ def test_accept_vserver_peer(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.accept_vserver_peer('fake_vserver', 'fake_vserver_peer')
+
+ vserver_peer_accept_args = {
+ 'vserver': 'fake_vserver',
+ 'peer-vserver': 'fake_vserver_peer',
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('vserver-peer-accept', vserver_peer_accept_args)])
+
+ def test_get_vserver_peers(self):
+
+ api_response = netapp_api.NaElement(
+ fake.VSERVER_PEER_GET_ITER_RESPONSE)
+ self.mock_object(self.client,
+ 'send_request',
+ mock.Mock(return_value=api_response))
+
+ result = self.client.get_vserver_peers(
+ vserver_name=fake.VSERVER_NAME,
+ peer_vserver_name=fake.VSERVER_NAME_2)
+
+ vserver_peer_get_iter_args = {
+ 'query': {
+ 'vserver-peer-info': {
+ 'vserver': fake.VSERVER_NAME,
+ 'peer-vserver': fake.VSERVER_NAME_2,
+ }
+ },
+ 'max-records': 1000,
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('vserver-peer-get-iter', vserver_peer_get_iter_args)])
+
+ expected = [{
+ 'vserver': 'fake_vserver',
+ 'peer-vserver': 'fake_vserver_2',
+ 'peer-state': 'peered',
+ 'peer-cluster': 'fake_cluster'
+ }]
+ self.assertEqual(expected, result)
+
+ def test_get_vserver_peers_not_found(self):
+
+ api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
+ self.mock_object(self.client,
+ 'send_request',
+ mock.Mock(return_value=api_response))
+
+ result = self.client.get_vserver_peers(
+ vserver_name=fake.VSERVER_NAME,
+ peer_vserver_name=fake.VSERVER_NAME_2)
+
+ self.assertEqual([], result)
+ self.assertTrue(self.client.send_request.called)
+
+ def test_ensure_snapmirror_v2(self):
+
+ self.assertIsNone(self.client._ensure_snapmirror_v2())
+
+ def test_ensure_snapmirror_v2_not_supported(self):
+
+ self.client.features.add_feature('SNAPMIRROR_V2', supported=False)
+
+ self.assertRaises(exception.NetAppException,
+ self.client._ensure_snapmirror_v2)
+
+ @ddt.data({'schedule': 'fake_schedule', 'policy': 'fake_policy'},
+ {'schedule': None, 'policy': None})
+ @ddt.unpack
+ def test_create_snapmirror(self, schedule, policy):
+ self.mock_object(self.client, 'send_request')
+
+ self.client.create_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
+ schedule=schedule, policy=policy)
+
+ snapmirror_create_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ 'relationship-type': 'data_protection',
+ }
+ if schedule:
+ snapmirror_create_args['schedule'] = schedule
+ if policy:
+ snapmirror_create_args['policy'] = policy
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-create', snapmirror_create_args)])
+
+ def test_create_snapmirror_already_exists(self):
+ mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(
+ code=netapp_api.ERELATION_EXISTS))
+ self.mock_object(self.client, 'send_request', mock_send_req)
+
+ self.client.create_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+
+ snapmirror_create_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ 'relationship-type': 'data_protection',
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-create', snapmirror_create_args)])
+
+ def test_create_snapmirror_error(self):
+ mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(
+ code=0))
+ self.mock_object(self.client, 'send_request', mock_send_req)
+
+ self.assertRaises(netapp_api.NaApiError, self.client.create_snapmirror,
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+ self.assertTrue(self.client.send_request.called)
+
+ @ddt.data(
+ {
+ 'source_snapshot': 'fake_snapshot',
+ 'transfer_priority': 'fake_priority'
+ },
+ {
+ 'source_snapshot': None,
+ 'transfer_priority': None
+ }
+ )
+ @ddt.unpack
+ def test_initialize_snapmirror(self, source_snapshot, transfer_priority):
+
+ api_response = netapp_api.NaElement(fake.SNAPMIRROR_INITIALIZE_RESULT)
+ self.mock_object(self.client,
+ 'send_request',
+ mock.Mock(return_value=api_response))
+
+ result = self.client.initialize_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
+ source_snapshot=source_snapshot,
+ transfer_priority=transfer_priority)
+
+ snapmirror_initialize_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ }
+ if source_snapshot:
+ snapmirror_initialize_args['source-snapshot'] = source_snapshot
+ if transfer_priority:
+ snapmirror_initialize_args['transfer-priority'] = transfer_priority
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-initialize', snapmirror_initialize_args)])
+
+ expected = {
+ 'operation-id': None,
+ 'status': 'succeeded',
+ 'jobid': None,
+ 'error-code': None,
+ 'error-message': None
+ }
+ self.assertEqual(expected, result)
+
+ @ddt.data(True, False)
+ def test_release_snapmirror(self, relationship_info_only):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.release_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
+ relationship_info_only=relationship_info_only)
+
+ snapmirror_release_args = {
+ 'query': {
+ 'snapmirror-destination-info': {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ 'relationship-info-only': ('true' if relationship_info_only
+ else 'false'),
+ }
+ }
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-release-iter', snapmirror_release_args)])
+
+ def test_quiesce_snapmirror(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.quiesce_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+
+ snapmirror_quiesce_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-quiesce', snapmirror_quiesce_args)])
+
+ @ddt.data(True, False)
+ def test_abort_snapmirror(self, clear_checkpoint):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.abort_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
+ clear_checkpoint=clear_checkpoint)
+
+ snapmirror_abort_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ 'clear-checkpoint': 'true' if clear_checkpoint else 'false',
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-abort', snapmirror_abort_args)])
+
+ def test_abort_snapmirror_no_transfer_in_progress(self):
+ mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(
+ code=netapp_api.ENOTRANSFER_IN_PROGRESS))
+ self.mock_object(self.client, 'send_request', mock_send_req)
+
+ self.client.abort_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+
+ snapmirror_abort_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ 'clear-checkpoint': 'false',
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-abort', snapmirror_abort_args)])
+
+ def test_abort_snapmirror_error(self):
+ mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0))
+ self.mock_object(self.client, 'send_request', mock_send_req)
+
+ self.assertRaises(netapp_api.NaApiError, self.client.abort_snapmirror,
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+
+ def test_break_snapmirror(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.break_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+
+ snapmirror_break_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-break', snapmirror_break_args)])
+
+ @ddt.data(
+ {
+ 'schedule': 'fake_schedule',
+ 'policy': 'fake_policy',
+ 'tries': 5,
+ 'max_transfer_rate': 1024,
+ },
+ {
+ 'schedule': None,
+ 'policy': None,
+ 'tries': None,
+ 'max_transfer_rate': None,
+ }
+ )
+ @ddt.unpack
+ def test_modify_snapmirror(self, schedule, policy, tries,
+ max_transfer_rate):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.modify_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
+ schedule=schedule, policy=policy, tries=tries,
+ max_transfer_rate=max_transfer_rate)
+
+ snapmirror_modify_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ }
+ if schedule:
+ snapmirror_modify_args['schedule'] = schedule
+ if policy:
+ snapmirror_modify_args['policy'] = policy
+ if tries:
+ snapmirror_modify_args['tries'] = tries
+ if max_transfer_rate:
+ snapmirror_modify_args['max-transfer-rate'] = max_transfer_rate
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-modify', snapmirror_modify_args)])
+
+ def test_update_snapmirror(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.update_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+
+ snapmirror_update_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-update', snapmirror_update_args)])
+
+ def test_update_snapmirror_already_transferring(self):
+ mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(
+ code=netapp_api.ETRANSFER_IN_PROGRESS))
+ self.mock_object(self.client, 'send_request', mock_send_req)
+
+ self.client.update_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+
+ snapmirror_update_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-update', snapmirror_update_args)])
+
+ def test_update_snapmirror_already_transferring_two(self):
+ mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(
+ code=netapp_api.EANOTHER_OP_ACTIVE))
+ self.mock_object(self.client, 'send_request', mock_send_req)
+
+ self.client.update_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+
+ snapmirror_update_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-update', snapmirror_update_args)])
+
+ def test_update_snapmirror_error(self):
+ mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0))
+ self.mock_object(self.client, 'send_request', mock_send_req)
+
+ self.assertRaises(netapp_api.NaApiError, self.client.update_snapmirror,
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+
+ def test_delete_snapmirror(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.delete_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+
+ snapmirror_delete_args = {
+ 'query': {
+ 'snapmirror-info': {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ }
+ }
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-destroy-iter', snapmirror_delete_args)])
+
+ def test__get_snapmirrors(self):
+
+ api_response = netapp_api.NaElement(fake.SNAPMIRROR_GET_ITER_RESPONSE)
+ self.mock_object(self.client,
+ 'send_request',
+ mock.Mock(return_value=api_response))
+
+ desired_attributes = {
+ 'snapmirror-info': {
+ 'source-vserver': None,
+ 'source-volume': None,
+ 'destination-vserver': None,
+ 'destination-volume': None,
+ 'is-healthy': None,
+ }
+ }
+
+ result = self.client._get_snapmirrors(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
+ desired_attributes=desired_attributes)
+
+ snapmirror_get_iter_args = {
+ 'query': {
+ 'snapmirror-info': {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ },
+ },
+ 'desired-attributes': {
+ 'snapmirror-info': {
+ 'source-vserver': None,
+ 'source-volume': None,
+ 'destination-vserver': None,
+ 'destination-volume': None,
+ 'is-healthy': None,
+ },
+ },
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-get-iter', snapmirror_get_iter_args)])
+ self.assertEqual(1, len(result))
+
+ def test__get_snapmirrors_not_found(self):
+
+ api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
+ self.mock_object(self.client,
+ 'send_request',
+ mock.Mock(return_value=api_response))
+
+ result = self.client._get_snapmirrors()
+
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-get-iter', {})])
+
+ self.assertEqual([], result)
+
+ def test_get_snapmirrors(self):
+
+ api_response = netapp_api.NaElement(
+ fake.SNAPMIRROR_GET_ITER_FILTERED_RESPONSE)
+ self.mock_object(self.client,
+ 'send_request',
+ mock.Mock(return_value=api_response))
+
+ desired_attributes = ['source-vserver', 'source-volume',
+ 'destination-vserver', 'destination-volume',
+ 'is-healthy', 'mirror-state', 'schedule']
+
+ result = self.client.get_snapmirrors(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
+ desired_attributes=desired_attributes)
+
+ snapmirror_get_iter_args = {
+ 'query': {
+ 'snapmirror-info': {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ },
+ },
+ 'desired-attributes': {
+ 'snapmirror-info': {
+ 'source-vserver': None,
+ 'source-volume': None,
+ 'destination-vserver': None,
+ 'destination-volume': None,
+ 'is-healthy': None,
+ 'mirror-state': None,
+ 'schedule': None,
+ },
+ },
+ }
+
+ expected = [{
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ 'is-healthy': 'true',
+ 'mirror-state': 'snapmirrored',
+ 'schedule': 'daily',
+ }]
+
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-get-iter', snapmirror_get_iter_args)])
+ self.assertEqual(expected, result)
+
+ def test_resume_snapmirror(self):
+ self.mock_object(self.client, 'send_request')
+
+ self.client.resume_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+
+ snapmirror_resume_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-resume', snapmirror_resume_args)])
+
+ def test_resume_snapmirror_not_quiesced(self):
+ mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(
+ code=netapp_api.ERELATION_NOT_QUIESCED))
+ self.mock_object(self.client, 'send_request', mock_send_req)
+
+ self.client.resume_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+
+ snapmirror_resume_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-resume', snapmirror_resume_args)])
+
+ def test_resume_snapmirror_error(self):
+ mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0))
+ self.mock_object(self.client, 'send_request', mock_send_req)
+
+ self.assertRaises(netapp_api.NaApiError, self.client.resume_snapmirror,
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+
+ def test_resync_snapmirror(self):
+ self.mock_object(self.client, 'send_request')
+
+ self.client.resync_snapmirror(
+ fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
+ fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
+
+ snapmirror_resync_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'source-volume': fake.SM_SOURCE_VOLUME,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'destination-volume': fake.SM_DEST_VOLUME,
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-resync', snapmirror_resync_args)])
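
The tests above pin down a consistent error-handling contract for the new client methods: each SnapMirror call tolerates a specific set of "already in the desired state" error codes (ERELATION_EXISTS on create, ENOTRANSFER_IN_PROGRESS on abort, ETRANSFER_IN_PROGRESS and EANOTHER_OP_ACTIVE on update, ERELATION_NOT_QUIESCED on resume) and re-raises everything else. A generic sketch of that pattern follows; the helper name and the stand-alone 'send_request' callable are assumptions for illustration, not the driver's actual structure.

    from manila.share.drivers.netapp.dataontap.client import api as netapp_api

    # Sketch of the tolerance pattern the tests above verify.
    def send_tolerating(send_request, api_name, api_args, tolerated_codes):
        """Issue a ZAPI call, ignoring errors that mean 'nothing to do'."""
        try:
            return send_request(api_name, api_args)
        except netapp_api.NaApiError as e:
            if e.code not in tolerated_codes:
                raise  # unexpected failures still propagate to the caller

    # Example: an update that is already transferring is not an error.
    # send_tolerating(client.send_request, 'snapmirror-update', args,
    #                 {netapp_api.ETRANSFER_IN_PROGRESS,
    #                  netapp_api.EANOTHER_OP_ACTIVE})
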
diff --git a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_data_motion.py b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_data_motion.py
new file mode 100644
index 0000000000..566e6ad66c
--- /dev/null
+++ b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_data_motion.py
@@ -0,0 +1,497 @@
+# Copyright (c) 2015 Alex Meade. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import time
+
+import mock
+from oslo_config import cfg
+
+from manila.share import configuration
+from manila.share import driver
+from manila.share.drivers.netapp.dataontap.client import api as netapp_api
+from manila.share.drivers.netapp.dataontap.client import client_cmode
+from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
+from manila.share.drivers.netapp import options as na_opts
+from manila import test
+from manila.tests.share.drivers.netapp.dataontap import fakes as fake
+from manila.tests.share.drivers.netapp import fakes as na_fakes
+
+
+CONF = cfg.CONF
+
+
+class NetAppCDOTDataMotionTestCase(test.TestCase):
+
+ def setUp(self):
+ super(NetAppCDOTDataMotionTestCase, self).setUp()
+ self.backend = 'backend1'
+ self.mock_cmode_client = self.mock_object(client_cmode,
+ "NetAppCmodeClient",
+ mock.Mock())
+ self.config = configuration.Configuration(driver.share_opts,
+ config_group=self.backend)
+ self.config.append_config_values(na_opts.netapp_cluster_opts)
+ self.config.append_config_values(na_opts.netapp_connection_opts)
+ self.config.append_config_values(na_opts.netapp_basicauth_opts)
+ self.config.append_config_values(na_opts.netapp_transport_opts)
+ self.config.append_config_values(na_opts.netapp_support_opts)
+ self.config.append_config_values(na_opts.netapp_provisioning_opts)
+ self.config.append_config_values(na_opts.netapp_replication_opts)
+ CONF.set_override("share_backend_name", self.backend,
+ group=self.backend)
+ CONF.set_override("netapp_transport_type", "https",
+ group=self.backend)
+ CONF.set_override("netapp_login", "fake_user",
+ group=self.backend)
+ CONF.set_override("netapp_password", "fake_password",
+ group=self.backend)
+ CONF.set_override("netapp_server_hostname", "fake_hostname",
+ group=self.backend)
+ CONF.set_override("netapp_server_port", 8866,
+ group=self.backend)
+
+ def test_get_client_for_backend(self):
+ self.mock_object(data_motion, "get_backend_configuration",
+ mock.Mock(return_value=self.config))
+
+ data_motion.get_client_for_backend(self.backend)
+
+ self.mock_cmode_client.assert_called_once_with(
+ hostname='fake_hostname', password='fake_password',
+ username='fake_user', transport_type='https', port=8866,
+ trace=mock.ANY, vserver=None)
+
+ def test_get_client_for_backend_with_vserver(self):
+ self.mock_object(data_motion, "get_backend_configuration",
+ mock.Mock(return_value=self.config))
+
+ CONF.set_override("netapp_vserver", 'fake_vserver',
+ group=self.backend)
+
+ data_motion.get_client_for_backend(self.backend)
+
+ self.mock_cmode_client.assert_called_once_with(
+ hostname='fake_hostname', password='fake_password',
+ username='fake_user', transport_type='https', port=8866,
+ trace=mock.ANY, vserver='fake_vserver')
+
+ def test_get_config_for_backend(self):
+ self.mock_object(data_motion, "CONF")
+ data_motion.CONF.list_all_sections.return_value = [self.backend]
+
+ config = data_motion.get_backend_configuration(self.backend)
+
+ self.assertEqual(self.backend, config.share_backend_name)
+
+ def test_get_config_for_backend_share_backend_name_mismatch(self):
+ self.mock_object(data_motion, "CONF")
+ configuration.Configuration(driver.share_opts,
+ config_group='my_happy_stanza')
+ self.config.append_config_values(na_opts.netapp_cluster_opts)
+ self.config.append_config_values(na_opts.netapp_connection_opts)
+ self.config.append_config_values(na_opts.netapp_basicauth_opts)
+ self.config.append_config_values(na_opts.netapp_transport_opts)
+ self.config.append_config_values(na_opts.netapp_support_opts)
+ self.config.append_config_values(na_opts.netapp_provisioning_opts)
+ self.config.append_config_values(na_opts.netapp_replication_opts)
+ CONF.set_override("share_backend_name", self.backend,
+ group='my_happy_stanza')
+ data_motion.CONF.list_all_sections.return_value = ['my_happy_stanza']
+
+ config = data_motion.get_backend_configuration(self.backend)
+
+ self.assertEqual(self.backend, config.share_backend_name)
+
+ def test_get_config_for_backend_not_configured(self):
+ self.mock_object(data_motion, "CONF")
+ data_motion.CONF.list_all_sections.return_value = []
+
+ config = data_motion.get_backend_configuration(self.backend)
+
+ self.assertIsNone(config)
+
+
+class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
+
+ def setUp(self):
+ super(NetAppCDOTDataMotionSessionTestCase, self).setUp()
+ self.source_backend = 'backend1'
+ self.dest_backend = 'backend2'
+
+ config = configuration.Configuration(driver.share_opts,
+ config_group=self.source_backend)
+ config.append_config_values(na_opts.netapp_cluster_opts)
+ config.append_config_values(na_opts.netapp_connection_opts)
+ config.append_config_values(na_opts.netapp_basicauth_opts)
+ config.append_config_values(na_opts.netapp_transport_opts)
+ config.append_config_values(na_opts.netapp_support_opts)
+ config.append_config_values(na_opts.netapp_provisioning_opts)
+ config.append_config_values(na_opts.netapp_replication_opts)
+
+ self.mock_object(data_motion, "get_backend_configuration",
+ mock.Mock(return_value=config))
+
+ self.mock_cmode_client = self.mock_object(client_cmode,
+ "NetAppCmodeClient",
+ mock.Mock())
+ self.dm_session = data_motion.DataMotionSession()
+ self.fake_src_share = copy.deepcopy(fake.SHARE)
+ self.fake_src_share_server = copy.deepcopy(fake.SHARE_SERVER)
+ self.source_vserver = 'source_vserver'
+ self.fake_src_share_server['backend_details']['vserver_name'] = (
+ self.source_vserver
+ )
+ self.fake_src_share['share_server'] = self.fake_src_share_server
+ self.fake_src_share['id'] = 'c02d497a-236c-4852-812a-0d39373e312a'
+ self.fake_src_vol_name = 'share_c02d497a_236c_4852_812a_0d39373e312a'
+ self.fake_dest_share = copy.deepcopy(fake.SHARE)
+ self.fake_dest_share_server = copy.deepcopy(fake.SHARE_SERVER)
+ self.dest_vserver = 'dest_vserver'
+ self.fake_dest_share_server['backend_details']['vserver_name'] = (
+ self.dest_vserver
+ )
+ self.fake_dest_share['share_server'] = self.fake_dest_share_server
+ self.fake_dest_share['id'] = '34fbaf57-745d-460f-8270-3378c2945e30'
+ self.fake_dest_vol_name = 'share_34fbaf57_745d_460f_8270_3378c2945e30'
+
+ self.mock_src_client = mock.Mock()
+ self.mock_dest_client = mock.Mock()
+ self.mock_object(data_motion, 'get_client_for_backend',
+ mock.Mock(side_effect=[self.mock_dest_client,
+ self.mock_src_client]))
+
+ def test_create_snapmirror(self):
+ mock_dest_client = mock.Mock()
+ self.mock_object(data_motion, 'get_client_for_backend',
+ mock.Mock(return_value=mock_dest_client))
+
+ self.dm_session.create_snapmirror(self.fake_src_share,
+ self.fake_dest_share)
+
+ mock_dest_client.create_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name, schedule='hourly'
+ )
+ mock_dest_client.initialize_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name
+ )
+
+ def test_delete_snapmirror(self):
+ mock_src_client = mock.Mock()
+ mock_dest_client = mock.Mock()
+ self.mock_object(data_motion, 'get_client_for_backend',
+ mock.Mock(side_effect=[mock_dest_client,
+ mock_src_client]))
+
+ self.dm_session.delete_snapmirror(self.fake_src_share,
+ self.fake_dest_share)
+
+ mock_dest_client.abort_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name, clear_checkpoint=False
+ )
+ mock_dest_client.delete_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name
+ )
+ mock_src_client.release_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name
+ )
+
+ def test_delete_snapmirror_does_not_exist(self):
+ """Ensure delete succeeds when the snapmirror does not exist."""
+ mock_src_client = mock.Mock()
+ mock_dest_client = mock.Mock()
+ mock_dest_client.abort_snapmirror.side_effect = netapp_api.NaApiError(
+ code=netapp_api.EAPIERROR
+ )
+ self.mock_object(data_motion, 'get_client_for_backend',
+ mock.Mock(side_effect=[mock_dest_client,
+ mock_src_client]))
+
+ self.dm_session.delete_snapmirror(self.fake_src_share,
+ self.fake_dest_share)
+
+ mock_dest_client.abort_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name, clear_checkpoint=False
+ )
+ mock_dest_client.delete_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name
+ )
+ mock_src_client.release_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name
+ )
+
+ def test_delete_snapmirror_error_deleting(self):
+ """Ensure delete succeeds when the snapmirror does not exist."""
+ mock_src_client = mock.Mock()
+ mock_dest_client = mock.Mock()
+ mock_dest_client.delete_snapmirror.side_effect = netapp_api.NaApiError(
+ code=netapp_api.ESOURCE_IS_DIFFERENT
+ )
+ self.mock_object(data_motion, 'get_client_for_backend',
+ mock.Mock(side_effect=[mock_dest_client,
+ mock_src_client]))
+
+ self.dm_session.delete_snapmirror(self.fake_src_share,
+ self.fake_dest_share)
+
+ mock_dest_client.abort_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name, clear_checkpoint=False
+ )
+ mock_dest_client.delete_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name
+ )
+ mock_src_client.release_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name
+ )
+
+ def test_delete_snapmirror_error_releasing(self):
+ """Ensure delete succeeds when the snapmirror does not exist."""
+ mock_src_client = mock.Mock()
+ mock_dest_client = mock.Mock()
+ mock_src_client.release_snapmirror.side_effect = (
+ netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND))
+ self.mock_object(data_motion, 'get_client_for_backend',
+ mock.Mock(side_effect=[mock_dest_client,
+ mock_src_client]))
+
+ self.dm_session.delete_snapmirror(self.fake_src_share,
+ self.fake_dest_share)
+
+ mock_dest_client.abort_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name, clear_checkpoint=False
+ )
+ mock_dest_client.delete_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name
+ )
+ mock_src_client.release_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name
+ )
+
+ def test_delete_snapmirror_without_release(self):
+ mock_src_client = mock.Mock()
+ mock_dest_client = mock.Mock()
+ self.mock_object(data_motion, 'get_client_for_backend',
+ mock.Mock(side_effect=[mock_dest_client,
+ mock_src_client]))
+
+ self.dm_session.delete_snapmirror(self.fake_src_share,
+ self.fake_dest_share,
+ release=False)
+
+ mock_dest_client.abort_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name, clear_checkpoint=False
+ )
+ mock_dest_client.delete_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name
+ )
+ self.assertFalse(mock_src_client.release_snapmirror.called)
+
+ def test_delete_snapmirror_source_unreachable(self):
+ mock_src_client = mock.Mock()
+ mock_dest_client = mock.Mock()
+ self.mock_object(data_motion, 'get_client_for_backend',
+ mock.Mock(side_effect=[mock_dest_client,
+ Exception]))
+
+ self.dm_session.delete_snapmirror(self.fake_src_share,
+ self.fake_dest_share)
+
+ mock_dest_client.abort_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name, clear_checkpoint=False
+ )
+ mock_dest_client.delete_snapmirror.assert_called_once_with(
+ mock.ANY, self.fake_src_vol_name, mock.ANY,
+ self.fake_dest_vol_name
+ )
+
+ self.assertFalse(mock_src_client.release_snapmirror.called)
+
+ def test_break_snapmirror(self):
+ self.mock_object(self.dm_session, 'quiesce_then_abort')
+
+ self.dm_session.break_snapmirror(self.fake_src_share,
+ self.fake_dest_share)
+
+ self.mock_dest_client.break_snapmirror.assert_called_once_with(
+ self.source_vserver, self.fake_src_vol_name,
+ self.dest_vserver, self.fake_dest_vol_name)
+
+ self.dm_session.quiesce_then_abort.assert_called_once_with(
+ self.fake_src_share, self.fake_dest_share)
+
+ self.mock_dest_client.mount_volume.assert_called_once_with(
+ self.fake_dest_vol_name)
+
+ def test_break_snapmirror_wait_for_quiesced(self):
+ self.mock_object(self.dm_session, 'quiesce_then_abort')
+
+ self.dm_session.break_snapmirror(self.fake_src_share,
+ self.fake_dest_share)
+
+ self.dm_session.quiesce_then_abort.assert_called_once_with(
+ self.fake_src_share, self.fake_dest_share)
+
+ self.mock_dest_client.break_snapmirror.assert_called_once_with(
+ self.source_vserver, self.fake_src_vol_name,
+ self.dest_vserver, self.fake_dest_vol_name)
+
+ self.mock_dest_client.mount_volume.assert_called_once_with(
+ self.fake_dest_vol_name)
+
+ def test_quiesce_then_abort_timeout(self):
+ self.mock_object(time, 'sleep')
+ mock_get_snapmirrors = mock.Mock(
+ return_value=[{'relationship-status': "transferring"}])
+ self.mock_object(self.mock_dest_client, 'get_snapmirrors',
+ mock_get_snapmirrors)
+ mock_backend_config = na_fakes.create_configuration()
+ mock_backend_config.netapp_snapmirror_quiesce_timeout = 10
+ self.mock_object(data_motion, 'get_backend_configuration',
+ mock.Mock(return_value=mock_backend_config))
+
+ self.dm_session.quiesce_then_abort(self.fake_src_share,
+ self.fake_dest_share)
+
+ self.mock_dest_client.get_snapmirrors.assert_called_with(
+ self.source_vserver, self.fake_src_vol_name,
+ self.dest_vserver, self.fake_dest_vol_name,
+ desired_attributes=['relationship-status', 'mirror-state']
+ )
+ self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count)
+
+ self.mock_dest_client.quiesce_snapmirror.assert_called_with(
+ self.source_vserver, self.fake_src_vol_name,
+ self.dest_vserver, self.fake_dest_vol_name)
+
+ self.mock_dest_client.abort_snapmirror.assert_called_once_with(
+ self.source_vserver, self.fake_src_vol_name,
+ self.dest_vserver, self.fake_dest_vol_name,
+ clear_checkpoint=False
+ )
+
+ def test_quiesce_then_abort_wait_for_quiesced(self):
+ self.mock_object(time, 'sleep')
+ self.mock_object(self.mock_dest_client, 'get_snapmirrors',
+ mock.Mock(side_effect=[
+ [{'relationship-status': "transferring"}],
+ [{'relationship-status': "quiesced"}]]))
+
+ self.dm_session.quiesce_then_abort(self.fake_src_share,
+ self.fake_dest_share)
+
+ self.mock_dest_client.get_snapmirrors.assert_called_with(
+ self.source_vserver, self.fake_src_vol_name,
+ self.dest_vserver, self.fake_dest_vol_name,
+ desired_attributes=['relationship-status', 'mirror-state']
+ )
+ self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count)
+
+ self.mock_dest_client.quiesce_snapmirror.assert_called_once_with(
+ self.source_vserver, self.fake_src_vol_name,
+ self.dest_vserver, self.fake_dest_vol_name)
+
+ def test_resync_snapmirror(self):
+ self.dm_session.resync_snapmirror(self.fake_src_share,
+ self.fake_dest_share)
+
+ self.mock_dest_client.resync_snapmirror.assert_called_once_with(
+ self.source_vserver, self.fake_src_vol_name,
+ self.dest_vserver, self.fake_dest_vol_name)
+
+ def test_change_snapmirror_source(self):
+ fake_new_src_share = copy.deepcopy(fake.SHARE)
+ fake_new_src_share['id'] = 'd02d497a-236c-4852-812a-0d39373e312a'
+ fake_new_src_share_name = 'share_d02d497a_236c_4852_812a_0d39373e312a'
+ mock_new_src_client = mock.Mock()
+ self.mock_object(self.dm_session, 'delete_snapmirror')
+ self.mock_object(data_motion, 'get_client_for_backend',
+ mock.Mock(side_effect=[self.mock_dest_client,
+ self.mock_src_client,
+ self.mock_dest_client,
+ mock_new_src_client]))
+
+ self.dm_session.change_snapmirror_source(
+ self.fake_dest_share, self.fake_src_share, fake_new_src_share,
+ [self.fake_dest_share, self.fake_src_share, fake_new_src_share])
+
+ self.assertFalse(self.mock_src_client.release_snapmirror.called)
+
+ self.assertEqual(4, self.dm_session.delete_snapmirror.call_count)
+ self.dm_session.delete_snapmirror.assert_called_with(
+ mock.ANY, mock.ANY, release=False
+ )
+
+ self.mock_dest_client.create_snapmirror.assert_called_once_with(
+ mock.ANY, fake_new_src_share_name, mock.ANY,
+ self.fake_dest_vol_name, schedule='hourly'
+ )
+
+ self.mock_dest_client.resync_snapmirror.assert_called_once_with(
+ mock.ANY, fake_new_src_share_name, mock.ANY,
+ self.fake_dest_vol_name
+ )
+
+ def test_get_snapmirrors(self):
+ self.mock_object(self.mock_dest_client, 'get_snapmirrors')
+
+ self.dm_session.get_snapmirrors(self.fake_src_share,
+ self.fake_dest_share)
+
+ self.mock_dest_client.get_snapmirrors.assert_called_with(
+ self.source_vserver, self.fake_src_vol_name,
+ self.dest_vserver, self.fake_dest_vol_name,
+ desired_attributes=['relationship-status',
+ 'mirror-state',
+ 'source-vserver',
+ 'source-volume',
+ 'last-transfer-end-timestamp']
+ )
+ self.assertEqual(1, self.mock_dest_client.get_snapmirrors.call_count)
+
+ def test_update_snapmirror(self):
+ self.mock_object(self.mock_dest_client, 'get_snapmirrors')
+
+ self.dm_session.update_snapmirror(self.fake_src_share,
+ self.fake_dest_share)
+
+ self.mock_dest_client.update_snapmirror.assert_called_once_with(
+ self.source_vserver, self.fake_src_vol_name,
+ self.dest_vserver, self.fake_dest_vol_name)
+
+ def test_resume_snapmirror(self):
+ self.mock_object(self.mock_dest_client, 'get_snapmirrors')
+
+ self.dm_session.resume_snapmirror(self.fake_src_share,
+ self.fake_dest_share)
+
+ self.mock_dest_client.resume_snapmirror.assert_called_once_with(
+ self.source_vserver, self.fake_src_vol_name,
+ self.dest_vserver, self.fake_dest_vol_name)
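
Taken together, the delete_snapmirror tests above describe a best-effort teardown: abort any in-flight transfer, destroy the relationship on the destination, and only then try to release it on the source, treating each step's "already gone" errors as success and skipping the release when the source is unreachable or not wanted. A hedged sketch of that orchestration, with the client objects and volume names passed in directly rather than resolved the way the session does:

    from manila.share.drivers.netapp.dataontap.client import api as netapp_api

    # Sketch of the best-effort teardown the tests above exercise.
    def teardown_snapmirror(dest_client, src_client, src_vserver, src_vol,
                            dest_vserver, dest_vol, release=True):
        """Abort, destroy, then optionally release a SnapMirror relationship."""
        try:
            dest_client.abort_snapmirror(src_vserver, src_vol,
                                         dest_vserver, dest_vol,
                                         clear_checkpoint=False)
        except netapp_api.NaApiError:
            pass  # no transfer in progress is fine

        try:
            dest_client.delete_snapmirror(src_vserver, src_vol,
                                          dest_vserver, dest_vol)
        except netapp_api.NaApiError:
            pass  # relationship already gone or re-pointed elsewhere

        if release and src_client is not None:
            try:
                src_client.release_snapmirror(src_vserver, src_vol,
                                              dest_vserver, dest_vol)
            except netapp_api.NaApiError:
                pass  # nothing left to release on the source
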
diff --git a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py
index 9928af3046..ddbf32b7b4 100644
--- a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py
+++ b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py
@@ -25,10 +25,14 @@ import ddt
import mock
from oslo_log import log
from oslo_service import loopingcall
+from oslo_utils import timeutils
from oslo_utils import units
+from manila.common import constants
from manila import exception
+from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_cmode
+from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp.dataontap.protocols import cifs_cmode
from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode
@@ -73,6 +77,15 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library._client = mock.Mock()
self.client = self.library._client
self.context = mock.Mock()
+ self.fake_replica = copy.deepcopy(fake.SHARE)
+ self.fake_replica_2 = copy.deepcopy(fake.SHARE)
+ self.fake_replica_2['id'] = fake.SHARE_ID2
+ self.fake_replica_2['replica_state'] = (
+ constants.REPLICA_STATE_OUT_OF_SYNC)
+ self.mock_dm_session = mock.Mock()
+ self.mock_object(data_motion, "DataMotionSession",
+ mock.Mock(return_value=self.mock_dm_session))
+ self.mock_object(data_motion, 'get_client_for_backend')
def test_init(self):
self.assertEqual(fake.DRIVER_NAME, self.library.driver_name)
@@ -234,21 +247,28 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertTrue(mock_ems_periodic_task.start.called)
self.assertTrue(mock_housekeeping_periodic_task.start.called)
- def test_get_valid_share_name(self):
+ def test_get_backend_share_name(self):
- result = self.library._get_valid_share_name(fake.SHARE_ID)
+ result = self.library._get_backend_share_name(fake.SHARE_ID)
expected = (fake.VOLUME_NAME_TEMPLATE %
{'share_id': fake.SHARE_ID.replace('-', '_')})
self.assertEqual(expected, result)
- def test_get_valid_snapshot_name(self):
+ def test_get_backend_snapshot_name(self):
- result = self.library._get_valid_snapshot_name(fake.SNAPSHOT_ID)
+ result = self.library._get_backend_snapshot_name(fake.SNAPSHOT_ID)
expected = 'share_snapshot_' + fake.SNAPSHOT_ID.replace('-', '_')
self.assertEqual(expected, result)
+ def test_get_backend_cg_snapshot_name(self):
+
+ result = self.library._get_backend_cg_snapshot_name(fake.SNAPSHOT_ID)
+ expected = 'share_cg_snapshot_' + fake.SNAPSHOT_ID.replace('-', '_')
+
+ self.assertEqual(expected, result)
+
def test_get_aggregate_space_cluster_creds(self):
self.library._have_cluster_creds = True
@@ -326,6 +346,31 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
}
self.assertDictEqual(expected, result)
+ def test_get_share_stats_with_replication(self):
+
+ self.library.configuration.replication_domain = "fake_domain"
+ self.mock_object(self.library,
+ '_get_pools',
+ mock.Mock(return_value=fake.POOLS))
+
+ result = self.library.get_share_stats()
+
+ expected = {
+ 'share_backend_name': fake.BACKEND_NAME,
+ 'driver_name': fake.DRIVER_NAME,
+ 'vendor_name': 'NetApp',
+ 'driver_version': '1.0',
+ 'netapp_storage_family': 'ontap_cluster',
+ 'storage_protocol': 'NFS_CIFS',
+ 'total_capacity_gb': 0.0,
+ 'free_capacity_gb': 0.0,
+ 'consistency_group_support': 'host',
+ 'replication_type': 'dr',
+ 'replication_domain': 'fake_domain',
+ 'pools': fake.POOLS,
+ }
+ self.assertDictEqual(expected, result)
+
def test_get_share_server_pools(self):
self.mock_object(self.library,
@@ -550,7 +595,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertEqual('fake_export_location', result)
def test_allocate_container(self):
- self.mock_object(self.library, '_get_valid_share_name', mock.Mock(
+ self.mock_object(self.library, '_get_backend_share_name', mock.Mock(
return_value=fake.SHARE_NAME))
self.mock_object(share_utils, 'extract_host', mock.Mock(
return_value=fake.POOL_NAME))
@@ -583,8 +628,31 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertDictEqual(fake.REMAPPED_OVERLAPPING_EXTRA_SPEC, result)
+ def test_allocate_container_as_replica(self):
+ self.mock_object(self.library, '_get_backend_share_name', mock.Mock(
+ return_value=fake.SHARE_NAME))
+ self.mock_object(share_utils, 'extract_host', mock.Mock(
+ return_value=fake.POOL_NAME))
+ self.mock_object(share_types, 'get_extra_specs_from_share',
+ mock.Mock(return_value=fake.EXTRA_SPEC))
+
+ self.mock_object(self.library, '_check_boolean_extra_specs_validity')
+ self.mock_object(self.library, '_get_boolean_provisioning_options',
+ mock.Mock(return_value=fake.PROVISIONING_OPTIONS))
+ vserver_client = mock.Mock()
+
+ self.library._allocate_container(fake.EXTRA_SPEC_SHARE,
+ vserver_client, replica=True)
+
+ vserver_client.create_volume.assert_called_once_with(
+ fake.POOL_NAME, fake.SHARE_NAME, fake.SHARE['size'],
+ thin_provisioned=True, snapshot_policy='default',
+ language='en-US', dedup_enabled=True,
+ compression_enabled=False, max_files=5000,
+ snapshot_reserve=8, volume_type='dp')
+
def test_allocate_container_no_pool_name(self):
- self.mock_object(self.library, '_get_valid_share_name', mock.Mock(
+ self.mock_object(self.library, '_get_backend_share_name', mock.Mock(
return_value=fake.SHARE_NAME))
self.mock_object(share_utils, 'extract_host', mock.Mock(
return_value=None))
@@ -596,7 +664,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library._allocate_container, fake.SHARE,
vserver_client)
- self.library._get_valid_share_name.assert_called_once_with(
+ self.library._get_backend_share_name.assert_called_once_with(
fake.SHARE['id'])
share_utils.extract_host.assert_called_once_with(fake.SHARE['host'],
level='pool')
@@ -788,10 +856,10 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.SNAPSHOT,
vserver_client)
- share_name = self.library._get_valid_share_name(fake.SHARE['id'])
- parent_share_name = self.library._get_valid_share_name(
+ share_name = self.library._get_backend_share_name(fake.SHARE['id'])
+ parent_share_name = self.library._get_backend_share_name(
fake.SNAPSHOT['share_id'])
- parent_snapshot_name = self.library._get_valid_snapshot_name(
+ parent_snapshot_name = self.library._get_backend_snapshot_name(
fake.SNAPSHOT['id'])
vserver_client.create_volume_clone.assert_called_once_with(
share_name,
@@ -835,7 +903,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.SHARE,
share_server=fake.SHARE_SERVER)
- share_name = self.library._get_valid_share_name(fake.SHARE['id'])
+ share_name = self.library._get_backend_share_name(fake.SHARE['id'])
mock_share_exists.assert_called_once_with(share_name, vserver_client)
mock_remove_export.assert_called_once_with(fake.SHARE, vserver_client)
mock_deallocate_container.assert_called_once_with(share_name,
@@ -884,7 +952,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.SHARE,
share_server=fake.SHARE_SERVER)
- share_name = self.library._get_valid_share_name(fake.SHARE['id'])
+ share_name = self.library._get_backend_share_name(fake.SHARE['id'])
mock_share_exists.assert_called_once_with(share_name, vserver_client)
self.assertFalse(mock_remove_export.called)
self.assertFalse(mock_deallocate_container.called)
@@ -1049,9 +1117,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.SNAPSHOT,
share_server=fake.SHARE_SERVER)
- share_name = self.library._get_valid_share_name(
+ share_name = self.library._get_backend_share_name(
fake.SNAPSHOT['share_id'])
- snapshot_name = self.library._get_valid_snapshot_name(
+ snapshot_name = self.library._get_backend_snapshot_name(
fake.SNAPSHOT['id'])
vserver_client.create_snapshot.assert_called_once_with(share_name,
snapshot_name)
@@ -1070,9 +1138,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.SNAPSHOT,
share_server=fake.SHARE_SERVER)
- share_name = self.library._get_valid_share_name(
+ share_name = self.library._get_backend_share_name(
fake.SNAPSHOT['share_id'])
- snapshot_name = self.library._get_valid_snapshot_name(
+ snapshot_name = self.library._get_backend_snapshot_name(
fake.SNAPSHOT['id'])
self.assertTrue(mock_handle_busy_snapshot.called)
vserver_client.delete_snapshot.assert_called_once_with(share_name,
@@ -1612,12 +1680,12 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
share_server=fake.SHARE_SERVER)
share_names = [
- self.library._get_valid_share_name(
+ self.library._get_backend_share_name(
fake.CG_SNAPSHOT_MEMBER_1['share_id']),
- self.library._get_valid_share_name(
+ self.library._get_backend_share_name(
fake.CG_SNAPSHOT_MEMBER_2['share_id'])
]
- snapshot_name = self.library._get_valid_cg_snapshot_name(
+ snapshot_name = self.library._get_backend_cg_snapshot_name(
fake.CG_SNAPSHOT['id'])
vserver_client.create_cg_snapshot.assert_called_once_with(
share_names, snapshot_name)
@@ -1660,12 +1728,12 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
share_server=fake.SHARE_SERVER)
share_names = [
- self.library._get_valid_share_name(
+ self.library._get_backend_share_name(
fake.CG_SNAPSHOT_MEMBER_1['share_id']),
- self.library._get_valid_share_name(
+ self.library._get_backend_share_name(
fake.CG_SNAPSHOT_MEMBER_2['share_id'])
]
- snapshot_name = self.library._get_valid_cg_snapshot_name(
+ snapshot_name = self.library._get_backend_cg_snapshot_name(
fake.CG_SNAPSHOT['id'])
mock_handle_busy_snapshot.assert_has_calls([
@@ -1720,12 +1788,12 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
share_server=fake.SHARE_SERVER)
share_names = [
- self.library._get_valid_share_name(
+ self.library._get_backend_share_name(
fake.CG_SNAPSHOT_MEMBER_1['share_id']),
- self.library._get_valid_share_name(
+ self.library._get_backend_share_name(
fake.CG_SNAPSHOT_MEMBER_2['share_id'])
]
- snapshot_name = self.library._get_valid_cg_snapshot_name(
+ snapshot_name = self.library._get_backend_cg_snapshot_name(
fake.CG_SNAPSHOT['id'])
mock_handle_busy_snapshot.assert_has_calls([
@@ -1809,7 +1877,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock_get_vserver.assert_called_once_with(
share_server=fake.SHARE_SERVER)
- share_name = self.library._get_valid_share_name(fake.SHARE['id'])
+ share_name = self.library._get_backend_share_name(fake.SHARE['id'])
mock_share_exists.assert_called_once_with(share_name, vserver_client)
protocol_helper.set_client.assert_called_once_with(vserver_client)
protocol_helper.update_access.assert_called_once_with(
@@ -1863,11 +1931,48 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock_get_vserver.assert_called_once_with(
share_server=fake.SHARE_SERVER)
- share_name = self.library._get_valid_share_name(fake.SHARE['id'])
+ share_name = self.library._get_backend_share_name(fake.SHARE['id'])
mock_share_exists.assert_called_once_with(share_name, vserver_client)
self.assertFalse(protocol_helper.set_client.called)
self.assertFalse(protocol_helper.update_access.called)
+ def test_update_access_to_active_replica(self):
+ fake_share = copy.deepcopy(fake.SHARE)
+ fake_share['replica_state'] = constants.REPLICA_STATE_ACTIVE
+ vserver_client = mock.Mock()
+ mock_get_vserver = self.mock_object(
+ self.library, '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1, vserver_client)))
+ protocol_helper = mock.Mock()
+ protocol_helper.update_access.return_value = None
+ self.mock_object(self.library,
+ '_get_helper',
+ mock.Mock(return_value=protocol_helper))
+ mock_share_exists = self.mock_object(self.library,
+ '_share_exists',
+ mock.Mock(return_value=True))
+
+ self.library.update_access(self.context,
+ fake_share,
+ [fake.SHARE_ACCESS],
+ share_server=fake.SHARE_SERVER)
+
+ mock_get_vserver.assert_called_once_with(
+ share_server=fake.SHARE_SERVER)
+ share_name = self.library._get_backend_share_name(fake.SHARE['id'])
+ mock_share_exists.assert_called_once_with(share_name, vserver_client)
+ protocol_helper.set_client.assert_called_once_with(vserver_client)
+ protocol_helper.update_access.assert_called_once_with(
+ fake.SHARE, fake.SHARE_NAME, [fake.SHARE_ACCESS])
+
+ def test_update_access_to_in_sync_replica(self):
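+ # update_access on a non-active (in-sync) replica is expected to be a
+ # no-op; completing without errors is the only assertion needed here.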
+ fake_share = copy.deepcopy(fake.SHARE)
+ fake_share['replica_state'] = constants.REPLICA_STATE_IN_SYNC
+ self.library.update_access(self.context,
+ fake_share,
+ [fake.SHARE_ACCESS],
+ share_server=fake.SHARE_SERVER)
+
def test_setup_server(self):
self.assertRaises(NotImplementedError,
self.library.setup_server,
@@ -1955,3 +2060,583 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertDictEqual({}, ssc_stats)
self.assertFalse(self.library._client.get_aggregate_raid_types.called)
+
+ def test_create_replica(self):
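+ # A newly created replica starts out-of-sync with no export locations,
+ # and a SnapMirror relationship is created against the active share.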
+ self.mock_object(self.library,
+ '_allocate_container')
+ mock_dm_session = mock.Mock()
+ self.mock_object(data_motion, "DataMotionSession",
+ mock.Mock(return_value=mock_dm_session))
+ self.mock_object(data_motion, 'get_client_for_backend')
+ self.mock_object(mock_dm_session, 'get_vserver_from_share',
+ mock.Mock(return_value=fake.VSERVER1))
+ expected_model_update = {
+ 'export_locations': [],
+ 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
+ 'access_rules_status': constants.STATUS_ACTIVE,
+ }
+
+ model_update = self.library.create_replica(
+ None, [fake.SHARE], fake.SHARE, share_server=None)
+
+ self.assertDictMatch(expected_model_update, model_update)
+ mock_dm_session.create_snapmirror.assert_called_once_with(fake.SHARE,
+ fake.SHARE)
+ data_motion.get_client_for_backend.assert_called_once_with(
+ fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
+
+ def test_create_replica_with_share_server(self):
+ self.mock_object(self.library,
+ '_allocate_container',
+ mock.Mock())
+ mock_dm_session = mock.Mock()
+ self.mock_object(data_motion, "DataMotionSession",
+ mock.Mock(return_value=mock_dm_session))
+ self.mock_object(data_motion, 'get_client_for_backend')
+ self.mock_object(mock_dm_session, 'get_vserver_from_share',
+ mock.Mock(return_value=fake.VSERVER1))
+
+ expected_model_update = {
+ 'export_locations': [],
+ 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
+ 'access_rules_status': constants.STATUS_ACTIVE,
+ }
+
+ model_update = self.library.create_replica(
+ None, [fake.SHARE], fake.SHARE, share_server=fake.SHARE_SERVER)
+
+ self.assertDictMatch(expected_model_update, model_update)
+ mock_dm_session.create_snapmirror.assert_called_once_with(fake.SHARE,
+ fake.SHARE)
+ data_motion.get_client_for_backend.assert_called_once_with(
+ fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
+
+ def test_delete_replica(self):
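+ # Deleting a replica removes the SnapMirror relationships (two
+ # delete_snapmirror calls, one per direction) and returns None.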
+ self.mock_object(self.library,
+ '_deallocate_container',
+ mock.Mock())
+ mock_dm_session = mock.Mock()
+ self.mock_object(data_motion, "DataMotionSession",
+ mock.Mock(return_value=mock_dm_session))
+ self.mock_object(data_motion, 'get_client_for_backend')
+ self.mock_object(mock_dm_session, 'get_vserver_from_share',
+ mock.Mock(return_value=fake.VSERVER1))
+
+ result = self.library.delete_replica(None,
+ [fake.SHARE],
+ fake.SHARE,
+ share_server=None)
+ self.assertIsNone(result)
+ mock_dm_session.delete_snapmirror.assert_called_with(fake.SHARE,
+ fake.SHARE)
+ self.assertEqual(2, mock_dm_session.delete_snapmirror.call_count)
+ data_motion.get_client_for_backend.assert_called_with(
+ fake.BACKEND_NAME, vserver_name=mock.ANY)
+ self.assertEqual(1, data_motion.get_client_for_backend.call_count)
+
+ def test_delete_replica_with_share_server(self):
+ self.mock_object(self.library,
+ '_deallocate_container',
+ mock.Mock())
+ mock_dm_session = mock.Mock()
+ self.mock_object(data_motion, "DataMotionSession",
+ mock.Mock(return_value=mock_dm_session))
+ self.mock_object(data_motion, 'get_client_for_backend')
+ self.mock_object(mock_dm_session, 'get_vserver_from_share',
+ mock.Mock(return_value=fake.VSERVER1))
+
+ result = self.library.delete_replica(None,
+ [fake.SHARE],
+ fake.SHARE,
+ share_server=fake.SHARE_SERVER)
+ self.assertIsNone(result)
+ mock_dm_session.delete_snapmirror.assert_called_with(fake.SHARE,
+ fake.SHARE)
+ self.assertEqual(2, mock_dm_session.delete_snapmirror.call_count)
+ data_motion.get_client_for_backend.assert_called_once_with(
+ fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
+
+ def test_update_replica_state_no_snapmirror_share_creating(self):
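+ # While the replica is still being created, a missing SnapMirror is not
+ # recreated; the replica is simply reported as out-of-sync.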
+ vserver_client = mock.Mock()
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ vserver_client)))
+ self.mock_dm_session.get_snapmirrors = mock.Mock(return_value=[])
+
+ replica = copy.deepcopy(fake.SHARE)
+ replica['status'] = constants.STATUS_CREATING
+
+ result = self.library.update_replica_state(
+ None, [replica], replica, None, share_server=None)
+
+ self.assertFalse(self.mock_dm_session.create_snapmirror.called)
+ self.assertEqual(constants.STATUS_OUT_OF_SYNC, result)
+
+ def test_update_replica_state_no_snapmirror_create_failed(self):
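+ # If recreating a missing SnapMirror raises an API error, the replica
+ # state is reported as 'error'.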
+ vserver_client = mock.Mock()
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ vserver_client)))
+ self.mock_dm_session.get_snapmirrors = mock.Mock(return_value=[])
+ self.mock_dm_session.create_snapmirror.side_effect = (
+ netapp_api.NaApiError(code=0))
+
+ replica = copy.deepcopy(fake.SHARE)
+ replica['status'] = constants.REPLICA_STATE_OUT_OF_SYNC
+
+ result = self.library.update_replica_state(
+ None, [replica], replica, None, share_server=None)
+
+ self.assertTrue(self.mock_dm_session.create_snapmirror.called)
+ self.assertEqual(constants.STATUS_ERROR, result)
+
+ @ddt.data(constants.STATUS_ERROR, constants.STATUS_AVAILABLE)
+ def test_update_replica_state_no_snapmirror(self, status):
+ vserver_client = mock.Mock()
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ vserver_client)))
+ self.mock_dm_session.get_snapmirrors = mock.Mock(return_value=[])
+
+ replica = copy.deepcopy(fake.SHARE)
+ replica['status'] = status
+
+ result = self.library.update_replica_state(
+ None, [replica], replica, None, share_server=None)
+
+ self.assertEqual(1, self.mock_dm_session.create_snapmirror.call_count)
+ self.assertEqual(constants.STATUS_OUT_OF_SYNC, result)
+
+ def test_update_replica_state_broken_snapmirror(self):
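+ # A broken-off relationship triggers a resync and is reported
+ # out-of-sync.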
+ fake_snapmirror = {
+ 'mirror-state': 'broken-off',
+ 'relationship-status': 'idle',
+ 'source-vserver': fake.VSERVER2,
+ 'source-volume': 'fake_volume',
+ 'last-transfer-end-timestamp': '%s' % float(time.time() - 10000)
+ }
+ vserver_client = mock.Mock()
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ vserver_client)))
+ self.mock_dm_session.get_snapmirrors = mock.Mock(
+ return_value=[fake_snapmirror])
+
+ result = self.library.update_replica_state(None, [fake.SHARE],
+ fake.SHARE, None,
+ share_server=None)
+
+ vserver_client.resync_snapmirror.assert_called_once_with(
+ fake.VSERVER2, 'fake_volume', fake.VSERVER1, fake.SHARE['name']
+ )
+
+ self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, result)
+
+ def test_update_replica_state_snapmirror_still_initializing(self):
+ fake_snapmirror = {
+ 'mirror-state': 'uninitialized',
+ 'relationship-status': 'transferring',
+ 'source-vserver': fake.VSERVER2,
+ 'source-volume': 'fake_volume',
+ 'last-transfer-end-timestamp': '%s' % float(time.time() - 10000)
+ }
+ vserver_client = mock.Mock()
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ vserver_client)))
+ self.mock_dm_session.get_snapmirrors = mock.Mock(
+ return_value=[fake_snapmirror])
+
+ result = self.library.update_replica_state(None, [fake.SHARE],
+ fake.SHARE, None,
+ share_server=None)
+
+ self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, result)
+
+ def test_update_replica_state_fail_to_get_snapmirrors(self):
+ vserver_client = mock.Mock()
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ vserver_client)))
+ self.mock_dm_session.get_snapmirrors.side_effect = (
+ netapp_api.NaApiError(code=0))
+
+ result = self.library.update_replica_state(None, [fake.SHARE],
+ fake.SHARE, None,
+ share_server=None)
+ self.assertTrue(self.mock_dm_session.get_snapmirrors.called)
+ self.assertEqual(constants.STATUS_ERROR, result)
+
+ def test_update_replica_state_broken_snapmirror_resync_error(self):
+ fake_snapmirror = {
+ 'mirror-state': 'broken-off',
+ 'relationship-status': 'idle',
+ 'source-vserver': fake.VSERVER2,
+ 'source-volume': 'fake_volume',
+ 'last-transfer-end-timestamp': '%s' % float(time.time() - 10000)
+ }
+ vserver_client = mock.Mock()
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ vserver_client)))
+ self.mock_dm_session.get_snapmirrors = mock.Mock(
+ return_value=[fake_snapmirror])
+ vserver_client.resync_snapmirror.side_effect = netapp_api.NaApiError
+
+ result = self.library.update_replica_state(None, [fake.SHARE],
+ fake.SHARE, None,
+ share_server=None)
+
+ vserver_client.resync_snapmirror.assert_called_once_with(
+ fake.VSERVER2, 'fake_volume', fake.VSERVER1, fake.SHARE['name']
+ )
+
+ self.assertEqual(constants.STATUS_ERROR, result)
+
+ def test_update_replica_state_stale_snapmirror(self):
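+ # A healthy ('snapmirrored') relationship whose last transfer is too
+ # old is still reported as out-of-sync.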
+ fake_snapmirror = {
+ 'mirror-state': 'snapmirrored',
+ 'last-transfer-end-timestamp': '%s' % float(
+ timeutils.utcnow_ts() - 10000)
+ }
+ vserver_client = mock.Mock()
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ vserver_client)))
+ self.mock_dm_session.get_snapmirrors = mock.Mock(
+ return_value=[fake_snapmirror])
+
+ result = self.library.update_replica_state(None, [fake.SHARE],
+ fake.SHARE, None,
+ share_server=None)
+
+ self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, result)
+
+ def test_update_replica_state_in_sync(self):
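+ # An idle, snapmirrored relationship with a recent transfer reports
+ # the replica as in-sync.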
+ fake_snapmirror = {
+ 'mirror-state': 'snapmirrored',
+ 'relationship-status': 'idle',
+ 'last-transfer-end-timestamp': '%s' % float(time.time())
+ }
+ vserver_client = mock.Mock()
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ vserver_client)))
+ self.mock_dm_session.get_snapmirrors = mock.Mock(
+ return_value=[fake_snapmirror])
+
+ result = self.library.update_replica_state(None, [fake.SHARE],
+ fake.SHARE, None,
+ share_server=None)
+
+ self.assertEqual(constants.REPLICA_STATE_IN_SYNC, result)
+
+ def test_promote_replica(self):
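+ # Promotion re-points the SnapMirror source at the promoted replica;
+ # the old active replica becomes out-of-sync and the promoted replica
+ # becomes active with fresh export locations.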
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ mock.Mock())))
+ self.mock_object(self.library,
+ '_get_helper',
+ mock.Mock(return_value=mock.Mock()))
+ self.mock_object(self.library, '_create_export',
+ mock.Mock(return_value='fake_export_location'))
+
+ replicas = self.library.promote_replica(
+ None, [self.fake_replica, self.fake_replica_2],
+ self.fake_replica_2, [], share_server=None)
+
+ self.mock_dm_session.change_snapmirror_source.assert_called_once_with(
+ self.fake_replica, self.fake_replica, self.fake_replica_2,
+ mock.ANY
+ )
+
+ self.assertEqual(2, len(replicas))
+ actual_replica_1 = list(filter(
+ lambda x: x['id'] == self.fake_replica['id'], replicas))[0]
+ self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC,
+ actual_replica_1['replica_state'])
+ actual_replica_2 = list(filter(
+ lambda x: x['id'] == self.fake_replica_2['id'], replicas))[0]
+ self.assertEqual(constants.REPLICA_STATE_ACTIVE,
+ actual_replica_2['replica_state'])
+ self.assertEqual('fake_export_location',
+ actual_replica_2['export_locations'])
+ self.assertEqual(constants.STATUS_ACTIVE,
+ actual_replica_2['access_rules_status'])
+
+ def test_promote_replica_more_than_two_replicas(self):
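+ # With more than two replicas, every non-promoted replica is re-pointed
+ # at the newly promoted source.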
+ fake_replica_3 = copy.deepcopy(self.fake_replica_2)
+ fake_replica_3['id'] = fake.SHARE_ID3
+ fake_replica_3['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ mock.Mock())))
+ self.mock_object(self.library,
+ '_get_helper',
+ mock.Mock(return_value=mock.Mock()))
+
+ self.mock_object(self.library, '_create_export',
+ mock.Mock(return_value='fake_export_location'))
+
+ replicas = self.library.promote_replica(
+ None, [self.fake_replica, self.fake_replica_2, fake_replica_3],
+ self.fake_replica_2, [], share_server=None)
+
+ self.mock_dm_session.change_snapmirror_source.assert_has_calls([
+ mock.call(fake_replica_3, self.fake_replica, self.fake_replica_2,
+ mock.ANY),
+ mock.call(self.fake_replica, self.fake_replica,
+ self.fake_replica_2, mock.ANY)
+ ], any_order=True)
+
+ self.assertEqual(3, len(replicas))
+ actual_replica_1 = list(filter(
+ lambda x: x['id'] == self.fake_replica['id'], replicas))[0]
+ self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC,
+ actual_replica_1['replica_state'])
+ actual_replica_2 = list(filter(
+ lambda x: x['id'] == self.fake_replica_2['id'], replicas))[0]
+ self.assertEqual(constants.REPLICA_STATE_ACTIVE,
+ actual_replica_2['replica_state'])
+ self.assertEqual('fake_export_location',
+ actual_replica_2['export_locations'])
+ actual_replica_3 = list(filter(
+ lambda x: x['id'] == fake_replica_3['id'], replicas))[0]
+ self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC,
+ actual_replica_3['replica_state'])
+
+ def test_promote_replica_with_access_rules(self):
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ mock.Mock())))
+ mock_helper = mock.Mock()
+ self.mock_object(self.library,
+ '_get_helper',
+ mock.Mock(return_value=mock_helper))
+ self.mock_object(self.library, '_create_export',
+ mock.Mock(return_value='fake_export_location'))
+
+ replicas = self.library.promote_replica(
+ None, [self.fake_replica, self.fake_replica_2],
+ self.fake_replica_2, [fake.SHARE_ACCESS], share_server=None)
+
+ self.mock_dm_session.change_snapmirror_source.assert_has_calls([
+ mock.call(self.fake_replica, self.fake_replica,
+ self.fake_replica_2, mock.ANY)
+ ], any_order=True)
+ self.assertEqual(2, len(replicas))
+ share_name = self.library._get_backend_share_name(
+ self.fake_replica_2['id'])
+ mock_helper.update_access.assert_called_once_with(self.fake_replica_2,
+ share_name,
+ [fake.SHARE_ACCESS])
+
+ def test_convert_destination_replica_to_independent(self):
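+ # Converting the destination performs a final SnapMirror update, breaks
+ # the relationship, and returns the replica as active.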
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ mock.Mock())))
+ self.mock_object(self.library,
+ '_get_helper',
+ mock.Mock(return_value=mock.Mock()))
+ self.mock_object(self.library, '_create_export',
+ mock.Mock(return_value='fake_export_location'))
+
+ replica = self.library._convert_destination_replica_to_independent(
+ None, self.mock_dm_session, self.fake_replica,
+ self.fake_replica_2, [], share_server=None)
+
+ self.mock_dm_session.update_snapmirror.assert_called_once_with(
+ self.fake_replica, self.fake_replica_2)
+ self.mock_dm_session.break_snapmirror.assert_called_once_with(
+ self.fake_replica, self.fake_replica_2)
+
+ self.assertEqual('fake_export_location',
+ replica['export_locations'])
+ self.assertEqual(constants.REPLICA_STATE_ACTIVE,
+ replica['replica_state'])
+
+ def test_convert_destination_replica_to_independent_update_failed(self):
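+ # A failure in the final SnapMirror update is tolerated: the
+ # relationship is still broken off and the replica still becomes active.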
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ mock.Mock())))
+ self.mock_object(self.library,
+ '_get_helper',
+ mock.Mock(return_value=mock.Mock()))
+ self.mock_object(self.library, '_create_export',
+ mock.Mock(return_value='fake_export_location'))
+ self.mock_object(self.mock_dm_session, 'update_snapmirror',
+ mock.Mock(side_effect=netapp_api.NaApiError(code=0)))
+
+ replica = self.library._convert_destination_replica_to_independent(
+ None, self.mock_dm_session, self.fake_replica,
+ self.fake_replica_2, [], share_server=None)
+
+ self.mock_dm_session.update_snapmirror.assert_called_once_with(
+ self.fake_replica, self.fake_replica_2)
+ self.mock_dm_session.break_snapmirror.assert_called_once_with(
+ self.fake_replica, self.fake_replica_2)
+
+ self.assertEqual('fake_export_location',
+ replica['export_locations'])
+ self.assertEqual(constants.REPLICA_STATE_ACTIVE,
+ replica['replica_state'])
+
+ def test_promote_replica_fail_to_set_access_rules(self):
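+ # If applying access rules on the promoted replica fails, promotion
+ # still succeeds but access_rules_status is flagged out-of-sync.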
+ fake_helper = mock.Mock()
+ fake_helper.update_access.side_effect = Exception
+ fake_access_rules = [
+ {'access_to': "0.0.0.0",
+ 'access_level': constants.ACCESS_LEVEL_RO},
+ {'access_to': "10.10.10.10",
+ 'access_level': constants.ACCESS_LEVEL_RW},
+ ]
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ mock.Mock())))
+ self.mock_object(self.library,
+ '_get_helper',
+ mock.Mock(return_value=fake_helper))
+ self.mock_object(self.library, '_create_export',
+ mock.Mock(return_value='fake_export_location'))
+
+ replicas = self.library.promote_replica(
+ None, [self.fake_replica, self.fake_replica_2],
+ self.fake_replica_2, fake_access_rules, share_server=None)
+
+ self.mock_dm_session.change_snapmirror_source.assert_called_once_with(
+ self.fake_replica, self.fake_replica, self.fake_replica_2,
+ mock.ANY
+ )
+
+ self.assertEqual(2, len(replicas))
+ actual_replica_1 = list(filter(
+ lambda x: x['id'] == self.fake_replica['id'], replicas))[0]
+ self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC,
+ actual_replica_1['replica_state'])
+ actual_replica_2 = list(filter(
+ lambda x: x['id'] == self.fake_replica_2['id'], replicas))[0]
+ self.assertEqual(constants.REPLICA_STATE_ACTIVE,
+ actual_replica_2['replica_state'])
+ self.assertEqual('fake_export_location',
+ actual_replica_2['export_locations'])
+ self.assertEqual(constants.STATUS_OUT_OF_SYNC,
+ actual_replica_2['access_rules_status'])
+
+ def test_convert_destination_replica_to_independent_failed_access_rules(
+ self):
+ fake_helper = mock.Mock()
+ fake_helper.update_access.side_effect = Exception
+ fake_access_rules = [
+ {'access_to': "0.0.0.0",
+ 'access_level': constants.ACCESS_LEVEL_RO},
+ {'access_to': "10.10.10.10",
+ 'access_level': constants.ACCESS_LEVEL_RW},
+ ]
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ mock.Mock())))
+ self.mock_object(self.library,
+ '_get_helper',
+ mock.Mock(return_value=fake_helper))
+ self.mock_object(self.library, '_create_export',
+ mock.Mock(return_value='fake_export_location'))
+
+ replica = self.library._convert_destination_replica_to_independent(
+ None, self.mock_dm_session, self.fake_replica,
+ self.fake_replica_2, fake_access_rules, share_server=None)
+
+ self.mock_dm_session.update_snapmirror.assert_called_once_with(
+ self.fake_replica, self.fake_replica_2)
+ self.mock_dm_session.break_snapmirror.assert_called_once_with(
+ self.fake_replica, self.fake_replica_2)
+
+ self.assertEqual('fake_export_location',
+ replica['export_locations'])
+ self.assertEqual(constants.REPLICA_STATE_ACTIVE,
+ replica['replica_state'])
+ self.assertEqual(constants.STATUS_OUT_OF_SYNC,
+ replica['access_rules_status'])
+
+ def test_convert_destination_replica_to_independent_with_access_rules(
+ self):
+ fake_helper = mock.Mock()
+ fake_access_rules = [
+ {'access_to': "0.0.0.0",
+ 'access_level': constants.ACCESS_LEVEL_RO},
+ {'access_to': "10.10.10.10",
+ 'access_level': constants.ACCESS_LEVEL_RW},
+ ]
+ self.mock_object(self.library,
+ '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ mock.Mock())))
+ self.mock_object(self.library,
+ '_get_helper',
+ mock.Mock(return_value=fake_helper))
+ self.mock_object(self.library, '_create_export',
+ mock.Mock(return_value='fake_export_location'))
+
+ replica = self.library._convert_destination_replica_to_independent(
+ None, self.mock_dm_session, self.fake_replica,
+ self.fake_replica_2, fake_access_rules, share_server=None)
+
+ self.mock_dm_session.update_snapmirror.assert_called_once_with(
+ self.fake_replica, self.fake_replica_2)
+ self.mock_dm_session.break_snapmirror.assert_called_once_with(
+ self.fake_replica, self.fake_replica_2)
+
+ fake_helper.assert_has_calls([
+ mock.call.set_client(mock.ANY),
+ mock.call.update_access(mock.ANY, mock.ANY, fake_access_rules),
+ ])
+
+ self.assertEqual('fake_export_location',
+ replica['export_locations'])
+ self.assertEqual(constants.REPLICA_STATE_ACTIVE,
+ replica['replica_state'])
+ self.assertEqual(constants.STATUS_ACTIVE,
+ replica['access_rules_status'])
+
+ def test_safe_change_replica_source(self):
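+ # On success the retargeted replica comes back out-of-sync with its
+ # export locations cleared.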
+ fake_replica_3 = copy.deepcopy(self.fake_replica_2)
+ fake_replica_3['id'] = fake.SHARE_ID3
+ fake_replica_3['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC
+ replica = self.library._safe_change_replica_source(
+ self.mock_dm_session, self.fake_replica, self.fake_replica_2,
+ fake_replica_3, [self.fake_replica, self.fake_replica_2,
+ fake_replica_3]
+ )
+ self.assertEqual([], replica['export_locations'])
+ self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC,
+ replica['replica_state'])
+
+ def test_safe_change_replica_source_error(self):
+ self.mock_dm_session.change_snapmirror_source.side_effect = Exception
+
+ fake_replica_3 = copy.deepcopy(self.fake_replica_2)
+ fake_replica_3['id'] = fake.SHARE_ID3
+ fake_replica_3['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC
+ replica = self.library._safe_change_replica_source(
+ self.mock_dm_session, self.fake_replica, self.fake_replica_2,
+ fake_replica_3, [self.fake_replica, self.fake_replica_2,
+ fake_replica_3]
+ )
+ self.assertEqual([], replica['export_locations'])
+ self.assertEqual(constants.STATUS_ERROR,
+ replica['replica_state'])
diff --git a/manila/tests/share/drivers/netapp/dataontap/fakes.py b/manila/tests/share/drivers/netapp/dataontap/fakes.py
index 5db22cfdc1..fce1be0c64 100644
--- a/manila/tests/share/drivers/netapp/dataontap/fakes.py
+++ b/manila/tests/share/drivers/netapp/dataontap/fakes.py
@@ -15,6 +15,7 @@
import copy
+from manila.common import constants
import manila.tests.share.drivers.netapp.fakes as na_fakes
@@ -89,6 +90,7 @@ SHARE = {
'network_info': {
'network_allocations': [{'ip_address': 'ip'}]
},
+ 'replica_state': constants.REPLICA_STATE_ACTIVE,
}
FLEXVOL_TO_MANAGE = {
@@ -562,4 +564,5 @@ def get_config_cmode():
config.netapp_root_volume = ROOT_VOLUME
config.netapp_lif_name_template = LIF_NAME_TEMPLATE
config.netapp_volume_snapshot_reserve_percent = 8
+ config.netapp_vserver = VSERVER1
return config