diff --git a/manila/share/drivers/netapp/dataontap/client/client_cmode.py b/manila/share/drivers/netapp/dataontap/client/client_cmode.py index d7affdbf65..ba1b58d7c3 100644 --- a/manila/share/drivers/netapp/dataontap/client/client_cmode.py +++ b/manila/share/drivers/netapp/dataontap/client/client_cmode.py @@ -2180,6 +2180,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient): raise exception.NetAppException(msg) else: api_args['encrypt'] = 'true' + else: + api_args['encrypt'] = 'false' return api_args diff --git a/manila/share/drivers/netapp/dataontap/client/client_cmode_rest.py b/manila/share/drivers/netapp/dataontap/client/client_cmode_rest.py index b09f10e4fa..b7dd5df0d5 100644 --- a/manila/share/drivers/netapp/dataontap/client/client_cmode_rest.py +++ b/manila/share/drivers/netapp/dataontap/client/client_cmode_rest.py @@ -15,6 +15,7 @@ import copy from datetime import datetime from http import client as http_client +import math import re import time @@ -276,11 +277,12 @@ class NetAppRestClient(object): # NOTE(nahimsouza): SVM scoped account is not authorized to access # the /cluster/nodes endpoint, that's why we use /private/cli + response = self.send_request('/private/cli/version', 'get', query=query) # Response is formatted as: # 'NetApp Release 9.12.1: Wed Feb 01 01:10:18 UTC 2023' - version_full = response['records'][0]['version'] + version_full = response['records'][0]['version']['full'] version_parsed = re.findall(r'\d+\.\d+\.\d+', version_full)[0] version_splited = version_parsed.split('.') return { @@ -707,8 +709,16 @@ class NetAppRestClient(object): 'svm.name': vserver, } if max_throughput: - body['fixed.max_throughput_iops'] = ( - int(''.join(filter(str.isdigit, max_throughput)))) + value = max_throughput.lower() + if 'iops' in max_throughput: + value = value.replace('iops', '') + value = int(value) + body['fixed.max_throughput_iops'] = value + else: + value = value.replace('b/s', '') + value = int(value) + body['fixed.max_throughput_mbps'] = math.ceil(value / + units.Mi) return self.send_request('/storage/qos/policies', 'post', body=body) @@ -775,7 +785,16 @@ class NetAppRestClient(object): """Set NFS the export policy for the specified volume.""" query = {"name": volume_name} body = {'nas.export_policy.name': policy_name} - self.send_request('/storage/volumes/', 'patch', query=query, body=body) + + try: + self.send_request('/storage/volumes/', 'patch', query=query, + body=body) + except netapp_api.api.NaApiError as e: + # NOTE(nahimsouza): Since this error is ignored in ZAPI, we are + # replicating the behavior here. 
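            # The branch below turns EREST_CANNOT_MODITY_OFFLINE_VOLUME into a
            # no-op: the export policy of an offline volume cannot be changed,
            # so the client only logs at debug level and returns instead of
            # propagating the error, mirroring the ZAPI client's behavior.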
+ if e.code == netapp_api.EREST_CANNOT_MODITY_OFFLINE_VOLUME: + LOG.debug('Cannot modify offline volume: %s', volume_name) + return @na_utils.trace def create_nfs_export_policy(self, policy_name): @@ -862,15 +881,15 @@ class NetAppRestClient(object): volume = { 'aggregate': aggregate, 'aggr-list': aggregate_list, - 'junction-path': volume_infos.get('nas', {}).get('path', ''), - 'name': volume_infos.get('name', ''), - 'owning-vserver-name': volume_infos.get('svm', {}).get('name', ''), - 'type': volume_infos.get('type', ''), - 'style': volume_infos.get('style', ''), - 'size': volume_infos.get('space', {}).get('size', ''), + 'junction-path': volume_infos.get('nas', {}).get('path'), + 'name': volume_infos.get('name'), + 'owning-vserver-name': volume_infos.get('svm', {}).get('name'), + 'type': volume_infos.get('type'), + 'style': volume_infos.get('style'), + 'size': volume_infos.get('space', {}).get('size'), 'qos-policy-group-name': ( - volume_infos.get('qos', {}).get('policy', {}).get('name', '')), - 'style-extended': volume_infos.get('style', '') + volume_infos.get('qos', {}).get('policy', {}).get('name')), + 'style-extended': volume_infos.get('style') } return volume @@ -887,12 +906,11 @@ class NetAppRestClient(object): return self._has_records(result) @na_utils.trace - def create_cifs_share(self, share_name): + def create_cifs_share(self, share_name, path): """Create a CIFS share.""" - share_path = f'/{share_name}' body = { 'name': share_name, - 'path': share_path, + 'path': path, 'svm.name': self.vserver, } self.send_request('/protocols/cifs/shares', 'post', body=body) @@ -1025,8 +1043,15 @@ class NetAppRestClient(object): body['qos.policy.name'] = qos_policy_group if adaptive_qos_policy_group is not None: body['qos.policy.name'] = adaptive_qos_policy_group + if encrypt is True: - body['encryption.enabled'] = 'true' + if not self.features.FLEXVOL_ENCRYPTION: + msg = 'Flexvol encryption is not supported on this backend.' + raise exception.NetAppException(msg) + else: + body['encryption.enabled'] = 'true' + else: + body['encryption.enabled'] = 'false' return body @@ -1166,7 +1191,14 @@ class NetAppRestClient(object): @na_utils.trace def set_volume_snapdir_access(self, volume_name, hide_snapdir): """Set volume snapshot directory visibility.""" - volume = self._get_volume_by_args(vol_name=volume_name) + + try: + volume = self._get_volume_by_args(vol_name=volume_name) + except exception.NetAppException: + msg = _('Could not find volume %s to set snapdir access') + LOG.error(msg, volume_name) + raise exception.SnapshotResourceNotFound(name=volume_name) + uuid = volume['uuid'] body = { @@ -1194,6 +1226,7 @@ class NetAppRestClient(object): try: volume = self._get_volume_by_args(vol_name=share_name) svm_uuid = volume['svm']['uuid'] + except exception.NetAppException: LOG.debug('Could not find fpolicy. 
Share not found: %s.', share_name) @@ -1392,7 +1425,7 @@ class NetAppRestClient(object): except exception.NetAppException: msg = _("FPolicy event %s not found.") LOG.debug(msg, event_name) - + return try: self.send_request( f'/protocols/fpolicy/{svm_uuid}/events/{event_name}', 'delete') @@ -1415,7 +1448,7 @@ class NetAppRestClient(object): except exception.NetAppException: msg = _("FPolicy policy %s not found.") LOG.debug(msg, policy_name) - + return try: self.send_request( f'/protocols/fpolicy/{svm_uuid}/policies/{policy_name}', @@ -1593,9 +1626,7 @@ class NetAppRestClient(object): LOG.debug('Volume %s unmounted.', volume_name) return except netapp_api.api.NaApiError as e: - # TODO(felipe_rodrigues): test the clone split mount error - # code for REST. - if (e.code == netapp_api.api.EAPIERROR + if (e.code == netapp_api.EREST_UNMOUNT_FAILED_LOCK and 'job ID' in e.message): msg = ('Could not unmount volume %(volume)s due to ' 'ongoing volume operation: %(exception)s') @@ -1637,7 +1668,8 @@ class NetAppRestClient(object): query = { 'name': qos_policy_group_name, - 'fields': 'name,object_count,fixed.max_throughput_iops,svm.name', + 'fields': 'name,object_count,fixed.max_throughput_iops,' + 'fixed.max_throughput_mbps,svm.name', } try: res = self.send_request('/storage/qos/policies', 'get', @@ -1659,10 +1691,21 @@ class NetAppRestClient(object): policy_info = { 'policy-group': qos_policy_group_info.get('name'), 'vserver': qos_policy_group_info.get('svm', {}).get('name'), - 'max-throughput': qos_policy_group_info.get('fixed', {}).get( - 'max_throughput_iops'), 'num-workloads': int(qos_policy_group_info.get('object_count')), } + + iops = qos_policy_group_info.get('fixed', {}).get( + 'max_throughput_iops') + mbps = qos_policy_group_info.get('fixed', {}).get( + 'max_throughput_mbps') + + if iops: + policy_info['max-throughput'] = f'{iops}iops' + elif mbps: + policy_info['max-throughput'] = f'{mbps * 1024 * 1024}b/s' + else: + policy_info['max-throughput'] = None + return policy_info @na_utils.trace @@ -1734,13 +1777,6 @@ class NetAppRestClient(object): # Attempt to delete any QoS policies named "deleted_manila-*". 
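The QoS spec handling added in qos_policy_group_create earlier in this patch and in qos_policy_group_modify just below accepts either an IOPS spec or a bytes-per-second spec. A minimal standalone sketch of that parsing, assuming specs shaped like '5000iops' or '5000B/s' (the helper name and the literal MiB constant are illustrative; the driver itself uses oslo_utils' units.Mi):

    import math

    MI = 1048576  # bytes per MiB, the same value as oslo_utils' units.Mi

    def parse_max_throughput(spec):
        """Turn a QoS spec such as '5000iops' or '5000B/s' into the
        corresponding REST body field."""
        value = spec.lower()
        if 'iops' in value:
            return {'fixed.max_throughput_iops': int(value.replace('iops', ''))}
        # bytes-per-second specs are converted to whole MiB/s for
        # fixed.max_throughput_mbps, rounding up
        bps = int(value.replace('b/s', ''))
        return {'fixed.max_throughput_mbps': math.ceil(bps / MI)}

    # parse_max_throughput('5000iops') -> {'fixed.max_throughput_iops': 5000}
    # parse_max_throughput('5000B/s')  -> {'fixed.max_throughput_mbps': 1}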
self.remove_unused_qos_policy_groups() - @na_utils.trace - def _sanitize_qos_spec_value(self, value): - value = value.lower() - value = value.replace('iops', '').replace('b/s', '') - value = int(value) - return value - @na_utils.trace def qos_policy_group_modify(self, qos_policy_group_name, max_throughput): """Modifies a QoS policy group.""" @@ -1748,10 +1784,19 @@ class NetAppRestClient(object): query = { 'name': qos_policy_group_name, } - body = { - 'fixed.max_throughput_iops': - self._sanitize_qos_spec_value(max_throughput) - } + body = {} + value = max_throughput.lower() + if 'iops' in value: + value = value.replace('iops', '') + value = int(value) + body['fixed.max_throughput_iops'] = value + body['fixed.max_throughput_mbps'] = 0 + elif 'b/s' in value: + value = value.replace('b/s', '') + value = int(value) + body['fixed.max_throughput_mbps'] = math.ceil(value / + units.Mi) + body['fixed.max_throughput_iops'] = 0 res = self.send_request('/storage/qos/policies', 'get', query=query) if not res.get('records'): msg = ('QoS %s not found.') % qos_policy_group_name @@ -1839,8 +1884,13 @@ class NetAppRestClient(object): @na_utils.trace def get_snapshot(self, volume_name, snapshot_name): """Gets a single snapshot.""" + try: + volume = self._get_volume_by_args(vol_name=volume_name) + except exception.NetAppException: + msg = _('Could not find volume %s to get snapshot') + LOG.error(msg, volume_name) + raise exception.SnapshotResourceNotFound(name=snapshot_name) - volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] query = { 'name': snapshot_name, @@ -1902,7 +1952,12 @@ class NetAppRestClient(object): def delete_snapshot(self, volume_name, snapshot_name, ignore_owners=False): """Deletes a volume snapshot.""" - volume = self._get_volume_by_args(vol_name=volume_name) + try: + volume = self._get_volume_by_args(vol_name=volume_name) + except exception.NetAppException: + msg = _('Could not find volume %s to delete snapshot') + LOG.warning(msg, volume_name) + return uuid = volume['uuid'] query = { @@ -2118,15 +2173,15 @@ class NetAppRestClient(object): aggregate = res.get('aggregates') + if not aggregate: + msg = _('Could not find aggregate for volume %s.') + raise exception.NetAppException(msg % volume_name) + aggregate_size = len(res.get('aggregates')) if aggregate_size > 1: aggregate = [aggr.get('name') for aggr in res.get('aggregates')] - if not aggregate: - msg = _('Could not find aggregate for volume %s.') - raise exception.NetAppException(msg % volume_name) - return aggregate @na_utils.trace @@ -2193,7 +2248,7 @@ class NetAppRestClient(object): fields = ['state', 'source.svm.name', 'source.path', 'destination.svm.name', 'destination.path', 'transfer.end_time', 'uuid', 'policy.type', - 'transfer_schedule.name'] + 'transfer_schedule.name', 'transfer.state'] query = {} query['fields'] = ','.join(fields) @@ -2222,7 +2277,11 @@ class NetAppRestClient(object): snapmirrors = [] for record in response.get('records', []): snapmirrors.append({ - 'relationship-status': record.get('state'), + 'relationship-status': ( + 'idle' + if record.get('state') == 'snapmirrored' + else record.get('state')), + 'transferring-state': record.get('transfer', {}).get('state'), 'mirror-state': record.get('state'), 'schedule': record['transfer_schedule']['name'], 'source-vserver': record['source']['svm']['name'], @@ -2255,7 +2314,8 @@ class NetAppRestClient(object): def get_snapmirrors(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, 
dest_volume=None, - desired_attributes=None): + desired_attributes=None, enable_tunneling=None, + list_destinations_only=None): """Gets one or more SnapMirror relationships. Either the source or destination info may be omitted. @@ -2269,7 +2329,9 @@ class NetAppRestClient(object): source_vserver=source_vserver, source_volume=source_volume, dest_vserver=dest_vserver, - dest_volume=dest_volume) + dest_volume=dest_volume, + enable_tunneling=enable_tunneling, + list_destinations_only=list_destinations_only) return snapmirrors @@ -2752,7 +2814,7 @@ class NetAppRestClient(object): clear_checkpoint=False): """Stops ongoing transfers for a SnapMirror relationship.""" - snapmirror = self._get_snapmirrors( + snapmirror = self.get_snapmirrors( source_path=source_path, dest_path=dest_path, source_vserver=source_vserver, @@ -2817,10 +2879,11 @@ class NetAppRestClient(object): def get_snapmirror_destinations(self, source_path=None, dest_path=None, source_vserver=None, source_volume=None, dest_vserver=None, dest_volume=None, - desired_attributes=None): + desired_attributes=None, + enable_tunneling=None): """Gets one or more SnapMirror at source endpoint.""" - snapmirrors = self._get_snapmirrors( + snapmirrors = self.get_snapmirrors( source_path=source_path, dest_path=dest_path, source_vserver=source_vserver, @@ -2964,9 +3027,12 @@ class NetAppRestClient(object): wait_result=True, schedule=None): """Change the snapmirror state between two volumes.""" - snapmirror = self.get_snapmirrors(source_path, destination_path, - source_vserver, destination_vserver, - source_volume, destination_volume) + snapmirror = self.get_snapmirrors(source_path=source_path, + dest_path=destination_path, + source_vserver=source_vserver, + source_volume=source_volume, + dest_vserver=destination_vserver, + dest_volume=destination_volume) if not snapmirror: msg = _('Failed to get information about relationship between ' @@ -3069,11 +3135,19 @@ class NetAppRestClient(object): 'clone.is_flexclone': 'true', 'svm.name': self.connection.get_vserver(), } - if qos_policy_group is not None: - body['qos.policy.name'] = qos_policy_group self.send_request('/storage/volumes', 'post', body=body) + # NOTE(nahimsouza): QoS policy can not be set during the cloning + # process, so we need to make a separate request. 
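    # In practice this means the clone POST to /storage/volumes carries no
    # qos.policy.name; once the clone exists, its UUID is looked up and the
    # policy is applied with a follow-up PATCH to /storage/volumes/{uuid},
    # as shown below.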
+ if qos_policy_group is not None: + volume = self._get_volume_by_args(vol_name=volume_name) + uuid = volume['uuid'] + body = { + 'qos.policy.name': qos_policy_group, + } + self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) + if split: self.split_volume_clone(volume_name) @@ -3096,7 +3170,7 @@ class NetAppRestClient(object): source_volume=None, dest_volume=None): """Disables future transfers to a SnapMirror destination.""" - snapmirror = self._get_snapmirrors( + snapmirror = self.get_snapmirrors( source_path=source_path, dest_path=dest_path, source_vserver=source_vserver, @@ -3126,25 +3200,38 @@ class NetAppRestClient(object): source_volume=None, dest_volume=None): """Breaks a data protection SnapMirror relationship.""" - snapmirror = self._get_snapmirrors( - source_path=source_path, - dest_path=dest_path, - source_vserver=source_vserver, - source_volume=source_volume, - dest_vserver=dest_vserver, - dest_volume=dest_volume) + interval = 2 + retries = (10 / interval) - if snapmirror: - uuid = snapmirror[0]['uuid'] - body = {'state': 'broken_off'} - try: + @utils.retry(netapp_api.NaRetryableError, interval=interval, + retries=retries, backoff_rate=1) + def _waiter(): + snapmirror = self.get_snapmirrors( + source_path=source_path, + dest_path=dest_path, + source_vserver=source_vserver, + source_volume=source_volume, + dest_vserver=dest_vserver, + dest_volume=dest_volume) + + snapmirror_state = snapmirror[0].get('transferring-state') + if snapmirror_state == 'success': + uuid = snapmirror[0]['uuid'] + body = {'state': 'broken_off'} self.send_request(f'/snapmirror/relationships/{uuid}', 'patch', body=body) - except netapp_api.api.NaApiError as e: - transfer_in_progress = 'Another transfer is in progress' - if not (e.code == netapp_api.EREST_BREAK_SNAPMIRROR_FAILED - and transfer_in_progress in e.message): - raise + return + else: + message = 'Waiting for transfer state to be SUCCESS.' + code = '' + raise netapp_api.NaRetryableError(message=message, code=code) + + try: + return _waiter() + except netapp_api.NaRetryableError: + msg = _("Transfer state did not reach the expected state. Retries " + "exhausted. 
Aborting.") + raise na_utils.NetAppDriverException(msg) @na_utils.trace def resume_snapmirror_vol(self, source_vserver, source_volume, @@ -3169,9 +3256,12 @@ class NetAppRestClient(object): source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None): """Resume a SnapMirror relationship if it is quiesced.""" - response = self._get_snapmirrors(source_path, dest_path, - source_vserver, source_volume, - dest_vserver, dest_volume) + response = self.get_snapmirrors(source_path=source_path, + dest_path=dest_path, + source_vserver=source_vserver, + dest_vserver=dest_vserver, + source_volume=source_volume, + dest_volume=dest_volume) if not response: # NOTE(nahimsouza): As ZAPI returns this error code, it was kept @@ -3247,9 +3337,12 @@ class NetAppRestClient(object): source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None): """Update a snapmirror relationship asynchronously.""" - snapmirrors = self._get_snapmirrors(source_path, dest_path, - source_vserver, source_volume, - dest_vserver, dest_volume) + snapmirrors = self.get_snapmirrors(source_path=source_path, + dest_path=dest_path, + source_vserver=source_vserver, + dest_vserver=dest_vserver, + source_volume=source_volume, + dest_volume=dest_volume) if not snapmirrors: msg = _('Failed to get snapmirror relationship information') @@ -3578,13 +3671,13 @@ class NetAppRestClient(object): # response is empty. Also, REST API does not have an equivalent to # 'udp-max-xfer-size', so the default is always returned. nfs_info = { - 'tcp-max-xfer-size': DEFAULT_TCP_MAX_XFER_SIZE, - 'udp-max-xfer-size': DEFAULT_UDP_MAX_XFER_SIZE, + 'tcp-max-xfer-size': str(DEFAULT_TCP_MAX_XFER_SIZE), + 'udp-max-xfer-size': str(DEFAULT_UDP_MAX_XFER_SIZE), } records = response.get('records', []) if records: nfs_info['tcp-max-xfer-size'] = ( - records[0]['transport']['tcp_max_transfer_size']) + str(records[0]['transport']['tcp_max_transfer_size'])) return nfs_info @@ -4495,26 +4588,27 @@ class NetAppRestClient(object): @na_utils.trace def _configure_nfs(self, nfs_config, svm_id): """Sets the nfs configuraton""" + + if ('udp-max-xfer-size' in nfs_config and + (nfs_config['udp-max-xfer-size'] + != str(DEFAULT_UDP_MAX_XFER_SIZE))): + + msg = _('Failed to configure NFS. REST API does not support ' + 'setting udp-max-xfer-size default value %(default)s ' + 'is not equal to actual value %(actual)s') + msg_args = { + 'default': DEFAULT_UDP_MAX_XFER_SIZE, + 'actual': nfs_config['udp-max-xfer-size'], + } + raise exception.NetAppException(msg % msg_args) + + nfs_config_value = int(nfs_config['tcp-max-xfer-size']) body = { - 'transport.tcp_max_transfer_size': nfs_config['tcp-max-xfer-size'] + 'transport.tcp_max_transfer_size': nfs_config_value } self.send_request(f'/protocols/nfs/services/{svm_id}', 'patch', body=body) - @na_utils.trace - def _get_svm_uuid(self): - # Get SVM UUID. 
- query = { - 'name': self.vserver, - 'fields': 'uuid' - } - res = self.send_request('/svm/svms', 'get', query=query) - if not res.get('records'): - msg = _('Vserver %s not found.') % self.vserver - raise exception.NetAppException(msg) - svm_id = res.get('records')[0]['uuid'] - return svm_id - @na_utils.trace def create_network_interface(self, ip, netmask, node, port, vserver_name, lif_name): @@ -4665,8 +4759,8 @@ class NetAppRestClient(object): query['svm.name'] = vserver nfs_info = { - 'tcp-max-xfer-size': DEFAULT_TCP_MAX_XFER_SIZE, - 'udp-max-xfer-size': DEFAULT_UDP_MAX_XFER_SIZE, + 'tcp-max-xfer-size': str(DEFAULT_TCP_MAX_XFER_SIZE), + 'udp-max-xfer-size': str(DEFAULT_UDP_MAX_XFER_SIZE) } response = self.send_request('/protocols/nfs/services/', @@ -4675,7 +4769,7 @@ class NetAppRestClient(object): if records: nfs_info['tcp-max-xfer-size'] = ( - records[0]['transport']['tcp_max_transfer_size']) + str(records[0]['transport']['tcp_max_transfer_size'])) return nfs_info @@ -4878,8 +4972,8 @@ class NetAppRestClient(object): if e.code == netapp_api.EREST_ENTRY_NOT_FOUND: LOG.debug('VLAN %(vlan)s on port %(port)s node %(node)s ' 'was not found') - if (e.code == netapp_api.EREST_INTERFACE_BOUND or - e.code == netapp_api.EREST_PORT_IN_USE): + elif (e.code == netapp_api.EREST_INTERFACE_BOUND or + e.code == netapp_api.EREST_PORT_IN_USE): LOG.debug('VLAN %(vlan)s on port %(port)s node %(node)s ' 'still used by LIF and cannot be deleted.', {'vlan': vlan, 'port': port, 'node': node}) diff --git a/manila/share/drivers/netapp/dataontap/client/rest_api.py b/manila/share/drivers/netapp/dataontap/client/rest_api.py index d24110ebd5..a4b64bc1b2 100644 --- a/manila/share/drivers/netapp/dataontap/client/rest_api.py +++ b/manila/share/drivers/netapp/dataontap/client/rest_api.py @@ -53,6 +53,8 @@ EREST_INTERFACE_BOUND = '1376858' EREST_PORT_IN_USE = '1966189' EREST_NFS_V4_0_ENABLED_MIGRATION_FAILURE = '13172940' EREST_VSERVER_MIGRATION_TO_NON_AFF_CLUSTER = '13172984' +EREST_UNMOUNT_FAILED_LOCK = '917536' +EREST_CANNOT_MODITY_OFFLINE_VOLUME = '917533' class NaRetryableError(api.NaApiError): diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py index 7e67303ac1..4e62604b06 100644 --- a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py +++ b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py @@ -2634,9 +2634,11 @@ class NetAppCmodeFileStorageLibrary(object): # NOTE(dviroel): Don't try to resume or resync a SnapMirror that has # one of the in progress transfer states, because the storage will # answer with an error. 
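For context, the REST client now reports 'relationship-status' as 'idle' when ONTAP returns a state of 'snapmirrored', and exposes the raw transfer state under 'transferring-state' (see the snapmirror record mapping earlier in this patch). A minimal sketch of that mapping, with an illustrative function name:

    def map_snapmirror_record(record):
        # 'record' is one item from GET /snapmirror/relationships
        state = record.get('state')
        return {
            'relationship-status': 'idle' if state == 'snapmirrored' else state,
            'mirror-state': state,
            'transferring-state': record.get('transfer', {}).get('state'),
        }

That is why the check below also inspects 'transferring-state' and adds 'synchronizing' to the in-progress states.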
- in_progress_status = ['preparing', 'transferring', 'finalizing'] + in_progress_status = ['preparing', 'transferring', 'finalizing', + 'synchronizing'] if (snapmirror.get('mirror-state') != 'snapmirrored' and - snapmirror.get('relationship-status') in in_progress_status): + (snapmirror.get('relationship-status') in in_progress_status or + snapmirror.get('transferring-state') in in_progress_status)): return constants.REPLICA_STATE_OUT_OF_SYNC if snapmirror.get('mirror-state') != 'snapmirrored': diff --git a/manila/tests/share/drivers/netapp/dataontap/client/fakes.py b/manila/tests/share/drivers/netapp/dataontap/client/fakes.py index c22bfe4ab8..9f54eefa19 100644 --- a/manila/tests/share/drivers/netapp/dataontap/client/fakes.py +++ b/manila/tests/share/drivers/netapp/dataontap/client/fakes.py @@ -85,7 +85,9 @@ DELETED_EXPORT_POLICIES = { } QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name' QOS_MAX_THROUGHPUT = '5000B/s' +QOS_MAX_THROUGHPUT_IOPS = '5000iops' QOS_MAX_THROUGHPUT_NO_UNIT = 5000 +QOS_MAX_THROUGHPUT_IOPS_NO_UNIT = 5000 ADAPTIVE_QOS_POLICY_GROUP_NAME = 'fake_adaptive_qos_policy_group_name' VSERVER_TYPE_DEFAULT = 'default' VSERVER_TYPE_DP_DEST = 'dp_destination' @@ -1028,15 +1030,36 @@ NFS_LIFS = [ ] NFS_LIFS_REST = [ - {'uuid': FAKE_UUID, - 'address': IP_ADDRESS, - 'home-node': NODE_NAME, - 'home-port': VLAN_PORT, - 'interface-name': LIF_NAME, - 'netmask': NETMASK, - 'role': 'data', - 'vserver': VSERVER_NAME, - }, + { + 'uuid': 'fake_uuid_1', + 'address': IP_ADDRESS, + 'home-node': NODE_NAME, + 'home-port': VLAN_PORT, + 'interface-name': LIF_NAME, + 'netmask': NETMASK, + 'role': 'data', + 'vserver': VSERVER_NAME, + }, + { + 'uuid': 'fake_uuid_2', + 'address': IP_ADDRESS, + 'home-node': NODE_NAME, + 'home-port': VLAN_PORT, + 'interface-name': LIF_NAME, + 'netmask': NETMASK, + 'role': 'data', + 'vserver': VSERVER_NAME, + }, + { + 'uuid': 'fake_uuid_3', + 'address': IP_ADDRESS, + 'home-node': NODE_NAME, + 'home-port': VLAN_PORT, + 'interface-name': LIF_NAME, + 'netmask': NETMASK, + 'role': 'data', + 'vserver': VSERVER_NAME, + }, ] NET_INTERFACE_GET_ONE_RESPONSE = etree.XML(""" @@ -3690,7 +3713,8 @@ GENERIC_NETWORK_INTERFACES_GET_REPONSE = { } } } - ] + ], + "num_records": 1, } GENERIC_EXPORT_POLICY_RESPONSE_AND_VOLUMES = { @@ -3940,6 +3964,9 @@ SNAPMIRROR_GET_ITER_RESPONSE_REST = { "transfer_schedule": { "name": "hourly", }, + "transfer": { + "state": "success" + } } ], "num_records": 1, @@ -3950,12 +3977,13 @@ REST_GET_SNAPMIRRORS_RESPONSE = [{ 'destination-vserver': SM_DEST_VSERVER, 'last-transfer-end-timestamp': 0, 'mirror-state': 'snapmirrored', - 'relationship-status': 'snapmirrored', + 'relationship-status': 'idle', 'source-volume': SM_SOURCE_VOLUME, 'source-vserver': SM_SOURCE_VSERVER, 'uuid': FAKE_UUID, 'policy-type': 'async', - 'schedule': 'hourly' + 'schedule': 'hourly', + 'transferring-state': 'success' }] FAKE_CIFS_RECORDS = { @@ -4622,7 +4650,8 @@ FAKE_AGGREGATES_RESPONSE = { { "name": SHARE_AGGREGATE_NAME } - ] + ], + "name": VSERVER_NAME, } ] } @@ -4673,3 +4702,65 @@ REST_MGMT_INTERFACES = { ], "num_records": 2, } + +FAKE_CIFS_LOCAL_USER = { + 'records': [ + { + 'sid': 'S-1-5-21-256008430-3394229847-3930036330-1001' + } + ] +} + +FAKE_SERVER_SWITCH_NAME = 'fake_ss_name' +FAKE_SUBTYPE = 'fake_subtype' +FAKE_DNS_CONFIG = { + 'dns-state': 'true', + 'domains': ['fake_domain'], + 'dns-ips': ['fake_ip'] +} + +FAKE_VOLUME_MANAGE = { + 'records': [ + { + 'name': VOLUME_NAMES[0], + 'aggregates': [ + { + 'name': SHARE_AGGREGATE_NAME + } + ], + 'nas': { + 'path': 
VOLUME_JUNCTION_PATH + }, + 'style': 'flex', + 'type': 'fake_type', + 'svm': { + 'name': VSERVER_NAME + }, + 'qos': { + 'policy': { + 'name': QOS_POLICY_GROUP_NAME + } + }, + 'space': { + 'size': SHARE_SIZE + } + } + ], + 'num_records': 1 +} + +FAKE_PORTS = [ + {'speed': ''}, + {'speed': '4'}, + {'speed': 'auto'}, + {'speed': 'undef'}, + {'speed': 'fake_speed'} + ] + +FAKE_ROOT_AGGREGATES_RESPONSE = { + "records": [ + { + "aggregate": SHARE_AGGREGATE_NAME + } + ] +} diff --git a/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py b/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py index 1ebf0235df..03eb571885 100644 --- a/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py +++ b/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py @@ -3179,6 +3179,7 @@ class NetAppClientCmodeTestCase(test.TestCase): 'volume-type': 'rw', 'junction-path': '/%s' % fake.SHARE_NAME, 'space-reserve': ('none' if thin_provisioned else 'volume'), + 'encrypt': 'false' } self.client.send_request.assert_called_once_with('volume-create', @@ -3291,6 +3292,7 @@ class NetAppClientCmodeTestCase(test.TestCase): expected_api_args = { 'volume-type': volume_type, 'space-reserve': 'volume', + 'encrypt': 'false' } self.assertEqual(expected_api_args, result_api_args) diff --git a/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode_rest.py b/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode_rest.py index 2d4ccf8953..c25c3581f3 100644 --- a/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode_rest.py +++ b/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode_rest.py @@ -13,6 +13,7 @@ # under the License. import copy +import math import time from unittest import mock @@ -765,7 +766,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase): result = self.client.get_vserver_aggregate_capacities([]) self.assertEqual({}, result) - @ddt.data(None, fake.QOS_MAX_THROUGHPUT) + @ddt.data(None, fake.QOS_MAX_THROUGHPUT, fake.QOS_MAX_THROUGHPUT_IOPS) def test_qos_policy_group_create(self, max_throughput): return_value = fake.GENERIC_JOB_POST_RESPONSE body = { @@ -773,7 +774,12 @@ class NetAppRestCmodeClientTestCase(test.TestCase): 'svm.name': fake.VSERVER_NAME, } if max_throughput: - body['fixed.max_throughput_iops'] = fake.QOS_MAX_THROUGHPUT_NO_UNIT + if 'iops' in max_throughput: + qos = fake.QOS_MAX_THROUGHPUT_IOPS_NO_UNIT + body['fixed.max_throughput_iops'] = qos + else: + qos = math.ceil(fake.QOS_MAX_THROUGHPUT_NO_UNIT / units.Mi) + body['fixed.max_throughput_mbps'] = qos self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_value)) @@ -781,7 +787,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase): if max_throughput: result = self.client.qos_policy_group_create( fake.QOS_POLICY_GROUP_NAME, fake.VSERVER_NAME, - fake.QOS_MAX_THROUGHPUT) + max_throughput) else: result = self.client.qos_policy_group_create( fake.QOS_POLICY_GROUP_NAME, fake.VSERVER_NAME) @@ -941,7 +947,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase): 'size': fake_volume.get('space', {}).get('size', ''), 'qos-policy-group-name': fake_volume.get('qos', {}) .get('policy', {}) - .get('name', ''), + .get('name'), 'style-extended': fake_volume.get('style', '') } @@ -983,7 +989,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase): self.mock_object(self.client, 'send_request') - self.client.create_cifs_share(fake.SHARE_NAME) + self.client.create_cifs_share(fake.SHARE_NAME, f'/{fake.SHARE_NAME}') 
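        # create_cifs_share now takes the share path explicitly instead of
        # deriving '/<share_name>' internally, so the test passes the path it
        # expects to see in the request body below.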
self.client.send_request.assert_called_once_with( '/protocols/cifs/shares', 'post', body=body) @@ -1028,16 +1034,19 @@ class NetAppRestCmodeClientTestCase(test.TestCase): 'create_volume_async') mock_update = self.mock_object( self.client, 'update_volume_efficiency_attributes') + mock_max_files = self.mock_object(self.client, 'set_volume_max_files') self.client.create_volume(fake.SHARE_AGGREGATE_NAME, - fake.VOLUME_NAMES[0], fake.SHARE_SIZE) + fake.VOLUME_NAMES[0], fake.SHARE_SIZE, + max_files=1) mock_create_volume_async.assert_called_once_with( [fake.SHARE_AGGREGATE_NAME], fake.VOLUME_NAMES[0], fake.SHARE_SIZE, is_flexgroup=False, thin_provisioned=False, snapshot_policy=None, - language=None, max_files=None, snapshot_reserve=None, + language=None, max_files=1, snapshot_reserve=None, volume_type='rw', qos_policy_group=None, encrypt=False, adaptive_qos_policy_group=None) mock_update.assert_called_once_with(fake.VOLUME_NAMES[0], False, False) + mock_max_files.assert_called_once_with(fake.VOLUME_NAMES[0], 1) def test_create_volume_async(self): body = { @@ -1575,18 +1584,20 @@ class NetAppRestCmodeClientTestCase(test.TestCase): qos_policy_group_name = 'extreme' qos_policy_group = fake.QOS_POLICY_GROUP_REST qos_policy = qos_policy_group.get('records')[0] + max_throughput = qos_policy.get('fixed', + {}).get('max_throughput_iops') expected = { 'policy-group': qos_policy.get('name'), 'vserver': qos_policy.get('svm', {}).get('name'), - 'max-throughput': qos_policy.get('fixed', {}).get( - 'max_throughput_iops'), + 'max-throughput': max_throughput if max_throughput else None, 'num-workloads': int(qos_policy.get('object_count')), } query = { 'name': qos_policy_group_name, - 'fields': 'name,object_count,fixed.max_throughput_iops,svm.name', + 'fields': 'name,object_count,fixed.max_throughput_iops,' + + 'fixed.max_throughput_mbps,svm.name' } mock_sr = self.mock_object(self.client, 'send_request', @@ -1644,8 +1655,8 @@ class NetAppRestCmodeClientTestCase(test.TestCase): self.assertEqual(0, client_cmode_rest.LOG.warning.call_count) def test_unmount_volume_with_retries(self): - - side_effect = [netapp_api.api.NaApiError(code=netapp_api.api.EAPIERROR, + return_code = netapp_api.EREST_UNMOUNT_FAILED_LOCK + side_effect = [netapp_api.api.NaApiError(code=return_code, message='...job ID...')] * 5 side_effect.append(None) self.mock_object(self.client, @@ -1660,8 +1671,8 @@ class NetAppRestCmodeClientTestCase(test.TestCase): self.assertEqual(5, client_cmode_rest.LOG.warning.call_count) def test_unmount_volume_with_max_retries(self): - - side_effect = [netapp_api.api.NaApiError(code=netapp_api.api.EAPIERROR, + return_code = netapp_api.EREST_UNMOUNT_FAILED_LOCK + side_effect = [netapp_api.api.NaApiError(code=return_code, message='...job ID...')] * 30 self.mock_object(self.client, '_unmount_volume', @@ -1752,7 +1763,8 @@ class NetAppRestCmodeClientTestCase(test.TestCase): 'name': 'qos_fake_name', } body = { - 'fixed.max_throughput_iops': 1000 + 'fixed.max_throughput_iops': 1000, + 'fixed.max_throughput_mbps': 0 } mock_sr.assert_has_calls([ mock.call('/storage/qos/policies', 'get', query=query), @@ -2175,7 +2187,8 @@ class NetAppRestCmodeClientTestCase(test.TestCase): result = self.client.get_snapmirrors( fake.SM_SOURCE_PATH, fake.SM_DEST_PATH, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, - fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) + fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, + enable_tunneling=True) expected = fake.REST_GET_SNAPMIRRORS_RESPONSE @@ -2186,7 +2199,7 @@ class 
NetAppRestCmodeClientTestCase(test.TestCase): ':' + fake.SM_DEST_VOLUME), 'fields': 'state,source.svm.name,source.path,destination.svm.name,' 'destination.path,transfer.end_time,uuid,policy.type,' - 'transfer_schedule.name' + 'transfer_schedule.name,transfer.state' } mock_send_request.assert_called_once_with('/snapmirror/relationships', @@ -2218,7 +2231,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase): ':' + fake.SM_DEST_VOLUME), 'fields': 'state,source.svm.name,source.path,destination.svm.name,' 'destination.path,transfer.end_time,uuid,policy.type,' - 'transfer_schedule.name' + 'transfer_schedule.name,transfer.state' } mock_send_request.assert_called_once_with('/snapmirror/relationships', @@ -2246,7 +2259,7 @@ class NetAppRestCmodeClientTestCase(test.TestCase): ':' + fake.SM_DEST_VOLUME), 'fields': 'state,source.svm.name,source.path,destination.svm.name,' 'destination.path,transfer.end_time,uuid,policy.type,' - 'transfer_schedule.name' + 'transfer_schedule.name,transfer.state' } mock_send_request.assert_called_once_with('/snapmirror/relationships', @@ -2991,7 +3004,9 @@ class NetAppRestCmodeClientTestCase(test.TestCase): source_vserver=None, source_volume=None, dest_vserver=None, - dest_volume=None) + dest_volume=None, + enable_tunneling=None, + list_destinations_only=None) mock_sr.assert_has_calls([ mock.call(f'/snapmirror/relationships/{return_snp[0]["uuid"]}' @@ -3155,6 +3170,13 @@ class NetAppRestCmodeClientTestCase(test.TestCase): def test_create_volume_clone(self, qos_policy_group_name, adaptive_qos_policy_group_name): self.mock_object(self.client, 'send_request') + + if qos_policy_group_name: + volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST + uuid = volume["uuid"] + self.mock_object(self.client, + '_get_volume_by_args', + mock.Mock(return_value=volume)) self.mock_object(self.client, 'split_volume_clone') self.mock_object( self.client.connection, 'get_vserver', @@ -3178,14 +3200,24 @@ class NetAppRestCmodeClientTestCase(test.TestCase): 'clone.is_flexclone': 'true', 'svm.name': 'fake_svm', } - if qos_policy_group_name: - body['qos.policy.name'] = fake.QOS_POLICY_GROUP_NAME + if adaptive_qos_policy_group_name is not None: set_qos_adapt_mock.assert_called_once_with( fake.SHARE_NAME, fake.ADAPTIVE_QOS_POLICY_GROUP_NAME ) - self.client.send_request.assert_called_once_with( - '/storage/volumes', 'post', body=body) + + if qos_policy_group_name: + self.client._get_volume_by_args.assert_called_once_with( + vol_name=fake.SHARE_NAME) + self.client.send_request.assert_has_calls([ + mock.call('/storage/volumes', 'post', body=body), + mock.call(f'/storage/volumes/{uuid}', 'patch', + body={'qos.policy.name': qos_policy_group_name}) + ]) + else: + self.client.send_request.assert_called_once_with( + '/storage/volumes', 'post', body=body) + self.assertFalse(self.client.split_volume_clone.called) @ddt.data(True, False) @@ -3331,9 +3363,15 @@ class NetAppRestCmodeClientTestCase(test.TestCase): body=body_resync, wait_on_accepted=False) - mock_snapmirror.assert_called_once_with(fake.SM_SOURCE_PATH, - fake.SM_DEST_PATH, - None, None, None, None) + mock_snapmirror.assert_called_once_with( + source_path=fake.SM_SOURCE_PATH, + dest_path=fake.SM_DEST_PATH, + source_vserver=None, + source_volume=None, + dest_vserver=None, + dest_volume=None, + enable_tunneling=None, + list_destinations_only=None) def test__resync_snapmirror(self): mock = self.mock_object(self.client, '_resume_snapmirror') @@ -3394,12 +3432,15 @@ class NetAppRestCmodeClientTestCase(test.TestCase): fake.SM_SOURCE_VOLUME, 
fake.SM_DEST_VOLUME) mock_sr.assert_called_once() - mock_snapmirror.assert_called_once_with(fake.SM_SOURCE_PATH, - fake.SM_DEST_PATH, - fake.SM_SOURCE_VSERVER, - fake.SM_SOURCE_VOLUME, - fake.SM_DEST_VSERVER, - fake.SM_DEST_VOLUME) + mock_snapmirror.assert_called_once_with( + source_path=fake.SM_SOURCE_PATH, + dest_path=fake.SM_DEST_PATH, + source_vserver=fake.SM_SOURCE_VSERVER, + source_volume=fake.SM_SOURCE_VOLUME, + dest_vserver=fake.SM_DEST_VSERVER, + dest_volume=fake.SM_DEST_VOLUME, + enable_tunneling=None, + list_destinations_only=None) def test_get_cluster_name(self): """Get all available cluster nodes.""" @@ -3771,8 +3812,8 @@ class NetAppRestCmodeClientTestCase(test.TestCase): result = self.client.get_nfs_config_default(['tcp-max-xfer-size', 'udp-max-xfer-size']) expected = { - 'tcp-max-xfer-size': 65536, - 'udp-max-xfer-size': 32768, + 'tcp-max-xfer-size': '65536', + 'udp-max-xfer-size': '32768', } self.assertEqual(expected, result) @@ -3839,7 +3880,6 @@ class NetAppRestCmodeClientTestCase(test.TestCase): fake_api_response = fake.NFS_LIFS_REST fake_security = fake.KERBEROS_SECURITY_SERVICE - fake_uuid = fake.FAKE_UUID fake_keberos_name = fake.KERBEROS_SERVICE_PRINCIPAL_NAME fake_body = { @@ -3866,9 +3906,14 @@ class NetAppRestCmodeClientTestCase(test.TestCase): self_get_kerberos.assert_called_once_with( fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME) self.client.get_network_interfaces.assert_called_once_with() - self.client.send_request.assert_called_once_with( - f'/protocols/nfs/kerberos/interfaces/{fake_uuid}', 'patch', - body=fake_body) + self.client.send_request.assert_has_calls([ + mock.call('/protocols/nfs/kerberos/interfaces/fake_uuid_1', + 'patch', body=fake_body), + mock.call('/protocols/nfs/kerberos/interfaces/fake_uuid_2', + 'patch', body=fake_body), + mock.call('/protocols/nfs/kerberos/interfaces/fake_uuid_3', + 'patch', body=fake_body) + ]) @ddt.data(fake.CIFS_SECURITY_SERVICE, fake.CIFS_SECURITY_SERVICE_3) def test_configure_active_directory(self, security_service): @@ -4239,21 +4284,6 @@ class NetAppRestCmodeClientTestCase(test.TestCase): self.client.send_request.assert_called_once_with( f'/name-services/dns/{fake_uuid}', 'patch', body=body) - def test_modify_active_directory_security_service(self): - self.mock_object( - self.client, '_get_unique_svm_by_name', - mock.Mock(return_value=fake.FAKE_UUID)) - fake_response = copy.deepcopy(fake.LOCAL_USERS_CIFS_RESPONSE) - svm_uuid = copy.deepcopy(fake.FAKE_UUID) - sid = copy.deepcopy(fake.LOCAL_USERS_CIFS_RESPONSE.get('sid')) - self.mock_object(self.client, 'send_request', - mock.Mock(return_value=fake_response)) - self.client.send_request.has_calls([ - mock.call(f'/protocols/cifs/local-users/{svm_uuid}/', 'get'), - mock.call(f'/protocols/cifs/local-users/{svm_uuid}/{sid}', - 'patch') - ]) - def test_remove_preferred_dcs(self): svm_uuid = copy.deepcopy(fake.FAKE_UUID) fqdn = copy.deepcopy(fake.PREFERRED_DC_REST.get('fqdn')) @@ -4724,27 +4754,54 @@ class NetAppRestCmodeClientTestCase(test.TestCase): fake.IPSPACE_NAME) def test__add_port_to_broadcast_domain(self): - - fake_qualified_port_name = fake.NODE_NAME + ':' + fake.PORT - - fake_query = { - 'ipspace': fake.IPSPACE_NAME + query = { + 'name': fake.PORT, + 'node.name': fake.NODE_NAME, } - fake_body = { - 'ipspace': fake.IPSPACE_NAME, - 'broadcast-domain': fake.BROADCAST_DOMAIN, - 'ports': fake_qualified_port_name, + body = { + 'broadcast_domain.ipspace.name': fake.IPSPACE_NAME, + 'broadcast_domain.name': fake.BROADCAST_DOMAIN, } self.mock_object(self.client, 
'send_request') + self.client._add_port_to_broadcast_domain(fake.NODE_NAME, + fake.PORT, + fake.BROADCAST_DOMAIN, + fake.IPSPACE_NAME) + + self.client.send_request.assert_called_once_with( + '/network/ethernet/ports/', 'patch', query=query, body=body) + + def test__add_port_to_broadcast_domain_exists(self): + query = { + 'name': fake.PORT, + 'node.name': fake.NODE_NAME, + } + body = { + 'broadcast_domain.ipspace.name': fake.IPSPACE_NAME, + 'broadcast_domain.name': fake.BROADCAST_DOMAIN, + } + self.mock_object( + self.client, 'send_request', self._mock_api_error( + code=netapp_api.EREST_FAIL_ADD_PORT_BROADCAST)) self.client._add_port_to_broadcast_domain(fake.NODE_NAME, fake.PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) - self.client.send_request('/network/ethernet/ports/', 'patch', - query=fake_query, body=fake_body) + self.client.send_request.assert_called_once_with( + '/network/ethernet/ports/', 'patch', query=query, body=body) + self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) + + def test__add_port_to_broadcast_domain_exception(self): + self.mock_object(self.client, 'send_request', + self._mock_api_error()) + self.assertRaises( + exception.NetAppException, + self.client._add_port_to_broadcast_domain, + fake.NODE_NAME, fake.PORT, fake.BROADCAST_DOMAIN, + fake.IPSPACE_NAME) def test_rename_vserver(self): svm_uuid = fake.SVM_ITEM_SIMPLE_RESPONSE_REST["uuid"] @@ -4920,8 +4977,8 @@ class NetAppRestCmodeClientTestCase(test.TestCase): 'udp-max-xfer-size'], fake.VSERVER_NAME) expected = { - 'tcp-max-xfer-size': 65536, - 'udp-max-xfer-size': 32768, + 'tcp-max-xfer-size': '65536', + 'udp-max-xfer-size': '32768', } self.assertEqual(expected, result) @@ -4948,6 +5005,20 @@ class NetAppRestCmodeClientTestCase(test.TestCase): mock.call('/svm/svms', 'get', query=query)]) self.assertEqual(expected, result) + def test_get_vserver_ipspace_not_found(self): + api_response = fake.NO_RECORDS_RESPONSE_REST + self.mock_object(self.client, 'send_request', + mock.Mock(return_value=api_response)) + result = self.client.get_vserver_ipspace(fake.VSERVER_NAME) + self.assertIsNone(result) + + def test_get_vserver_ipspace_exception(self): + self.mock_object(self.client, 'send_request', + self._mock_api_error()) + self.assertRaises(exception.NetAppException, + self.client.get_vserver_ipspace, + fake.VSERVER_NAME) + def test_get_snapmirror_policies(self): api_response = fake.GET_SNAPMIRROR_POLICIES_REST self.mock_object(self.client, 'send_request', @@ -4982,30 +5053,49 @@ class NetAppRestCmodeClientTestCase(test.TestCase): mock.call(f'/snapmirror/policies/{uuid}', 'delete') ]) + def test_delete_snapmirror_policy_exception(self): + api_response = fake.GET_SNAPMIRROR_POLICIES_REST + api_error = netapp_api.api.NaApiError() + self.mock_object(self.client, 'send_request', + mock.Mock(side_effect=[api_response, api_error])) + self.assertRaises(netapp_api.api.NaApiError, + self.client.delete_snapmirror_policy, + 'fake_policy') + + def test_delete_snapmirror_policy_no_records(self): + api_response = fake.NO_RECORDS_RESPONSE_REST + self.mock_object(self.client, 'send_request', + mock.Mock(return_value=api_response)) + + self.client.delete_snapmirror_policy('fake_policy') + + query = {} + query['name'] = 'fake_policy' + query['fields'] = 'uuid,name' + self.client.send_request.assert_called_once_with( + '/snapmirror/policies', 'get', query=query) + def test_delete_vserver_one_volume(self): - self.mock_object(self.client, - 'get_vserver_info', + self.mock_object(self.client, 'get_vserver_info', 
mock.Mock(return_value=fake.VSERVER_INFO)) - self.mock_object(self.client, - '_get_unique_svm_by_name', + self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) - self.mock_object(self.client, - 'get_vserver_root_volume_name', + self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) - self.mock_object(self.client, - 'get_vserver_volume_count', + self.mock_object(self.client, 'get_vserver_volume_count', mock.Mock(return_value=1)) self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'offline_volume') self.mock_object(self.client, 'delete_volume') + self.mock_object(self.client, '_terminate_vserver_services') - self.client.delete_vserver(fake.VSERVER_NAME, - self.client) + self.client.delete_vserver(fake.VSERVER_NAME, self.client, + fake.CIFS_SECURITY_SERVICE) - self.client.offline_volume.assert_called_with( - fake.ROOT_VOLUME_NAME) - self.client.delete_volume.assert_called_with( - fake.ROOT_VOLUME_NAME) + self.client.offline_volume.assert_called_with(fake.ROOT_VOLUME_NAME) + self.client.delete_volume.assert_called_with(fake.ROOT_VOLUME_NAME) + self.client._terminate_vserver_services( + fake.VSERVER_NAME, self.client, fake.CIFS_SECURITY_SERVICE) svm_uuid = fake.FAKE_UUID self.client.send_request.assert_has_calls([ @@ -5115,26 +5205,31 @@ class NetAppRestCmodeClientTestCase(test.TestCase): mock_request.assert_called_once_with( '/storage/volumes', 'get', query=query) - def test_terminate_vserver_services(self): + def test__terminate_vserver_services(self): fake_uuid = fake.FAKE_UUID self.mock_object(self.client, 'send_request') + self.mock_object(self.client, 'disable_kerberos') self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake_uuid)) - security_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) - self.client._terminate_vserver_services(fake.VSERVER_NAME, - self.client, - [security_service]) + security_services = [ + copy.deepcopy(fake.CIFS_SECURITY_SERVICE), + copy.deepcopy(fake.KERBEROS_SECURITY_SERVICE) + ] + self.client._terminate_vserver_services( + fake.VSERVER_NAME, self.client, security_services) cifs_server_delete_body = { - 'ad_domain.password': security_service['password'], - 'ad_domain.user': security_service['user'], + 'ad_domain.password': security_services[0]['password'], + 'ad_domain.user': security_services[0]['user'], } self.client.send_request.assert_called_once_with( f'/protocols/cifs/services/{fake_uuid}', 'delete', body=cifs_server_delete_body) + self.client.disable_kerberos.assert_called_once_with( + security_services[1]) def test_terminate_vserver_services_cifs_not_found(self): @@ -5194,13 +5289,14 @@ class NetAppRestCmodeClientTestCase(test.TestCase): self.assertEqual(0, client_cmode_rest.LOG.error.call_count) def test_disable_kerberos(self): - fake_api_response = fake.NFS_LIFS_REST - fake_uuid = fake.FAKE_UUID - - self.mock_object(self.client, 'send_request') + api_error = self._mock_api_error( + code=netapp_api.EREST_KERBEROS_IS_ENABLED_DISABLED) self.mock_object(self.client, 'get_network_interfaces', mock.Mock(return_value=fake_api_response)) + self.mock_object( + self.client, 'send_request', + mock.Mock(side_effect=[None, api_error, None])) self.client.disable_kerberos(fake.KERBEROS_SECURITY_SERVICE) @@ -5211,9 +5307,14 @@ class NetAppRestCmodeClientTestCase(test.TestCase): 'enabled': False, } - self.client.send_request.assert_called_once_with( - f'/protocols/nfs/kerberos/interfaces/{fake_uuid}', - 'patch', 
body=kerberos_config_modify_body) + self.client.send_request.assert_has_calls([ + mock.call('/protocols/nfs/kerberos/interfaces/fake_uuid_1', + 'patch', body=kerberos_config_modify_body), + mock.call('/protocols/nfs/kerberos/interfaces/fake_uuid_2', + 'patch', body=kerberos_config_modify_body), + mock.call('/protocols/nfs/kerberos/interfaces/fake_uuid_3', + 'patch', body=kerberos_config_modify_body) + ]) self.client.get_network_interfaces.assert_called_once() def test_get_vserver_root_volume_name(self): @@ -5275,6 +5376,23 @@ class NetAppRestCmodeClientTestCase(test.TestCase): self.client.send_request.assert_has_calls([ mock.call('/network/ethernet/ports/', 'delete', query=query)]) + def test_delete_vlan_not_found(self): + self.mock_object( + self.client, 'send_request', + self._mock_api_error(code=netapp_api.EREST_ENTRY_NOT_FOUND)) + + query = { + 'vlan.base_port.name': fake.PORT, + 'node.name': fake.NODE_NAME, + 'vlan.tag': fake.VLAN + } + + self.client.delete_vlan(fake.NODE_NAME, fake.PORT, fake.VLAN) + + self.client.send_request.assert_has_calls([ + mock.call('/network/ethernet/ports/', 'delete', query=query)]) + self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) + def test_delete_vlan_still_used(self): self.mock_object( self.client, 'send_request', @@ -5487,6 +5605,13 @@ class NetAppRestCmodeClientTestCase(test.TestCase): result = self.client.get_ipspaces(fake.IPSPACE_NAME) self.assertEqual(expected, result) + def test_get_ipspaces_no_records(self): + api_response = fake.NO_RECORDS_RESPONSE_REST + self.mock_object(self.client, 'send_request', + mock.Mock(return_value=api_response)) + result = self.client.get_ipspaces(fake.IPSPACE_NAME) + self.assertEqual([], result) + def test_delete_port_and_broadcast_domains_for_ipspace_not_found(self): self.mock_object(self.client, @@ -5515,3 +5640,1110 @@ class NetAppRestCmodeClientTestCase(test.TestCase): fake.IPSPACE_NAME) self.client._delete_port_and_broadcast_domain.assert_called_once_with( fake.IPSPACES[0]['broadcast-domains'][0], fake.IPSPACES[0]) + + @ddt.data(('10.10.10.0/24', '10.10.10.1', False), + ('fc00::/7', 'fe80::1', False), + ('0.0.0.0/0', '10.10.10.1', True), + ('::/0', 'fe80::1', True)) + @ddt.unpack + def test_create_route(self, subnet, gateway, omit_destination): + + address = None + netmask = None + destination = None if omit_destination else subnet + if not destination: + if ':' in gateway: + destination = '::/0' + else: + destination = '0.0.0.0/0' + + if '/' in destination: + address, netmask = destination.split('/') + else: + address = destination + + body = { + 'destination.address': address, + 'gateway': gateway, + } + + if netmask: + body['destination.netmask'] = netmask + + self.mock_object(self.client, 'send_request') + + self.client.create_route(gateway, destination=destination) + + self.client.send_request.assert_called_once_with( + '/network/ip/routes', 'post', body=body) + + def test_create_route_duplicate(self): + self.mock_object(client_cmode_rest.LOG, 'debug') + self.mock_object( + self.client, 'send_request', + self._mock_api_error(code=netapp_api.EREST_DUPLICATE_ROUTE)) + + self.client.create_route(fake.GATEWAY, destination=fake.SUBNET) + + body = { + 'destination.address': fake.SUBNET[:-3], + 'gateway': fake.GATEWAY, + 'destination.netmask': fake.SUBNET[-2:], + } + self.client.send_request.assert_called_once_with( + '/network/ip/routes', 'post', body=body) + self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) + + def test_create_route_api_error(self): + self.mock_object(client_cmode_rest.LOG, 
'debug') + self.mock_object(self.client, 'send_request', self._mock_api_error()) + + body = { + 'destination.address': fake.SUBNET[:-3], + 'gateway': fake.GATEWAY, + 'destination.netmask': fake.SUBNET[-2:], + } + self.assertRaises(exception.NetAppException, + self.client.create_route, + fake.GATEWAY, destination=fake.SUBNET) + self.client.send_request.assert_called_once_with( + '/network/ip/routes', 'post', body=body) + + def test_create_route_without_gateway(self): + self.mock_object(self.client, 'send_request') + self.client.create_route(None, destination=fake.SUBNET) + self.assertFalse(self.client.send_request.called) + + def test_network_interface_exists(self): + api_response = fake.GENERIC_NETWORK_INTERFACES_GET_REPONSE + self.mock_object(self.client, 'send_request', + mock.Mock(return_value=api_response)) + result = self.client.network_interface_exists( + fake.VSERVER_NAME, fake.NODE_NAME, fake.PORT, fake.IP_ADDRESS, + fake.NETMASK, fake.VLAN) + query = { + 'ip.address': fake.IP_ADDRESS, + 'location.home_node.name': fake.NODE_NAME, + 'location.home_port.name': f'{fake.PORT}-{fake.VLAN}', + 'ip.netmask': fake.NETMASK, + 'svm.name': fake.VSERVER_NAME, + 'fields': 'name', + } + self.client.send_request.assert_called_once_with( + '/network/ip/interfaces', 'get', query=query) + self.assertTrue(result) + + def test_modify_active_directory_security_service(self): + svm_uuid = fake.FAKE_UUID + user_records = fake.FAKE_CIFS_LOCAL_USER.get('records')[0] + sid = user_records.get('sid') + self.mock_object(self.client, '_get_unique_svm_by_name', + mock.Mock(return_value=svm_uuid)) + self.mock_object(self.client, 'send_request', + mock.Mock(side_effect=[user_records, + None, None])) + self.mock_object(self.client, 'remove_preferred_dcs') + self.mock_object(self.client, 'set_preferred_dc') + new_security_service = { + 'user': 'new_user', + 'password': 'new_password', + 'server': 'fake_server' + } + + current_security_service = { + 'server': 'fake_current_server' + } + keys = {'user', 'password', 'server'} + + self.client.modify_active_directory_security_service( + fake.VSERVER_NAME, keys, new_security_service, + current_security_service) + + self.client.send_request.assert_has_calls([ + mock.call(f'/protocols/cifs/local-users/{svm_uuid}', 'get'), + mock.call(f'/protocols/cifs/local-users/{svm_uuid}/{sid}', 'patch', + query={'password': new_security_service['password']}), + mock.call(f'/protocols/cifs/local-users/{svm_uuid}/{sid}', 'patch', + query={'name': new_security_service['user']}) + ]) + + def test__create_vserver(self): + mock_sr = self.mock_object(self.client, 'send_request') + body = { + 'name': fake.VSERVER_NAME, + 'nsswitch.namemap': fake.FAKE_SERVER_SWITCH_NAME, + 'subtype': fake.FAKE_SUBTYPE, + 'ipspace.name': fake.IPSPACE_NAME, + 'aggregates': [{ + 'name': fake.SHARE_AGGREGATE_NAME + }] + } + + self.client._create_vserver(fake.VSERVER_NAME, + [fake.SHARE_AGGREGATE_NAME], + fake.IPSPACE_NAME, + fake.FAKE_SERVER_SWITCH_NAME, + fake.FAKE_SUBTYPE) + + mock_sr.assert_called_once_with('/svm/svms', 'post', body=body) + + @ddt.data((f'/name-services/dns/{fake.FAKE_UUID}', 'patch', + ['fake_domain'], ['fake_ip']), + (f'/name-services/dns/{fake.FAKE_UUID}', 'delete', [], []), + ('/name-services/dns', 'post', ['fake_domain'], ['fake_ip'])) + @ddt.unpack + def test_update_dns_configuration_all_operations(self, endpoint, + operation, domains, ips): + return_value = fake.FAKE_DNS_CONFIG if operation != 'post' else {} + self.mock_object(self.client, 'get_dns_config', + 
mock.Mock(return_value=return_value)) + self.mock_object(self.client, '_get_unique_svm_by_name', + mock.Mock(return_value=fake.FAKE_UUID)) + mock_sr = self.mock_object(self.client, 'send_request') + body = { + 'domains': domains, + 'servers': ips + } + empty_dns_config = (not body['domains'] and not body['servers']) + if empty_dns_config: + body = {} + self.client.update_dns_configuration(ips, domains) + mock_sr.assert_called_once_with(endpoint, operation, body) + + @ddt.data(True, False) + def test_delete_snapshot(self, ignore_owners): + volume_id = fake.VOLUME.get('uuid') + self.mock_object(self.client, '_get_volume_by_args', + mock.Mock(return_value=fake.VOLUME)) + response = fake.SNAPSHOTS_REST_RESPONSE + snapshot_id = response.get('records')[0].get('uuid') + mock_sr = self.mock_object(self.client, 'send_request', + mock.Mock(return_value=response)) + self.mock_object(self.client, '_has_records', + mock.Mock(return_value=True)) + query = { + 'name': fake.SNAPSHOT_NAME, + 'fields': 'uuid' + } + calls = [mock.call(f'/storage/volumes/{volume_id}/snapshots', 'get', + query=query)] + if ignore_owners: + query_cli = { + 'vserver': self.client.vserver, + 'volume': fake.VOLUME_NAMES[0], + 'snapshot': fake.SNAPSHOT_NAME, + 'ignore-owners': 'true' + } + calls.append(mock.call('/private/cli/snapshot', 'delete', + query=query_cli)) + else: + calls.append(mock.call(f'/storage/volumes/{volume_id}/' + f'snapshots/{snapshot_id}', 'delete')) + + self.client.delete_snapshot(fake.VOLUME_NAMES[0], fake.SNAPSHOT_NAME, + ignore_owners) + mock_sr.assert_has_calls(calls) + + def test_volume_has_luns(self): + mock_sr = self.mock_object(self.client, 'send_request') + self.mock_object(self.client, '_has_records', + mock.Mock(return_value=True)) + result = self.client.volume_has_luns(fake.VOLUME_NAMES[0]) + query = { + 'location.volume.name': fake.VOLUME_NAMES[0], + } + mock_sr.assert_called_once_with('/storage/luns/', 'get', query=query) + self.assertTrue(result) + + @ddt.data(fake.VOLUME_JUNCTION_PATH, '') + def test_volume_has_junctioned_volumes(self, junction_path): + mock_sr = self.mock_object(self.client, 'send_request') + return_records = True if junction_path else False + self.mock_object(self.client, '_has_records', + mock.Mock(return_value=return_records)) + result = self.client.volume_has_junctioned_volumes(junction_path) + if junction_path: + query = { + 'nas.path': junction_path + '/*', + } + + mock_sr.assert_called_once_with('/storage/volumes/', 'get', + query=query) + self.assertTrue(result) + else: + self.assertFalse(result) + + @ddt.data(fake.VOLUME_JUNCTION_PATH, '') + def test_get_volume_at_junction_path(self, junction_path): + response = fake.VOLUME_LIST_SIMPLE_RESPONSE_REST + return_records = True if junction_path else False + mock_sr = self.mock_object(self.client, 'send_request', + mock.Mock(return_value=response)) + self.mock_object(self.client, '_has_records', + mock.Mock(return_value=return_records)) + query = { + 'nas.path': junction_path, + 'fields': 'name' + } + + result = self.client.get_volume_at_junction_path(junction_path) + expected = { + 'name': response.get('records')[0].get('name') + } + + if junction_path: + mock_sr.assert_called_once_with('/storage/volumes/', 'get', + query=query) + self.assertEqual(expected, result) + else: + self.assertIsNone(result) + + def test_get_aggregate_for_volume(self): + response = fake.FAKE_SVM_AGGREGATES.get('records')[0] + mock_sr = self.mock_object(self.client, 'send_request', + mock.Mock(return_value=response)) + result = 
self.client.get_aggregate_for_volume(fake.VOLUME_NAMES[0]) + expected = fake.SHARE_AGGREGATE_NAMES_LIST + query = { + 'name': fake.VOLUME_NAMES[0], + 'fields': 'aggregates' + } + mock_sr.assert_called_once_with('/storage/volumes/', 'get', + query=query) + self.assertEqual(expected, result) + + def test_get_volume_to_manage(self): + response = fake.FAKE_VOLUME_MANAGE + mock_sr = self.mock_object(self.client, 'send_request', + mock.Mock(return_value=response)) + self.mock_object(self.client, '_has_records', + mock.Mock(return_value=True)) + expected = { + 'aggregate': fake.SHARE_AGGREGATE_NAME, + 'aggr-list': [], + 'junction-path': fake.VOLUME_JUNCTION_PATH, + 'name': fake.VOLUME_NAMES[0], + 'type': 'fake_type', + 'style': 'flex', + 'owning-vserver-name': fake.VSERVER_NAME, + 'size': fake.SHARE_SIZE, + 'qos-policy-group-name': fake.QOS_POLICY_GROUP_NAME + } + + result = self.client.get_volume_to_manage(fake.SHARE_AGGREGATE_NAME, + fake.VOLUME_NAMES[0]) + query = { + 'name': fake.VOLUME_NAMES[0], + 'fields': 'name,aggregates.name,nas.path,name,type,style,' + 'svm.name,qos.policy.name,space.size', + 'aggregates.name': fake.SHARE_AGGREGATE_NAME + } + mock_sr.assert_called_once_with('/storage/volumes', 'get', + query=query) + self.assertEqual(expected, result) + + def test_get_cifs_share_access(self): + response = fake.FAKE_CIFS_RECORDS + mock_sr = self.mock_object(self.client, 'send_request', + mock.Mock(return_value=response)) + query = { + 'name': fake.SHARE_NAME + } + query_acls = { + 'fields': 'user_or_group,permission' + } + expected = { + 'Everyone': 'full_control', + 'root': 'no_access' + } + result = self.client.get_cifs_share_access(fake.SHARE_NAME) + svm_uuid = response.get('records')[0].get('svm').get('uuid') + mock_sr.assert_has_calls([ + mock.call('/protocols/cifs/shares', 'get', query=query), + mock.call(f'/protocols/cifs/shares/{svm_uuid}/{fake.SHARE_NAME}/' + 'acls', 'get', query=query_acls) + ]) + self.assertEqual(expected, result) + + @ddt.data((netapp_api.EREST_LICENSE_NOT_INSTALLED, False), + (netapp_api.EREST_SNAPSHOT_NOT_SPECIFIED, True)) + @ddt.unpack + def test_check_snaprestore_license(self, code, expected): + self.mock_object(self.client, 'send_request', + mock.Mock(side_effect=self._mock_api_error(code))) + result = self.client.check_snaprestore_license() + self.assertEqual(expected, result) + body = { + 'restore_to.snapshot.name': '' + } + query = { + 'name': '*' + } + self.client.send_request.assert_called_once_with('/storage/volumes', + 'patch', + body=body, + query=query) + + def test_check_snaprestore_license_error(self): + self.mock_object(self.client, 'send_request') + self.assertRaises(exception.NetAppException, + self.client.check_snaprestore_license) + + def test__sort_data_ports_by_speed(self): + ports = fake.FAKE_PORTS + result = self.client._sort_data_ports_by_speed(ports) + expected = [{'speed': '4'}, + {'speed': 'auto'}, + {'speed': 'undef'}, + {'speed': 'fake_speed'}, + {'speed': ''}] + self.assertEqual(expected, result) + + def test_create_port_and_broadcast_domain(self): + self.mock_object(self.client, '_create_vlan') + self.mock_object(self.client, '_ensure_broadcast_domain_for_port') + res = self.client.create_port_and_broadcast_domain(fake.NODE_NAME, + fake.PORT, + fake.VLAN, + fake.MTU, + fake.IPSPACE_NAME) + expected = f'{fake.PORT}-{fake.VLAN}' + self.assertEqual(expected, res) + + @ddt.data(netapp_api.EREST_DUPLICATE_ENTRY, None) + def test__create_vlan(self, code): + self.mock_object(self.client, 'send_request', + 
+
+    @ddt.data(netapp_api.EREST_DUPLICATE_ENTRY, None)
+    def test__create_vlan(self, code):
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error(code)))
+        if not code:
+            self.assertRaises(exception.NetAppException,
+                              self.client._create_vlan,
+                              fake.NODE_NAME,
+                              fake.PORT,
+                              fake.VLAN)
+
+        else:
+            self.client._create_vlan(fake.NODE_NAME, fake.PORT, fake.VLAN)
+            body = {
+                'vlan.base_port.name': fake.PORT,
+                'node.name': fake.NODE_NAME,
+                'vlan.tag': fake.VLAN,
+                'type': 'vlan'
+            }
+            self.client.send_request.assert_called_once_with(
+                '/network/ethernet/ports', 'post', body=body)
+
+    @ddt.data(netapp_api.EREST_ENTRY_NOT_FOUND, None)
+    def test_delete_fpolicy_event_error_not_found(self, code):
+        volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
+        self.mock_object(self.client, '_get_volume_by_args',
+                         mock.Mock(return_value=volume))
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error(code)))
+        if not code:
+            self.assertRaises(exception.NetAppException,
+                              self.client.delete_fpolicy_event,
+                              fake.SHARE_NAME, 'fake_event')
+        else:
+            self.client.delete_fpolicy_event(fake.SHARE_NAME, 'fake_event')
+            self.assertEqual(1, client_cmode_rest.LOG.debug.call_count)
+
+    @ddt.data(netapp_api.EREST_ENTRY_NOT_FOUND, None)
+    def test_delete_fpolicy_policy_request_error(self, code):
+        volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
+        self.mock_object(self.client, '_get_volume_by_args',
+                         mock.Mock(return_value=volume))
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error(code)))
+        if not code:
+            self.assertRaises(exception.NetAppException,
+                              self.client.delete_fpolicy_policy,
+                              fake.SHARE_NAME, 'fake_policy')
+        else:
+            self.client.delete_fpolicy_policy(fake.SHARE_NAME, 'fake_policy')
+            self.assertEqual(1, client_cmode_rest.LOG.debug.call_count)
+
+    def test_modify_fpolicy_scope(self):
+        volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
+        svm_uuid = volume['svm']['uuid']
+        self.mock_object(self.client, '_get_volume_by_args',
+                         mock.Mock(return_value=volume))
+        mock_sr = self.mock_object(self.client, 'send_request')
+        body = {
+            'name': fake.FPOLICY_POLICY_NAME,
+            'scope.include_shares': fake.SHARE_NAME,
+            'scope.include_extension': 'fake_extension',
+            'scope.exclude_extension': 'fake_extension'
+        }
+        self.client.modify_fpolicy_scope(fake.SHARE_NAME,
+                                         fake.FPOLICY_POLICY_NAME,
+                                         [fake.SHARE_NAME],
+                                         ['fake_extension'],
+                                         ['fake_extension'])
+        mock_sr.assert_called_once_with(f'/protocols/fpolicy/{svm_uuid}/'
+                                        'policies/', 'patch', body=body)
+
+    def test_remove_cifs_share(self):
+        response = fake.SVMS_LIST_SIMPLE_RESPONSE_REST
+        svm_id = response.get('records')[0]['uuid']
+        mock_sr = self.mock_object(self.client, 'send_request',
+                                   mock.Mock(return_value=response))
+        self.client.remove_cifs_share(fake.SHARE_NAME)
+        query = {
+            'name': self.client.vserver,
+            'fields': 'uuid'
+        }
+        mock_sr.assert_has_calls([
+            mock.call('/svm/svms', 'get', query=query),
+            mock.call(f'/protocols/cifs/shares/{svm_id}'
+                      f'/{fake.SHARE_NAME}', 'delete')])
+
+    def test_qos_policy_group_get_error(self):
+        code = netapp_api.EREST_NOT_AUTHORIZED
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error(code)))
+        self.assertRaises(exception.NetAppException,
+                          self.client.qos_policy_group_get,
+                          fake.QOS_POLICY_GROUP_NAME)
+
+    def test_qos_policy_group_get_not_found(self):
+        response = fake.NO_RECORDS_RESPONSE_REST
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=response))
+        self.assertRaises(exception.NetAppException,
+                          self.client.qos_policy_group_get,
+                          fake.QOS_POLICY_GROUP_NAME)
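The two ddt cases for _create_vlan suggest the following shape: a duplicate-entry error is tolerated, anything else is wrapped in a NetAppException. A hypothetical sketch based only on the test expectations above:

    # Illustrative sketch only (inferred from the test above).
    def _create_vlan(self, node, port, vlan):
        try:
            body = {
                'vlan.base_port.name': port,
                'node.name': node,
                'vlan.tag': vlan,
                'type': 'vlan',
            }
            self.send_request('/network/ethernet/ports', 'post', body=body)
        except netapp_api.api.NaApiError as e:
            if e.code == netapp_api.EREST_DUPLICATE_ENTRY:
                # The VLAN already exists; nothing to do.
                LOG.debug('VLAN %(vlan)s already exists on port %(port)s.',
                          {'vlan': vlan, 'port': port})
            else:
                raise exception.NetAppException(
                    'Failed to create VLAN %(vlan)s on port %(port)s: '
                    '%(err)s' % {'vlan': vlan, 'port': port, 'err': e})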
+
+    def test_remove_unused_qos_policy_groups_error(self):
+        res_list = [fake.QOS_POLICY_GROUP_REST, netapp_api.api.NaApiError]
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=res_list))
+        self.client.remove_unused_qos_policy_groups()
+        self.assertEqual(1, client_cmode_rest.LOG.debug.call_count)
+
+    def test_mount_volume_error(self):
+        volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
+        code = netapp_api.EREST_SNAPMIRROR_INITIALIZING
+        self.mock_object(self.client, '_get_volume_by_args',
+                         mock.Mock(return_value=volume))
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error(code)))
+        self.assertRaises(netapp_api.api.NaApiError,
+                          self.client.mount_volume,
+                          fake.VOLUME_NAMES[0])
+
+    def test_get_aggregate_for_volume_empty(self):
+        response = fake.NO_RECORDS_RESPONSE_REST
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=response))
+        self.assertRaises(exception.NetAppException,
+                          self.client.get_aggregate_for_volume,
+                          fake.VOLUME_NAMES[0])
+
+    def test_get_nfs_export_policy_for_volume_empty(self):
+        response = fake.NO_RECORDS_RESPONSE_REST
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=response))
+        self.mock_object(self.client, '_has_records',
+                         mock.Mock(return_value=False))
+        self.assertRaises(exception.NetAppException,
+                          self.client.get_nfs_export_policy_for_volume,
+                          fake.VOLUME_NAMES[0])
+
+    def test_get_unique_export_policy_id_empty(self):
+        response = fake.NO_RECORDS_RESPONSE_REST
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=response))
+        self.mock_object(self.client, '_has_records',
+                         mock.Mock(return_value=False))
+        self.assertRaises(exception.NetAppException,
+                          self.client.get_unique_export_policy_id,
+                          fake.FPOLICY_POLICY_NAME)
+
+    def test__remove_nfs_export_rules_error(self):
+        self.mock_object(self.client, 'get_unique_export_policy_id',
+                         mock.Mock(return_value=fake.FAKE_UUID))
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error()))
+        self.assertRaises(netapp_api.api.NaApiError,
+                          self.client._remove_nfs_export_rules,
+                          fake.FPOLICY_POLICY_NAME,
+                          [1])
+
+    def test_get_volume_move_status_error(self):
+        response = fake.NO_RECORDS_RESPONSE_REST
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=response))
+        self.mock_object(self.client, '_has_records',
+                         mock.Mock(return_value=False))
+        self.assertRaises(exception.NetAppException,
+                          self.client.get_volume_move_status,
+                          fake.VOLUME_NAMES[0],
+                          fake.VSERVER_NAME)
+
+    def test__set_snapmirror_state_error(self):
+        self.mock_object(self.client, 'get_snapmirrors',
+                         mock.Mock(return_value=[]))
+        self.assertRaises(netapp_utils.NetAppDriverException,
+                          self.client._set_snapmirror_state,
+                          'fake_state', 'fake_source_path', 'fake_dest_path',
+                          'fake_source_vserver', 'fake_source_volume',
+                          'fake_dest_vserver', 'fake_dest_volume')
+
+    def test__break_snapmirror_error(self):
+        fake_snapmirror = fake.REST_GET_SNAPMIRRORS_RESPONSE
+        self.mock_object(self.client, '_get_snapmirrors',
+                         mock.Mock(return_value=fake_snapmirror))
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error()))
+        self.assertRaises(netapp_api.api.NaApiError,
+                          self.client._break_snapmirror)
+
+    def test__resync_snapmirror_no_parameter(self):
+        mock_snap = self.mock_object(self.client, '_resume_snapmirror')
+        self.client._resync_snapmirror()
+        mock_snap.assert_called_once_with(None, None, None, None, None, None)
+
+    def test_add_nfs_export_rule_with_rule_created(self):
+        self.mock_object(self.client, '_get_nfs_export_rule_indices',
+                         mock.Mock(return_value=[1]))
+        update = self.mock_object(self.client, '_update_nfs_export_rule')
+        remove = self.mock_object(self.client, '_remove_nfs_export_rules')
+        self.client.add_nfs_export_rule(fake.FPOLICY_POLICY_NAME,
+                                        'fake_client',
+                                        True,
+                                        'fake_auth')
+        update.assert_called_once_with(fake.FPOLICY_POLICY_NAME,
+                                       'fake_client', True, 1, 'fake_auth')
+        remove.assert_called_once_with(fake.FPOLICY_POLICY_NAME, [])
+
+    def test__update_snapmirror_no_snapmirrors(self):
+        self.mock_object(self.client, '_get_snapmirrors',
+                         mock.Mock(return_value=[]))
+        self.assertRaises(netapp_utils.NetAppDriverException,
+                          self.client._update_snapmirror)
+
+    @ddt.data((netapp_api.EREST_SNAPMIRROR_NOT_INITIALIZED,
+               'Another transfer is in progress'),
+              (None, 'fake'))
+    @ddt.unpack
+    def test__update_snapmirror_error(self, code, message):
+        snapmirrors = fake.REST_GET_SNAPMIRRORS_RESPONSE
+        self.mock_object(self.client, '_get_snapmirrors',
+                         mock.Mock(return_value=snapmirrors))
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error(code,
+                                                                    message)))
+        self.assertRaises(netapp_api.api.NaApiError,
+                          self.client._update_snapmirror)
+
+    @ddt.data(netapp_api.EREST_DUPLICATE_ENTRY, None)
+    def test_create_kerberos_realm_error(self, code):
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error(code)))
+        if code:
+            self.client.create_kerberos_realm(fake.KERBEROS_SECURITY_SERVICE)
+            self.assertEqual(1, client_cmode_rest.LOG.debug.call_count)
+        else:
+            self.assertRaises(exception.NetAppException,
+                              self.client.create_kerberos_realm,
+                              fake.KERBEROS_SECURITY_SERVICE)
+
+    def test_configure_kerberos_error(self):
+        self.mock_object(self.client, 'configure_dns')
+        self.mock_object(self.client, '_get_kerberos_service_principal_name')
+        self.mock_object(self.client, 'get_network_interfaces',
+                         mock.Mock(return_value=[]))
+        self.assertRaises(exception.NetAppException,
+                          self.client.configure_kerberos,
+                          fake.KERBEROS_SECURITY_SERVICE,
+                          fake.VSERVER_NAME)
+
+    def test_configure_ldap(self):
+        mock_ldap = self.mock_object(self.client, '_create_ldap_client')
+        self.client.configure_ldap(fake.LDAP_AD_SECURITY_SERVICE, 30,
+                                   fake.VSERVER_NAME)
+        mock_ldap.assert_called_once_with(fake.LDAP_AD_SECURITY_SERVICE,
+                                          vserver_name=fake.VSERVER_NAME)
+
+    def test_configure_active_directory_error(self):
+        self.mock_object(self.client, 'configure_dns')
+        self.mock_object(self.client, 'set_preferred_dc')
+        self.mock_object(self.client, '_get_cifs_server_name')
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error()))
+        self.assertRaises(exception.NetAppException,
+                          self.client.configure_active_directory,
+                          fake.LDAP_AD_SECURITY_SERVICE,
+                          fake.VSERVER_NAME)
+
+    def test__get_unique_svm_by_name_error(self):
+        response = fake.NO_RECORDS_RESPONSE_REST
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=response))
+        self.assertRaises(exception.NetAppException,
+                          self.client._get_unique_svm_by_name,
+                          fake.VSERVER_NAME)
+
+    def test_get_ontap_version_scoped(self):
+        self.client.get_ontap_version = self.original_get_ontap_version
+        e = netapp_api.api.NaApiError(code=netapp_api.EREST_NOT_AUTHORIZED)
+        res_list = [e, fake.GET_VERSION_RESPONSE_REST]
+        version = fake.GET_VERSION_RESPONSE_REST['records'][0]['version']
+        expected = {
+            'version': version['full'],
+            'version-tuple': (9, 11, 1)
+        }
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=res_list))
+        result = self.client.get_ontap_version(self=self.client, cached=False)
+        self.assertEqual(expected, result)
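test_get_ontap_version_scoped exercises the fallback path for SVM-scoped credentials: the first request fails with EREST_NOT_AUTHORIZED and a second request succeeds. The sketch below illustrates that behavior; the endpoints and query fields are assumptions made for illustration and may not match the driver exactly.

    # Illustrative sketch only (inferred from the test above).
    def get_ontap_version(self, cached=True):
        # Caching of the parsed version is omitted in this sketch.
        try:
            # Cluster-scoped accounts can read the node version directly.
            response = self.send_request('/cluster/nodes', 'get',
                                         query={'fields': 'version'})
        except netapp_api.api.NaApiError as e:
            if e.code != netapp_api.EREST_NOT_AUTHORIZED:
                raise
            # SVM-scoped accounts fall back to the CLI passthrough.
            response = self.send_request('/private/cli/version', 'get',
                                         query={'fields': 'version'})

        version_full = response['records'][0]['version']['full']
        version_parsed = re.findall(r'\d+\.\d+\.\d+', version_full)[0]
        major, minor, micro = version_parsed.split('.')
        return {'version': version_full,
                'version-tuple': (int(major), int(minor), int(micro))}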
+
+    def test_get_licenses_error(self):
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error()))
+        self.assertRaises(netapp_api.api.NaApiError,
+                          self.client.get_licenses)
+
+    def test__get_volume_by_args_error(self):
+        res = fake.VOLUME_GET_ITER_RESPONSE_REST_PAGE
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=res))
+        self.assertRaises(exception.NetAppException,
+                          self.client._get_volume_by_args,
+                          is_root=True)
+
+    def test_get_aggregate_no_name(self):
+        expected = {}
+        result = self.client.get_aggregate('')
+        self.assertEqual(expected, result)
+
+    def test_get_aggregate_error(self):
+        self.mock_object(self.client, '_get_aggregates',
+                         mock.Mock(side_effect=self._mock_api_error()))
+        result = self.client.get_aggregate(fake.SHARE_AGGREGATE_NAME)
+        expected = {}
+        self.assertEqual(expected, result)
+
+    def test_get_node_for_aggregate_no_name(self):
+        result = self.client.get_node_for_aggregate('')
+        self.assertIsNone(result)
+
+    @ddt.data(netapp_api.EREST_NOT_AUTHORIZED, None)
+    def test_get_node_for_aggregate_error(self, code):
+        self.mock_object(self.client, '_get_aggregates',
+                         mock.Mock(side_effect=self._mock_api_error(code)))
+        if code:
+            r = self.client.get_node_for_aggregate(fake.SHARE_AGGREGATE_NAME)
+            self.assertIsNone(r)
+        else:
+            self.assertRaises(netapp_api.api.NaApiError,
+                              self.client.get_node_for_aggregate,
+                              fake.SHARE_AGGREGATE_NAME)
+
+    def test_get_vserver_aggregate_capacities_no_response(self):
+        response = fake.NO_RECORDS_RESPONSE_REST
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=response))
+        self.assertRaises(exception.NetAppException,
+                          self.client.get_vserver_aggregate_capacities,
+                          fake.SHARE_AGGREGATE_NAME)
+
+    def test_get_vserver_aggregate_capacities_no_aggregate(self):
+        response = fake.FAKE_AGGREGATES_RESPONSE
+        share_name = fake.SHARE_AGGREGATE_NAME
+        self.mock_object(self.client,
+                         'send_request',
+                         mock.Mock(return_value=response))
+        res = self.client.get_vserver_aggregate_capacities(share_name)
+        expected = {}
+        self.assertEqual(expected, res)
+
+    def test_rename_nfs_export_policy_error(self):
+        self.mock_object(self.client, 'send_request')
+        self.mock_object(self.client, '_has_records',
+                         mock.Mock(return_value=False))
+        self.assertRaises(exception.NetAppException,
+                          self.client.rename_nfs_export_policy,
+                          'fake_policy_name',
+                          'fake_new_policy_name')
+
+    @ddt.data((False, exception.StorageResourceNotFound),
+              (True, exception.NetAppException))
+    @ddt.unpack
+    def test_get_volume_error(self, records, expected_exception):
+        res = copy.deepcopy(fake.FAKE_VOLUME_MANAGE)
+        res['num_records'] = 2
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=res))
+        self.mock_object(self.client, '_has_records',
+                         mock.Mock(return_value=records))
+        self.assertRaises(expected_exception,
+                          self.client.get_volume,
+                          fake.VOLUME_NAMES[0])
+
+    def test_get_volume_no_aggregate(self):
+        res = copy.deepcopy(fake.FAKE_VOLUME_MANAGE)
+        res.get('records')[0]['aggregates'] = []
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=res))
+        fake_volume = res.get('records', [])[0]
+
+        expected = {
+            'aggregate': '',
+            'aggr-list': [],
+            'junction-path': fake_volume.get('nas', {}).get('path', ''),
+            'name': fake_volume.get('name', ''),
+            'owning-vserver-name': fake_volume.get('svm', {}).get('name', ''),
+            'type': fake_volume.get('type', ''),
+            'style': fake_volume.get('style', ''),
+            'size': fake_volume.get('space', {}).get('size', ''),
+            'qos-policy-group-name': fake_volume.get('qos', {})
+                                     .get('policy', {})
+                                     .get('name', ''),
+            'style-extended': fake_volume.get('style', '')
+        }
+        result = self.client.get_volume(fake.VOLUME_NAMES[0])
+        self.assertEqual(expected, result)
+
+    def test_get_job_state_error(self):
+        response = {
+            'records': [fake.JOB_SUCCESSFUL_REST,
+                        fake.JOB_SUCCESSFUL_REST]
+        }
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=response))
+        self.mock_object(self.client, '_has_records',
+                         mock.Mock(return_value=True))
+        self.assertRaises(exception.NetAppException,
+                          self.client.get_job_state,
+                          fake.JOB_ID)
+
+    def test_get_volume_efficiency_status_error(self):
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error()))
+        self.client.get_volume_efficiency_status(fake.VOLUME_NAMES[0])
+        self.assertEqual(1, client_cmode_rest.LOG.error.call_count)
+
+    def test_get_fpolicy_scopes_not_found(self):
+        self.mock_object(self.client, '_get_volume_by_args',
+                         mock.Mock(side_effect=exception.NetAppException))
+        result = self.client.get_fpolicy_scopes(fake.SHARE_NAME)
+        expected = []
+        self.assertEqual(expected, result)
+
+    def test_delete_fpolicy_policy_error(self):
+        self.mock_object(self.client, '_get_volume_by_args',
+                         mock.Mock(side_effect=exception.NetAppException))
+        self.mock_object(self.client, 'send_request')
+        res = self.client.delete_fpolicy_policy(fake.SHARE_NAME,
+                                                fake.FPOLICY_POLICY_NAME)
+        self.assertEqual(1, client_cmode_rest.LOG.debug.call_count)
+        self.assertIsNone(res)
+
+    def test_delete_fpolicy_event_error(self):
+        self.mock_object(self.client, '_get_volume_by_args',
+                         mock.Mock(side_effect=exception.NetAppException))
+        self.mock_object(self.client, 'send_request')
+        res = self.client.delete_fpolicy_event(fake.SHARE_NAME,
+                                               fake.FPOLICY_EVENT_NAME)
+        self.assertEqual(1, client_cmode_rest.LOG.debug.call_count)
+        self.assertIsNone(res)
+
+    def test_delete_nfs_export_policy_no_records(self):
+        response = fake.NO_RECORDS_RESPONSE_REST
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=response))
+        res = self.client.delete_nfs_export_policy(fake.FPOLICY_POLICY_NAME)
+        self.assertIsNone(res)
+
+    def test_remove_cifs_share_not_found(self):
+        response = fake.NO_RECORDS_RESPONSE_REST
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=response))
+        self.assertRaises(exception.NetAppException,
+                          self.client.remove_cifs_share,
+                          fake.SHARE_NAME)
+
+    @ddt.data(netapp_api.EREST_ENTRY_NOT_FOUND, None)
+    def test_remove_cifs_share_error(self, code):
+        responses = [fake.SVMS_LIST_SIMPLE_RESPONSE_REST,
+                     netapp_api.api.NaApiError(code=code)]
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=responses))
+        if not code:
+            self.assertRaises(netapp_api.api.NaApiError,
+                              self.client.remove_cifs_share,
+                              fake.SHARE_NAME)
+        else:
+            result = self.client.remove_cifs_share(fake.SHARE_NAME)
+            self.assertIsNone(result)
+
+    def test_qos_policy_group_does_not_exists(self):
+        self.mock_object(self.client, 'qos_policy_group_get',
+                         mock.Mock(side_effect=exception.NetAppException))
+        result = self.client.qos_policy_group_exists(fake.QOS_POLICY_GROUP)
+        self.assertFalse(result)
+
+    def test_qos_policy_group_rename_error(self):
+        response = fake.NO_RECORDS_RESPONSE_REST
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=response))
+        self.assertRaises(exception.NetAppException,
+                          self.client.qos_policy_group_rename,
+                          fake.QOS_POLICY_GROUP_NAME,
+                          'fake_new_qos_policy_group_name')
+
+    def test_qos_policy_group_rename_same_name(self):
+        res = self.client.qos_policy_group_rename(fake.QOS_POLICY_GROUP_NAME,
+                                                  fake.QOS_POLICY_GROUP_NAME)
+        self.assertIsNone(res)
+
+    def test_qos_policy_group_modify_error(self):
+        response = fake.NO_RECORDS_RESPONSE_REST
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=response))
+        self.assertRaises(exception.NetAppException,
+                          self.client.qos_policy_group_modify,
+                          fake.QOS_POLICY_GROUP_NAME,
+                          fake.QOS_MAX_THROUGHPUT)
+
+    def test_update_kerberos_realm_error(self):
+        self.mock_object(self.client,
+                         '_get_unique_svm_by_name',
+                         mock.Mock(return_value=fake.FAKE_UUID))
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error()))
+        self.assertRaises(exception.NetAppException,
+                          self.client.update_kerberos_realm,
+                          fake.KERBEROS_SECURITY_SERVICE)
+
+    @ddt.data(('fake_domain', 'fake_server'), (None, None))
+    @ddt.unpack
+    def test_modify_ldap_error(self, domain, server):
+        security_service = {
+            'domain': domain,
+            'server': server,
+            'user': 'fake_user',
+            'ou': 'fake_ou',
+            'dns_ip': 'fake_ip',
+            'password': 'fake_password'
+        }
+        self.mock_object(self.client, '_get_unique_svm_by_name',
+                         mock.Mock(return_value=fake.FAKE_UUID))
+        self.mock_object(self.client, 'send_request')
+        self.assertRaises(exception.NetAppException,
+                          self.client.modify_ldap,
+                          security_service,
+                          fake.LDAP_AD_SECURITY_SERVICE)
+
+    def test_update_dns_configuration_error(self):
+        self.mock_object(self.client, '_get_unique_svm_by_name',
+                         mock.Mock(return_value=fake.FAKE_UUID))
+        dns_config = {
+            'domains': [fake.KERBEROS_SECURITY_SERVICE['domain']],
+            'dns-ips': [fake.KERBEROS_SECURITY_SERVICE['dns_ip']],
+        }
+        self.mock_object(self.client, 'get_dns_config',
+                         mock.Mock(return_value=dns_config))
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error()))
+        self.assertRaises(exception.NetAppException,
+                          self.client.update_dns_configuration,
+                          ['fake_ips'], ['fake_domain'])
+
+    def test_remove_preferred_dcs_error(self):
+        fake_response = [fake.PREFERRED_DC_REST,
+                         netapp_api.api.NaApiError]
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=fake_response))
+        self.assertRaises(exception.NetAppException,
+                          self.client.remove_preferred_dcs,
+                          fake.LDAP_AD_SECURITY_SERVICE,
+                          fake.FAKE_UUID)
+
+    def test_set_preferred_dc_error(self):
+        security = copy.deepcopy(fake.LDAP_AD_SECURITY_SERVICE)
+        security['server'] = 'fake_server'
+        self.mock_object(self.client, '_get_unique_svm_by_name',
+                         mock.Mock(return_value=fake.FAKE_UUID))
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=self._mock_api_error()))
+        self.assertRaises(exception.NetAppException,
+                          self.client.set_preferred_dc,
+                          security,
+                          fake.VSERVER_NAME)
+
+    def test_set_preferred_dc_no_server(self):
+        result = self.client.set_preferred_dc(fake.LDAP_AD_SECURITY_SERVICE,
+                                              fake.VSERVER_NAME)
+        self.assertIsNone(result)
+
+    def test__get_svm_peer_uuid_error(self):
+        response = fake.NO_RECORDS_RESPONSE_REST
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=response))
+        self.assertRaises(exception.NetAppException,
+                          self.client._get_svm_peer_uuid,
+                          fake.VSERVER_NAME,
+                          fake.VSERVER_PEER_NAME)
+
+    def test_create_vserver_dp_destination(self):
+        mock_vserver = self.mock_object(self.client, '_create_vserver')
+        self.client.create_vserver_dp_destination(fake.VSERVER_NAME,
+                                                  fake.FAKE_AGGR_LIST,
+                                                  fake.IPSPACE_NAME)
+        mock_vserver.assert_called_once_with(fake.VSERVER_NAME,
+                                             fake.FAKE_AGGR_LIST,
+                                             fake.IPSPACE_NAME,
+                                             subtype='dp_destination')
+
+    @ddt.data(':', '.')
+    def test_create_route_no_destination(self, gateway):
+        mock_sr = self.mock_object(self.client, 'send_request')
+        body = {
+            'gateway': gateway,
+            'destination.address': '::' if ":" in gateway else '0.0.0.0',
+            'destination.netmask': '0'
+        }
+        self.client.create_route(gateway)
+        mock_sr.assert_called_once_with('/network/ip/routes', 'post',
+                                        body=body)
+
+    def test_list_root_aggregates(self):
+        return_value = fake.FAKE_ROOT_AGGREGATES_RESPONSE
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(return_value=return_value))
+
+        result = self.client.list_root_aggregates()
+
+        expected = [fake.SHARE_AGGREGATE_NAME]
+        self.assertEqual(expected, result)
+
+    @ddt.data(("fake_server", "fake_domain"), (None, None))
+    @ddt.unpack
+    def test__create_ldap_client_error(self, server, domain):
+        security_service = {
+            'server': server,
+            'domain': domain,
+            'user': 'fake_user',
+            'ou': 'fake_ou',
+            'dns_ip': 'fake_ip',
+            'password': 'fake_password'
+        }
+
+        self.assertRaises(exception.NetAppException,
+                          self.client._create_ldap_client,
+                          security_service)
+
+    @ddt.data(["password"], ["user"])
+    def test__modify_active_directory_security_service_error(self, keys):
+        svm_uuid = fake.FAKE_UUID
+        user_records = fake.FAKE_CIFS_LOCAL_USER.get('records')[0]
+        self.mock_object(self.client, '_get_unique_svm_by_name',
+                         mock.Mock(return_value=svm_uuid))
+        self.mock_object(self.client, 'send_request',
+                         mock.Mock(side_effect=[user_records,
+                                                netapp_api.api.NaApiError]))
+        self.mock_object(self.client, 'remove_preferred_dcs')
+        self.mock_object(self.client, 'set_preferred_dc')
+        new_security_service = {
+            'user': 'new_user',
+            'password': 'new_password',
+            'server': 'fake_server'
+        }
+
+        current_security_service = {
+            'server': 'fake_current_server'
+        }
+
+        self.assertRaises(
+            exception.NetAppException,
+            self.client.modify_active_directory_security_service,
+            fake.VSERVER_NAME,
+            keys,
+            new_security_service,
+            current_security_service)
+
+    def test_disable_kerberos_error(self):
+        fake_api_response = fake.NFS_LIFS_REST
+        api_error = self._mock_api_error()
+        self.mock_object(self.client, 'get_network_interfaces',
+                         mock.Mock(return_value=fake_api_response))
+        self.mock_object(
+            self.client, 'send_request',
+            mock.Mock(side_effect=api_error))
+
+        self.assertRaises(exception.NetAppException,
+                          self.client.disable_kerberos,
+                          fake.LDAP_AD_SECURITY_SERVICE)
+
+    def test_set_volume_snapdir_access_exception(self):
+        fake_hide_snapdir = 'fake-snapdir'
+
+        self.mock_object(self.client, '_get_volume_by_args',
+                         mock.Mock(side_effect=exception.NetAppException))
+        self.assertRaises(exception.SnapshotResourceNotFound,
+                          self.client.set_volume_snapdir_access,
+                          fake.VOLUME_NAMES[0],
+                          fake_hide_snapdir)
+
+    def test__get_broadcast_domain_for_port_exception(self):
+        fake_response_empty = {
+            "records": [{}]
+        }
+        self.mock_object(self.client, 'send_request', mock.Mock(
+            return_value=fake_response_empty))
+
+        self.assertRaises(exception.NetAppException,
+                          self.client._get_broadcast_domain_for_port,
+                          fake.NODE_NAME,
+                          fake.PORT)
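The create_route test above passes only a gateway, so the client apparently derives the default destination from the gateway's address family. A hedged sketch of that defaulting logic, with the '0.0.0.0/0' and '::/0' defaults taken from the asserted request body rather than from the driver source:

    # Illustrative sketch only (inferred from the test above).
    def create_route(self, gateway_ip, destination=None):
        if not gateway_ip:
            return
        if not destination:
            # Default route: pick the address family from the gateway.
            destination = '::/0' if ':' in gateway_ip else '0.0.0.0/0'
        address, netmask = destination.split('/')
        body = {
            'gateway': gateway_ip,
            'destination.address': address,
            'destination.netmask': netmask,
        }
        self.send_request('/network/ip/routes', 'post', body=body)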
+
+    def test__configure_nfs_exception(self):
+        fake_nfs = {
+            'udp-max-xfer-size': 10000,
+            'tcp-max-xfer-size': 10000,
+        }
+        self.assertRaises(exception.NetAppException,
+                          self.client._configure_nfs,
+                          fake_nfs,
+                          fake.FAKE_UUID)
+
+    def test_get_snapshot_exception(self):
+        self.mock_object(self.client, '_get_volume_by_args',
+                         mock.Mock(side_effect=exception.NetAppException))
+        self.assertRaises(exception.SnapshotResourceNotFound,
+                          self.client.get_snapshot,
+                          fake.VOLUME_NAMES[0],
+                          fake.SNAPSHOT_NAME)
+
+    def test_delete_snapshot_exception(self):
+        self.mock_object(self.client,
+                         '_get_volume_by_args',
+                         mock.Mock(side_effect=exception.NetAppException))
+        self.client.delete_snapshot(fake.VOLUME_NAMES[0], fake.SNAPSHOT_NAME,
+                                    True)
+
+        self.assertEqual(1, client_cmode_rest.LOG.warning.call_count)
+
+    def test_set_nfs_export_policy_for_volume_exception(self):
+        return_code = netapp_api.EREST_CANNOT_MODITY_OFFLINE_VOLUME
+        self.mock_object(self.client,
+                         'send_request',
+                         mock.Mock(side_effect=self._mock_api_error(
+                             code=return_code)))
+        self.client.set_nfs_export_policy_for_volume(
+            fake.VOLUME_NAMES[0], fake.EXPORT_POLICY_NAME)
+
+        self.assertEqual(1, client_cmode_rest.LOG.debug.call_count)
+
+    def test__break_snapmirror_exception(self):
+        fake_snapmirror = copy.deepcopy(fake.REST_GET_SNAPMIRRORS_RESPONSE)
+        fake_snapmirror[0]['transferring-state'] = 'error'
+
+        self.mock_object(
+            self.client, '_get_snapmirrors',
+            mock.Mock(return_value=fake_snapmirror))
+
+        self.assertRaises(netapp_utils.NetAppDriverException,
+                          self.client._break_snapmirror)