diff --git a/manila/exception.py b/manila/exception.py
index ee9246d574..509cbc890f 100644
--- a/manila/exception.py
+++ b/manila/exception.py
@@ -779,6 +779,10 @@ class VserverNotSpecified(NetAppException):
message = _("Vserver not specified.")
+class VserverNotReady(NetAppException):
+ message = _("Vserver %(vserver)s is not ready yet.")
+
+
class EMCPowerMaxXMLAPIError(Invalid):
message = _("%(err)s")
diff --git a/manila/share/drivers/netapp/dataontap/client/api.py b/manila/share/drivers/netapp/dataontap/client/api.py
index 4c7691d540..b022a63546 100644
--- a/manila/share/drivers/netapp/dataontap/client/api.py
+++ b/manila/share/drivers/netapp/dataontap/client/api.py
@@ -45,8 +45,10 @@ EVOLNOTCLONE = '13170'
EVOLMOVE_CANNOT_MOVE_TO_CFO = '13633'
EAGGRDOESNOTEXIST = '14420'
EVOL_NOT_MOUNTED = '14716'
+EVSERVERALREADYSTARTED = '14923'
ESIS_CLONE_NOT_LICENSED = '14956'
EOBJECTNOTFOUND = '15661'
+EVSERVERNOTFOUND = '15698'
E_VIFMGR_PORT_ALREADY_ASSIGNED_TO_BROADCAST_DOMAIN = '18605'
ERELATION_EXISTS = '17122'
ENOTRANSFER_IN_PROGRESS = '17130'
@@ -55,6 +57,7 @@ EANOTHER_OP_ACTIVE = '17131'
ERELATION_NOT_QUIESCED = '17127'
ESOURCE_IS_DIFFERENT = '17105'
EVOL_CLONE_BEING_SPLIT = '17151'
+ESVMDR_CANNOT_PERFORM_OP_FOR_STATUS = '18815'
class NaServer(object):
diff --git a/manila/share/drivers/netapp/dataontap/client/client_cmode.py b/manila/share/drivers/netapp/dataontap/client/client_cmode.py
index 7e53f4e78c..3526d64b23 100644
--- a/manila/share/drivers/netapp/dataontap/client/client_cmode.py
+++ b/manila/share/drivers/netapp/dataontap/client/client_cmode.py
@@ -84,6 +84,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
self.features.add_feature('ADVANCED_DISK_PARTITIONING',
supported=ontapi_1_30)
self.features.add_feature('FLEXVOL_ENCRYPTION', supported=ontapi_1_110)
+ self.features.add_feature('SVM_DR', supported=ontapi_1_140)
self.features.add_feature('TRANSFER_LIMIT_NFS_CONFIG',
supported=ontapi_1_140)
self.features.add_feature('CIFS_DC_ADD_SKIP_CHECK',
@@ -161,15 +162,42 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
def create_vserver(self, vserver_name, root_volume_aggregate_name,
root_volume_name, aggregate_names, ipspace_name):
"""Creates new vserver and assigns aggregates."""
+ self._create_vserver(
+ vserver_name, aggregate_names, ipspace_name,
+ root_volume_name=root_volume_name,
+ root_volume_aggregate_name=root_volume_aggregate_name,
+ root_volume_security_style='unix',
+ name_server_switch='file')
+
+ @na_utils.trace
+ def create_vserver_dp_destination(self, vserver_name, aggregate_names,
+ ipspace_name):
+ """Creates new 'dp_destination' vserver and assigns aggregates."""
+ self._create_vserver(
+ vserver_name, aggregate_names, ipspace_name,
+ subtype='dp_destination')
+
+ @na_utils.trace
+ def _create_vserver(self, vserver_name, aggregate_names, ipspace_name,
+ root_volume_name=None, root_volume_aggregate_name=None,
+ root_volume_security_style=None,
+ name_server_switch=None, subtype=None):
+ """Creates new vserver and assigns aggregates."""
create_args = {
'vserver-name': vserver_name,
- 'root-volume-security-style': 'unix',
- 'root-volume-aggregate': root_volume_aggregate_name,
- 'root-volume': root_volume_name,
- 'name-server-switch': {
- 'nsswitch': 'file',
- },
}
+ if root_volume_name:
+ create_args['root-volume'] = root_volume_name
+ if root_volume_aggregate_name:
+ create_args['root-volume-aggregate'] = root_volume_aggregate_name
+ if root_volume_security_style:
+ create_args['root-volume-security-style'] = (
+ root_volume_security_style)
+ if name_server_switch:
+ create_args['name-server-switch'] = {
+ 'nsswitch': name_server_switch}
+ if subtype:
+ create_args['vserver-subtype'] = subtype
if ipspace_name:
if not self.features.IPSPACES:
@@ -187,6 +215,50 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
}
self.send_request('vserver-modify', modify_args)
+ @na_utils.trace
+ def get_vserver_info(self, vserver_name):
+ """Retrieves Vserver info."""
+ LOG.debug('Retrieving Vserver %s information.', vserver_name)
+
+ api_args = {
+ 'query': {
+ 'vserver-info': {
+ 'vserver-name': vserver_name,
+ },
+ },
+ 'desired-attributes': {
+ 'vserver-info': {
+ 'vserver-name': None,
+ 'vserver-subtype': None,
+ 'state': None,
+ 'operational-state': None,
+ },
+ },
+ }
+ result = self.send_iter_request('vserver-get-iter', api_args)
+ if not self._has_records(result):
+ return
+ try:
+ vserver_info = result.get_child_by_name(
+ 'attributes-list').get_child_by_name(
+ 'vserver-info')
+ vserver_subtype = vserver_info.get_child_content(
+ 'vserver-subtype')
+ vserver_op_state = vserver_info.get_child_content(
+ 'operational-state')
+ vserver_state = vserver_info.get_child_content('state')
+ except AttributeError:
+ msg = _('Could not retrieve vserver-info for %s.') % vserver_name
+ raise exception.NetAppException(msg)
+
+ vserver_info = {
+ 'name': vserver_name,
+ 'subtype': vserver_subtype,
+ 'operational_state': vserver_op_state,
+ 'state': vserver_state,
+ }
+ return vserver_info
+
@na_utils.trace
def vserver_exists(self, vserver_name):
"""Checks if Vserver exists."""
@@ -204,7 +276,13 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
},
},
}
- result = self.send_iter_request('vserver-get-iter', api_args)
+ try:
+ result = self.send_iter_request('vserver-get-iter', api_args)
+ except netapp_api.NaApiError as e:
+ if e.code == netapp_api.EVSERVERNOTFOUND:
+ return False
+ else:
+ raise
return self._has_records(result)
@na_utils.trace
@@ -332,19 +410,23 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
@na_utils.trace
def delete_vserver(self, vserver_name, vserver_client,
security_services=None):
- """Delete Vserver.
+ """Deletes a Vserver.
Checks if Vserver exists and does not have active shares.
Offlines and destroys root volumes. Deletes Vserver.
"""
- if not self.vserver_exists(vserver_name):
+ vserver_info = self.get_vserver_info(vserver_name)
+ if vserver_info is None:
LOG.error("Vserver %s does not exist.", vserver_name)
return
+ is_dp_destination = vserver_info.get('subtype') == 'dp_destination'
root_volume_name = self.get_vserver_root_volume_name(vserver_name)
volumes_count = vserver_client.get_vserver_volume_count()
- if volumes_count == 1:
+        # NOTE(dviroel): 'dp_destination' vservers do not allow deleting
+        # their root volume. We can just call vserver-destroy directly.
+ if volumes_count == 1 and not is_dp_destination:
try:
vserver_client.offline_volume(root_volume_name)
except netapp_api.NaApiError as e:
@@ -359,7 +441,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
msg = _("Cannot delete Vserver. Vserver %s has shares.")
raise exception.NetAppException(msg % vserver_name)
- if security_services:
+ if security_services and not is_dp_destination:
self._terminate_vserver_services(vserver_name, vserver_client,
security_services)
@@ -579,10 +661,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
return list(self.get_vserver_aggregate_capacities().keys())
@na_utils.trace
- def create_network_interface(self, ip, netmask, vlan, node, port,
- vserver_name, lif_name, ipspace_name, mtu):
- """Creates LIF on VLAN port."""
-
+ def create_port_and_broadcast_domain(self, node, port, vlan, mtu, ipspace):
home_port_name = port
if vlan:
self._create_vlan(node, port, vlan)
@@ -590,7 +669,17 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
if self.features.BROADCAST_DOMAINS:
self._ensure_broadcast_domain_for_port(
- node, home_port_name, mtu, ipspace=ipspace_name)
+ node, home_port_name, mtu, ipspace=ipspace)
+
+ return home_port_name
+
+ @na_utils.trace
+ def create_network_interface(self, ip, netmask, vlan, node, port,
+ vserver_name, lif_name, ipspace_name, mtu):
+ """Creates LIF on VLAN port."""
+
+ home_port_name = self.create_port_and_broadcast_domain(
+ node, port, vlan, mtu, ipspace_name)
LOG.debug('Creating LIF %(lif)s for Vserver %(vserver)s ',
{'lif': lif_name, 'vserver': vserver_name})
@@ -2705,6 +2794,26 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
api_args = {'path': share_path, 'share-name': share_name}
self.send_request('cifs-share-create', api_args)
+ @na_utils.trace
+ def cifs_share_exists(self, share_name):
+        """Check whether a CIFS share already exists."""
+ share_path = '/%s' % share_name
+ api_args = {
+ 'query': {
+ 'cifs-share': {
+ 'share-name': share_name,
+ 'path': share_path,
+ },
+ },
+ 'desired-attributes': {
+ 'cifs-share': {
+ 'share-name': None
+ }
+ },
+ }
+ result = self.send_iter_request('cifs-share-get-iter', api_args)
+ return self._has_records(result)
+
@na_utils.trace
def get_cifs_share_access(self, share_name):
api_args = {
@@ -3411,24 +3520,57 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
raise exception.NetAppException(msg)
@na_utils.trace
- def create_snapmirror(self, source_vserver, source_volume,
- destination_vserver, destination_volume,
- schedule=None, policy=None,
- relationship_type='data_protection'):
+ def create_snapmirror_vol(self, source_vserver, source_volume,
+ destination_vserver, destination_volume,
+ schedule=None, policy=None,
+ relationship_type='data_protection'):
+ """Creates a SnapMirror relationship between volumes."""
+ self._create_snapmirror(source_vserver, destination_vserver,
+ source_volume=source_volume,
+ destination_volume=destination_volume,
+ schedule=schedule, policy=policy,
+ relationship_type=relationship_type)
+
+ @na_utils.trace
+ def create_snapmirror_svm(self, source_vserver, destination_vserver,
+ schedule=None, policy=None,
+ relationship_type='data_protection',
+ identity_preserve=True,
+ max_transfer_rate=None):
+ """Creates a SnapMirror relationship between vServers."""
+ self._create_snapmirror(source_vserver, destination_vserver,
+ schedule=schedule, policy=policy,
+ relationship_type=relationship_type,
+ identity_preserve=identity_preserve,
+ max_transfer_rate=max_transfer_rate)
+
+ @na_utils.trace
+ def _create_snapmirror(self, source_vserver, destination_vserver,
+ source_volume=None, destination_volume=None,
+ schedule=None, policy=None,
+ relationship_type='data_protection',
+ identity_preserve=None, max_transfer_rate=None):
"""Creates a SnapMirror relationship (cDOT 8.2 or later only)."""
self._ensure_snapmirror_v2()
api_args = {
- 'source-volume': source_volume,
'source-vserver': source_vserver,
- 'destination-volume': destination_volume,
'destination-vserver': destination_vserver,
'relationship-type': relationship_type,
}
+ if source_volume:
+ api_args['source-volume'] = source_volume
+ if destination_volume:
+ api_args['destination-volume'] = destination_volume
if schedule:
api_args['schedule'] = schedule
if policy:
api_args['policy'] = policy
+ if identity_preserve is not None:
+ api_args['identity-preserve'] = (
+ 'true' if identity_preserve is True else 'false')
+ if max_transfer_rate is not None:
+ api_args['max-transfer-rate'] = max_transfer_rate
try:
self.send_request('snapmirror-create', api_args)
@@ -3436,19 +3578,60 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
if e.code != netapp_api.ERELATION_EXISTS:
raise
+ def _build_snapmirror_request(self, source_path=None, dest_path=None,
+ source_vserver=None, dest_vserver=None,
+ source_volume=None, dest_volume=None):
+ """Build a default SnapMirror request."""
+
+ req_args = {}
+ if source_path:
+ req_args['source-location'] = source_path
+ if dest_path:
+ req_args['destination-location'] = dest_path
+ if source_vserver:
+ req_args['source-vserver'] = source_vserver
+ if source_volume:
+ req_args['source-volume'] = source_volume
+ if dest_vserver:
+ req_args['destination-vserver'] = dest_vserver
+ if dest_volume:
+ req_args['destination-volume'] = dest_volume
+
+ return req_args
+
@na_utils.trace
- def initialize_snapmirror(self, source_vserver, source_volume,
- destination_vserver, destination_volume,
- source_snapshot=None, transfer_priority=None):
- """Initializes a SnapMirror relationship (cDOT 8.2 or later only)."""
+ def initialize_snapmirror_vol(self, source_vserver, source_volume,
+ dest_vserver, dest_volume,
+ source_snapshot=None,
+ transfer_priority=None):
+ """Initializes a SnapMirror relationship between volumes."""
+ return self._initialize_snapmirror(
+ source_vserver=source_vserver, dest_vserver=dest_vserver,
+ source_volume=source_volume, dest_volume=dest_volume,
+ source_snapshot=source_snapshot,
+ transfer_priority=transfer_priority)
+
+ @na_utils.trace
+ def initialize_snapmirror_svm(self, source_vserver, dest_vserver,
+ transfer_priority=None):
+        """Initializes a SnapMirror relationship between vServers."""
+ source_path = source_vserver + ':'
+ dest_path = dest_vserver + ':'
+ return self._initialize_snapmirror(source_path=source_path,
+ dest_path=dest_path,
+ transfer_priority=transfer_priority)
+
+ @na_utils.trace
+ def _initialize_snapmirror(self, source_path=None, dest_path=None,
+ source_vserver=None, dest_vserver=None,
+ source_volume=None, dest_volume=None,
+ source_snapshot=None, transfer_priority=None):
+ """Initializes a SnapMirror relationship."""
self._ensure_snapmirror_v2()
- api_args = {
- 'source-volume': source_volume,
- 'source-vserver': source_vserver,
- 'destination-volume': destination_volume,
- 'destination-vserver': destination_vserver,
- }
+ api_args = self._build_snapmirror_request(
+ source_path, dest_path, source_vserver,
+ dest_vserver, source_volume, dest_volume)
if source_snapshot:
api_args['source-snapshot'] = source_snapshot
if transfer_priority:
@@ -3469,54 +3652,109 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
return result_info
@na_utils.trace
- def release_snapmirror(self, source_vserver, source_volume,
- destination_vserver, destination_volume,
- relationship_info_only=False):
+ def release_snapmirror_vol(self, source_vserver, source_volume,
+ dest_vserver, dest_volume,
+ relationship_info_only=False):
"""Removes a SnapMirror relationship on the source endpoint."""
- self._ensure_snapmirror_v2()
-
- api_args = {
- 'query': {
- 'snapmirror-destination-info': {
- 'source-volume': source_volume,
- 'source-vserver': source_vserver,
- 'destination-volume': destination_volume,
- 'destination-vserver': destination_vserver,
- 'relationship-info-only': ('true' if relationship_info_only
- else 'false'),
- }
- }
- }
- self.send_request('snapmirror-release-iter', api_args)
+ self._release_snapmirror(source_vserver=source_vserver,
+ dest_vserver=dest_vserver,
+ source_volume=source_volume,
+ dest_volume=dest_volume,
+ relationship_info_only=relationship_info_only)
@na_utils.trace
- def quiesce_snapmirror(self, source_vserver, source_volume,
- destination_vserver, destination_volume):
+ def release_snapmirror_svm(self, source_vserver, dest_vserver,
+ relationship_info_only=False):
+ """Removes a SnapMirror relationship on the source endpoint."""
+ source_path = source_vserver + ':'
+ dest_path = dest_vserver + ':'
+ self._release_snapmirror(source_path=source_path, dest_path=dest_path,
+ relationship_info_only=relationship_info_only,
+ enable_tunneling=False)
+
+ @na_utils.trace
+ def _release_snapmirror(self, source_path=None, dest_path=None,
+ source_vserver=None, dest_vserver=None,
+ source_volume=None, dest_volume=None,
+ relationship_info_only=False,
+ enable_tunneling=True):
+ """Removes a SnapMirror relationship on the source endpoint."""
+ dest_info = self._build_snapmirror_request(
+ source_path, dest_path, source_vserver,
+ dest_vserver, source_volume, dest_volume)
+ self._ensure_snapmirror_v2()
+ dest_info['relationship-info-only'] = (
+ 'true' if relationship_info_only else 'false')
+ api_args = {
+ 'query': {
+ 'snapmirror-destination-info': dest_info
+ }
+ }
+ self.send_request('snapmirror-release-iter', api_args,
+ enable_tunneling=enable_tunneling)
+
+ @na_utils.trace
+ def quiesce_snapmirror_vol(self, source_vserver, source_volume,
+ dest_vserver, dest_volume):
+ """Disables future transfers to a SnapMirror destination."""
+ self._quiesce_snapmirror(source_vserver=source_vserver,
+ dest_vserver=dest_vserver,
+ source_volume=source_volume,
+ dest_volume=dest_volume)
+
+ @na_utils.trace
+ def quiesce_snapmirror_svm(self, source_vserver, dest_vserver):
+ """Disables future transfers to a SnapMirror destination."""
+ source_path = source_vserver + ':'
+ dest_path = dest_vserver + ':'
+ self._quiesce_snapmirror(source_path=source_path, dest_path=dest_path)
+
+ @na_utils.trace
+ def _quiesce_snapmirror(self, source_path=None, dest_path=None,
+ source_vserver=None, dest_vserver=None,
+ source_volume=None, dest_volume=None):
"""Disables future transfers to a SnapMirror destination."""
self._ensure_snapmirror_v2()
- api_args = {
- 'source-volume': source_volume,
- 'source-vserver': source_vserver,
- 'destination-volume': destination_volume,
- 'destination-vserver': destination_vserver,
- }
+ api_args = self._build_snapmirror_request(
+ source_path, dest_path, source_vserver,
+ dest_vserver, source_volume, dest_volume)
+
self.send_request('snapmirror-quiesce', api_args)
@na_utils.trace
- def abort_snapmirror(self, source_vserver, source_volume,
- destination_vserver, destination_volume,
- clear_checkpoint=False):
+ def abort_snapmirror_vol(self, source_vserver, source_volume,
+ dest_vserver, dest_volume,
+ clear_checkpoint=False):
+ """Stops ongoing transfers for a SnapMirror relationship."""
+ self._abort_snapmirror(source_vserver=source_vserver,
+ dest_vserver=dest_vserver,
+ source_volume=source_volume,
+ dest_volume=dest_volume,
+ clear_checkpoint=clear_checkpoint)
+
+ @na_utils.trace
+ def abort_snapmirror_svm(self, source_vserver, dest_vserver,
+ clear_checkpoint=False):
+ """Stops ongoing transfers for a SnapMirror relationship."""
+ source_path = source_vserver + ':'
+ dest_path = dest_vserver + ':'
+ self._abort_snapmirror(source_path=source_path, dest_path=dest_path,
+ clear_checkpoint=clear_checkpoint)
+
+ @na_utils.trace
+ def _abort_snapmirror(self, source_path=None, dest_path=None,
+ source_vserver=None, dest_vserver=None,
+ source_volume=None, dest_volume=None,
+ clear_checkpoint=False):
"""Stops ongoing transfers for a SnapMirror relationship."""
self._ensure_snapmirror_v2()
- api_args = {
- 'source-volume': source_volume,
- 'source-vserver': source_vserver,
- 'destination-volume': destination_volume,
- 'destination-vserver': destination_vserver,
- 'clear-checkpoint': 'true' if clear_checkpoint else 'false',
- }
+ api_args = self._build_snapmirror_request(
+ source_path, dest_path, source_vserver,
+ dest_vserver, source_volume, dest_volume)
+ api_args['clear-checkpoint'] = 'true' if clear_checkpoint else 'false'
+
try:
self.send_request('snapmirror-abort', api_args)
except netapp_api.NaApiError as e:
@@ -3524,33 +3762,64 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
raise
@na_utils.trace
- def break_snapmirror(self, source_vserver, source_volume,
- destination_vserver, destination_volume):
+ def break_snapmirror_vol(self, source_vserver, source_volume,
+ dest_vserver, dest_volume):
+ """Breaks a data protection SnapMirror relationship."""
+ self._break_snapmirror(source_vserver=source_vserver,
+ dest_vserver=dest_vserver,
+ source_volume=source_volume,
+ dest_volume=dest_volume)
+
+ @na_utils.trace
+ def break_snapmirror_svm(self, source_vserver=None, dest_vserver=None):
+ """Breaks a data protection SnapMirror relationship."""
+ source_path = source_vserver + ':' if source_vserver else None
+ dest_path = dest_vserver + ':' if dest_vserver else None
+ self._break_snapmirror(source_path=source_path, dest_path=dest_path)
+
+ @na_utils.trace
+ def _break_snapmirror(self, source_path=None, dest_path=None,
+ source_vserver=None, dest_vserver=None,
+ source_volume=None, dest_volume=None):
"""Breaks a data protection SnapMirror relationship."""
self._ensure_snapmirror_v2()
- api_args = {
- 'source-volume': source_volume,
- 'source-vserver': source_vserver,
- 'destination-volume': destination_volume,
- 'destination-vserver': destination_vserver,
- }
- self.send_request('snapmirror-break', api_args)
+ api_args = self._build_snapmirror_request(
+ source_path, dest_path, source_vserver,
+ dest_vserver, source_volume, dest_volume)
+ try:
+ self.send_request('snapmirror-break', api_args)
+ except netapp_api.NaApiError as e:
+ break_in_progress = 'SnapMirror operation status is "Breaking"'
+ if not (e.code == netapp_api.ESVMDR_CANNOT_PERFORM_OP_FOR_STATUS
+ and break_in_progress in e.message):
+ raise
@na_utils.trace
- def modify_snapmirror(self, source_vserver, source_volume,
- destination_vserver, destination_volume,
- schedule=None, policy=None, tries=None,
- max_transfer_rate=None):
+ def modify_snapmirror_vol(self, source_vserver, source_volume,
+ dest_vserver, dest_volume,
+ schedule=None, policy=None, tries=None,
+ max_transfer_rate=None):
+ """Modifies a SnapMirror relationship between volumes."""
+ self._modify_snapmirror(source_vserver=source_vserver,
+ dest_vserver=dest_vserver,
+ source_volume=source_volume,
+ dest_volume=dest_volume,
+ schedule=schedule, policy=policy, tries=tries,
+ max_transfer_rate=max_transfer_rate)
+
+ @na_utils.trace
+ def _modify_snapmirror(self, source_path=None, dest_path=None,
+ source_vserver=None, dest_vserver=None,
+ source_volume=None, dest_volume=None,
+ schedule=None, policy=None, tries=None,
+ max_transfer_rate=None):
"""Modifies a SnapMirror relationship."""
self._ensure_snapmirror_v2()
- api_args = {
- 'source-volume': source_volume,
- 'source-vserver': source_vserver,
- 'destination-volume': destination_volume,
- 'destination-vserver': destination_vserver,
- }
+ api_args = self._build_snapmirror_request(
+ source_path, dest_path, source_vserver,
+ dest_vserver, source_volume, dest_volume)
if schedule:
api_args['schedule'] = schedule
if policy:
@@ -3563,35 +3832,66 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
self.send_request('snapmirror-modify', api_args)
@na_utils.trace
- def delete_snapmirror(self, source_vserver, source_volume,
- destination_vserver, destination_volume):
+ def delete_snapmirror_vol(self, source_vserver, source_volume,
+ dest_vserver, dest_volume):
+ """Destroys a SnapMirror relationship between volumes."""
+ self._delete_snapmirror(source_vserver=source_vserver,
+ dest_vserver=dest_vserver,
+ source_volume=source_volume,
+ dest_volume=dest_volume)
+
+ @na_utils.trace
+ def delete_snapmirror_svm(self, source_vserver, dest_vserver):
+ """Destroys a SnapMirror relationship between vServers."""
+ source_path = source_vserver + ':'
+ dest_path = dest_vserver + ':'
+ self._delete_snapmirror(source_path=source_path, dest_path=dest_path)
+
+ @na_utils.trace
+ def _delete_snapmirror(self, source_path=None, dest_path=None,
+ source_vserver=None, dest_vserver=None,
+ source_volume=None, dest_volume=None):
"""Destroys a SnapMirror relationship."""
self._ensure_snapmirror_v2()
+ snapmirror_info = self._build_snapmirror_request(
+ source_path, dest_path, source_vserver,
+ dest_vserver, source_volume, dest_volume)
+
api_args = {
'query': {
- 'snapmirror-info': {
- 'source-volume': source_volume,
- 'source-vserver': source_vserver,
- 'destination-volume': destination_volume,
- 'destination-vserver': destination_vserver,
- }
+ 'snapmirror-info': snapmirror_info
}
}
self.send_request('snapmirror-destroy-iter', api_args)
@na_utils.trace
- def update_snapmirror(self, source_vserver, source_volume,
- destination_vserver, destination_volume):
+ def update_snapmirror_vol(self, source_vserver, source_volume,
+ dest_vserver, dest_volume):
+ """Schedules a snapmirror update between volumes."""
+ self._update_snapmirror(source_vserver=source_vserver,
+ dest_vserver=dest_vserver,
+ source_volume=source_volume,
+ dest_volume=dest_volume)
+
+ @na_utils.trace
+ def update_snapmirror_svm(self, source_vserver, dest_vserver):
+ """Schedules a snapmirror update between vServers."""
+ source_path = source_vserver + ':'
+ dest_path = dest_vserver + ':'
+ self._update_snapmirror(source_path=source_path, dest_path=dest_path)
+
+ @na_utils.trace
+ def _update_snapmirror(self, source_path=None, dest_path=None,
+ source_vserver=None, dest_vserver=None,
+ source_volume=None, dest_volume=None):
"""Schedules a snapmirror update."""
self._ensure_snapmirror_v2()
- api_args = {
- 'source-volume': source_volume,
- 'source-vserver': source_vserver,
- 'destination-volume': destination_volume,
- 'destination-vserver': destination_vserver,
- }
+ api_args = self._build_snapmirror_request(
+ source_path, dest_path, source_vserver,
+ dest_vserver, source_volume, dest_volume)
+
try:
self.send_request('snapmirror-update', api_args)
except netapp_api.NaApiError as e:
@@ -3600,17 +3900,32 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
raise
@na_utils.trace
- def resume_snapmirror(self, source_vserver, source_volume,
- destination_vserver, destination_volume):
+ def resume_snapmirror_vol(self, source_vserver, source_volume,
+ dest_vserver, dest_volume):
+ """Resume a SnapMirror relationship if it is quiesced."""
+ self._resume_snapmirror(source_vserver=source_vserver,
+ dest_vserver=dest_vserver,
+ source_volume=source_volume,
+ dest_volume=dest_volume)
+
+ @na_utils.trace
+ def resume_snapmirror_svm(self, source_vserver, dest_vserver):
+ """Resume a SnapMirror relationship if it is quiesced."""
+ source_path = source_vserver + ':'
+ dest_path = dest_vserver + ':'
+ self._resume_snapmirror(source_path=source_path, dest_path=dest_path)
+
+ @na_utils.trace
+ def _resume_snapmirror(self, source_path=None, dest_path=None,
+ source_vserver=None, dest_vserver=None,
+ source_volume=None, dest_volume=None):
"""Resume a SnapMirror relationship if it is quiesced."""
self._ensure_snapmirror_v2()
- api_args = {
- 'source-volume': source_volume,
- 'source-vserver': source_vserver,
- 'destination-volume': destination_volume,
- 'destination-vserver': destination_vserver,
- }
+ api_args = self._build_snapmirror_request(
+ source_path, dest_path, source_vserver,
+ dest_vserver, source_volume, dest_volume)
+
try:
self.send_request('snapmirror-resume', api_args)
except netapp_api.NaApiError as e:
@@ -3618,42 +3933,49 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
raise
@na_utils.trace
- def resync_snapmirror(self, source_vserver, source_volume,
- destination_vserver, destination_volume):
+ def resync_snapmirror_vol(self, source_vserver, source_volume,
+ dest_vserver, dest_volume):
+ """Resync a SnapMirror relationship between volumes."""
+ self._resync_snapmirror(source_vserver=source_vserver,
+ dest_vserver=dest_vserver,
+ source_volume=source_volume,
+ dest_volume=dest_volume)
+
+ @na_utils.trace
+ def resync_snapmirror_svm(self, source_vserver, dest_vserver):
+ """Resync a SnapMirror relationship between vServers."""
+ source_path = source_vserver + ':'
+ dest_path = dest_vserver + ':'
+ self._resync_snapmirror(source_path=source_path, dest_path=dest_path)
+
+ @na_utils.trace
+ def _resync_snapmirror(self, source_path=None, dest_path=None,
+ source_vserver=None, dest_vserver=None,
+ source_volume=None, dest_volume=None):
"""Resync a SnapMirror relationship."""
self._ensure_snapmirror_v2()
- api_args = {
- 'source-volume': source_volume,
- 'source-vserver': source_vserver,
- 'destination-volume': destination_volume,
- 'destination-vserver': destination_vserver,
- }
+ api_args = self._build_snapmirror_request(
+ source_path, dest_path, source_vserver,
+ dest_vserver, source_volume, dest_volume)
+
self.send_request('snapmirror-resync', api_args)
@na_utils.trace
- def _get_snapmirrors(self, source_vserver=None, source_volume=None,
- destination_vserver=None, destination_volume=None,
+ def _get_snapmirrors(self, source_path=None, dest_path=None,
+ source_vserver=None, source_volume=None,
+ dest_vserver=None, dest_volume=None,
desired_attributes=None):
+ """Gets one or more SnapMirror relationships."""
- query = None
- if (source_vserver or source_volume or destination_vserver or
- destination_volume):
- query = {'snapmirror-info': {}}
- if source_volume:
- query['snapmirror-info']['source-volume'] = source_volume
- if destination_volume:
- query['snapmirror-info']['destination-volume'] = (
- destination_volume)
- if source_vserver:
- query['snapmirror-info']['source-vserver'] = source_vserver
- if destination_vserver:
- query['snapmirror-info']['destination-vserver'] = (
- destination_vserver)
-
+ snapmirror_info = self._build_snapmirror_request(
+ source_path, dest_path, source_vserver,
+ dest_vserver, source_volume, dest_volume)
api_args = {}
- if query:
- api_args['query'] = query
+ if snapmirror_info:
+ api_args['query'] = {
+ 'snapmirror-info': snapmirror_info
+ }
if desired_attributes:
api_args['desired-attributes'] = desired_attributes
@@ -3664,8 +3986,18 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
return result.get_child_by_name('attributes-list').get_children()
@na_utils.trace
- def get_snapmirrors(self, source_vserver, source_volume,
- destination_vserver, destination_volume,
+ def get_snapmirrors_svm(self, source_vserver=None, dest_vserver=None,
+ desired_attributes=None):
+ source_path = source_vserver + ':' if source_vserver else None
+ dest_path = dest_vserver + ':' if dest_vserver else None
+ return self.get_snapmirrors(source_path=source_path,
+ dest_path=dest_path,
+ desired_attributes=desired_attributes)
+
+ @na_utils.trace
+ def get_snapmirrors(self, source_path=None, dest_path=None,
+ source_vserver=None, dest_vserver=None,
+ source_volume=None, dest_volume=None,
desired_attributes=None):
"""Gets one or more SnapMirror relationships.
@@ -3680,10 +4012,12 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
}
result = self._get_snapmirrors(
+ source_path=source_path,
+ dest_path=dest_path,
source_vserver=source_vserver,
source_volume=source_volume,
- destination_vserver=destination_vserver,
- destination_volume=destination_volume,
+ dest_vserver=dest_vserver,
+ dest_volume=dest_volume,
desired_attributes=desired_attributes)
snapmirrors = []
@@ -3697,6 +4031,79 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
return snapmirrors
+ @na_utils.trace
+ def _get_snapmirror_destinations(self, source_path=None, dest_path=None,
+ source_vserver=None, source_volume=None,
+ dest_vserver=None, dest_volume=None,
+ desired_attributes=None):
+        """Gets one or more SnapMirror relationships at the source endpoint."""
+
+ snapmirror_info = self._build_snapmirror_request(
+ source_path, dest_path, source_vserver,
+ dest_vserver, source_volume, dest_volume)
+ api_args = {}
+ if snapmirror_info:
+ api_args['query'] = {
+ 'snapmirror-destination-info': snapmirror_info
+ }
+ if desired_attributes:
+ api_args['desired-attributes'] = desired_attributes
+
+ result = self.send_iter_request('snapmirror-get-destination-iter',
+ api_args)
+ if not self._has_records(result):
+ return []
+ else:
+ return result.get_child_by_name('attributes-list').get_children()
+
+ @na_utils.trace
+ def get_snapmirror_destinations(self, source_path=None, dest_path=None,
+ source_vserver=None, dest_vserver=None,
+ source_volume=None, dest_volume=None,
+ desired_attributes=None):
+        """Gets one or more SnapMirror relationships at the source endpoint.
+
+ Either the source or destination info may be omitted.
+ Desired attributes should be a flat list of attribute names.
+ """
+ self._ensure_snapmirror_v2()
+
+ if desired_attributes is not None:
+ desired_attributes = {
+ 'snapmirror-destination-info': {
+ attr: None for attr in desired_attributes},
+ }
+
+ result = self._get_snapmirror_destinations(
+ source_path=source_path,
+ dest_path=dest_path,
+ source_vserver=source_vserver,
+ source_volume=source_volume,
+ dest_vserver=dest_vserver,
+ dest_volume=dest_volume,
+ desired_attributes=desired_attributes)
+
+ snapmirrors = []
+
+ for snapmirror_info in result:
+ snapmirror = {}
+ for child in snapmirror_info.get_children():
+ name = self._strip_xml_namespace(child.get_name())
+ snapmirror[name] = child.get_content()
+ snapmirrors.append(snapmirror)
+
+ return snapmirrors
+
+ @na_utils.trace
+ def get_snapmirror_destinations_svm(self, source_vserver=None,
+ dest_vserver=None,
+ desired_attributes=None):
+ source_path = source_vserver + ':' if source_vserver else None
+ dest_path = dest_vserver + ':' if dest_vserver else None
+ return self.get_snapmirror_destinations(
+ source_path=source_path, dest_path=dest_path,
+ desired_attributes=desired_attributes)
+
def volume_has_snapmirror_relationships(self, volume):
"""Return True if snapmirror relationships exist for a given volume.
@@ -3706,11 +4113,13 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
try:
# Check if volume is a source snapmirror volume
snapmirrors = self.get_snapmirrors(
- volume['owning-vserver-name'], volume['name'], None, None)
+ source_vserver=volume['owning-vserver-name'],
+ source_volume=volume['name'])
# Check if volume is a destination snapmirror volume
if not snapmirrors:
snapmirrors = self.get_snapmirrors(
- None, None, volume['owning-vserver-name'], volume['name'])
+ dest_vserver=volume['owning-vserver-name'],
+ dest_volume=volume['name'])
has_snapmirrors = len(snapmirrors) > 0
except netapp_api.NaApiError:
@@ -3743,6 +4152,71 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
return [snapshot_info.get_child_content('name')
for snapshot_info in attributes_list.get_children()]
@na_utils.trace
def create_snapmirror_policy(self, policy_name, type='async_mirror',
                             discard_network_info=True,
                             preserve_snapshots=True):
    """Creates a SnapMirror policy for a vServer.

    :param policy_name: name for the new policy.
    :param type: SnapMirror policy type (default 'async_mirror').
    :param discard_network_info: if True, exclude network configuration
        from vserver replication.
    :param preserve_snapshots: if True, add a rule replicating all
        source snapshots.
    """
    self._ensure_snapmirror_v2()

    create_args = {
        'policy-name': policy_name,
        'type': type,
    }
    if discard_network_info:
        # Do not replicate network configuration to the destination SVM.
        create_args['discard-configs'] = {
            'svmdr-config-obj': 'network'
        }
    self.send_request('snapmirror-policy-create', create_args)

    if preserve_snapshots:
        rule_args = {
            'policy-name': policy_name,
            'snapmirror-label': 'all_source_snapshots',
            'keep': '1',
            'preserve': 'false',
        }
        self.send_request('snapmirror-policy-add-rule', rule_args)
+
@na_utils.trace
def delete_snapmirror_policy(self, policy_name):
    """Deletes a SnapMirror policy; a missing policy is not an error."""
    try:
        self.send_request('snapmirror-policy-delete',
                          {'policy-name': policy_name})
    except netapp_api.NaApiError as e:
        # Deletion is idempotent: ignore "object not found".
        if e.code != netapp_api.EOBJECTNOTFOUND:
            raise
+
@na_utils.trace
def get_snapmirror_policies(self, vserver_name):
    """Get all SnapMirror policy names associated to a vServer."""
    api_args = {
        'query': {
            'snapmirror-policy-info': {'vserver-name': vserver_name},
        },
        'desired-attributes': {
            'snapmirror-policy-info': {'policy-name': None},
        },
    }
    result = self.send_iter_request('snapmirror-policy-get-iter', api_args)
    # Fall back to an empty element when no policies were returned.
    policies_elem = (result.get_child_by_name('attributes-list')
                     or netapp_api.NaElement('none'))
    return [policy.get_child_content('policy-name')
            for policy in policies_elem.get_children()]
+
@na_utils.trace
def start_volume_move(self, volume_name, vserver, destination_aggregate,
cutover_action='wait', encrypt_destination=None):
@@ -4086,3 +4560,34 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
nfs_config[arg] = nfs_info_elem.get_child_content(arg)
return nfs_config
+
@na_utils.trace
def start_vserver(self, vserver, force=None):
    """Starts a vServer; an already-started vServer is not an error."""
    api_args = {'vserver-name': vserver}
    if force is not None:
        api_args['force'] = 'true' if force is True else 'false'

    try:
        # Cluster-scoped operation: vserver tunneling must be disabled.
        self.send_request('vserver-start', api_args,
                          enable_tunneling=False)
    except netapp_api.NaApiError as e:
        if e.code != netapp_api.EVSERVERALREADYSTARTED:
            raise
        msg = _("Vserver %s is already started.")
        LOG.debug(msg, vserver)
+
@na_utils.trace
def stop_vserver(self, vserver):
    """Stops a vServer."""
    # Cluster-scoped operation: vserver tunneling must be disabled.
    self.send_request('vserver-stop', {'vserver-name': vserver},
                      enable_tunneling=False)
+
def is_svm_dr_supported(self):
    """Whether this ONTAP version supports SVM DR replication."""
    return self.features.SVM_DR
diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py b/manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py
index 4fb5125aa6..c05055f4ba 100644
--- a/manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py
+++ b/manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py
@@ -94,32 +94,48 @@ class DataMotionSession(object):
def _get_backend_qos_policy_group_name(self, share):
"""Get QoS policy name according to QoS policy group name template."""
- __, config = self._get_backend_config_obj(share)
+ __, config = self.get_backend_name_and_config_obj(share['host'])
return config.netapp_qos_policy_group_name_template % {
'share_id': share['id'].replace('-', '_')}
def _get_backend_snapmirror_policy_name_svm(self, share_server_id,
                                            backend_name):
    """Build the SVM DR policy name from the backend's name template."""
    config = get_backend_configuration(backend_name)
    template = config.netapp_snapmirror_policy_name_svm_template
    return template % {
        'share_server_id': share_server_id.replace('-', '_')}
+
def get_vserver_from_share_server(self, share_server):
    """Return the vserver name stored in the share server, if any."""
    details = share_server.get('backend_details')
    return details.get('vserver_name') if details else None
+
def get_vserver_from_share(self, share_obj):
share_server = share_obj.get('share_server')
if share_server:
- backend_details = share_server.get('backend_details')
- if backend_details:
- return backend_details.get('vserver_name')
+ return self.get_vserver_from_share_server(share_server)
- def _get_backend_config_obj(self, share_obj):
- backend_name = share_utils.extract_host(
- share_obj['host'], level='backend_name')
+ def get_backend_name_and_config_obj(self, host):
+ backend_name = share_utils.extract_host(host, level='backend_name')
config = get_backend_configuration(backend_name)
return backend_name, config
def get_backend_info_for_share(self, share_obj):
- backend_name, config = self._get_backend_config_obj(share_obj)
+ backend_name, config = self.get_backend_name_and_config_obj(
+ share_obj['host'])
vserver = (self.get_vserver_from_share(share_obj) or
config.netapp_vserver)
- volume_name = self._get_backend_volume_name(
- config, share_obj)
+ volume_name = self._get_backend_volume_name(config, share_obj)
return volume_name, vserver, backend_name
def get_client_and_vserver_name(self, share_server):
    """Return (api_client, vserver_name) for a share server's backend."""
    vserver = self.get_vserver_from_share_server(share_server)
    backend_name, __ = self.get_backend_name_and_config_obj(
        share_server.get('host'))
    client = get_client_for_backend(backend_name, vserver_name=vserver)
    return client, vserver
+
def get_snapmirrors(self, source_share_obj, dest_share_obj):
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
@@ -130,8 +146,8 @@ class DataMotionSession(object):
source_share_obj)
snapmirrors = dest_client.get_snapmirrors(
- src_vserver, src_volume_name,
- dest_vserver, dest_volume_name,
+ source_vserver=src_vserver, dest_vserver=dest_vserver,
+ source_volume=src_volume_name, dest_volume=dest_volume_name,
desired_attributes=['relationship-status',
'mirror-state',
'source-vserver',
@@ -155,17 +171,17 @@ class DataMotionSession(object):
# 1. Create SnapMirror relationship
# TODO(ameade): Change the schedule from hourly to a config value
- dest_client.create_snapmirror(src_vserver,
- src_volume_name,
- dest_vserver,
- dest_volume_name,
- schedule='hourly')
-
- # 2. Initialize async transfer of the initial data
- dest_client.initialize_snapmirror(src_vserver,
+ dest_client.create_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
- dest_volume_name)
+ dest_volume_name,
+ schedule='hourly')
+
+ # 2. Initialize async transfer of the initial data
+ dest_client.initialize_snapmirror_vol(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
def delete_snapmirror(self, source_share_obj, dest_share_obj,
release=True):
@@ -185,21 +201,21 @@ class DataMotionSession(object):
# 1. Abort any ongoing transfers
try:
- dest_client.abort_snapmirror(src_vserver,
- src_volume_name,
- dest_vserver,
- dest_volume_name,
- clear_checkpoint=False)
+ dest_client.abort_snapmirror_vol(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name,
+ clear_checkpoint=False)
except netapp_api.NaApiError:
# Snapmirror is already deleted
pass
# 2. Delete SnapMirror Relationship and cleanup destination snapshots
try:
- dest_client.delete_snapmirror(src_vserver,
- src_volume_name,
- dest_vserver,
- dest_volume_name)
+ dest_client.delete_snapmirror_vol(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
except netapp_api.NaApiError as e:
with excutils.save_and_reraise_exception() as exc_context:
if (e.code == netapp_api.EOBJECTNOTFOUND or
@@ -218,10 +234,10 @@ class DataMotionSession(object):
# 3. Cleanup SnapMirror relationship on source
try:
if src_client:
- src_client.release_snapmirror(src_vserver,
- src_volume_name,
- dest_vserver,
- dest_volume_name)
+ src_client.release_snapmirror_vol(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
except netapp_api.NaApiError as e:
with excutils.save_and_reraise_exception() as exc_context:
if (e.code == netapp_api.EOBJECTNOTFOUND or
@@ -242,50 +258,81 @@ class DataMotionSession(object):
source_share_obj)
# Update SnapMirror
- dest_client.update_snapmirror(src_vserver,
- src_volume_name,
- dest_vserver,
- dest_volume_name)
+ dest_client.update_snapmirror_vol(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
+
def quiesce_then_abort_svm(self, source_share_server, dest_share_server):
    """Quiesce the vserver SnapMirror, aborting transfers on timeout.

    Waits up to netapp_snapmirror_quiesce_timeout seconds for the
    relationship to reach 'quiesced'; if it never does, any ongoing
    transfer is aborted without clearing the restart checkpoint.
    """
    # Only the source vserver *name* is needed here; the original code
    # also built a source-backend API client that was never used.
    source_vserver = self.get_vserver_from_share_server(
        source_share_server)
    dest_client, dest_vserver = self.get_client_and_vserver_name(
        dest_share_server)

    # 1. Attempt to quiesce, then abort
    dest_client.quiesce_snapmirror_svm(source_vserver, dest_vserver)

    dest_backend = share_utils.extract_host(dest_share_server['host'],
                                            level='backend_name')
    config = get_backend_configuration(dest_backend)
    retries = config.netapp_snapmirror_quiesce_timeout / 5

    @utils.retry(exception.ReplicationException, interval=5,
                 retries=retries, backoff_rate=1)
    def wait_for_quiesced():
        snapmirror = dest_client.get_snapmirrors_svm(
            source_vserver=source_vserver, dest_vserver=dest_vserver,
            desired_attributes=['relationship-status', 'mirror-state']
        )[0]
        if snapmirror.get('relationship-status') != 'quiesced':
            raise exception.ReplicationException(
                reason="Snapmirror relationship is not quiesced.")

    try:
        wait_for_quiesced()
    except exception.ReplicationException:
        dest_client.abort_snapmirror_svm(source_vserver,
                                         dest_vserver,
                                         clear_checkpoint=False)
def quiesce_then_abort(self, source_share_obj, dest_share_obj):
- dest_volume_name, dest_vserver, dest_backend = (
+ dest_volume, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
- src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
+ src_volume, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
# 1. Attempt to quiesce, then abort
- dest_client.quiesce_snapmirror(src_vserver,
- src_volume_name,
- dest_vserver,
- dest_volume_name)
+ dest_client.quiesce_snapmirror_vol(src_vserver,
+ src_volume,
+ dest_vserver,
+ dest_volume)
- config = get_backend_configuration(share_utils.extract_host(
- source_share_obj['host'], level='backend_name'))
+ config = get_backend_configuration(dest_backend)
retries = config.netapp_snapmirror_quiesce_timeout / 5
@utils.retry(exception.ReplicationException, interval=5,
retries=retries, backoff_rate=1)
def wait_for_quiesced():
snapmirror = dest_client.get_snapmirrors(
- src_vserver, src_volume_name, dest_vserver,
- dest_volume_name, desired_attributes=['relationship-status',
- 'mirror-state']
+ source_vserver=src_vserver, dest_vserver=dest_vserver,
+ source_volume=src_volume, dest_volume=dest_volume,
+ desired_attributes=['relationship-status', 'mirror-state']
)[0]
if snapmirror.get('relationship-status') != 'quiesced':
raise exception.ReplicationException(
- reason=("Snapmirror relationship is not quiesced."))
+ reason="Snapmirror relationship is not quiesced.")
try:
wait_for_quiesced()
except exception.ReplicationException:
- dest_client.abort_snapmirror(src_vserver,
- src_volume_name,
- dest_vserver,
- dest_volume_name,
- clear_checkpoint=False)
+ dest_client.abort_snapmirror_vol(src_vserver,
+ src_volume,
+ dest_vserver,
+ dest_volume,
+ clear_checkpoint=False)
def break_snapmirror(self, source_share_obj, dest_share_obj, mount=True):
"""Breaks SnapMirror relationship.
@@ -307,10 +354,10 @@ class DataMotionSession(object):
self.quiesce_then_abort(source_share_obj, dest_share_obj)
# 2. Break SnapMirror
- dest_client.break_snapmirror(src_vserver,
- src_volume_name,
- dest_vserver,
- dest_volume_name)
+ dest_client.break_snapmirror_vol(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
# 3. Mount the destination volume and create a junction path
if mount:
@@ -326,10 +373,10 @@ class DataMotionSession(object):
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
- dest_client.resync_snapmirror(src_vserver,
- src_volume_name,
- dest_vserver,
- dest_volume_name)
+ dest_client.resync_snapmirror_vol(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
def resume_snapmirror(self, source_share_obj, dest_share_obj):
"""Resume SnapMirror relationship from a quiesced state."""
@@ -341,10 +388,10 @@ class DataMotionSession(object):
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
- dest_client.resume_snapmirror(src_vserver,
- src_volume_name,
- dest_vserver,
- dest_volume_name)
+ dest_client.resume_snapmirror_vol(src_vserver,
+ src_volume_name,
+ dest_vserver,
+ dest_volume_name)
def change_snapmirror_source(self, replica,
orig_source_replica,
@@ -400,16 +447,16 @@ class DataMotionSession(object):
# 3. create
# TODO(ameade): Update the schedule if needed.
- replica_client.create_snapmirror(new_src_vserver,
- new_src_volume_name,
- replica_vserver,
- replica_volume_name,
- schedule='hourly')
+ replica_client.create_snapmirror_vol(new_src_vserver,
+ new_src_volume_name,
+ replica_vserver,
+ replica_volume_name,
+ schedule='hourly')
# 4. resync
- replica_client.resync_snapmirror(new_src_vserver,
- new_src_volume_name,
- replica_vserver,
- replica_volume_name)
+ replica_client.resync_snapmirror_vol(new_src_vserver,
+ new_src_volume_name,
+ replica_vserver,
+ replica_volume_name)
@na_utils.trace
def remove_qos_on_old_active_replica(self, orig_active_replica):
@@ -430,3 +477,254 @@ class DataMotionSession(object):
"for replica %s to unset QoS policy and mark "
"the QoS policy group for deletion.",
orig_active_replica['id'])
+
def create_snapmirror_svm(self, source_share_server,
                          dest_share_server):
    """Sets up a SnapMirror relationship between two vServers.

    1. Create a SnapMirror policy for SVM DR
    2. Create SnapMirror relationship
    3. Initialize data transfer asynchronously
    """
    dest_client, dest_vserver = self.get_client_and_vserver_name(
        dest_share_server)
    src_vserver = self.get_vserver_from_share_server(source_share_server)

    # 1. Create SnapMirror policy for SVM DR
    dest_backend_name = share_utils.extract_host(dest_share_server['host'],
                                                 level='backend_name')
    policy_name = self._get_backend_snapmirror_policy_name_svm(
        dest_share_server['id'],
        dest_backend_name,
    )
    dest_client.create_snapmirror_policy(policy_name)

    # 2. Create SnapMirror relationship
    dest_client.create_snapmirror_svm(src_vserver,
                                      dest_vserver,
                                      policy=policy_name,
                                      schedule='hourly')

    # 3. Initialize async transfer of the initial data (step number was
    # previously mislabeled as "2" twice).
    dest_client.initialize_snapmirror_svm(src_vserver,
                                          dest_vserver)
+
def get_snapmirrors_svm(self, source_share_server, dest_share_server):
    """Get SnapMirrors between two vServers."""
    dest_client, dest_vserver = self.get_client_and_vserver_name(
        dest_share_server)
    src_vserver = self.get_vserver_from_share_server(source_share_server)

    wanted_attributes = ['relationship-status',
                         'mirror-state',
                         'last-transfer-end-timestamp']
    return dest_client.get_snapmirrors_svm(
        source_vserver=src_vserver, dest_vserver=dest_vserver,
        desired_attributes=wanted_attributes)
+
def get_snapmirror_destinations_svm(self, source_share_server,
                                    dest_share_server):
    """Get SnapMirror destinations between two vServers.

    (Docstring previously said "Get SnapMirrors", copy-pasted from
    get_snapmirrors_svm; this method lists destination entries.)
    """
    dest_client, dest_vserver = self.get_client_and_vserver_name(
        dest_share_server)
    src_vserver = self.get_vserver_from_share_server(source_share_server)

    snapmirrors = dest_client.get_snapmirror_destinations_svm(
        source_vserver=src_vserver, dest_vserver=dest_vserver)
    return snapmirrors
+
def update_snapmirror_svm(self, source_share_server, dest_share_server):
    """Schedule a SnapMirror update to happen on the backend."""
    dest_client, dest_vserver = self.get_client_and_vserver_name(
        dest_share_server)
    src_vserver = self.get_vserver_from_share_server(source_share_server)
    # Trigger an incremental transfer of pending changes.
    dest_client.update_snapmirror_svm(src_vserver, dest_vserver)
+
def quiesce_and_break_snapmirror_svm(self, source_share_server,
                                     dest_share_server):
    """Abort and break a SnapMirror relationship between vServers.

    1. Quiesce SnapMirror
    2. Break SnapMirror
    """
    destination_client, destination_vserver = (
        self.get_client_and_vserver_name(dest_share_server))
    source_vserver = self.get_vserver_from_share_server(
        source_share_server)

    # Step 1: quiesce the relationship, aborting transfers on timeout.
    self.quiesce_then_abort_svm(source_share_server, dest_share_server)

    # Step 2: break the SnapMirror relationship.
    destination_client.break_snapmirror_svm(source_vserver,
                                            destination_vserver)
+
def cancel_snapmirror_svm(self, source_share_server, dest_share_server):
    """Cancels SnapMirror relationship between vServers."""
    dest_backend = share_utils.extract_host(dest_share_server['host'],
                                            level='backend_name')
    dest_config = get_backend_configuration(dest_backend)
    server_timeout = (
        dest_config.netapp_server_migration_state_change_timeout)
    dest_client, dest_vserver = self.get_client_and_vserver_name(
        dest_share_server)

    snapmirrors = self.get_snapmirrors_svm(source_share_server,
                                           dest_share_server)
    if not snapmirrors:
        dest_info = dest_client.get_vserver_info(dest_vserver)
        if dest_info is None:
            # Nothing to cancel since the destination does not exist.
            return
        if dest_info.get('subtype') == 'dp_destination':
            # Corner case: no snapmirror relationship was found but the
            # destination vserver is stuck in DP mode. Convert it to
            # 'default' so its resources can be released later.
            self.convert_svm_to_default_subtype(dest_vserver,
                                                dest_client,
                                                timeout=server_timeout)
        return

    # 1. Attempt to quiesce and break snapmirror
    self.quiesce_and_break_snapmirror_svm(source_share_server,
                                          dest_share_server)

    # Wait until the destination vserver is promoted to 'default' and
    # state 'running' before starting to shut down the source.
    self.wait_for_vserver_state(dest_vserver, dest_client,
                                subtype='default', state='running',
                                operational_state='stopped',
                                timeout=server_timeout)
    # 2. Delete SnapMirror
    self.delete_snapmirror_svm(source_share_server, dest_share_server)
+
def convert_svm_to_default_subtype(self, vserver_name, client,
                                   is_dest_path=True, timeout=300):
    """Break the vserver SnapMirror until its subtype becomes 'default'.

    Polls every 10 seconds up to the timeout; raises NetAppException
    when the vserver never leaves the DP subtype.
    """
    interval = 10
    retries = (timeout / interval or 1)

    @utils.retry(exception.VserverNotReady, interval=interval,
                 retries=retries, backoff_rate=1)
    def wait_for_state():
        vserver_info = client.get_vserver_info(vserver_name)
        if vserver_info.get('subtype') != 'default':
            # Keep issuing the break until the subtype flips, addressing
            # the vserver as source or destination endpoint as requested.
            if is_dest_path:
                break_kwargs = {'dest_vserver': vserver_name}
            else:
                break_kwargs = {'source_vserver': vserver_name}
            client.break_snapmirror_svm(**break_kwargs)
            raise exception.VserverNotReady(vserver=vserver_name)

    try:
        wait_for_state()
    except exception.VserverNotReady:
        msg = _("Vserver %s did not reach the expected state. Retries "
                "exhausted. Aborting.") % vserver_name
        raise exception.NetAppException(message=msg)
+
def delete_snapmirror_svm(self, src_share_server, dest_share_server,
                          release=True):
    """Ensures all information about a SnapMirror relationship is removed.

    1. Abort SnapMirror
    2. Delete the SnapMirror
    3. Release SnapMirror to cleanup SnapMirror metadata and snapshots
    """
    src_client, src_vserver = self.get_client_and_vserver_name(
        src_share_server)
    dest_client, dest_vserver = self.get_client_and_vserver_name(
        dest_share_server)

    # 1. Abort any ongoing transfers
    try:
        dest_client.abort_snapmirror_svm(src_vserver, dest_vserver)
    except netapp_api.NaApiError:
        # SnapMirror is already deleted
        pass

    # 2. Delete SnapMirror Relationship and cleanup destination snapshots
    try:
        dest_client.delete_snapmirror_svm(src_vserver, dest_vserver)
    except netapp_api.NaApiError as e:
        with excutils.save_and_reraise_exception() as exc_context:
            missing = (
                e.code in (netapp_api.EOBJECTNOTFOUND,
                           netapp_api.ESOURCE_IS_DIFFERENT) or
                "(entry doesn't exist)" in e.message)
            if missing:
                LOG.info('No snapmirror relationship to delete')
                exc_context.reraise = False

    # 3. Release SnapMirror
    if release:
        src_backend = share_utils.extract_host(src_share_server['host'],
                                               level='backend_name')
        release_timeout = get_backend_configuration(
            src_backend).netapp_snapmirror_release_timeout
        self.wait_for_snapmirror_release_svm(src_vserver,
                                             dest_vserver,
                                             src_client,
                                             timeout=release_timeout)
+
def wait_for_vserver_state(self, vserver_name, client, state=None,
                           operational_state=None, subtype=None,
                           timeout=300):
    """Poll the vserver until it reports the requested attributes.

    Only the attributes actually provided (state, operational_state,
    subtype) are checked. Raises NetAppException on timeout.
    """
    interval = 10
    retries = (timeout / interval or 1)

    # Collect only the attributes the caller asked us to verify.
    expected = {}
    for key, value in (('state', state),
                       ('operational_state', operational_state),
                       ('subtype', subtype)):
        if value:
            expected[key] = value

    @utils.retry(exception.VserverNotReady, interval=interval,
                 retries=retries, backoff_rate=1)
    def wait_for_state():
        vserver_info = client.get_vserver_info(vserver_name)
        if not all(item in vserver_info.items()
                   for item in expected.items()):
            raise exception.VserverNotReady(vserver=vserver_name)

    try:
        wait_for_state()
    except exception.VserverNotReady:
        msg = _("Vserver %s did not reach the expected state. Retries "
                "exhausted. Aborting.") % vserver_name
        raise exception.NetAppException(message=msg)
+
def wait_for_snapmirror_release_svm(self, source_vserver, dest_vserver,
                                    src_client, timeout=300):
    """Release the vserver SnapMirror on the source until it is gone.

    Sends 'release' on the source endpoint and polls until no
    destination entries remain. Raises NetAppException when retries
    are exhausted.
    """
    interval = 10
    retries = (timeout / interval or 1)

    @utils.retry(exception.NetAppException, interval=interval,
                 retries=retries, backoff_rate=1)
    def release_snapmirror():
        snapmirrors = src_client.get_snapmirror_destinations_svm(
            source_vserver=source_vserver, dest_vserver=dest_vserver)
        if not snapmirrors:
            LOG.debug("No snapmirrors to be released in source location.")
        else:
            try:
                src_client.release_snapmirror_svm(source_vserver,
                                                  dest_vserver)
            except netapp_api.NaApiError as e:
                if (e.code == netapp_api.EOBJECTNOTFOUND or
                        e.code == netapp_api.ESOURCE_IS_DIFFERENT or
                        "(entry doesn't exist)" in e.message):
                    LOG.debug('Snapmirror relationship does not exists '
                              'anymore.')

            msg = _('Snapmirror release sent to source vserver. We will '
                    'wait for it to be released.')
            # BUG FIX: the message must be passed as 'message'; passing
            # it as an unrecognized kwarg ('vserver=') caused the text
            # to be dropped and the default exception message used.
            raise exception.NetAppException(message=msg)

    try:
        release_snapmirror()
    except exception.NetAppException:
        msg = _("Unable to release the snapmirror from source vserver %s. "
                "Retries exhausted. Aborting") % source_vserver
        raise exception.NetAppException(message=msg)
diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py b/manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py
index 77cf4b1349..f7a9286c0e 100644
--- a/manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py
+++ b/manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py
@@ -287,9 +287,38 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
def get_share_status(self, share_instance, share_server=None):
return self.library.get_share_status(share_instance, share_server)
- def choose_share_server_compatible_with_share(self, context,
- share_servers, share,
- snapshot=None,
def share_server_migration_check_compatibility(
        self, context, share_server, dest_host, old_share_network,
        new_share_network, shares_request_spec):
    """Delegate the server-migration compatibility check to the library."""
    return self.library.share_server_migration_check_compatibility(
        context, share_server, dest_host, old_share_network,
        new_share_network, shares_request_spec)
+
def share_server_migration_start(self, context, src_share_server,
                                 dest_share_server, shares, snapshots):
    """Delegate starting a share-server migration to the library."""
    self.library.share_server_migration_start(
        context, src_share_server, dest_share_server, shares, snapshots)
+
def share_server_migration_continue(self, context, src_share_server,
                                    dest_share_server, shares, snapshots):
    """Delegate one migration-progress step to the library."""
    return self.library.share_server_migration_continue(
        context, src_share_server, dest_share_server, shares, snapshots)
+
def share_server_migration_complete(self, context, src_share_server,
                                    dest_share_server, shares, snapshots,
                                    new_network_info):
    """Delegate completing a share-server migration to the library."""
    return self.library.share_server_migration_complete(
        context, src_share_server, dest_share_server, shares, snapshots,
        new_network_info)
+
def share_server_migration_cancel(self, context, src_share_server,
                                  dest_share_server, shares, snapshots):
    """Delegate cancelling a share-server migration to the library."""
    self.library.share_server_migration_cancel(
        context, src_share_server, dest_share_server, shares, snapshots)
+
+ def choose_share_server_compatible_with_share(self, context, share_servers,
+ share, snapshot=None,
share_group=None):
return self.library.choose_share_server_compatible_with_share(
context, share_servers, share, snapshot=snapshot,
@@ -301,3 +330,9 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
return self.library.choose_share_server_compatible_with_share_group(
context, share_servers, share_group_ref,
share_group_snapshot=share_group_snapshot)
+
def share_server_migration_get_progress(self, context, src_share_server,
                                        dest_share_server, shares,
                                        snapshots):
    """Delegate reporting migration progress to the library."""
    return self.library.share_server_migration_get_progress(
        context, src_share_server, dest_share_server, shares, snapshots)
diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py b/manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py
index 4a311cfbc2..fd12c13e91 100644
--- a/manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py
+++ b/manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py
@@ -284,6 +284,32 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
def get_share_status(self, share_instance, share_server=None):
return self.library.get_share_status(share_instance, share_server)
def share_server_migration_start(self, context, src_share_server,
                                 dest_share_server, shares, snapshots):
    """Share-server migration is not supported by this driver."""
    raise NotImplementedError
+
def share_server_migration_continue(self, context, src_share_server,
                                    dest_share_server, shares, snapshots):
    """Share-server migration is not supported by this driver."""
    raise NotImplementedError
+
def share_server_migration_complete(self, context, src_share_server,
                                    dest_share_server, shares, snapshots,
                                    new_network_info):
    """Share-server migration is not supported by this driver."""
    raise NotImplementedError
+
def share_server_migration_cancel(self, context, src_share_server,
                                  dest_share_server, shares, snapshots):
    """Share-server migration is not supported by this driver."""
    raise NotImplementedError
+
def share_server_migration_check_compatibility(
        self, context, share_server, dest_host, old_share_network,
        new_share_network, shares_request_spec):
    """Share-server migration is not supported by this driver."""
    raise NotImplementedError
+
def share_server_migration_get_progress(self, context, src_share_server,
                                        dest_share_server, shares=None,
                                        snapshots=None):
    """Share-server migration is not supported by this driver.

    Accepts the same arguments as the multi-SVM driver's counterpart
    ('shares' and 'snapshots' default to None, keeping old callers
    working) so callers of the common driver interface get
    NotImplementedError rather than a TypeError.
    """
    raise NotImplementedError
+
def choose_share_server_compatible_with_share(self, context, share_servers,
share, snapshot=None,
share_group=None):
diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py
index a1304a4aec..0484a5ebb9 100644
--- a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py
+++ b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py
@@ -273,6 +273,10 @@ class NetAppCmodeFileStorageLibrary(object):
return self.configuration.netapp_qos_policy_group_name_template % {
'share_id': share_id.replace('-', '_')}
+ def _get_backend_snapmirror_policy_name_svm(self, share_server_id):
+ return (self.configuration.netapp_snapmirror_policy_name_svm_template
+ % {'share_server_id': share_server_id.replace('-', '_')})
+
@na_utils.trace
def _get_aggregate_space(self):
aggregates = self._find_matching_aggregates()
@@ -1155,7 +1159,8 @@ class NetAppCmodeFileStorageLibrary(object):
@na_utils.trace
def _create_export(self, share, share_server, vserver, vserver_client,
- clear_current_export_policy=True):
+ clear_current_export_policy=True,
+ ensure_share_already_exists=False):
"""Creates NAS storage."""
helper = self._get_helper(share)
helper.set_client(vserver_client)
@@ -1177,7 +1182,8 @@ class NetAppCmodeFileStorageLibrary(object):
# Create the share and get a callback for generating export locations
callback = helper.create_share(
share, share_name,
- clear_current_export_policy=clear_current_export_policy)
+ clear_current_export_policy=clear_current_export_policy,
+ ensure_share_already_exists=ensure_share_already_exists)
# Generate export locations using addresses, metadata and callback
export_locations = [
@@ -1919,14 +1925,16 @@ class NetAppCmodeFileStorageLibrary(object):
if snapmirror.get('mirror-state') != 'snapmirrored':
try:
- vserver_client.resume_snapmirror(snapmirror['source-vserver'],
- snapmirror['source-volume'],
- vserver,
- share_name)
- vserver_client.resync_snapmirror(snapmirror['source-vserver'],
- snapmirror['source-volume'],
- vserver,
- share_name)
+ vserver_client.resume_snapmirror_vol(
+ snapmirror['source-vserver'],
+ snapmirror['source-volume'],
+ vserver,
+ share_name)
+ vserver_client.resync_snapmirror_vol(
+ snapmirror['source-vserver'],
+ snapmirror['source-volume'],
+ vserver,
+ share_name)
return constants.REPLICA_STATE_OUT_OF_SYNC
except netapp_api.NaApiError:
LOG.exception("Could not resync snapmirror.")
@@ -2592,7 +2600,7 @@ class NetAppCmodeFileStorageLibrary(object):
msg_args = {
'share_move_state': move_status['state']
}
- msg = _("Migration cancelation was not successful. The share "
+ msg = _("Migration cancellation was not successful. The share "
"migration state failed while transitioning from "
"%(share_move_state)s state to 'failed'. Retries "
"exhausted.") % msg_args
@@ -2842,3 +2850,32 @@ class NetAppCmodeFileStorageLibrary(object):
self.volume_rehost(share, src_vserver, dest_vserver)
# Mount the volume on the destination vserver
dest_vserver_client.mount_volume(volume_name)
+
+ def _check_capacity_compatibility(self, pools, thin_provision, size):
+ """Check if the size requested is suitable for the available pools"""
+
+ backend_free_capacity = 0.0
+
+ for pool in pools:
+ if "unknown" in (pool['free_capacity_gb'],
+ pool['total_capacity_gb']):
+ return False
+ reserved = float(pool['reserved_percentage']) / 100
+
+ total_pool_free = math.floor(
+ pool['free_capacity_gb'] -
+ pool['total_capacity_gb'] * reserved)
+
+ if thin_provision:
+ # If thin provision is enabled it's necessary recalculate the
+ # total_pool_free considering the max over subscription ratio
+ # for each pool. After summing the free space for each pool we
+ # have the total backend free capacity to compare with the
+ # requested size.
+ if pool['max_over_subscription_ratio'] >= 1:
+ total_pool_free = math.floor(
+ total_pool_free * pool['max_over_subscription_ratio'])
+
+ backend_free_capacity += total_pool_free
+
+ return size <= backend_free_capacity
diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py
index 6cd8933897..f6705476e3 100644
--- a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py
+++ b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py
@@ -29,6 +29,7 @@ from oslo_utils import excutils
from manila import exception
from manila.i18n import _
+from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
@@ -72,8 +73,8 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
check_for_setup_error())
@na_utils.trace
- def _get_vserver(self, share_server=None, vserver_name=None):
-
+ def _get_vserver(self, share_server=None, vserver_name=None,
+ backend_name=None):
if share_server:
backend_details = share_server.get('backend_details')
vserver = backend_details.get(
@@ -86,13 +87,19 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
elif vserver_name:
vserver = vserver_name
else:
- msg = _('Share server not provided')
+ msg = _('Share server or vserver name not provided')
raise exception.InvalidInput(reason=msg)
- if not self._client.vserver_exists(vserver):
+ if backend_name:
+ vserver_client = data_motion.get_client_for_backend(
+ backend_name, vserver
+ )
+ else:
+ vserver_client = self._get_api_client(vserver)
+
+ if not vserver_client.vserver_exists(vserver):
raise exception.VserverNotFound(vserver=vserver)
- vserver_client = self._get_api_client(vserver)
return vserver, vserver_client
def _get_ems_pool_info(self):
@@ -152,7 +159,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
server_details['nfs_config'] = jsonutils.dumps(nfs_config)
try:
- self._create_vserver(vserver_name, network_info,
+ self._create_vserver(vserver_name, network_info, metadata,
nfs_config=nfs_config)
except Exception as e:
e.detail_data = {'server_details': server_details}
@@ -208,12 +215,20 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return self.configuration.netapp_vserver_name_template % server_id
@na_utils.trace
- def _create_vserver(self, vserver_name, network_info, nfs_config=None):
+ def _create_vserver(self, vserver_name, network_info, metadata=None,
+ nfs_config=None):
"""Creates Vserver with given parameters if it doesn't exist."""
if self._client.vserver_exists(vserver_name):
msg = _('Vserver %s already exists.')
raise exception.NetAppException(msg % vserver_name)
+ # NOTE(dviroel): check if this vserver will be a data protection server
+ is_dp_destination = False
+ if metadata and metadata.get('migration_destination') is True:
+ is_dp_destination = True
+ msg = _("Starting creation of a vserver with 'dp_destination' "
+ "subtype.")
+ LOG.debug(msg)
# NOTE(lseki): If there's already an ipspace created for the same VLAN
# port, reuse it. It will be named after the previously created share
@@ -224,47 +239,66 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
ipspace_name = self._client.get_ipspace_name_for_vlan_port(
node_name, port, vlan) or self._create_ipspace(network_info)
- LOG.debug('Vserver %s does not exist, creating.', vserver_name)
- self._client.create_vserver(
- vserver_name,
- self.configuration.netapp_root_volume_aggregate,
- self.configuration.netapp_root_volume,
- self._find_matching_aggregates(),
- ipspace_name)
+ if is_dp_destination:
+ # Get Data ONTAP aggregate name as pool name.
+ LOG.debug('Creating a new Vserver (%s) for data protection.',
+ vserver_name)
+ self._client.create_vserver_dp_destination(
+ vserver_name,
+ self._find_matching_aggregates(),
+ ipspace_name)
+ # Set up port and broadcast domain for the current ipspace
+ self._create_port_and_broadcast_domain(ipspace_name, network_info)
+ else:
+ LOG.debug('Vserver %s does not exist, creating.', vserver_name)
+ self._client.create_vserver(
+ vserver_name,
+ self.configuration.netapp_root_volume_aggregate,
+ self.configuration.netapp_root_volume,
+ self._find_matching_aggregates(),
+ ipspace_name)
- vserver_client = self._get_api_client(vserver=vserver_name)
- security_services = None
- try:
- self._create_vserver_lifs(vserver_name,
- vserver_client,
- network_info,
- ipspace_name)
+ vserver_client = self._get_api_client(vserver=vserver_name)
- self._create_vserver_admin_lif(vserver_name,
- vserver_client,
- network_info,
- ipspace_name)
+ security_services = network_info.get('security_services')
+ try:
+ self._setup_network_for_vserver(
+ vserver_name, vserver_client, network_info, ipspace_name,
+ security_services=security_services, nfs_config=nfs_config)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.error("Failed to configure Vserver.")
+ # NOTE(dviroel): At this point, the lock was already
+ # acquired by the caller of _create_vserver.
+ self._delete_vserver(vserver_name,
+ security_services=security_services,
+ needs_lock=False)
- self._create_vserver_routes(vserver_client,
- network_info)
+ def _setup_network_for_vserver(self, vserver_name, vserver_client,
+ network_info, ipspace_name,
+ enable_nfs=True, security_services=None,
+ nfs_config=None):
+ self._create_vserver_lifs(vserver_name,
+ vserver_client,
+ network_info,
+ ipspace_name)
+ self._create_vserver_admin_lif(vserver_name,
+ vserver_client,
+ network_info,
+ ipspace_name)
+
+ self._create_vserver_routes(vserver_client,
+ network_info)
+ if enable_nfs:
vserver_client.enable_nfs(
self.configuration.netapp_enabled_share_protocols,
nfs_config=nfs_config)
- security_services = network_info.get('security_services')
- if security_services:
- self._client.setup_security_services(security_services,
- vserver_client,
- vserver_name)
- except Exception:
- with excutils.save_and_reraise_exception():
- LOG.error("Failed to configure Vserver.")
- # NOTE(dviroel): At this point, the lock was already acquired
- # by the caller of _create_vserver.
- self._delete_vserver(vserver_name,
- security_services=security_services,
- needs_lock=False)
+ if security_services:
+ self._client.setup_security_services(security_services,
+ vserver_client,
+ vserver_name)
def _get_valid_ipspace_name(self, network_id):
"""Get IPspace name according to network id."""
@@ -376,6 +410,21 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
ip_address, netmask, vlan, node_name, port, vserver_name,
lif_name, ipspace_name, mtu)
+ @na_utils.trace
+ def _create_port_and_broadcast_domain(self, ipspace_name, network_info):
+ nodes = self._client.list_cluster_nodes()
+ node_network_info = zip(nodes, network_info['network_allocations'])
+
+ for node_name, network_allocation in node_network_info:
+
+ port = self._get_node_data_port(node_name)
+ vlan = network_allocation['segmentation_id']
+ network_mtu = network_allocation.get('mtu')
+ mtu = network_mtu or DEFAULT_MTU
+
+ self._client.create_port_and_broadcast_domain(
+ node_name, port, vlan, mtu, ipspace_name)
+
@na_utils.trace
def get_network_allocations_number(self):
"""Get number of network interfaces to be created."""
@@ -415,6 +464,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
vserver_client = self._get_api_client(vserver=vserver)
network_interfaces = vserver_client.get_network_interfaces()
+ snapmirror_policies = self._client.get_snapmirror_policies(vserver)
interfaces_on_vlans = []
vlans = []
@@ -430,6 +480,11 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
vlan_id = None
def _delete_vserver_without_lock():
+ # NOTE(dviroel): always delete all policies before deleting the
+ # vserver
+ for policy in snapmirror_policies:
+ vserver_client.delete_snapmirror_policy(policy)
+
# NOTE(dviroel): Attempt to delete all vserver peering
# created by replication
self._delete_vserver_peers(vserver)
@@ -437,13 +492,17 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
self._client.delete_vserver(vserver,
vserver_client,
security_services=security_services)
-
+ ipspace_deleted = False
if (ipspace_name and ipspace_name not in CLUSTER_IPSPACES
and not self._client.ipspace_has_data_vservers(
ipspace_name)):
self._client.delete_ipspace(ipspace_name)
+ ipspace_deleted = True
- self._delete_vserver_vlans(interfaces_on_vlans)
+ if not ipspace_name or ipspace_deleted:
+ # NOTE(dviroel): only delete vlans if they are not being used
+ # by any ipspaces and data vservers.
+ self._delete_vserver_vlans(interfaces_on_vlans)
@utils.synchronized('netapp-VLAN-%s' % vlan_id, external=True)
def _delete_vserver_with_lock():
@@ -592,8 +651,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
def _get_snapmirrors(self, vserver, peer_vserver):
return self._client.get_snapmirrors(
- source_vserver=vserver, source_volume=None,
- destination_vserver=peer_vserver, destination_volume=None)
+ source_vserver=vserver, dest_vserver=peer_vserver)
def _get_vservers_from_replicas(self, context, replica_list, new_replica):
active_replica = self.find_active_replica(replica_list)
@@ -706,10 +764,13 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
extra_specs = share_types.get_extra_specs_from_share(share)
nfs_config = self._get_nfs_config_provisioning_options(extra_specs)
+ # Avoid the reuse of 'dp_destination' vservers:
for share_server in share_servers:
if self._check_reuse_share_server(share_server, nfs_config,
share_group=share_group):
return share_server
+
+ # There is no compatible share server to be reused
return None
@na_utils.trace
@@ -720,6 +781,16 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
share_server['id']):
return False
+ backend_name = share_utils.extract_host(share_server['host'],
+ level='backend_name')
+ vserver_name, client = self._get_vserver(share_server,
+ backend_name=backend_name)
+ vserver_info = client.get_vserver_info(vserver_name)
+ if (vserver_info.get('operational_state') != 'running'
+ or vserver_info.get('state') != 'running'
+ or vserver_info.get('subtype') != 'default'):
+ return False
+
if self.is_nfs_config_supported:
# NOTE(felipe_rodrigues): Do not check that the share nfs_config
# matches with the group nfs_config, because the API guarantees
@@ -799,3 +870,417 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return (super(NetAppCmodeMultiSVMFileStorageLibrary, self).
manage_existing(share, driver_options,
share_server=share_server))
+
+ @na_utils.trace
+ def share_server_migration_check_compatibility(
+ self, context, source_share_server, dest_host, old_share_network,
+ new_share_network, shares_request_spec):
+
+ not_compatible = {
+ 'compatible': False,
+ 'writable': None,
+ 'nondisruptive': None,
+ 'preserve_snapshots': None,
+ 'migration_cancel': None,
+ 'migration_get_progress': None,
+ 'share_network_id': None
+ }
+
+ # We need cluster creds, of course
+ if not self._have_cluster_creds:
+ msg = _("Cluster credentials have not been configured with this "
+ "share driver. Cannot perform server migration operation.")
+ LOG.error(msg)
+ return not_compatible
+
+ # Vserver will spread across aggregates in this implementation
+ if share_utils.extract_host(dest_host, level='pool') is not None:
+ msg = _("Cannot perform server migration to a specific pool. "
+ "Please choose a destination host 'host@backend' as "
+ "destination.")
+ LOG.error(msg)
+ return not_compatible
+
+ src_backend_name = share_utils.extract_host(
+ source_share_server['host'], level='backend_name')
+ src_vserver, src_client = self._get_vserver(
+ source_share_server, backend_name=src_backend_name)
+ dest_backend_name = share_utils.extract_host(dest_host,
+ level='backend_name')
+ # Block migration within the same backend.
+ if src_backend_name == dest_backend_name:
+ msg = _("Cannot perform server migration within the same backend. "
+ "Please choose a destination host different from the "
+ "source.")
+ LOG.error(msg)
+ return not_compatible
+
+ src_cluster_name = src_client.get_cluster_name()
+ # NOTE(dviroel): This call is supposed to be made in the destination host
+ dest_cluster_name = self._client.get_cluster_name()
+ # Must be in different clusters too, SVM-DR restriction
+ if src_cluster_name == dest_cluster_name:
+ msg = _("Cannot perform server migration within the same cluster. "
+ "Please choose a destination host that's in a different "
+ "cluster.")
+ LOG.error(msg)
+ return not_compatible
+
+ # Check for SVM DR support
+ # NOTE(dviroel): These clients can only be used for non-tunneling
+ # requests.
+ dst_client = data_motion.get_client_for_backend(dest_backend_name,
+ vserver_name=None)
+ if (not src_client.is_svm_dr_supported()
+ or not dst_client.is_svm_dr_supported()):
+ msg = _("Cannot perform server migration because at least one of "
+ "the backends doesn't support SVM DR.")
+ LOG.error(msg)
+ return not_compatible
+
+ # Blocking different security services for now
+ if old_share_network['id'] != new_share_network['id']:
+ new_sec_services = new_share_network.get('security_services', [])
+ old_sec_services = old_share_network.get('security_services', [])
+ if new_sec_services or old_sec_services:
+ new_sec_serv_ids = [ss['id'] for ss in new_sec_services]
+ old_sec_serv_ids = [ss['id'] for ss in old_sec_services]
+ if not set(new_sec_serv_ids) == set(old_sec_serv_ids):
+ msg = _("Cannot perform server migration for different "
+ "security services. Please choose a suitable "
+ "share network that matches the source security "
+ "service.")
+ LOG.error(msg)
+ return not_compatible
+
+ pools = self._get_pools()
+ # Check 'netapp_flexvol_encryption' and 'revert_to_snapshot_support'
+ specs_to_validate = ('netapp_flexvol_encryption',
+ 'revert_to_snapshot_support')
+ for req_spec in shares_request_spec.get('shares_req_spec', []):
+ extra_specs = req_spec.get('share_type', {}).get('extra_specs', {})
+ for spec in specs_to_validate:
+ if extra_specs.get(spec) and not pools[0][spec]:
+ msg = _("Cannot perform server migration since the "
+ "destination host doesn't support the required "
+ "extra-spec %s.") % spec
+ LOG.error(msg)
+ return not_compatible
+ # TODO(dviroel): disk_type extra-spec
+
+ # Check capacity
+ server_total_size = (shares_request_spec.get('shares_size', 0) +
+ shares_request_spec.get('snapshots_size', 0))
+ # NOTE(dviroel): If the backend has a 'max_over_subscription_ratio'
+ # configured and greater than 1, we'll consider thin provisioning
+ # enable for all shares.
+ thin_provisioning = self.configuration.max_over_subscription_ratio > 1
+ if self.configuration.netapp_server_migration_check_capacity is True:
+ if not self._check_capacity_compatibility(pools, thin_provisioning,
+ server_total_size):
+ msg = _("Cannot perform server migration because destination "
+ "host doesn't have enough free space.")
+ LOG.error(msg)
+ return not_compatible
+
+ compatibility = {
+ 'compatible': True,
+ 'writable': True,
+ 'nondisruptive': False,
+ 'preserve_snapshots': True,
+ 'share_network_id': new_share_network['id'],
+ 'migration_cancel': True,
+ 'migration_get_progress': False,
+ }
+
+ return compatibility
+
+ def share_server_migration_start(self, context, source_share_server,
+ dest_share_server, share_instances,
+ snapshot_instances):
+ """Start share server migration using SVM DR.
+
+ 1. Create vserver peering between source and destination
+ 2. Create SnapMirror
+ """
+ src_backend_name = share_utils.extract_host(
+ source_share_server['host'], level='backend_name')
+ src_vserver, src_client = self._get_vserver(
+ share_server=source_share_server, backend_name=src_backend_name)
+ src_cluster = src_client.get_cluster_name()
+
+ dest_backend_name = share_utils.extract_host(
+ dest_share_server['host'], level='backend_name')
+ dest_vserver, dest_client = self._get_vserver(
+ share_server=dest_share_server, backend_name=dest_backend_name)
+ dest_cluster = dest_client.get_cluster_name()
+
+ # 1. Check and create vserver peer if needed
+ if not self._get_vserver_peers(dest_vserver, src_vserver):
+ # Request vserver peer creation from destination to source
+ # NOTE(dviroel): vserver peering rollback is handled by
+ # '_delete_vserver' function.
+ dest_client.create_vserver_peer(
+ dest_vserver, src_vserver,
+ peer_cluster_name=src_cluster)
+
+ # Accepts the vserver peering using active replica host's
+ # client (inter-cluster only)
+ if dest_cluster != src_cluster:
+ src_client.accept_vserver_peer(src_vserver, dest_vserver)
+
+ # 2. Create SnapMirror
+ dm_session = data_motion.DataMotionSession()
+ try:
+ dm_session.create_snapmirror_svm(source_share_server,
+ dest_share_server)
+ except Exception:
+ # NOTE(dviroel): vserver peer delete will be handled on vserver
+ # teardown
+ dm_session.cancel_snapmirror_svm(source_share_server,
+ dest_share_server)
+ msg_args = {
+ 'src': source_share_server['id'],
+ 'dest': dest_share_server['id'],
+ }
+ msg = _('Could not initialize SnapMirror between %(src)s and '
+ '%(dest)s vservers.') % msg_args
+ raise exception.NetAppException(message=msg)
+
+ msg_args = {
+ 'src': source_share_server['id'],
+ 'dest': dest_share_server['id'],
+ }
+ msg = _('Starting share server migration from %(src)s to %(dest)s.')
+ LOG.info(msg, msg_args)
+
+ def _get_snapmirror_svm(self, source_share_server, dest_share_server):
+ dm_session = data_motion.DataMotionSession()
+ try:
+ snapmirrors = dm_session.get_snapmirrors_svm(
+ source_share_server, dest_share_server)
+ except netapp_api.NaApiError:
+ msg_args = {
+ 'src': source_share_server['id'],
+ 'dest': dest_share_server['id']
+ }
+ msg = _("Could not retrieve snapmirrors between source "
+ "%(src)s and destination %(dest)s vServers.") % msg_args
+ LOG.exception(msg)
+ raise exception.NetAppException(message=msg)
+
+ return snapmirrors
+
+ @na_utils.trace
+ def share_server_migration_continue(self, context, source_share_server,
+ dest_share_server, share_instances,
+ snapshot_instances):
+ """Continues a share server migration using SVM DR."""
+ snapmirrors = self._get_snapmirror_svm(source_share_server,
+ dest_share_server)
+ if not snapmirrors:
+ msg_args = {
+ 'src': source_share_server['id'],
+ 'dest': dest_share_server['id']
+ }
+ msg = _("No snapmirror relationship was found between source "
+ "%(src)s and destination %(dest)s vServers.") % msg_args
+ LOG.exception(msg)
+ raise exception.NetAppException(message=msg)
+
+ snapmirror = snapmirrors[0]
+ in_progress_status = ['preparing', 'transferring', 'finalizing']
+ mirror_state = snapmirror.get('mirror-state')
+ status = snapmirror.get('relationship-status')
+ if mirror_state != 'snapmirrored' and status in in_progress_status:
+ LOG.debug("Data transfer still in progress.")
+ return False
+ elif mirror_state == 'snapmirrored' and status == 'idle':
+ LOG.info("Source and destination vServers are now snapmirrored.")
+ return True
+
+ msg = _("Snapmirror is not ready yet. The current mirror state is "
+ "'%(mirror_state)s' and relationship status is '%(status)s'.")
+ msg_args = {
+ 'mirror_state': mirror_state,
+ 'status': status,
+ }
+ LOG.debug(msg, msg_args)
+ return False
+
+ @na_utils.trace
+ def share_server_migration_complete(self, context, source_share_server,
+ dest_share_server, share_instances,
+ snapshot_instances, new_network_alloc):
+ """Completes share server migration using SVM DR.
+
+ 1. Do a last SnapMirror update.
+ 2. Quiesce, abort and then break the relationship.
+ 3. Stop the source vserver
+ 4. Configure network interfaces in the destination vserver
+ 5. Start the destination vserver
+ 6. Delete and release the snapmirror
+ 7. Build the list of export_locations for each share
+ 8. Release all resources from the source share server
+ """
+ dm_session = data_motion.DataMotionSession()
+ try:
+ # 1. Start an update to try to get a last minute transfer before we
+ # quiesce and break
+ dm_session.update_snapmirror_svm(source_share_server,
+ dest_share_server)
+ except exception.StorageCommunicationException:
+ # Ignore any errors since the current source may be unreachable
+ pass
+
+ src_backend_name = share_utils.extract_host(
+ source_share_server['host'], level='backend_name')
+ src_vserver, src_client = self._get_vserver(
+ share_server=source_share_server, backend_name=src_backend_name)
+
+ dest_backend_name = share_utils.extract_host(
+ dest_share_server['host'], level='backend_name')
+ dest_vserver, dest_client = self._get_vserver(
+ share_server=dest_share_server, backend_name=dest_backend_name)
+ try:
+ # 2. Attempt to quiesce, abort and then break SnapMirror
+ dm_session.quiesce_and_break_snapmirror_svm(source_share_server,
+ dest_share_server)
+ # NOTE(dviroel): Lets wait until the destination vserver be
+ # promoted to 'default' and state 'running', before starting
+ # shutting down the source
+ dm_session.wait_for_vserver_state(
+ dest_vserver, dest_client, subtype='default',
+ state='running', operational_state='stopped',
+ timeout=(self.configuration.
+ netapp_server_migration_state_change_timeout))
+
+ # 3. Stop source vserver
+ src_client.stop_vserver(src_vserver)
+
+ # 4. Setup network configuration
+ ipspace_name = dest_client.get_vserver_ipspace(dest_vserver)
+
+ # NOTE(dviroel): Security service and NFS configuration should be
+ # handled by SVM DR, so no changes will be made here.
+ vlan = new_network_alloc['segmentation_id']
+
+ @utils.synchronized('netapp-VLAN-%s' % vlan, external=True)
+ def setup_network_for_destination_vserver():
+ self._setup_network_for_vserver(
+ dest_vserver, dest_client, new_network_alloc, ipspace_name,
+ enable_nfs=False,
+ security_services=None)
+
+ setup_network_for_destination_vserver()
+
+ # 5. Start the destination.
+ dest_client.start_vserver(dest_vserver)
+
+ except Exception:
+ # Try to recover source vserver
+ try:
+ src_client.start_vserver(src_vserver)
+ except Exception:
+ LOG.warning("Unable to recover source share server after a "
+ "migration failure.")
+ # Destroy any snapmirror and make destination vserver to have its
+ # subtype set to 'default'
+ dm_session.cancel_snapmirror_svm(source_share_server,
+ dest_share_server)
+ # Rollback resources transferred to the destination
+ for instance in share_instances:
+ self._delete_share(instance, dest_client, remove_export=False)
+
+ msg_args = {
+ 'src': source_share_server['id'],
+ 'dest': dest_share_server['id'],
+ }
+ msg = _('Could not complete the migration between %(src)s and '
+ '%(dest)s vservers.') % msg_args
+ raise exception.NetAppException(message=msg)
+
+ # 6. Delete/release snapmirror
+ dm_session.delete_snapmirror_svm(source_share_server,
+ dest_share_server)
+
+ # 7. Build a dict with shares/snapshot location updates
+ # NOTE(dviroel): For SVM DR, the share names aren't modified, only the
+ # export_locations are updated due to network changes.
+ share_updates = {}
+ for instance in share_instances:
+ # Get the volume to find out the associated aggregate
+ try:
+ share_name = self._get_backend_share_name(instance['id'])
+ volume = dest_client.get_volume(share_name)
+ except Exception:
+ msg_args = {
+ 'src': source_share_server['id'],
+ 'dest': dest_share_server['id'],
+ }
+ msg = _('Could not complete the migration between %(src)s and '
+ '%(dest)s vservers. One of the shares was not found '
+ 'in the destination vserver.') % msg_args
+ raise exception.NetAppException(message=msg)
+
+ export_locations = self._create_export(
+ instance, dest_share_server, dest_vserver, dest_client,
+ clear_current_export_policy=False,
+ ensure_share_already_exists=True)
+
+ share_updates.update({
+ instance['id']: {
+ 'export_locations': export_locations,
+ 'pool_name': volume.get('aggregate')
+ }})
+
+ # NOTE(dviroel): Nothing to update in snapshot instances since the
+ # provider location didn't change.
+
+ # 8. Release source share resources
+ for instance in share_instances:
+ self._delete_share(instance, src_client, remove_export=True)
+
+ # NOTE(dviroel): source share server deletion must be triggered by
+ # the manager after finishing the migration
+ LOG.info('Share server migration completed.')
+ return {
+ 'share_updates': share_updates,
+ }
+
+ def share_server_migration_cancel(self, context, source_share_server,
+ dest_share_server, shares, snapshots):
+ """Cancel a share server migration that is using SVM DR."""
+
+ dm_session = data_motion.DataMotionSession()
+ dest_backend_name = share_utils.extract_host(dest_share_server['host'],
+ level='backend_name')
+ dest_vserver, dest_client = self._get_vserver(
+ share_server=dest_share_server, backend_name=dest_backend_name)
+
+ try:
+ snapmirrors = self._get_snapmirror_svm(source_share_server,
+ dest_share_server)
+ if snapmirrors:
+ dm_session.cancel_snapmirror_svm(source_share_server,
+ dest_share_server)
+ # Do a simple volume cleanup in the destination vserver
+ for instance in shares:
+ self._delete_share(instance, dest_client, remove_export=False)
+
+ except Exception:
+ msg_args = {
+ 'src': source_share_server['id'],
+ 'dest': dest_share_server['id'],
+ }
+ msg = _('Unable to cancel SnapMirror relationship between %(src)s '
+ 'and %(dest)s vservers.') % msg_args
+ raise exception.NetAppException(message=msg)
+
+ LOG.info('Share server migration was cancelled.')
+
+ def share_server_migration_get_progress(self, context, src_share_server,
+ dest_share_server, shares,
+ snapshots):
+ # TODO(dviroel): get snapmirror info to infer the progress
+ return {'total_progress': 0}
diff --git a/manila/share/drivers/netapp/dataontap/protocols/cifs_cmode.py b/manila/share/drivers/netapp/dataontap/protocols/cifs_cmode.py
index 40beca06c4..5e787e15b6 100644
--- a/manila/share/drivers/netapp/dataontap/protocols/cifs_cmode.py
+++ b/manila/share/drivers/netapp/dataontap/protocols/cifs_cmode.py
@@ -29,9 +29,15 @@ class NetAppCmodeCIFSHelper(base.NetAppBaseHelper):
@na_utils.trace
def create_share(self, share, share_name,
- clear_current_export_policy=True):
+ clear_current_export_policy=True,
+ ensure_share_already_exists=False):
"""Creates CIFS share on Data ONTAP Vserver."""
- self._client.create_cifs_share(share_name)
+ if not ensure_share_already_exists:
+ self._client.create_cifs_share(share_name)
+ elif not self._client.cifs_share_exists(share_name):
+ msg = _("The expected CIFS share %(share_name)s was not found.")
+ msg_args = {'share_name': share_name}
+ raise exception.NetAppException(msg % msg_args)
if clear_current_export_policy:
self._client.remove_cifs_share_access(share_name, 'Everyone')
diff --git a/manila/share/drivers/netapp/dataontap/protocols/nfs_cmode.py b/manila/share/drivers/netapp/dataontap/protocols/nfs_cmode.py
index 4841969bb1..a17cbbddc4 100644
--- a/manila/share/drivers/netapp/dataontap/protocols/nfs_cmode.py
+++ b/manila/share/drivers/netapp/dataontap/protocols/nfs_cmode.py
@@ -41,8 +41,12 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
@na_utils.trace
def create_share(self, share, share_name,
- clear_current_export_policy=True):
+ clear_current_export_policy=True,
+ ensure_share_already_exists=False):
"""Creates NFS share."""
+ # TODO(dviroel): Ensure that nfs share already exists if
+ # ensure_share_already_exists is True. Although, no conflicts are
+ # expected here since there is no create share operation being made.
if clear_current_export_policy:
self._client.clear_nfs_export_policy_for_volume(share_name)
self._ensure_export_policy(share, share_name)
diff --git a/manila/share/drivers/netapp/options.py b/manila/share/drivers/netapp/options.py
index d3e3d2d540..d607d9abbf 100644
--- a/manila/share/drivers/netapp/options.py
+++ b/manila/share/drivers/netapp/options.py
@@ -111,7 +111,11 @@ netapp_provisioning_opts = [
"nothing will be changed during startup. This will not "
"affect new shares, which will have their snapshot "
"directory always visible, unless toggled by the share "
- "type extra spec 'netapp:hide_snapdir'."), ]
+ "type extra spec 'netapp:hide_snapdir'."),
+ cfg.StrOpt('netapp_snapmirror_policy_name_svm_template',
+ help='NetApp SnapMirror policy name template for Storage '
+ 'Virtual Machines (Vservers).',
+ default='snapmirror_policy_%(share_server_id)s'), ]
netapp_cluster_opts = [
cfg.StrOpt('netapp_vserver',
@@ -145,6 +149,11 @@ netapp_data_motion_opts = [
help='The maximum time in seconds to wait for existing '
'snapmirror transfers to complete before aborting when '
'promoting a replica.'),
+ cfg.IntOpt('netapp_snapmirror_release_timeout',
+ min=0,
+ default=3600, # One Hour
+ help='The maximum time in seconds to wait for a snapmirror '
+ 'release when breaking snapmirror relationships.'),
cfg.IntOpt('netapp_volume_move_cutover_timeout',
min=0,
default=3600, # One Hour,
@@ -162,7 +171,21 @@ netapp_data_motion_opts = [
default=3600, # One Hour,
help='The maximum time in seconds that migration cancel '
'waits for all migration operations be completely '
- 'aborted.'), ]
+ 'aborted.'),
+ cfg.IntOpt('netapp_server_migration_state_change_timeout',
+ min=0,
+ default=3600, # One hour,
+ help='The maximum time in seconds that a share server '
+ 'migration waits for a vserver to change its internal '
+ 'states.'),
+ cfg.BoolOpt('netapp_server_migration_check_capacity',
+ default=True,
+ help='Specify if the capacity check must be made by the '
+ 'driver while performing a share server migration. '
+ 'If enabled, the driver will validate if the destination '
+ 'backend can hold all shares and snapshots capacities '
+ 'from the source share server.'),
+]
CONF = cfg.CONF
CONF.register_opts(netapp_proxy_opts)
diff --git a/manila/tests/share/drivers/netapp/dataontap/client/fakes.py b/manila/tests/share/drivers/netapp/dataontap/client/fakes.py
index b86e6d43a9..045bca58c5 100644
--- a/manila/tests/share/drivers/netapp/dataontap/client/fakes.py
+++ b/manila/tests/share/drivers/netapp/dataontap/client/fakes.py
@@ -74,6 +74,17 @@ DELETED_EXPORT_POLICIES = {
}
QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name'
QOS_MAX_THROUGHPUT = '5000B/s'
+VSERVER_TYPE_DEFAULT = 'default'
+VSERVER_TYPE_DP_DEST = 'dp_destination'
+VSERVER_OP_STATE_RUNNING = 'running'
+VSERVER_STATE = 'running'
+VSERVER_INFO = {
+ 'name': VSERVER_NAME,
+ 'subtype': VSERVER_TYPE_DEFAULT,
+ 'operational_state': VSERVER_OP_STATE_RUNNING,
+ 'state': VSERVER_STATE,
+}
+SNAPMIRROR_POLICY_NAME = 'fake_snapmirror_policy'
USER_NAME = 'fake_user'
@@ -198,6 +209,20 @@ VSERVER_GET_ITER_RESPONSE = etree.XML("""
""" % {'fake_vserver': VSERVER_NAME})
+VSERVER_GET_ITER_RESPONSE_INFO = etree.XML("""
+
+
+
+ %(operational_state)s
+ %(state)s
+ %(name)s
+ %(subtype)s
+
+
+ 1
+
+""" % VSERVER_INFO)
+
VSERVER_GET_ROOT_VOLUME_NAME_RESPONSE = etree.XML("""
@@ -1702,6 +1727,18 @@ CIFS_SHARE_ACCESS_CONTROL_GET_ITER = etree.XML("""
""" % {'volume': SHARE_NAME})
+CIFS_SHARE_GET_ITER_RESPONSE = etree.XML("""
+
+
+
+ %(share_name)s
+ fake_vserver
+
+
+ 1
+
+""" % {'share_name': SHARE_NAME})
+
NFS_EXPORT_RULES = ('10.10.10.10', '10.10.10.20')
NFS_EXPORTFS_LIST_RULES_2_NO_RULES_RESPONSE = etree.XML("""
@@ -2373,6 +2410,7 @@ SNAPMIRROR_GET_ITER_FILTERED_RESPONSE = etree.XML("""
fake_destination_volume
true
snapmirrored
+ idle
daily
fake_source_vserver
fake_source_volume
@@ -2382,6 +2420,35 @@ SNAPMIRROR_GET_ITER_FILTERED_RESPONSE = etree.XML("""
""")
+SNAPMIRROR_GET_ITER_FILTERED_RESPONSE_2 = etree.XML("""
+
+
+
+ fake_source_vserver
+ fake_destination_vserver
+ snapmirrored
+ idle
+
+
+ 1
+
+""")
+
+SNAPMIRROR_GET_DESTINATIONS_ITER_FILTERED_RESPONSE = etree.XML("""
+
+
+
+ fake_destination_vserver:
+ fake_destination_vserver
+ fake_relationship_id
+ fake_source_vserver:
+ fake_source_vserver
+
+
+ 1
+
+""")
+
SNAPMIRROR_INITIALIZE_RESULT = etree.XML("""
succeeded
@@ -2605,6 +2672,20 @@ QOS_POLICY_GROUP_GET_ITER_RESPONSE = etree.XML("""
'max_throughput': QOS_MAX_THROUGHPUT,
})
+SNAPMIRROR_POLICY_GET_ITER_RESPONSE = etree.XML("""
+
+
+
+ %(policy_name)s
+ %(vserver_name)s
+
+
+ 1
+ """ % {
+ 'policy_name': SNAPMIRROR_POLICY_NAME,
+ 'vserver_name': VSERVER_NAME,
+})
+
FAKE_VOL_XML = """
open123
online
diff --git a/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py b/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py
index c85bb39c50..ea1a3c5f3d 100644
--- a/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py
+++ b/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py
@@ -472,6 +472,31 @@ class NetAppClientCmodeTestCase(test.TestCase):
mock.call('vserver-create', vserver_create_args),
mock.call('vserver-modify', vserver_modify_args)])
+ def test_create_vserver_dp_destination(self):
+
+ self.client.features.add_feature('IPSPACES')
+ self.mock_object(self.client, 'send_request')
+
+ vserver_create_args = {
+ 'vserver-name': fake.VSERVER_NAME,
+ 'ipspace': fake.IPSPACE_NAME,
+ 'vserver-subtype': fake.VSERVER_TYPE_DP_DEST,
+ }
+ vserver_modify_args = {
+ 'aggr-list': [{'aggr-name': aggr_name} for aggr_name
+ in fake.SHARE_AGGREGATE_NAMES],
+ 'vserver-name': fake.VSERVER_NAME
+ }
+
+ self.client.create_vserver_dp_destination(
+ fake.VSERVER_NAME,
+ fake.SHARE_AGGREGATE_NAMES,
+ fake.IPSPACE_NAME)
+
+ self.client.send_request.assert_has_calls([
+ mock.call('vserver-create', vserver_create_args),
+ mock.call('vserver-modify', vserver_modify_args)])
+
def test_create_vserver_ipspaces_not_supported(self):
self.assertRaises(exception.NetAppException,
@@ -680,8 +705,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
def test_delete_vserver_no_volumes(self):
self.mock_object(self.client,
- 'vserver_exists',
- mock.Mock(return_value=True))
+ 'get_vserver_info',
+ mock.Mock(return_value=fake.VSERVER_INFO))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
@@ -707,8 +732,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
def test_delete_vserver_one_volume(self):
self.mock_object(self.client,
- 'vserver_exists',
- mock.Mock(return_value=True))
+ 'get_vserver_info',
+ mock.Mock(return_value=fake.VSERVER_INFO))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
@@ -734,8 +759,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
def test_delete_vserver_one_volume_already_offline(self):
self.mock_object(self.client,
- 'vserver_exists',
- mock.Mock(return_value=True))
+ 'get_vserver_info',
+ mock.Mock(return_value=fake.VSERVER_INFO))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
@@ -765,8 +790,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
def test_delete_vserver_one_volume_api_error(self):
self.mock_object(self.client,
- 'vserver_exists',
- mock.Mock(return_value=True))
+ 'get_vserver_info',
+ mock.Mock(return_value=fake.VSERVER_INFO))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
@@ -787,8 +812,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
def test_delete_vserver_multiple_volumes(self):
self.mock_object(self.client,
- 'vserver_exists',
- mock.Mock(return_value=True))
+ 'get_vserver_info',
+ mock.Mock(return_value=fake.VSERVER_INFO))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
@@ -804,8 +829,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
def test_delete_vserver_not_found(self):
self.mock_object(self.client,
- 'vserver_exists',
- mock.Mock(return_value=False))
+ 'get_vserver_info',
+ mock.Mock(return_value=None))
self.client.delete_vserver(fake.VSERVER_NAME,
self.vserver_client)
@@ -5771,7 +5796,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
def test_create_snapmirror(self, schedule, policy):
self.mock_object(self.client, 'send_request')
- self.client.create_snapmirror(
+ self.client.create_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
schedule=schedule, policy=policy)
@@ -5795,7 +5820,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
code=netapp_api.ERELATION_EXISTS))
self.mock_object(self.client, 'send_request', mock_send_req)
- self.client.create_snapmirror(
+ self.client.create_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
@@ -5814,11 +5839,29 @@ class NetAppClientCmodeTestCase(test.TestCase):
code=0))
self.mock_object(self.client, 'send_request', mock_send_req)
- self.assertRaises(netapp_api.NaApiError, self.client.create_snapmirror,
+ self.assertRaises(netapp_api.NaApiError,
+ self.client.create_snapmirror_vol,
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
self.assertTrue(self.client.send_request.called)
+ def test_create_snapmirror_svm(self):
+ self.mock_object(self.client, 'send_request')
+
+ self.client.create_snapmirror_svm(fake.SM_SOURCE_VSERVER,
+ fake.SM_DEST_VSERVER,
+ max_transfer_rate='fake_xfer_rate')
+
+ snapmirror_create_args = {
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'relationship-type': 'data_protection',
+ 'identity-preserve': 'true',
+ 'max-transfer-rate': 'fake_xfer_rate'
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-create', snapmirror_create_args)])
+
@ddt.data(
{
'source_snapshot': 'fake_snapshot',
@@ -5837,7 +5880,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
'send_request',
mock.Mock(return_value=api_response))
- result = self.client.initialize_snapmirror(
+ result = self.client.initialize_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
source_snapshot=source_snapshot,
@@ -5865,12 +5908,38 @@ class NetAppClientCmodeTestCase(test.TestCase):
}
self.assertEqual(expected, result)
+ def test_initialize_snapmirror_svm(self):
+
+ api_response = netapp_api.NaElement(fake.SNAPMIRROR_INITIALIZE_RESULT)
+ self.mock_object(self.client,
+ 'send_request',
+ mock.Mock(return_value=api_response))
+
+ result = self.client.initialize_snapmirror_svm(fake.SM_SOURCE_VSERVER,
+ fake.SM_DEST_VSERVER)
+
+ snapmirror_initialize_args = {
+ 'source-location': fake.SM_SOURCE_VSERVER + ':',
+ 'destination-location': fake.SM_DEST_VSERVER + ':',
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-initialize', snapmirror_initialize_args)])
+
+ expected = {
+ 'operation-id': None,
+ 'status': 'succeeded',
+ 'jobid': None,
+ 'error-code': None,
+ 'error-message': None
+ }
+ self.assertEqual(expected, result)
+
@ddt.data(True, False)
def test_release_snapmirror(self, relationship_info_only):
self.mock_object(self.client, 'send_request')
- self.client.release_snapmirror(
+ self.client.release_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
relationship_info_only=relationship_info_only)
@@ -5887,14 +5956,35 @@ class NetAppClientCmodeTestCase(test.TestCase):
}
}
}
+
self.client.send_request.assert_has_calls([
- mock.call('snapmirror-release-iter', snapmirror_release_args)])
+ mock.call('snapmirror-release-iter', snapmirror_release_args,
+ enable_tunneling=True)])
+
+ def test_release_snapmirror_svm(self):
+ self.mock_object(self.client, 'send_request')
+
+ self.client.release_snapmirror_svm(
+ fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER)
+
+ snapmirror_release_args = {
+ 'query': {
+ 'snapmirror-destination-info': {
+ 'source-location': fake.SM_SOURCE_VSERVER + ':',
+ 'destination-location': fake.SM_DEST_VSERVER + ':',
+ 'relationship-info-only': 'false'
+ }
+ }
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-release-iter', snapmirror_release_args,
+ enable_tunneling=False)])
def test_quiesce_snapmirror(self):
self.mock_object(self.client, 'send_request')
- self.client.quiesce_snapmirror(
+ self.client.quiesce_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
@@ -5907,12 +5997,26 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_has_calls([
mock.call('snapmirror-quiesce', snapmirror_quiesce_args)])
+ def test_quiesce_snapmirror_svm(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.quiesce_snapmirror_svm(
+ fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER)
+
+ snapmirror_quiesce_args = {
+ 'source-location': fake.SM_SOURCE_VSERVER + ':',
+ 'destination-location': fake.SM_DEST_VSERVER + ':',
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-quiesce', snapmirror_quiesce_args)])
+
@ddt.data(True, False)
def test_abort_snapmirror(self, clear_checkpoint):
self.mock_object(self.client, 'send_request')
- self.client.abort_snapmirror(
+ self.client.abort_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
clear_checkpoint=clear_checkpoint)
@@ -5927,12 +6031,27 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_has_calls([
mock.call('snapmirror-abort', snapmirror_abort_args)])
+ def test_abort_snapmirror_svm(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.abort_snapmirror_svm(
+ fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER)
+
+ snapmirror_abort_args = {
+ 'source-location': fake.SM_SOURCE_VSERVER + ':',
+ 'destination-location': fake.SM_DEST_VSERVER + ':',
+ 'clear-checkpoint': 'false'
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-abort', snapmirror_abort_args)])
+
def test_abort_snapmirror_no_transfer_in_progress(self):
mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(
code=netapp_api.ENOTRANSFER_IN_PROGRESS))
self.mock_object(self.client, 'send_request', mock_send_req)
- self.client.abort_snapmirror(
+ self.client.abort_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
@@ -5950,7 +6069,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0))
self.mock_object(self.client, 'send_request', mock_send_req)
- self.assertRaises(netapp_api.NaApiError, self.client.abort_snapmirror,
+ self.assertRaises(netapp_api.NaApiError,
+ self.client.abort_snapmirror_vol,
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
@@ -5958,7 +6078,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.mock_object(self.client, 'send_request')
- self.client.break_snapmirror(
+ self.client.break_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
@@ -5971,6 +6091,20 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_has_calls([
mock.call('snapmirror-break', snapmirror_break_args)])
+ def test_break_snapmirror_svm(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.break_snapmirror_svm(
+ fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER)
+
+ snapmirror_break_args = {
+ 'source-location': fake.SM_SOURCE_VSERVER + ':',
+ 'destination-location': fake.SM_DEST_VSERVER + ':',
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-break', snapmirror_break_args)])
+
@ddt.data(
{
'schedule': 'fake_schedule',
@@ -5991,7 +6125,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.mock_object(self.client, 'send_request')
- self.client.modify_snapmirror(
+ self.client.modify_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
schedule=schedule, policy=policy, tries=tries,
@@ -6018,7 +6152,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.mock_object(self.client, 'send_request')
- self.client.update_snapmirror(
+ self.client.update_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
@@ -6031,12 +6165,26 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_has_calls([
mock.call('snapmirror-update', snapmirror_update_args)])
+ def test_update_snapmirror_svm(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.update_snapmirror_svm(
+ fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER)
+
+ snapmirror_update_args = {
+ 'source-location': fake.SM_SOURCE_VSERVER + ':',
+ 'destination-location': fake.SM_DEST_VSERVER + ':',
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-update', snapmirror_update_args)])
+
def test_update_snapmirror_already_transferring(self):
mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(
code=netapp_api.ETRANSFER_IN_PROGRESS))
self.mock_object(self.client, 'send_request', mock_send_req)
- self.client.update_snapmirror(
+ self.client.update_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
@@ -6054,7 +6202,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
code=netapp_api.EANOTHER_OP_ACTIVE))
self.mock_object(self.client, 'send_request', mock_send_req)
- self.client.update_snapmirror(
+ self.client.update_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
@@ -6071,7 +6219,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0))
self.mock_object(self.client, 'send_request', mock_send_req)
- self.assertRaises(netapp_api.NaApiError, self.client.update_snapmirror,
+ self.assertRaises(netapp_api.NaApiError,
+ self.client.update_snapmirror_vol,
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
@@ -6079,7 +6228,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.mock_object(self.client, 'send_request')
- self.client.delete_snapmirror(
+ self.client.delete_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
@@ -6096,6 +6245,24 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_has_calls([
mock.call('snapmirror-destroy-iter', snapmirror_delete_args)])
+ def test_delete_snapmirror_svm(self):
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.delete_snapmirror_svm(
+ fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER)
+
+ snapmirror_delete_args = {
+ 'query': {
+ 'snapmirror-info': {
+ 'source-location': fake.SM_SOURCE_VSERVER + ':',
+ 'destination-location': fake.SM_DEST_VSERVER + ':',
+ }
+ }
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-destroy-iter', snapmirror_delete_args)])
+
def test__get_snapmirrors(self):
api_response = netapp_api.NaElement(fake.SNAPMIRROR_GET_ITER_RESPONSE)
@@ -6114,8 +6281,10 @@ class NetAppClientCmodeTestCase(test.TestCase):
}
result = self.client._get_snapmirrors(
- fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
- fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
+ source_vserver=fake.SM_SOURCE_VSERVER,
+ source_volume=fake.SM_SOURCE_VOLUME,
+ dest_vserver=fake.SM_DEST_VSERVER,
+ dest_volume=fake.SM_DEST_VOLUME,
desired_attributes=desired_attributes)
snapmirror_get_iter_args = {
@@ -6165,11 +6334,14 @@ class NetAppClientCmodeTestCase(test.TestCase):
desired_attributes = ['source-vserver', 'source-volume',
'destination-vserver', 'destination-volume',
- 'is-healthy', 'mirror-state', 'schedule']
+ 'is-healthy', 'mirror-state', 'schedule',
+ 'relationship-status']
result = self.client.get_snapmirrors(
- fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
- fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
+ source_vserver=fake.SM_SOURCE_VSERVER,
+ dest_vserver=fake.SM_DEST_VSERVER,
+ source_volume=fake.SM_SOURCE_VOLUME,
+ dest_volume=fake.SM_DEST_VOLUME,
desired_attributes=desired_attributes)
snapmirror_get_iter_args = {
@@ -6190,6 +6362,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
'is-healthy': None,
'mirror-state': None,
'schedule': None,
+ 'relationship-status': None,
},
},
}
@@ -6202,16 +6375,97 @@ class NetAppClientCmodeTestCase(test.TestCase):
'is-healthy': 'true',
'mirror-state': 'snapmirrored',
'schedule': 'daily',
+ 'relationship-status': 'idle'
}]
self.client.send_iter_request.assert_has_calls([
mock.call('snapmirror-get-iter', snapmirror_get_iter_args)])
self.assertEqual(expected, result)
+ def test_get_snapmirrors_svm(self):
+
+ api_response = netapp_api.NaElement(
+ fake.SNAPMIRROR_GET_ITER_FILTERED_RESPONSE_2)
+ self.mock_object(self.client,
+ 'send_iter_request',
+ mock.Mock(return_value=api_response))
+
+ desired_attributes = ['source-vserver', 'destination-vserver',
+ 'relationship-status', 'mirror-state']
+
+ result = self.client.get_snapmirrors_svm(
+ source_vserver=fake.SM_SOURCE_VSERVER,
+ dest_vserver=fake.SM_DEST_VSERVER,
+ desired_attributes=desired_attributes)
+
+ snapmirror_get_iter_args = {
+ 'query': {
+ 'snapmirror-info': {
+ 'source-location': fake.SM_SOURCE_VSERVER + ':',
+ 'destination-location': fake.SM_DEST_VSERVER + ':',
+ },
+ },
+ 'desired-attributes': {
+ 'snapmirror-info': {
+ 'source-vserver': None,
+ 'destination-vserver': None,
+ 'relationship-status': None,
+ 'mirror-state': None,
+ },
+ },
+ }
+
+ expected = [{
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'relationship-status': 'idle',
+ 'mirror-state': 'snapmirrored',
+ }]
+
+ self.client.send_iter_request.assert_has_calls([
+ mock.call('snapmirror-get-iter', snapmirror_get_iter_args)])
+ self.assertEqual(expected, result)
+
+ @ddt.data(fake.SNAPMIRROR_GET_DESTINATIONS_ITER_FILTERED_RESPONSE,
+ fake.NO_RECORDS_RESPONSE)
+ def test_get_snapmirror_destinations_svm(self, api_response):
+ self.mock_object(
+ self.client, 'send_iter_request',
+ mock.Mock(return_value=netapp_api.NaElement(api_response)))
+
+ result = self.client.get_snapmirror_destinations_svm(
+ source_vserver=fake.SM_SOURCE_VSERVER,
+ dest_vserver=fake.SM_DEST_VSERVER)
+
+ snapmirror_get_iter_args = {
+ 'query': {
+ 'snapmirror-destination-info': {
+ 'source-location': fake.SM_SOURCE_VSERVER + ':',
+ 'destination-location': fake.SM_DEST_VSERVER + ':',
+ },
+ },
+ }
+
+ if api_response == fake.NO_RECORDS_RESPONSE:
+ expected = []
+ else:
+ expected = [{
+ 'source-vserver': fake.SM_SOURCE_VSERVER,
+ 'destination-vserver': fake.SM_DEST_VSERVER,
+ 'source-location': fake.SM_SOURCE_VSERVER + ':',
+ 'destination-location': fake.SM_DEST_VSERVER + ':',
+ 'relationship-id': 'fake_relationship_id',
+ }]
+
+ self.client.send_iter_request.assert_has_calls([
+ mock.call('snapmirror-get-destination-iter',
+ snapmirror_get_iter_args)])
+ self.assertEqual(expected, result)
+
def test_resume_snapmirror(self):
self.mock_object(self.client, 'send_request')
- self.client.resume_snapmirror(
+ self.client.resume_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
@@ -6224,12 +6478,25 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_has_calls([
mock.call('snapmirror-resume', snapmirror_resume_args)])
+ def test_resume_snapmirror_svm(self):
+ self.mock_object(self.client, 'send_request')
+
+ self.client.resume_snapmirror_svm(
+ fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER)
+
+ snapmirror_resume_args = {
+ 'source-location': fake.SM_SOURCE_VSERVER + ':',
+ 'destination-location': fake.SM_DEST_VSERVER + ':',
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-resume', snapmirror_resume_args)])
+
def test_resume_snapmirror_not_quiesed(self):
mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(
code=netapp_api.ERELATION_NOT_QUIESCED))
self.mock_object(self.client, 'send_request', mock_send_req)
- self.client.resume_snapmirror(
+ self.client.resume_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
@@ -6246,14 +6513,15 @@ class NetAppClientCmodeTestCase(test.TestCase):
mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0))
self.mock_object(self.client, 'send_request', mock_send_req)
- self.assertRaises(netapp_api.NaApiError, self.client.resume_snapmirror,
+ self.assertRaises(netapp_api.NaApiError,
+ self.client.resume_snapmirror_vol,
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
def test_resync_snapmirror(self):
self.mock_object(self.client, 'send_request')
- self.client.resync_snapmirror(
+ self.client.resync_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
@@ -6266,6 +6534,19 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_has_calls([
mock.call('snapmirror-resync', snapmirror_resync_args)])
+ def test_resync_snapmirror_svm(self):
+ self.mock_object(self.client, 'send_request')
+
+ self.client.resync_snapmirror_svm(
+ fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER)
+
+ snapmirror_resync_args = {
+ 'source-location': fake.SM_SOURCE_VSERVER + ':',
+ 'destination-location': fake.SM_DEST_VSERVER + ':',
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('snapmirror-resync', snapmirror_resync_args)])
+
@ddt.data('source', 'destination', None)
def test_volume_has_snapmirror_relationships(self, snapmirror_rel_type):
"""Snapmirror relationships can be both ways."""
@@ -6282,8 +6563,10 @@ class NetAppClientCmodeTestCase(test.TestCase):
}
expected_get_snapmirrors_call_count = 2
expected_get_snapmirrors_calls = [
- mock.call(vol['owning-vserver-name'], vol['name'], None, None),
- mock.call(None, None, vol['owning-vserver-name'], vol['name']),
+ mock.call(source_vserver=vol['owning-vserver-name'],
+ source_volume=vol['name']),
+ mock.call(dest_vserver=vol['owning-vserver-name'],
+ dest_volume=vol['name']),
]
if snapmirror_rel_type is None:
side_effect = ([], [])
@@ -6315,7 +6598,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
vol = fake.FAKE_MANAGE_VOLUME
expected_get_snapmirrors_calls = [
- mock.call(vol['owning-vserver-name'], vol['name'], None, None),
+ mock.call(source_vserver=vol['owning-vserver-name'],
+ source_volume=vol['name']),
]
mock_get_snapmirrors_call = self.mock_object(
self.client, 'get_snapmirrors', mock.Mock(
@@ -6878,3 +7162,207 @@ class NetAppClientCmodeTestCase(test.TestCase):
nfs_config = self.client.parse_nfs_config(parent_elem, desired_args)
self.assertDictEqual(nfs_config, expected_nfs)
+
+ @ddt.data(fake.NO_RECORDS_RESPONSE,
+ fake.VSERVER_GET_ITER_RESPONSE_INFO)
+ def test_get_vserver_info(self, api_response):
+ self.mock_object(self.client, 'send_iter_request',
+ mock.Mock(
+ return_value=netapp_api.NaElement(
+ api_response)))
+
+ result = self.client.get_vserver_info(fake.VSERVER_NAME)
+
+ expected_api_args = {
+ 'query': {
+ 'vserver-info': {
+ 'vserver-name': fake.VSERVER_NAME,
+ },
+ },
+ 'desired-attributes': {
+ 'vserver-info': {
+ 'vserver-name': None,
+ 'vserver-subtype': None,
+ 'state': None,
+ 'operational-state': None,
+ },
+ },
+ }
+ self.client.send_iter_request.assert_called_once_with(
+ 'vserver-get-iter', expected_api_args)
+ if api_response == fake.NO_RECORDS_RESPONSE:
+ self.assertIsNone(result)
+ else:
+ self.assertDictMatch(fake.VSERVER_INFO, result)
+
+ @ddt.data({'discard_network': True, 'preserve_snapshots': False},
+ {'discard_network': False, 'preserve_snapshots': True})
+ @ddt.unpack
+ def test_create_snapmirror_policy(self, discard_network,
+ preserve_snapshots):
+ api_response = netapp_api.NaElement(fake.PASSED_RESPONSE)
+ self.mock_object(self.client, 'send_request',
+ mock.Mock(return_value=api_response))
+
+ self.client.create_snapmirror_policy(
+ fake.SNAPMIRROR_POLICY_NAME, discard_network_info=discard_network,
+ preserve_snapshots=preserve_snapshots)
+
+ expected_create_api_args = {
+ 'policy-name': fake.SNAPMIRROR_POLICY_NAME,
+ 'type': 'async_mirror',
+ }
+ if discard_network:
+ expected_create_api_args['discard-configs'] = {
+ 'svmdr-config-obj': 'network'
+ }
+ expected_calls = [
+ mock.call('snapmirror-policy-create', expected_create_api_args)
+ ]
+
+ if preserve_snapshots:
+ expected_add_rules = {
+ 'policy-name': fake.SNAPMIRROR_POLICY_NAME,
+ 'snapmirror-label': 'all_source_snapshots',
+ 'keep': '1',
+ 'preserve': 'false'
+ }
+ expected_calls.append(mock.call('snapmirror-policy-add-rule',
+ expected_add_rules))
+
+ self.client.send_request.assert_has_calls(expected_calls)
+
+ def test_delete_snapmirror_policy(self):
+ api_response = netapp_api.NaElement(fake.PASSED_RESPONSE)
+ self.mock_object(self.client, 'send_request',
+ mock.Mock(return_value=api_response))
+
+ self.client.delete_snapmirror_policy(fake.SNAPMIRROR_POLICY_NAME)
+
+ expected_api_args = {
+ 'policy-name': fake.SNAPMIRROR_POLICY_NAME,
+ }
+
+ self.client.send_request.assert_called_once_with(
+ 'snapmirror-policy-delete', expected_api_args)
+
+ def test_delete_snapmirror_policy_not_found(self):
+ self.mock_object(self.client, 'send_request',
+ self._mock_api_error(code=netapp_api.EOBJECTNOTFOUND))
+
+ self.client.delete_snapmirror_policy(fake.SNAPMIRROR_POLICY_NAME)
+
+ expected_api_args = {
+ 'policy-name': fake.SNAPMIRROR_POLICY_NAME,
+ }
+
+ self.client.send_request.assert_called_once_with(
+ 'snapmirror-policy-delete', expected_api_args)
+
+ def test_get_snapmirror_policies(self):
+ api_response = netapp_api.NaElement(
+ fake.SNAPMIRROR_POLICY_GET_ITER_RESPONSE)
+ self.mock_object(self.client, 'send_iter_request',
+ mock.Mock(return_value=api_response))
+
+ result = self.client.get_snapmirror_policies(fake.VSERVER_NAME)
+
+ expected_api_args = {
+ 'query': {
+ 'snapmirror-policy-info': {
+ 'vserver-name': fake.VSERVER_NAME,
+ },
+ },
+ 'desired-attributes': {
+ 'snapmirror-policy-info': {
+ 'policy-name': None,
+ },
+ },
+ }
+
+ self.client.send_iter_request.assert_called_once_with(
+ 'snapmirror-policy-get-iter', expected_api_args)
+ self.assertEqual([fake.SNAPMIRROR_POLICY_NAME], result)
+
+ @ddt.data(True, False, None)
+ def test_start_vserver(self, force):
+ api_response = netapp_api.NaElement(fake.PASSED_RESPONSE)
+ self.mock_object(self.client, 'send_request',
+ mock.Mock(return_value=api_response))
+
+ self.client.start_vserver(fake.VSERVER_NAME, force=force)
+
+ expected_api_args = {
+ 'vserver-name': fake.VSERVER_NAME,
+ }
+ if force is not None:
+ expected_api_args['force'] = 'true' if force is True else 'false'
+
+ self.client.send_request.assert_called_once_with(
+ 'vserver-start', expected_api_args, enable_tunneling=False)
+
+ def test_start_vserver_already_started(self):
+ self.mock_object(self.client, 'send_request',
+ self._mock_api_error(
+ code=netapp_api.EVSERVERALREADYSTARTED))
+
+ self.client.start_vserver(fake.VSERVER_NAME)
+
+ expected_api_args = {
+ 'vserver-name': fake.VSERVER_NAME,
+ }
+
+ self.client.send_request.assert_called_once_with(
+ 'vserver-start', expected_api_args, enable_tunneling=False)
+
+ def test_stop_vserver(self):
+ api_response = netapp_api.NaElement(fake.PASSED_RESPONSE)
+ self.mock_object(self.client, 'send_request',
+ mock.Mock(return_value=api_response))
+
+ self.client.stop_vserver(fake.VSERVER_NAME)
+
+ expected_api_args = {
+ 'vserver-name': fake.VSERVER_NAME,
+ }
+
+ self.client.send_request.assert_called_once_with(
+ 'vserver-stop', expected_api_args, enable_tunneling=False)
+
+ def test_is_svm_dr_supported(self):
+ self.client.features.add_feature('SVM_DR')
+
+ result = self.client.is_svm_dr_supported()
+
+ self.assertTrue(result)
+
+ @ddt.data({'get_iter_response': fake.CIFS_SHARE_GET_ITER_RESPONSE,
+ 'expected_result': True},
+ {'get_iter_response': fake.NO_RECORDS_RESPONSE,
+ 'expected_result': False})
+ @ddt.unpack
+ def test_cifs_share_exists(self, get_iter_response, expected_result):
+ api_response = netapp_api.NaElement(get_iter_response)
+ self.mock_object(self.client,
+ 'send_iter_request',
+ mock.Mock(return_value=api_response))
+ fake_share_path = '/%s' % fake.SHARE_NAME
+
+ result = self.client.cifs_share_exists(fake.SHARE_NAME)
+
+ cifs_share_get_iter_args = {
+ 'query': {
+ 'cifs-share': {
+ 'share-name': fake.SHARE_NAME,
+ 'path': fake_share_path,
+ },
+ },
+ 'desired-attributes': {
+ 'cifs-share': {
+ 'share-name': None
+ }
+ },
+ }
+ self.assertEqual(expected_result, result)
+ self.client.send_iter_request.assert_called_once_with(
+ 'cifs-share-get-iter', cifs_share_get_iter_args)
diff --git a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_data_motion.py b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_data_motion.py
index 872b4f6889..9e58818298 100644
--- a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_data_motion.py
+++ b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_data_motion.py
@@ -151,6 +151,8 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.fake_src_share = copy.deepcopy(fake.SHARE)
self.fake_src_share_server = copy.deepcopy(fake.SHARE_SERVER)
self.source_vserver = 'source_vserver'
+ self.source_backend_name = (
+ self.fake_src_share_server['host'].split('@')[1])
self.fake_src_share_server['backend_details']['vserver_name'] = (
self.source_vserver
)
@@ -158,8 +160,10 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.fake_src_share['id'] = 'c02d497a-236c-4852-812a-0d39373e312a'
self.fake_src_vol_name = 'share_c02d497a_236c_4852_812a_0d39373e312a'
self.fake_dest_share = copy.deepcopy(fake.SHARE)
- self.fake_dest_share_server = copy.deepcopy(fake.SHARE_SERVER)
+ self.fake_dest_share_server = copy.deepcopy(fake.SHARE_SERVER_2)
self.dest_vserver = 'dest_vserver'
+ self.dest_backend_name = (
+ self.fake_dest_share_server['host'].split('@')[1])
self.fake_dest_share_server['backend_details']['vserver_name'] = (
self.dest_vserver
)
@@ -172,6 +176,25 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.mock_object(data_motion, 'get_client_for_backend',
mock.Mock(side_effect=[self.mock_dest_client,
self.mock_src_client]))
+ self.mock_object(self.dm_session, 'get_client_and_vserver_name',
+ mock.Mock(side_effect=[
+ (self.mock_src_client, self.source_vserver),
+ (self.mock_dest_client, self.dest_vserver)]))
+
+ def test_get_client_and_vserver_name(self):
+ dm_session = data_motion.DataMotionSession()
+ client = mock.Mock()
+ self.mock_object(data_motion, 'get_client_for_backend',
+ mock.Mock(return_value=client))
+
+ result = dm_session.get_client_and_vserver_name(fake.SHARE_SERVER)
+ expected = (client,
+ fake.SHARE_SERVER['backend_details']['vserver_name'])
+
+ self.assertEqual(expected, result)
+ data_motion.get_client_for_backend.assert_called_once_with(
+ fake.BACKEND_NAME, vserver_name=fake.VSERVER1
+ )
def test_create_snapmirror(self):
mock_dest_client = mock.Mock()
@@ -181,15 +204,50 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.dm_session.create_snapmirror(self.fake_src_share,
self.fake_dest_share)
- mock_dest_client.create_snapmirror.assert_called_once_with(
+ mock_dest_client.create_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name, schedule='hourly'
)
- mock_dest_client.initialize_snapmirror.assert_called_once_with(
+ mock_dest_client.initialize_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name
)
+ def test_create_snapmirror_svm(self):
+ mock_dest_client = mock.Mock()
+ self.mock_object(self.dm_session, 'get_client_and_vserver_name',
+ mock.Mock(return_value=(mock_dest_client,
+ self.dest_vserver)))
+ self.mock_object(self.dm_session, 'get_vserver_from_share_server',
+ mock.Mock(return_value=self.source_vserver))
+ policy_name = 'policy_' + self.dest_vserver
+ get_snapmirro_policy_name = self.mock_object(
+ self.dm_session, '_get_backend_snapmirror_policy_name_svm',
+ mock.Mock(return_value=policy_name))
+
+ self.dm_session.create_snapmirror_svm(self.fake_src_share_server,
+ self.fake_dest_share_server)
+
+ self.dm_session.get_client_and_vserver_name.assert_called_once_with(
+ self.fake_dest_share_server
+ )
+ self.dm_session.get_vserver_from_share_server.assert_called_once_with(
+ self.fake_src_share_server
+ )
+ get_snapmirro_policy_name.assert_called_once_with(
+ self.fake_dest_share_server['id'], self.dest_backend_name
+ )
+ mock_dest_client.create_snapmirror_policy.assert_called_once_with(
+ policy_name
+ )
+ mock_dest_client.create_snapmirror_svm.assert_called_once_with(
+ self.source_vserver, self.dest_vserver,
+ policy=policy_name, schedule='hourly'
+ )
+ mock_dest_client.initialize_snapmirror_svm.assert_called_once_with(
+ self.source_vserver, self.dest_vserver
+ )
+
def test_delete_snapmirror(self):
mock_src_client = mock.Mock()
mock_dest_client = mock.Mock()
@@ -200,26 +258,50 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.dm_session.delete_snapmirror(self.fake_src_share,
self.fake_dest_share)
- mock_dest_client.abort_snapmirror.assert_called_once_with(
+ mock_dest_client.abort_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name, clear_checkpoint=False
)
- mock_dest_client.delete_snapmirror.assert_called_once_with(
+ mock_dest_client.delete_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name
)
- mock_src_client.release_snapmirror.assert_called_once_with(
+ mock_src_client.release_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name
)
+ @ddt.data(True, False)
+ def test_delete_snapmirror_svm(self, call_release):
+ self.mock_object(self.dm_session, 'wait_for_snapmirror_release_svm')
+ mock_backend_config = na_fakes.create_configuration()
+ mock_backend_config.netapp_snapmirror_release_timeout = 30
+ self.mock_object(data_motion, 'get_backend_configuration',
+ mock.Mock(return_value=mock_backend_config))
+
+ self.dm_session.delete_snapmirror_svm(self.fake_src_share_server,
+ self.fake_dest_share_server,
+ release=call_release)
+
+ self.mock_dest_client.abort_snapmirror_svm.assert_called_once_with(
+ self.source_vserver, self.dest_vserver
+ )
+ self.mock_dest_client.delete_snapmirror_svm.assert_called_once_with(
+ self.source_vserver, self.dest_vserver
+ )
+ if call_release:
+ release_mock = self.dm_session.wait_for_snapmirror_release_svm
+ release_mock.assert_called_once_with(
+ self.source_vserver, self.dest_vserver, self.mock_src_client,
+ timeout=mock_backend_config.netapp_snapmirror_release_timeout
+ )
+
def test_delete_snapmirror_does_not_exist(self):
"""Ensure delete succeeds when the snapmirror does not exist."""
mock_src_client = mock.Mock()
mock_dest_client = mock.Mock()
- mock_dest_client.abort_snapmirror.side_effect = netapp_api.NaApiError(
- code=netapp_api.EAPIERROR
- )
+ mock_dest_client.abort_snapmirror_vol.side_effect = (
+ netapp_api.NaApiError(code=netapp_api.EAPIERROR))
self.mock_object(data_motion, 'get_client_for_backend',
mock.Mock(side_effect=[mock_dest_client,
mock_src_client]))
@@ -227,26 +309,50 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.dm_session.delete_snapmirror(self.fake_src_share,
self.fake_dest_share)
- mock_dest_client.abort_snapmirror.assert_called_once_with(
+ mock_dest_client.abort_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name, clear_checkpoint=False
)
- mock_dest_client.delete_snapmirror.assert_called_once_with(
+ mock_dest_client.delete_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name
)
- mock_src_client.release_snapmirror.assert_called_once_with(
+ mock_src_client.release_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name
)
+ def test_delete_snapmirror_svm_does_not_exist(self):
+ """Ensure delete succeeds when the snapmirror does not exist."""
+ self.mock_dest_client.abort_snapmirror_svm.side_effect = (
+ netapp_api.NaApiError(code=netapp_api.EAPIERROR))
+ self.mock_object(self.dm_session, 'wait_for_snapmirror_release_svm')
+ mock_backend_config = na_fakes.create_configuration()
+ mock_backend_config.netapp_snapmirror_release_timeout = 30
+ self.mock_object(data_motion, 'get_backend_configuration',
+ mock.Mock(return_value=mock_backend_config))
+
+ self.dm_session.delete_snapmirror_svm(self.fake_src_share_server,
+ self.fake_dest_share_server)
+
+ self.mock_dest_client.abort_snapmirror_svm.assert_called_once_with(
+ self.source_vserver, self.dest_vserver
+ )
+ self.mock_dest_client.delete_snapmirror_svm.assert_called_once_with(
+ self.source_vserver, self.dest_vserver
+ )
+ release_mock = self.dm_session.wait_for_snapmirror_release_svm
+ release_mock.assert_called_once_with(
+ self.source_vserver, self.dest_vserver, self.mock_src_client,
+ timeout=mock_backend_config.netapp_snapmirror_release_timeout
+ )
+
def test_delete_snapmirror_error_deleting(self):
"""Ensure delete succeeds when the snapmirror does not exist."""
mock_src_client = mock.Mock()
mock_dest_client = mock.Mock()
- mock_dest_client.delete_snapmirror.side_effect = netapp_api.NaApiError(
- code=netapp_api.ESOURCE_IS_DIFFERENT
- )
+ mock_dest_client.delete_snapmirror_vol.side_effect = (
+ netapp_api.NaApiError(code=netapp_api.ESOURCE_IS_DIFFERENT))
self.mock_object(data_motion, 'get_client_for_backend',
mock.Mock(side_effect=[mock_dest_client,
mock_src_client]))
@@ -254,24 +360,49 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.dm_session.delete_snapmirror(self.fake_src_share,
self.fake_dest_share)
- mock_dest_client.abort_snapmirror.assert_called_once_with(
+ mock_dest_client.abort_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name, clear_checkpoint=False
)
- mock_dest_client.delete_snapmirror.assert_called_once_with(
+ mock_dest_client.delete_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name
)
- mock_src_client.release_snapmirror.assert_called_once_with(
+ mock_src_client.release_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name
)
+ def test_delete_snapmirror_svm_error_deleting(self):
+ """Ensure delete succeeds when there is an error deleting the snapmirror."""
+ self.mock_dest_client.delete_snapmirror_svm.side_effect = (
+ netapp_api.NaApiError(code=netapp_api.ESOURCE_IS_DIFFERENT))
+ self.mock_object(self.dm_session, 'wait_for_snapmirror_release_svm')
+ mock_backend_config = na_fakes.create_configuration()
+ mock_backend_config.netapp_snapmirror_release_timeout = 30
+ self.mock_object(data_motion, 'get_backend_configuration',
+ mock.Mock(return_value=mock_backend_config))
+
+ self.dm_session.delete_snapmirror_svm(self.fake_src_share_server,
+ self.fake_dest_share_server)
+
+ self.mock_dest_client.abort_snapmirror_svm.assert_called_once_with(
+ self.source_vserver, self.dest_vserver
+ )
+ self.mock_dest_client.delete_snapmirror_svm.assert_called_once_with(
+ self.source_vserver, self.dest_vserver
+ )
+ release_mock = self.dm_session.wait_for_snapmirror_release_svm
+ release_mock.assert_called_once_with(
+ self.source_vserver, self.dest_vserver, self.mock_src_client,
+ timeout=mock_backend_config.netapp_snapmirror_release_timeout
+ )
+
def test_delete_snapmirror_error_releasing(self):
"""Ensure delete succeeds when the snapmirror does not exist."""
mock_src_client = mock.Mock()
mock_dest_client = mock.Mock()
- mock_src_client.release_snapmirror.side_effect = (
+ mock_src_client.release_snapmirror_vol.side_effect = (
netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND))
self.mock_object(data_motion, 'get_client_for_backend',
mock.Mock(side_effect=[mock_dest_client,
@@ -280,15 +411,15 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.dm_session.delete_snapmirror(self.fake_src_share,
self.fake_dest_share)
- mock_dest_client.abort_snapmirror.assert_called_once_with(
+ mock_dest_client.abort_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name, clear_checkpoint=False
)
- mock_dest_client.delete_snapmirror.assert_called_once_with(
+ mock_dest_client.delete_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name
)
- mock_src_client.release_snapmirror.assert_called_once_with(
+ mock_src_client.release_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name
)
@@ -304,15 +435,15 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.fake_dest_share,
release=False)
- mock_dest_client.abort_snapmirror.assert_called_once_with(
+ mock_dest_client.abort_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name, clear_checkpoint=False
)
- mock_dest_client.delete_snapmirror.assert_called_once_with(
+ mock_dest_client.delete_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name
)
- self.assertFalse(mock_src_client.release_snapmirror.called)
+ self.assertFalse(mock_src_client.release_snapmirror_vol.called)
def test_delete_snapmirror_source_unreachable(self):
mock_src_client = mock.Mock()
@@ -324,16 +455,16 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.dm_session.delete_snapmirror(self.fake_src_share,
self.fake_dest_share)
- mock_dest_client.abort_snapmirror.assert_called_once_with(
+ mock_dest_client.abort_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name, clear_checkpoint=False
)
- mock_dest_client.delete_snapmirror.assert_called_once_with(
+ mock_dest_client.delete_snapmirror_vol.assert_called_once_with(
mock.ANY, self.fake_src_vol_name, mock.ANY,
self.fake_dest_vol_name
)
- self.assertFalse(mock_src_client.release_snapmirror.called)
+ self.assertFalse(mock_src_client.release_snapmirror_vol.called)
def test_break_snapmirror(self):
self.mock_object(self.dm_session, 'quiesce_then_abort')
@@ -341,7 +472,7 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.dm_session.break_snapmirror(self.fake_src_share,
self.fake_dest_share)
- self.mock_dest_client.break_snapmirror.assert_called_once_with(
+ self.mock_dest_client.break_snapmirror_vol.assert_called_once_with(
self.source_vserver, self.fake_src_vol_name,
self.dest_vserver, self.fake_dest_vol_name)
@@ -358,7 +489,7 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.fake_dest_share,
mount=False)
- self.mock_dest_client.break_snapmirror.assert_called_once_with(
+ self.mock_dest_client.break_snapmirror_vol.assert_called_once_with(
self.source_vserver, self.fake_src_vol_name,
self.dest_vserver, self.fake_dest_vol_name)
@@ -376,7 +507,7 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.dm_session.quiesce_then_abort.assert_called_once_with(
self.fake_src_share, self.fake_dest_share)
- self.mock_dest_client.break_snapmirror.assert_called_once_with(
+ self.mock_dest_client.break_snapmirror_vol.assert_called_once_with(
self.source_vserver, self.fake_src_vol_name,
self.dest_vserver, self.fake_dest_vol_name)
@@ -398,22 +529,54 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.fake_dest_share)
self.mock_dest_client.get_snapmirrors.assert_called_with(
- self.source_vserver, self.fake_src_vol_name,
- self.dest_vserver, self.fake_dest_vol_name,
+ source_vserver=self.source_vserver,
+ dest_vserver=self.dest_vserver,
+ source_volume=self.fake_src_vol_name,
+ dest_volume=self.fake_dest_vol_name,
desired_attributes=['relationship-status', 'mirror-state']
)
self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count)
- self.mock_dest_client.quiesce_snapmirror.assert_called_with(
+ self.mock_dest_client.quiesce_snapmirror_vol.assert_called_with(
self.source_vserver, self.fake_src_vol_name,
self.dest_vserver, self.fake_dest_vol_name)
- self.mock_dest_client.abort_snapmirror.assert_called_once_with(
+ self.mock_dest_client.abort_snapmirror_vol.assert_called_once_with(
self.source_vserver, self.fake_src_vol_name,
self.dest_vserver, self.fake_dest_vol_name,
clear_checkpoint=False
)
+ def test_quiesce_then_abort_svm_timeout(self):
+ self.mock_object(time, 'sleep')
+ mock_get_snapmirrors = mock.Mock(
+ return_value=[{'relationship-status': "transferring"}])
+ self.mock_object(self.mock_dest_client, 'get_snapmirrors_svm',
+ mock_get_snapmirrors)
+ mock_backend_config = na_fakes.create_configuration()
+ mock_backend_config.netapp_snapmirror_quiesce_timeout = 10
+ self.mock_object(data_motion, 'get_backend_configuration',
+ mock.Mock(return_value=mock_backend_config))
+
+ self.dm_session.quiesce_then_abort_svm(self.fake_src_share_server,
+ self.fake_dest_share_server)
+
+ self.mock_dest_client.get_snapmirrors_svm.assert_called_with(
+ source_vserver=self.source_vserver,
+ dest_vserver=self.dest_vserver,
+ desired_attributes=['relationship-status', 'mirror-state']
+ )
+ self.assertEqual(2,
+ self.mock_dest_client.get_snapmirrors_svm.call_count)
+
+ self.mock_dest_client.quiesce_snapmirror_svm.assert_called_with(
+ self.source_vserver, self.dest_vserver)
+
+ self.mock_dest_client.abort_snapmirror_svm.assert_called_once_with(
+ self.source_vserver, self.dest_vserver,
+ clear_checkpoint=False
+ )
+
def test_quiesce_then_abort_wait_for_quiesced(self):
self.mock_object(time, 'sleep')
self.mock_object(self.mock_dest_client, 'get_snapmirrors',
@@ -425,21 +588,44 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.fake_dest_share)
self.mock_dest_client.get_snapmirrors.assert_called_with(
- self.source_vserver, self.fake_src_vol_name,
- self.dest_vserver, self.fake_dest_vol_name,
+ source_vserver=self.source_vserver,
+ dest_vserver=self.dest_vserver,
+ source_volume=self.fake_src_vol_name,
+ dest_volume=self.fake_dest_vol_name,
desired_attributes=['relationship-status', 'mirror-state']
)
self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count)
- self.mock_dest_client.quiesce_snapmirror.assert_called_once_with(
+ self.mock_dest_client.quiesce_snapmirror_vol.assert_called_once_with(
self.source_vserver, self.fake_src_vol_name,
self.dest_vserver, self.fake_dest_vol_name)
+ def test_quiesce_then_abort_svm_wait_for_quiesced(self):
+ self.mock_object(time, 'sleep')
+ self.mock_object(self.mock_dest_client, 'get_snapmirrors_svm',
+ mock.Mock(side_effect=[
+ [{'relationship-status': "transferring"}],
+ [{'relationship-status': "quiesced"}]]))
+
+ self.dm_session.quiesce_then_abort_svm(self.fake_src_share_server,
+ self.fake_dest_share_server)
+
+ self.mock_dest_client.get_snapmirrors_svm.assert_called_with(
+ source_vserver=self.source_vserver,
+ dest_vserver=self.dest_vserver,
+ desired_attributes=['relationship-status', 'mirror-state']
+ )
+ self.assertEqual(2,
+ self.mock_dest_client.get_snapmirrors_svm.call_count)
+
+ self.mock_dest_client.quiesce_snapmirror_svm.assert_called_once_with(
+ self.source_vserver, self.dest_vserver)
+
def test_resync_snapmirror(self):
self.dm_session.resync_snapmirror(self.fake_src_share,
self.fake_dest_share)
- self.mock_dest_client.resync_snapmirror.assert_called_once_with(
+ self.mock_dest_client.resync_snapmirror_vol.assert_called_once_with(
self.source_vserver, self.fake_src_vol_name,
self.dest_vserver, self.fake_dest_vol_name)
@@ -459,19 +645,19 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.fake_dest_share, self.fake_src_share, fake_new_src_share,
[self.fake_dest_share, self.fake_src_share, fake_new_src_share])
- self.assertFalse(self.mock_src_client.release_snapmirror.called)
+ self.assertFalse(self.mock_src_client.release_snapmirror_vol.called)
self.assertEqual(4, self.dm_session.delete_snapmirror.call_count)
self.dm_session.delete_snapmirror.assert_called_with(
mock.ANY, mock.ANY, release=False
)
- self.mock_dest_client.create_snapmirror.assert_called_once_with(
+ self.mock_dest_client.create_snapmirror_vol.assert_called_once_with(
mock.ANY, fake_new_src_share_name, mock.ANY,
self.fake_dest_vol_name, schedule='hourly'
)
- self.mock_dest_client.resync_snapmirror.assert_called_once_with(
+ self.mock_dest_client.resync_snapmirror_vol.assert_called_once_with(
mock.ANY, fake_new_src_share_name, mock.ANY,
self.fake_dest_vol_name
)
@@ -517,11 +703,11 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.dm_session.delete_snapmirror.assert_called_with(
mock.ANY, mock.ANY, release=False
)
- self.mock_dest_client.create_snapmirror.assert_called_once_with(
+ self.mock_dest_client.create_snapmirror_vol.assert_called_once_with(
mock.ANY, fake_new_src_share_name, mock.ANY,
self.fake_dest_vol_name, schedule='hourly'
)
- self.mock_dest_client.resync_snapmirror.assert_called_once_with(
+ self.mock_dest_client.resync_snapmirror_vol.assert_called_once_with(
mock.ANY, fake_new_src_share_name, mock.ANY,
self.fake_dest_vol_name
)
@@ -533,8 +719,10 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.fake_dest_share)
self.mock_dest_client.get_snapmirrors.assert_called_with(
- self.source_vserver, self.fake_src_vol_name,
- self.dest_vserver, self.fake_dest_vol_name,
+ source_vserver=self.source_vserver,
+ dest_vserver=self.dest_vserver,
+ source_volume=self.fake_src_vol_name,
+ dest_volume=self.fake_dest_vol_name,
desired_attributes=['relationship-status',
'mirror-state',
'source-vserver',
@@ -543,23 +731,158 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
)
self.assertEqual(1, self.mock_dest_client.get_snapmirrors.call_count)
+ def test_get_snapmirrors_svm(self):
+ mock_dest_client = mock.Mock()
+ self.mock_object(self.dm_session, 'get_client_and_vserver_name',
+ mock.Mock(return_value=(mock_dest_client,
+ self.dest_vserver)))
+ self.mock_object(mock_dest_client, 'get_snapmirrors_svm')
+
+ self.dm_session.get_snapmirrors_svm(self.fake_src_share_server,
+ self.fake_dest_share_server)
+
+ mock_dest_client.get_snapmirrors_svm.assert_called_with(
+ source_vserver=self.source_vserver,
+ dest_vserver=self.dest_vserver,
+ desired_attributes=['relationship-status',
+ 'mirror-state',
+ 'last-transfer-end-timestamp']
+ )
+ self.assertEqual(1, mock_dest_client.get_snapmirrors_svm.call_count)
+
+ def test_get_snapmirror_destinations_svm(self):
+ mock_dest_client = mock.Mock()
+ self.mock_object(self.dm_session, 'get_client_and_vserver_name',
+ mock.Mock(return_value=(mock_dest_client,
+ self.dest_vserver)))
+ self.mock_object(mock_dest_client, 'get_snapmirror_destinations_svm')
+
+ self.dm_session.get_snapmirror_destinations_svm(
+ self.fake_src_share_server, self.fake_dest_share_server)
+
+ mock_dest_client.get_snapmirror_destinations_svm.assert_called_with(
+ source_vserver=self.source_vserver,
+ dest_vserver=self.dest_vserver,
+ )
+ self.assertEqual(1, mock_dest_client.get_snapmirror_destinations_svm
+ .call_count)
+
def test_update_snapmirror(self):
self.mock_object(self.mock_dest_client, 'get_snapmirrors')
self.dm_session.update_snapmirror(self.fake_src_share,
self.fake_dest_share)
- self.mock_dest_client.update_snapmirror.assert_called_once_with(
+ self.mock_dest_client.update_snapmirror_vol.assert_called_once_with(
self.source_vserver, self.fake_src_vol_name,
self.dest_vserver, self.fake_dest_vol_name)
+ def test_update_snapmirror_svm(self):
+ mock_dest_client = mock.Mock()
+ self.mock_object(self.dm_session, 'get_client_and_vserver_name',
+ mock.Mock(return_value=(mock_dest_client,
+ self.dest_vserver)))
+
+ self.dm_session.update_snapmirror_svm(self.fake_src_share_server,
+ self.fake_dest_share_server)
+
+ mock_dest_client.update_snapmirror_svm.assert_called_once_with(
+ self.source_vserver, self.dest_vserver)
+
+ def test_quiesce_and_break_snapmirror_svm(self):
+ mock_dest_client = mock.Mock()
+ self.mock_object(self.dm_session, 'get_client_and_vserver_name',
+ mock.Mock(return_value=(mock_dest_client,
+ self.dest_vserver)))
+ self.mock_object(self.dm_session, 'quiesce_then_abort_svm')
+
+ self.dm_session.quiesce_and_break_snapmirror_svm(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+
+ self.dm_session.get_client_and_vserver_name.assert_called_once_with(
+ self.fake_dest_share_server
+ )
+ self.dm_session.quiesce_then_abort_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ mock_dest_client.break_snapmirror_svm.assert_called_once_with(
+ self.source_vserver, self.dest_vserver)
+
+ @ddt.data({'snapmirrors': ['fake_snapmirror'],
+ 'vserver_subtype': 'default'},
+ {'snapmirrors': [],
+ 'vserver_subtype': 'default'},
+ {'snapmirrors': [],
+ 'vserver_subtype': 'dp_destination'})
+ @ddt.unpack
+ def test_cancel_snapmirror_svm(self, snapmirrors, vserver_subtype):
+ mock_dest_client = mock.Mock()
+ self.mock_object(self.dm_session, 'get_client_and_vserver_name',
+ mock.Mock(return_value=(mock_dest_client,
+ self.dest_vserver)))
+ mock_backend_config = na_fakes.create_configuration()
+ mock_backend_config.netapp_server_migration_state_change_timeout = 30
+ self.mock_object(data_motion, 'get_backend_configuration',
+ mock.Mock(return_value=mock_backend_config))
+ self.mock_object(self.dm_session, 'get_snapmirrors_svm',
+ mock.Mock(return_value=snapmirrors))
+ self.mock_object(self.dm_session, 'quiesce_and_break_snapmirror_svm')
+ self.mock_object(self.dm_session, 'wait_for_vserver_state')
+ self.mock_object(self.dm_session, 'delete_snapmirror_svm')
+ vserver_info = copy.deepcopy(fake.VSERVER_INFO)
+ vserver_info['subtype'] = vserver_subtype
+ self.mock_object(mock_dest_client, 'get_vserver_info',
+ mock.Mock(return_value=vserver_info))
+ self.mock_object(self.dm_session, 'convert_svm_to_default_subtype')
+
+ self.dm_session.cancel_snapmirror_svm(self.fake_src_share_server,
+ self.fake_dest_share_server)
+
+ data_motion.get_backend_configuration.assert_called_once_with(
+ self.dest_backend_name
+ )
+ self.dm_session.get_client_and_vserver_name.assert_called_once_with(
+ self.fake_dest_share_server
+ )
+ self.dm_session.get_snapmirrors_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ if snapmirrors:
+ quiesce_mock = self.dm_session.quiesce_and_break_snapmirror_svm
+ quiesce_mock.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ self.dm_session.wait_for_vserver_state.assert_called_once_with(
+ self.dest_vserver, mock_dest_client, subtype='default',
+ state='running', operational_state='stopped',
+ timeout=(mock_backend_config
+ .netapp_server_migration_state_change_timeout)
+ )
+ self.dm_session.delete_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ else:
+ mock_dest_client.get_vserver_info.assert_called_once_with(
+ self.dest_vserver
+ )
+ convert_svm = self.dm_session.convert_svm_to_default_subtype
+ if vserver_subtype == 'dp_destination':
+ convert_svm.assert_called_once_with(
+ self.dest_vserver, mock_dest_client,
+ timeout=(mock_backend_config
+ .netapp_server_migration_state_change_timeout)
+ )
+ else:
+ self.assertFalse(convert_svm.called)
+
def test_resume_snapmirror(self):
self.mock_object(self.mock_dest_client, 'get_snapmirrors')
self.dm_session.resume_snapmirror(self.fake_src_share,
self.fake_dest_share)
- self.mock_dest_client.resume_snapmirror.assert_called_once_with(
+ self.mock_dest_client.resume_snapmirror_vol.assert_called_once_with(
self.source_vserver, self.fake_src_vol_name,
self.dest_vserver, self.fake_dest_vol_name)
@@ -601,3 +924,127 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
(mock_source_client.set_qos_policy_group_for_volume
.assert_called_once_with(self.fake_src_vol_name, 'none'))
data_motion.LOG.exception.assert_not_called()
+
+ @ddt.data(True, False)
+ def test_convert_svm_to_default_subtype(self, is_dest):
+ mock_client = mock.Mock()
+ vserver_info_default = copy.deepcopy(fake.VSERVER_INFO)
+ vserver_info_default['subtype'] = 'default'
+ vserver_info_dp = copy.deepcopy(fake.VSERVER_INFO)
+ vserver_info_dp['subtype'] = 'dp_destination'
+ self.mock_object(mock_client, 'get_vserver_info',
+ mock.Mock(side_effect=[vserver_info_dp,
+ vserver_info_default]))
+ self.mock_object(mock_client, 'break_snapmirror_svm')
+
+ self.dm_session.convert_svm_to_default_subtype(fake.VSERVER1,
+ mock_client,
+ is_dest_path=is_dest,
+ timeout=20)
+
+ mock_client.get_vserver_info.assert_has_calls([
+ mock.call(fake.VSERVER1), mock.call(fake.VSERVER1)])
+ if is_dest:
+ mock_client.break_snapmirror_svm.assert_called_once_with(
+ dest_vserver=fake.VSERVER1
+ )
+ else:
+ mock_client.break_snapmirror_svm.assert_called_once_with(
+ source_vserver=fake.VSERVER1
+ )
+
+ def test_convert_svm_to_default_subtype_timeout(self):
+ mock_client = mock.Mock()
+ vserver_info_dp = copy.deepcopy(fake.VSERVER_INFO)
+ vserver_info_dp['subtype'] = 'dp_destination'
+ self.mock_object(mock_client, 'get_vserver_info',
+ mock.Mock(side_effect=[vserver_info_dp]))
+ self.mock_object(mock_client, 'break_snapmirror_svm')
+
+ self.assertRaises(
+ exception.NetAppException,
+ self.dm_session.convert_svm_to_default_subtype,
+ fake.VSERVER1, mock_client, is_dest_path=True, timeout=10)
+
+ mock_client.get_vserver_info.assert_called_once_with(fake.VSERVER1)
+ mock_client.break_snapmirror_svm.assert_called_once_with(
+ dest_vserver=fake.VSERVER1)
+
+ def test_wait_for_vserver_state(self):
+ mock_client = mock.Mock()
+ vserver_info_default = copy.deepcopy(fake.VSERVER_INFO)
+ vserver_info_default['subtype'] = 'default'
+ vserver_info_dp = copy.deepcopy(fake.VSERVER_INFO)
+ vserver_info_dp['subtype'] = 'dp_destination'
+ self.mock_object(mock_client, 'get_vserver_info',
+ mock.Mock(side_effect=[vserver_info_dp,
+ vserver_info_default]))
+
+ self.dm_session.wait_for_vserver_state(fake.VSERVER1, mock_client,
+ state='running',
+ operational_state='running',
+ subtype='default', timeout=20)
+
+ mock_client.get_vserver_info.assert_has_calls([
+ mock.call(fake.VSERVER1), mock.call(fake.VSERVER1)])
+
+ def test_wait_for_vserver_state_timeout(self):
+ mock_client = mock.Mock()
+ vserver_info_dp = copy.deepcopy(fake.VSERVER_INFO)
+ vserver_info_dp['subtype'] = 'dp_destination'
+ self.mock_object(mock_client, 'get_vserver_info',
+ mock.Mock(side_effect=[vserver_info_dp]))
+
+ self.assertRaises(
+ exception.NetAppException,
+ self.dm_session.wait_for_vserver_state,
+ fake.VSERVER1, mock_client, state='running',
+ operational_state='running', subtype='default', timeout=10)
+
+ mock_client.get_vserver_info.assert_called_once_with(fake.VSERVER1)
+
+ @ddt.data(mock.Mock(),
+ mock.Mock(side_effect=netapp_api.NaApiError(
+ code=netapp_api.EOBJECTNOTFOUND)))
+ def test_wait_for_snapmirror_release_svm(self, release_snapmirror_ret):
+ src_mock_client = mock.Mock()
+ get_snapmirrors_mock = self.mock_object(
+ src_mock_client, 'get_snapmirror_destinations_svm',
+ mock.Mock(side_effect=[['fake_snapmirror'], []]))
+ self.mock_object(src_mock_client, 'release_snapmirror_svm',
+ release_snapmirror_ret)
+
+ self.dm_session.wait_for_snapmirror_release_svm(fake.VSERVER1,
+ fake.VSERVER2,
+ src_mock_client,
+ timeout=20)
+ get_snapmirrors_mock.assert_has_calls([
+ mock.call(source_vserver=fake.VSERVER1,
+ dest_vserver=fake.VSERVER2),
+ mock.call(source_vserver=fake.VSERVER1,
+ dest_vserver=fake.VSERVER2)])
+ if release_snapmirror_ret.side_effect is None:
+ src_mock_client.release_snapmirror_svm.assert_called_once_with(
+ fake.VSERVER1, fake.VSERVER2)
+ else:
+ src_mock_client.release_snapmirror_svm.assert_called_once_with(
+ fake.VSERVER1, fake.VSERVER2
+ )
+
+ def test_wait_for_snapmirror_release_svm_timeout(self):
+ src_mock_client = mock.Mock()
+ get_snapmirrors_mock = self.mock_object(
+ src_mock_client, 'get_snapmirror_destinations_svm',
+ mock.Mock(side_effect=[['fake_snapmirror']]))
+ self.mock_object(src_mock_client, 'release_snapmirror_svm')
+
+ self.assertRaises(exception.NetAppException,
+ self.dm_session.wait_for_snapmirror_release_svm,
+ fake.VSERVER1, fake.VSERVER2,
+ src_mock_client, timeout=10)
+
+ get_snapmirrors_mock.assert_called_once_with(
+ source_vserver=fake.VSERVER1, dest_vserver=fake.VSERVER2)
+ src_mock_client.release_snapmirror_svm.assert_called_once_with(
+ fake.VSERVER1, fake.VSERVER2
+ )
diff --git a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py
index edd5f8d9e3..9bbbecea9b 100644
--- a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py
+++ b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py
@@ -313,6 +313,13 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertEqual(expected, result)
+ def test__get_backend_snapmirror_policy_name_svm(self):
+ result = self.library._get_backend_snapmirror_policy_name_svm(
+ fake.SERVER_ID)
+ expected = 'snapmirror_policy_' + fake.SERVER_ID.replace('-', '_')
+
+ self.assertEqual(expected, result)
+
def test_get_aggregate_space_cluster_creds(self):
self.library._have_cluster_creds = True
@@ -1804,7 +1811,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock_get_export_addresses_with_metadata.assert_called_once_with(
fake.SHARE, fake.SHARE_SERVER, fake.LIFS)
protocol_helper.create_share.assert_called_once_with(
- fake.SHARE, fake.SHARE_NAME, clear_current_export_policy=True)
+ fake.SHARE, fake.SHARE_NAME, clear_current_export_policy=True,
+ ensure_share_already_exists=False)
def test_create_export_lifs_not_found(self):
@@ -3117,8 +3125,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
share_server=None)
self.assertDictMatch(expected_model_update, model_update)
- mock_dm_session.create_snapmirror.assert_called_once_with(fake.SHARE,
- fake.SHARE)
+ mock_dm_session.create_snapmirror.assert_called_once_with(
+ fake.SHARE, fake.SHARE)
data_motion.get_client_for_backend.assert_called_once_with(
fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
@@ -3144,8 +3152,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
share_server=fake.SHARE_SERVER)
self.assertDictMatch(expected_model_update, model_update)
- mock_dm_session.create_snapmirror.assert_called_once_with(fake.SHARE,
- fake.SHARE)
+ mock_dm_session.create_snapmirror.assert_called_once_with(
+ fake.SHARE, fake.SHARE)
data_motion.get_client_for_backend.assert_called_once_with(
fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
@@ -3339,7 +3347,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
result = self.library.update_replica_state(
None, [replica], replica, None, [], share_server=None)
- self.assertEqual(1, self.mock_dm_session.create_snapmirror.call_count)
+ self.assertEqual(1,
+ self.mock_dm_session.create_snapmirror.call_count)
self.assertEqual(constants.STATUS_OUT_OF_SYNC, result)
def test_update_replica_state_broken_snapmirror(self):
@@ -3364,7 +3373,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.SHARE, None, [],
share_server=None)
- vserver_client.resync_snapmirror.assert_called_once_with(
+ vserver_client.resync_snapmirror_vol.assert_called_once_with(
fake.VSERVER2, 'fake_volume', fake.VSERVER1, fake.SHARE['name']
)
@@ -3428,13 +3437,14 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
vserver_client)))
self.mock_dm_session.get_snapmirrors = mock.Mock(
return_value=[fake_snapmirror])
- vserver_client.resync_snapmirror.side_effect = netapp_api.NaApiError
+ vserver_client.resync_snapmirror_vol.side_effect = (
+ netapp_api.NaApiError)
result = self.library.update_replica_state(None, [fake.SHARE],
fake.SHARE, None, [],
share_server=None)
- vserver_client.resync_snapmirror.assert_called_once_with(
+ vserver_client.resync_snapmirror_vol.assert_called_once_with(
fake.VSERVER2, 'fake_volume', fake.VSERVER1, fake.SHARE['name']
)
@@ -5820,3 +5830,37 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.SHARE_SERVER,
cutover_action)
mock_warning_log.assert_not_called()
+
+ @ddt.data({'total': 20, 'free': 5, 'reserved': 10, 'thin': False,
+ 'over_sub': 0, 'size': 3, 'compatible': True, 'nb_pools': 1},
+ {'total': 20, 'free': 5, 'reserved': 10, 'thin': False,
+ 'over_sub': 0, 'size': 4, 'compatible': False, 'nb_pools': 1},
+ {'total': 20, 'free': 5, 'reserved': 20, 'thin': False,
+ 'over_sub': 1.1, 'size': 3, 'compatible': False, 'nb_pools': 1},
+ {'total': 20, 'free': 5, 'reserved': 10, 'thin': True,
+ 'over_sub': 2.0, 'size': 6, 'compatible': True, 'nb_pools': 1},
+ {'total': 20, 'free': 5, 'reserved': 10, 'thin': True,
+ 'over_sub': 1.0, 'size': 4, 'compatible': False, 'nb_pools': 1},
+ {'total': 'unknown', 'free': 5, 'reserved': 0, 'thin': False,
+ 'over_sub': 3.0, 'size': 1, 'compatible': False, 'nb_pools': 1},
+ {'total': 20, 'free': 5, 'reserved': 10, 'thin': True,
+ 'over_sub': 1.0, 'size': 6, 'compatible': True, 'nb_pools': 2},
+ {'total': 20, 'free': 5, 'reserved': 10, 'thin': True,
+ 'over_sub': 1.0, 'size': 7, 'compatible': False, 'nb_pools': 2},
+ )
+ @ddt.unpack
+ def test__check_capacity_compatibility(self, total, free, reserved, thin,
+ over_sub, size, compatible,
+ nb_pools):
+ pools = []
+ for p in range(nb_pools):
+ pool = copy.deepcopy(fake.POOLS[0])
+ pool['total_capacity_gb'] = total
+ pool['free_capacity_gb'] = free
+ pool['reserved_percentage'] = reserved
+ pool['max_over_subscription_ratio'] = over_sub
+ pools.append(pool)
+
+ result = self.library._check_capacity_compatibility(pools, thin, size)
+
+ self.assertEqual(compatible, result)
diff --git a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py
index 868dd705b4..0551012016 100644
--- a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py
+++ b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py
@@ -87,6 +87,34 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.fake_client = mock.Mock()
self.library._default_nfs_config = fake.NFS_CONFIG_DEFAULT
+ # Server migration
+ self.dm_session = data_motion.DataMotionSession()
+ self.fake_src_share = copy.deepcopy(fake.SHARE)
+ self.fake_src_share_server = copy.deepcopy(fake.SHARE_SERVER)
+ self.fake_src_vserver = 'source_vserver'
+ self.fake_src_backend_name = (
+ self.fake_src_share_server['host'].split('@')[1])
+ self.fake_src_share_server['backend_details']['vserver_name'] = (
+ self.fake_src_vserver
+ )
+ self.fake_src_share['share_server'] = self.fake_src_share_server
+ self.fake_src_share['id'] = 'fb9be037-8a75-4c2a-bb7d-f63dffe13015'
+ self.fake_src_vol_name = 'share_fb9be037_8a75_4c2a_bb7d_f63dffe13015'
+ self.fake_dest_share = copy.deepcopy(fake.SHARE)
+ self.fake_dest_share_server = copy.deepcopy(fake.SHARE_SERVER_2)
+ self.fake_dest_vserver = 'dest_vserver'
+ self.fake_dest_backend_name = (
+ self.fake_dest_share_server['host'].split('@')[1])
+ self.fake_dest_share_server['backend_details']['vserver_name'] = (
+ self.fake_dest_vserver
+ )
+ self.fake_dest_share['share_server'] = self.fake_dest_share_server
+ self.fake_dest_share['id'] = 'aa6a3941-f87f-4874-92ca-425d3df85457'
+ self.fake_dest_vol_name = 'share_aa6a3941_f87f_4874_92ca_425d3df85457'
+
+ self.mock_src_client = mock.Mock()
+ self.mock_dest_client = mock.Mock()
+
def test_check_for_setup_error_cluster_creds_no_vserver(self):
self.library._have_cluster_creds = True
self.mock_object(self.library,
@@ -137,10 +165,10 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library._get_vserver)
def test_get_vserver_no_share_server_with_vserver_name(self):
- fake_vserver_client = 'fake_client'
+ fake_vserver_client = mock.Mock()
mock_vserver_exists = self.mock_object(
- self.library._client, 'vserver_exists',
+ fake_vserver_client, 'vserver_exists',
mock.Mock(return_value=True))
self.mock_object(self.library,
'_get_api_client',
@@ -197,7 +225,11 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
def test_get_vserver_not_found(self):
- self.library._client.vserver_exists.return_value = False
+ mock_client = mock.Mock()
+ mock_client.vserver_exists.return_value = False
+ self.mock_object(self.library,
+ '_get_api_client',
+ mock.Mock(return_value=mock_client))
kwargs = {'share_server': fake.SHARE_SERVER}
self.assertRaises(exception.VserverNotFound,
@@ -206,14 +238,15 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
def test_get_vserver(self):
- self.library._client.vserver_exists.return_value = True
+ mock_client = mock.Mock()
+ mock_client.vserver_exists.return_value = True
self.mock_object(self.library,
'_get_api_client',
- mock.Mock(return_value='fake_client'))
+ mock.Mock(return_value=mock_client))
result = self.library._get_vserver(share_server=fake.SHARE_SERVER)
- self.assertTupleEqual((fake.VSERVER1, 'fake_client'), result)
+ self.assertTupleEqual((fake.VSERVER1, mock_client), result)
def test_get_ems_pool_info(self):
@@ -459,8 +492,11 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertEqual(vserver_name, actual_result)
- @ddt.data(None, fake.IPSPACE)
- def test_create_vserver(self, existing_ipspace):
+ @ddt.data({'existing_ipspace': None,
+ 'nfs_config': fake.NFS_CONFIG_TCP_UDP_MAX},
+ {'existing_ipspace': fake.IPSPACE, 'nfs_config': None})
+ @ddt.unpack
+ def test_create_vserver(self, existing_ipspace, nfs_config):
versions = ['fake_v1', 'fake_v2']
self.library.configuration.netapp_enabled_share_protocols = versions
@@ -498,7 +534,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.mock_object(self.library, '_create_vserver_routes')
self.library._create_vserver(vserver_name, fake.NETWORK_INFO,
- fake.NFS_CONFIG_TCP_UDP_MAX)
+ fake.NFS_CONFIG_TCP_UDP_MAX,
+ nfs_config=nfs_config)
get_ipspace_name_for_vlan_port.assert_called_once_with(
fake.CLUSTER_NODES[0],
@@ -519,11 +556,59 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library._create_vserver_routes.assert_called_once_with(
vserver_client, fake.NETWORK_INFO)
vserver_client.enable_nfs.assert_called_once_with(
- versions, nfs_config=fake.NFS_CONFIG_TCP_UDP_MAX)
+ versions, nfs_config=nfs_config)
self.library._client.setup_security_services.assert_called_once_with(
fake.NETWORK_INFO['security_services'], vserver_client,
vserver_name)
+ @ddt.data(None, fake.IPSPACE)
+ def test_create_vserver_dp_destination(self, existing_ipspace):
+ versions = ['fake_v1', 'fake_v2']
+ self.library.configuration.netapp_enabled_share_protocols = versions
+ vserver_id = fake.NETWORK_INFO['server_id']
+ vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id
+
+ self.mock_object(self.library._client,
+ 'vserver_exists',
+ mock.Mock(return_value=False))
+ self.mock_object(self.library._client,
+ 'list_cluster_nodes',
+ mock.Mock(return_value=fake.CLUSTER_NODES))
+ self.mock_object(self.library,
+ '_get_node_data_port',
+ mock.Mock(return_value='fake_port'))
+ self.mock_object(context,
+ 'get_admin_context',
+ mock.Mock(return_value='fake_admin_context'))
+ self.mock_object(self.library,
+ '_find_matching_aggregates',
+ mock.Mock(return_value=fake.AGGREGATES))
+ self.mock_object(self.library,
+ '_create_ipspace',
+ mock.Mock(return_value=fake.IPSPACE))
+
+ get_ipspace_name_for_vlan_port = self.mock_object(
+ self.library._client,
+ 'get_ipspace_name_for_vlan_port',
+ mock.Mock(return_value=existing_ipspace))
+ self.mock_object(self.library, '_create_port_and_broadcast_domain')
+
+ self.library._create_vserver(vserver_name, fake.NETWORK_INFO,
+ metadata={'migration_destination': True})
+
+ get_ipspace_name_for_vlan_port.assert_called_once_with(
+ fake.CLUSTER_NODES[0],
+ 'fake_port',
+ fake.NETWORK_INFO['segmentation_id'])
+ if not existing_ipspace:
+ self.library._create_ipspace.assert_called_once_with(
+ fake.NETWORK_INFO)
+ create_server_mock = self.library._client.create_vserver_dp_destination
+ create_server_mock.assert_called_once_with(
+ vserver_name, fake.AGGREGATES, fake.IPSPACE)
+ self.library._create_port_and_broadcast_domain.assert_called_once_with(
+ fake.IPSPACE, fake.NETWORK_INFO)
+
def test_create_vserver_already_present(self):
vserver_id = fake.NETWORK_INFO['server_id']
@@ -543,22 +628,23 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.NFS_CONFIG_TCP_UDP_MAX)
@ddt.data(
- {'lif_exception': netapp_api.NaApiError,
+ {'network_exception': netapp_api.NaApiError,
'existing_ipspace': fake.IPSPACE},
- {'lif_exception': netapp_api.NaApiError,
+ {'network_exception': netapp_api.NaApiError,
'existing_ipspace': None},
- {'lif_exception': exception.NetAppException,
+ {'network_exception': exception.NetAppException,
'existing_ipspace': None},
- {'lif_exception': exception.NetAppException,
+ {'network_exception': exception.NetAppException,
'existing_ipspace': fake.IPSPACE})
@ddt.unpack
def test_create_vserver_lif_creation_failure(self,
- lif_exception,
+ network_exception,
existing_ipspace):
vserver_id = fake.NETWORK_INFO['server_id']
vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id
vserver_client = mock.Mock()
+ security_service = fake.NETWORK_INFO['security_services']
self.mock_object(self.library._client,
'list_cluster_nodes',
@@ -585,11 +671,11 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
'_create_ipspace',
mock.Mock(return_value=fake.IPSPACE))
self.mock_object(self.library,
- '_create_vserver_lifs',
- mock.Mock(side_effect=lif_exception))
+ '_setup_network_for_vserver',
+ mock.Mock(side_effect=network_exception))
self.mock_object(self.library, '_delete_vserver')
- self.assertRaises(lif_exception,
+ self.assertRaises(network_exception,
self.library._create_vserver,
vserver_name,
fake.NETWORK_INFO,
@@ -597,15 +683,17 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library._get_api_client.assert_called_with(vserver=vserver_name)
self.assertTrue(self.library._client.create_vserver.called)
- self.library._create_vserver_lifs.assert_called_with(
+ self.library._setup_network_for_vserver.assert_called_with(
vserver_name,
vserver_client,
fake.NETWORK_INFO,
- fake.IPSPACE)
+ fake.IPSPACE,
+ security_services=security_service,
+ nfs_config=None)
self.library._delete_vserver.assert_called_once_with(
vserver_name,
needs_lock=False,
- security_services=None)
+ security_services=security_service)
self.assertFalse(vserver_client.enable_nfs.called)
self.assertEqual(1, lib_multi_svm.LOG.error.call_count)
@@ -885,6 +973,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.mock_object(self.library,
'_get_api_client',
mock.Mock(return_value=vserver_client))
+ self.mock_object(self.library._client,
+ 'get_snapmirror_policies',
+ mock.Mock(return_value=[]))
mock_delete_vserver_vlans = self.mock_object(self.library,
'_delete_vserver_vlans')
@@ -924,8 +1015,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.mock_object(self.library._client,
'ipspace_has_data_vservers',
mock.Mock(return_value=True))
- mock_delete_vserver_vlans = self.mock_object(self.library,
- '_delete_vserver_vlans')
+ self.mock_object(self.library._client,
+ 'get_snapmirror_policies',
+ mock.Mock(return_value=[]))
self.mock_object(self.library, '_delete_vserver_peers')
self.mock_object(
vserver_client, 'get_network_interfaces',
@@ -943,8 +1035,6 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library._delete_vserver_peers.assert_called_once_with(
fake.VSERVER1)
self.assertFalse(self.library._client.delete_ipspace.called)
- mock_delete_vserver_vlans.assert_called_once_with(
- [c_fake.NETWORK_INTERFACES_MULTIPLE[0]])
@ddt.data([], c_fake.NETWORK_INTERFACES)
def test_delete_vserver_with_ipspace(self, interfaces):
@@ -965,11 +1055,16 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.mock_object(vserver_client,
'get_network_interfaces',
mock.Mock(return_value=interfaces))
+ self.mock_object(self.library._client,
+ 'get_snapmirror_policies',
+ mock.Mock(return_value=['fake_policy']))
security_services = fake.NETWORK_INFO['security_services']
self.library._delete_vserver(fake.VSERVER1,
security_services=security_services)
+ vserver_client.delete_snapmirror_policy.assert_called_once_with(
+ 'fake_policy')
self.library._delete_vserver_peers.assert_called_once_with(
fake.VSERVER1
)
@@ -1401,6 +1496,12 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library,
"_get_nfs_config_provisioning_options",
mock.Mock(return_value=nfs_config))
+ mock_client = mock.Mock()
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(return_value=('fake_name',
+ mock_client)))
+ self.mock_object(mock_client, 'get_vserver_info',
+ mock.Mock(return_value=fake.VSERVER_INFO))
server = self.library.choose_share_server_compatible_with_share(
None, fake.SHARE_SERVERS, fake.SHARE, None, share_group)
@@ -1433,6 +1534,12 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library,
"_get_nfs_config_provisioning_options",
mock.Mock(return_value=fake.NFS_CONFIG_DEFAULT))
+ mock_client = mock.Mock()
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(return_value=('fake_name',
+ mock_client)))
+ self.mock_object(mock_client, 'get_vserver_info',
+ mock.Mock(return_value=fake.VSERVER_INFO))
server = self.library.choose_share_server_compatible_with_share(
None, fake.SHARE_SERVERS, fake.SHARE, None, share_group)
@@ -1467,6 +1574,12 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library,
"_get_nfs_config_provisioning_options",
mock.Mock(return_value=nfs_config))
+ mock_client = mock.Mock()
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(return_value=('fake_name',
+ mock_client)))
+ self.mock_object(mock_client, 'get_vserver_info',
+ mock.Mock(return_value=fake.VSERVER_INFO))
server = self.library.choose_share_server_compatible_with_share(
None, fake.SHARE_SERVERS, fake.SHARE)
@@ -1504,6 +1617,12 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library,
"_get_nfs_config_provisioning_options",
mock.Mock(return_value=fake.NFS_CONFIG_DEFAULT))
+ mock_client = mock.Mock()
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(return_value=('fake_name',
+ mock_client)))
+ self.mock_object(mock_client, 'get_vserver_info',
+ mock.Mock(return_value=fake.VSERVER_INFO))
server = self.library.choose_share_server_compatible_with_share(
None, share_servers, fake.SHARE)
@@ -1597,13 +1716,13 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
'share_servers': [
fake.SHARE_SERVER_NFS_TCP, fake.SHARE_SERVER_NFS_UDP,
fake.SHARE_SERVER_NFS_DEFAULT, fake.SHARE_SERVER_NFS_TCP_UDP]},
- {'expected_server': fake.NFS_CONFIG_DEFAULT,
+ {'expected_server': fake.SHARE_SERVER_NO_DETAILS,
'nfs_config': None,
- 'share_servers': [fake.NFS_CONFIG_DEFAULT],
+ 'share_servers': [fake.SHARE_SERVER_NO_DETAILS],
'nfs_config_supported': False}
)
@ddt.unpack
- def test_choose_share_server_compatible_with_share_group(
+ def test_choose_share_server_compatible_with_share_group_nfs(
self, expected_server, nfs_config, share_servers,
nfs_config_supported=True):
self.library.is_nfs_config_supported = nfs_config_supported
@@ -1618,6 +1737,12 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library,
'_check_nfs_config_extra_specs_validity',
mock.Mock())
+ mock_client = mock.Mock()
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(return_value=('fake_name',
+ mock_client)))
+ self.mock_object(mock_client, 'get_vserver_info',
+ mock.Mock(return_value=fake.VSERVER_INFO))
server = self.library.choose_share_server_compatible_with_share_group(
None, share_servers, fake.SHARE_GROUP_REF)
@@ -1629,3 +1754,754 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock_get_nfs_config.assert_not_called()
mock_check_extra_spec.assert_not_called()
self.assertEqual(expected_server, server)
+
+ def test_share_server_migration_check_compatibility_same_backend(
+ self):
+ not_compatible = fake.SERVER_MIGRATION_CHECK_NOT_COMPATIBLE
+ self.library._have_cluster_creds = True
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(return_value=(None, None)))
+
+ result = self.library.share_server_migration_check_compatibility(
+ None, self.fake_src_share_server,
+ self.fake_src_share_server['host'],
+ None, None, None)
+
+ self.assertEqual(not_compatible, result)
+
+ def _configure_mocks_share_server_migration_check_compatibility(
+ self, have_cluster_creds=True,
+ src_cluster_name=fake.CLUSTER_NAME,
+ dest_cluster_name=fake.CLUSTER_NAME_2,
+ src_svm_dr_support=True, dest_svm_dr_support=True,
+ check_capacity_result=True,
+ pools=fake.POOLS):
+ self.library._have_cluster_creds = have_cluster_creds
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(return_value=(self.fake_src_vserver,
+ self.mock_src_client)))
+ self.mock_object(self.mock_src_client, 'get_cluster_name',
+ mock.Mock(return_value=src_cluster_name))
+ self.mock_object(self.client, 'get_cluster_name',
+ mock.Mock(return_value=dest_cluster_name))
+ self.mock_object(data_motion, 'get_client_for_backend',
+ mock.Mock(return_value=self.mock_dest_client))
+ self.mock_object(self.mock_src_client, 'is_svm_dr_supported',
+ mock.Mock(return_value=src_svm_dr_support))
+ self.mock_object(self.mock_dest_client, 'is_svm_dr_supported',
+ mock.Mock(return_value=dest_svm_dr_support))
+ self.mock_object(self.library, '_get_pools',
+ mock.Mock(return_value=pools))
+ self.mock_object(self.library, '_check_capacity_compatibility',
+ mock.Mock(return_value=check_capacity_result))
+
+ def test_share_server_migration_check_compatibility_dest_with_pool(
+ self):
+ not_compatible = fake.SERVER_MIGRATION_CHECK_NOT_COMPATIBLE
+ self.library._have_cluster_creds = True
+
+ result = self.library.share_server_migration_check_compatibility(
+ None, self.fake_src_share_server, fake.MANILA_HOST_NAME,
+ None, None, None)
+
+ self.assertEqual(not_compatible, result)
+
+ def test_share_server_migration_check_compatibility_same_cluster(
+ self):
+ not_compatible = fake.SERVER_MIGRATION_CHECK_NOT_COMPATIBLE
+ self._configure_mocks_share_server_migration_check_compatibility(
+ src_cluster_name=fake.CLUSTER_NAME,
+ dest_cluster_name=fake.CLUSTER_NAME,
+ )
+
+ result = self.library.share_server_migration_check_compatibility(
+ None, self.fake_src_share_server,
+ self.fake_dest_share_server['host'],
+ None, None, None)
+
+ self.assertEqual(not_compatible, result)
+ self.library._get_vserver.assert_called_once_with(
+ self.fake_src_share_server,
+ backend_name=self.fake_src_backend_name
+ )
+ self.assertTrue(self.mock_src_client.get_cluster_name.called)
+ self.assertTrue(self.client.get_cluster_name.called)
+
+ def test_share_server_migration_check_compatibility_svm_dr_not_supported(
+ self):
+ not_compatible = fake.SERVER_MIGRATION_CHECK_NOT_COMPATIBLE
+ self._configure_mocks_share_server_migration_check_compatibility(
+ dest_svm_dr_support=False,
+ )
+
+ result = self.library.share_server_migration_check_compatibility(
+ None, self.fake_src_share_server,
+ self.fake_dest_share_server['host'],
+ None, None, None)
+
+ self.assertEqual(not_compatible, result)
+ self.library._get_vserver.assert_called_once_with(
+ self.fake_src_share_server,
+ backend_name=self.fake_src_backend_name
+ )
+ self.assertTrue(self.mock_src_client.get_cluster_name.called)
+ self.assertTrue(self.client.get_cluster_name.called)
+ data_motion.get_client_for_backend.assert_called_once_with(
+ self.fake_dest_backend_name, vserver_name=None
+ )
+ self.assertTrue(self.mock_src_client.is_svm_dr_supported.called)
+ self.assertTrue(self.mock_dest_client.is_svm_dr_supported.called)
+
+ def test_share_server_migration_check_compatibility_different_sec_service(
+ self):
+ not_compatible = fake.SERVER_MIGRATION_CHECK_NOT_COMPATIBLE
+ self._configure_mocks_share_server_migration_check_compatibility()
+ new_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE)
+ new_sec_service['id'] = 'new_sec_serv_id'
+ new_share_network = copy.deepcopy(fake.SHARE_NETWORK)
+ new_share_network['id'] = 'fake_share_network_id_2'
+ new_share_network['security_services'] = [new_sec_service]
+
+ result = self.library.share_server_migration_check_compatibility(
+ None, self.fake_src_share_server,
+ self.fake_dest_share_server['host'],
+ fake.SHARE_NETWORK, new_share_network, None)
+
+ self.assertEqual(not_compatible, result)
+ self.library._get_vserver.assert_called_once_with(
+ self.fake_src_share_server,
+ backend_name=self.fake_src_backend_name
+ )
+ self.assertTrue(self.mock_src_client.get_cluster_name.called)
+ self.assertTrue(self.client.get_cluster_name.called)
+ data_motion.get_client_for_backend.assert_called_once_with(
+ self.fake_dest_backend_name, vserver_name=None
+ )
+ self.assertTrue(self.mock_src_client.is_svm_dr_supported.called)
+ self.assertTrue(self.mock_dest_client.is_svm_dr_supported.called)
+
+ @ddt.data('netapp_flexvol_encryption', 'revert_to_snapshot_support')
+ def test_share_server_migration_check_compatibility_invalid_capabilities(
+ self, capability):
+ not_compatible = fake.SERVER_MIGRATION_CHECK_NOT_COMPATIBLE
+ pools_without_capability = copy.deepcopy(fake.POOLS)
+ for pool in pools_without_capability:
+ pool[capability] = False
+ self._configure_mocks_share_server_migration_check_compatibility(
+ pools=pools_without_capability
+ )
+
+ result = self.library.share_server_migration_check_compatibility(
+ None, self.fake_src_share_server,
+ self.fake_dest_share_server['host'],
+ fake.SHARE_NETWORK, fake.SHARE_NETWORK,
+ fake.SERVER_MIGRATION_REQUEST_SPEC)
+
+ self.assertEqual(not_compatible, result)
+ self.library._get_vserver.assert_called_once_with(
+ self.fake_src_share_server,
+ backend_name=self.fake_src_backend_name
+ )
+ self.assertTrue(self.mock_src_client.get_cluster_name.called)
+ self.assertTrue(self.client.get_cluster_name.called)
+ data_motion.get_client_for_backend.assert_called_once_with(
+ self.fake_dest_backend_name, vserver_name=None
+ )
+ self.assertTrue(self.mock_src_client.is_svm_dr_supported.called)
+ self.assertTrue(self.mock_dest_client.is_svm_dr_supported.called)
+
+ def test_share_server_migration_check_compatibility_capacity_false(
+ self):
+ not_compatible = fake.SERVER_MIGRATION_CHECK_NOT_COMPATIBLE
+ self._configure_mocks_share_server_migration_check_compatibility(
+ check_capacity_result=False
+ )
+
+ result = self.library.share_server_migration_check_compatibility(
+ None, self.fake_src_share_server,
+ self.fake_dest_share_server['host'],
+ fake.SHARE_NETWORK, fake.SHARE_NETWORK,
+ fake.SERVER_MIGRATION_REQUEST_SPEC)
+
+ self.assertEqual(not_compatible, result)
+ self.library._get_vserver.assert_called_once_with(
+ self.fake_src_share_server,
+ backend_name=self.fake_src_backend_name
+ )
+ self.assertTrue(self.mock_src_client.get_cluster_name.called)
+ self.assertTrue(self.client.get_cluster_name.called)
+ data_motion.get_client_for_backend.assert_called_once_with(
+ self.fake_dest_backend_name, vserver_name=None
+ )
+ self.assertTrue(self.mock_src_client.is_svm_dr_supported.called)
+ self.assertTrue(self.mock_dest_client.is_svm_dr_supported.called)
+ total_size = (fake.SERVER_MIGRATION_REQUEST_SPEC['shares_size'] +
+ fake.SERVER_MIGRATION_REQUEST_SPEC['snapshots_size'])
+ self.library._check_capacity_compatibility.assert_called_once_with(
+ fake.POOLS,
+ self.library.configuration.max_over_subscription_ratio > 1,
+ total_size
+ )
+
+ def test_share_server_migration_check_compatibility_compatible(self):
+ compatible = {
+ 'compatible': True,
+ 'writable': True,
+ 'nondisruptive': False,
+ 'preserve_snapshots': True,
+ 'migration_cancel': True,
+ 'migration_get_progress': False,
+ 'share_network_id': fake.SHARE_NETWORK['id']
+ }
+ self._configure_mocks_share_server_migration_check_compatibility()
+
+ result = self.library.share_server_migration_check_compatibility(
+ None, self.fake_src_share_server,
+ self.fake_dest_share_server['host'],
+ fake.SHARE_NETWORK, fake.SHARE_NETWORK,
+ fake.SERVER_MIGRATION_REQUEST_SPEC)
+
+ self.assertEqual(compatible, result)
+ self.library._get_vserver.assert_called_once_with(
+ self.fake_src_share_server,
+ backend_name=self.fake_src_backend_name
+ )
+ self.assertTrue(self.mock_src_client.get_cluster_name.called)
+ self.assertTrue(self.client.get_cluster_name.called)
+ data_motion.get_client_for_backend.assert_called_once_with(
+ self.fake_dest_backend_name, vserver_name=None
+ )
+ self.assertTrue(self.mock_src_client.is_svm_dr_supported.called)
+ self.assertTrue(self.mock_dest_client.is_svm_dr_supported.called)
+ total_size = (fake.SERVER_MIGRATION_REQUEST_SPEC['shares_size'] +
+ fake.SERVER_MIGRATION_REQUEST_SPEC['snapshots_size'])
+ self.library._check_capacity_compatibility.assert_called_once_with(
+ fake.POOLS,
+ self.library.configuration.max_over_subscription_ratio > 1,
+ total_size
+ )
+
+ @ddt.data({'vserver_peered': True, 'src_cluster': fake.CLUSTER_NAME},
+ {'vserver_peered': False, 'src_cluster': fake.CLUSTER_NAME},
+ {'vserver_peered': False,
+ 'src_cluster': fake.CLUSTER_NAME_2})
+ @ddt.unpack
+ def test_share_server_migration_start(self, vserver_peered,
+ src_cluster):
+ dest_cluster = fake.CLUSTER_NAME
+ dm_session_mock = mock.Mock()
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(side_effect=[
+ (self.fake_src_vserver, self.mock_src_client),
+ (self.fake_dest_vserver,
+ self.mock_dest_client)]))
+ self.mock_object(self.mock_src_client, 'get_cluster_name',
+ mock.Mock(return_value=src_cluster))
+ self.mock_object(self.mock_dest_client, 'get_cluster_name',
+ mock.Mock(return_value=dest_cluster))
+ self.mock_object(self.library, '_get_vserver_peers',
+ mock.Mock(return_value=vserver_peered))
+ self.mock_object(data_motion, "DataMotionSession",
+ mock.Mock(return_value=dm_session_mock))
+
+ self.library.share_server_migration_start(
+ None, self.fake_src_share_server, self.fake_dest_share_server,
+ [fake.SHARE_INSTANCE], [])
+
+ self.library._get_vserver.assert_has_calls([
+ mock.call(share_server=self.fake_src_share_server,
+ backend_name=self.fake_src_backend_name),
+ mock.call(share_server=self.fake_dest_share_server,
+ backend_name=self.fake_dest_backend_name)])
+ self.assertTrue(self.mock_src_client.get_cluster_name.called)
+ self.assertTrue(self.mock_dest_client.get_cluster_name.called)
+ self.library._get_vserver_peers.assert_called_once_with(
+ self.fake_dest_vserver, self.fake_src_vserver
+ )
+ mock_vserver_peer = self.mock_dest_client.create_vserver_peer
+ if vserver_peered:
+ self.assertFalse(mock_vserver_peer.called)
+ else:
+ mock_vserver_peer.assert_called_once_with(
+ self.fake_dest_vserver, self.fake_src_vserver,
+ peer_cluster_name=src_cluster
+ )
+ accept_peer_mock = self.mock_src_client.accept_vserver_peer
+ if src_cluster != dest_cluster:
+ accept_peer_mock.assert_called_once_with(
+ self.fake_src_vserver, self.fake_dest_vserver
+ )
+ else:
+ self.assertFalse(accept_peer_mock.called)
+ dm_session_mock.create_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+
+ def test_share_server_migration_start_snapmirror_start_failure(self):
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(side_effect=[
+ (self.fake_src_vserver, self.mock_src_client),
+ (self.fake_dest_vserver,
+ self.mock_dest_client)]))
+ self.mock_object(self.mock_src_client, 'get_cluster_name')
+ self.mock_object(self.mock_dest_client, 'get_cluster_name')
+ self.mock_object(self.library, '_get_vserver_peers',
+ mock.Mock(return_value=True))
+ dm_session_mock = mock.Mock()
+ self.mock_object(data_motion, "DataMotionSession",
+ mock.Mock(return_value=dm_session_mock))
+ create_snapmirror_mock = self.mock_object(
+ dm_session_mock, 'create_snapmirror_svm',
+ mock.Mock(
+ side_effect=exception.NetAppException(message='fake')))
+
+ self.assertRaises(exception.NetAppException,
+ self.library.share_server_migration_start,
+ None, self.fake_src_share_server,
+ self.fake_dest_share_server,
+ [fake.SHARE_INSTANCE], [])
+
+ self.library._get_vserver.assert_has_calls([
+ mock.call(share_server=self.fake_src_share_server,
+ backend_name=self.fake_src_backend_name),
+ mock.call(share_server=self.fake_dest_share_server,
+ backend_name=self.fake_dest_backend_name)])
+ self.assertTrue(self.mock_src_client.get_cluster_name.called)
+ self.assertTrue(self.mock_dest_client.get_cluster_name.called)
+ self.library._get_vserver_peers.assert_called_once_with(
+ self.fake_dest_vserver, self.fake_src_vserver
+ )
+ self.assertFalse(self.mock_dest_client.create_vserver_peer.called)
+
+ create_snapmirror_mock.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ dm_session_mock.cancel_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+
+ def test__get_snapmirror_svm(self):
+ dm_session_mock = mock.Mock()
+ self.mock_object(data_motion, "DataMotionSession",
+ mock.Mock(return_value=dm_session_mock))
+ fake_snapmirrors = ['mirror1']
+ self.mock_object(dm_session_mock, 'get_snapmirrors_svm',
+ mock.Mock(return_value=fake_snapmirrors))
+
+ result = self.library._get_snapmirror_svm(
+ self.fake_src_share_server, self.fake_dest_share_server)
+
+ dm_session_mock.get_snapmirrors_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ self.assertEqual(fake_snapmirrors, result)
+
+ def test__get_snapmirror_svm_fail_to_get_snapmirrors(self):
+ dm_session_mock = mock.Mock()
+ self.mock_object(data_motion, "DataMotionSession",
+ mock.Mock(return_value=dm_session_mock))
+ self.mock_object(dm_session_mock, 'get_snapmirrors_svm',
+ mock.Mock(
+ side_effect=netapp_api.NaApiError(code=0)))
+
+ self.assertRaises(exception.NetAppException,
+ self.library._get_snapmirror_svm,
+ self.fake_src_share_server,
+ self.fake_dest_share_server)
+
+ dm_session_mock.get_snapmirrors_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+
+ def test_share_server_migration_continue_no_snapmirror(self):
+ self.mock_object(self.library, '_get_snapmirror_svm',
+ mock.Mock(return_value=[]))
+
+ self.assertRaises(exception.NetAppException,
+ self.library.share_server_migration_continue,
+ None,
+ self.fake_src_share_server,
+ self.fake_dest_share_server,
+ [], [])
+
+ self.library._get_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+
+ @ddt.data({'mirror_state': 'snapmirrored', 'status': 'idle'},
+ {'mirror_state': 'uninitialized', 'status': 'transferring'},
+ {'mirror_state': 'snapmirrored', 'status': 'quiescing'},)
+ @ddt.unpack
+ def test_share_server_migration_continue(self, mirror_state, status):
+ fake_snapmirror = {
+ 'mirror-state': mirror_state,
+ 'relationship-status': status,
+ }
+ self.mock_object(self.library, '_get_snapmirror_svm',
+ mock.Mock(return_value=[fake_snapmirror]))
+ expected = mirror_state == 'snapmirrored' and status == 'idle'
+
+ result = self.library.share_server_migration_continue(
+ None,
+ self.fake_src_share_server,
+ self.fake_dest_share_server,
+ [], []
+ )
+
+ self.assertEqual(expected, result)
+ self.library._get_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+
+ def test_share_server_migration_complete(self):
+ dm_session_mock = mock.Mock()
+ self.mock_object(data_motion, "DataMotionSession",
+ mock.Mock(return_value=dm_session_mock))
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(side_effect=[
+ (self.fake_src_vserver, self.mock_src_client),
+ (self.fake_dest_vserver, self.mock_dest_client)]))
+ fake_ipspace = 'fake_ipspace'
+ self.mock_object(self.mock_dest_client, 'get_vserver_ipspace',
+ mock.Mock(return_value=fake_ipspace))
+ fake_share_name = self.library._get_backend_share_name(
+ fake.SHARE_INSTANCE['id'])
+ self.mock_object(self.library, '_setup_network_for_vserver')
+ fake_volume = copy.deepcopy(fake.CLIENT_GET_VOLUME_RESPONSE)
+ self.mock_object(self.mock_dest_client, 'get_volume',
+ mock.Mock(return_value=fake_volume))
+ self.mock_object(self.library, '_create_export',
+ mock.Mock(return_value=fake.NFS_EXPORTS))
+ self.mock_object(self.library, '_delete_share')
+
+ result = self.library.share_server_migration_complete(
+ None,
+ self.fake_src_share_server,
+ self.fake_dest_share_server,
+ [fake.SHARE_INSTANCE], [],
+ fake.NETWORK_INFO
+ )
+
+ expected_share_updates = {
+ fake.SHARE_INSTANCE['id']: {
+ 'export_locations': fake.NFS_EXPORTS,
+ 'pool_name': fake_volume['aggregate']
+ }
+ }
+ expected_result = {
+ 'share_updates': expected_share_updates,
+ }
+
+ self.assertEqual(expected_result, result)
+ dm_session_mock.update_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ self.library._get_vserver.assert_has_calls([
+ mock.call(share_server=self.fake_src_share_server,
+ backend_name=self.fake_src_backend_name),
+ mock.call(share_server=self.fake_dest_share_server,
+ backend_name=self.fake_dest_backend_name)])
+ quiesce_break_mock = dm_session_mock.quiesce_and_break_snapmirror_svm
+ quiesce_break_mock.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ dm_session_mock.wait_for_vserver_state.assert_called_once_with(
+ self.fake_dest_vserver, self.mock_dest_client, subtype='default',
+ state='running', operational_state='stopped',
+ timeout=(self.library.configuration.
+ netapp_server_migration_state_change_timeout)
+ )
+ self.mock_src_client.stop_vserver.assert_called_once_with(
+ self.fake_src_vserver
+ )
+ self.mock_dest_client.get_vserver_ipspace.assert_called_once_with(
+ self.fake_dest_vserver
+ )
+ self.library._setup_network_for_vserver.assert_called_once_with(
+ self.fake_dest_vserver, self.mock_dest_client, fake.NETWORK_INFO,
+ fake_ipspace, enable_nfs=False, security_services=None
+ )
+ self.mock_dest_client.start_vserver.assert_called_once_with(
+ self.fake_dest_vserver
+ )
+ dm_session_mock.delete_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ self.mock_dest_client.get_volume.assert_called_once_with(
+ fake_share_name)
+ self.library._delete_share.assert_called_once_with(
+ fake.SHARE_INSTANCE, self.mock_src_client, remove_export=True)
+
+ def test_share_server_migration_complete_failure_breaking(self):
+ dm_session_mock = mock.Mock()
+ self.mock_object(data_motion, "DataMotionSession",
+ mock.Mock(return_value=dm_session_mock))
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(side_effect=[
+ (self.fake_src_vserver, self.mock_src_client),
+ (self.fake_dest_vserver, self.mock_dest_client)]))
+ self.mock_object(dm_session_mock, 'quiesce_and_break_snapmirror_svm',
+ mock.Mock(side_effect=exception.NetAppException))
+ self.mock_object(self.library, '_delete_share')
+
+ self.assertRaises(exception.NetAppException,
+ self.library.share_server_migration_complete,
+ None,
+ self.fake_src_share_server,
+ self.fake_dest_share_server,
+ [fake.SHARE_INSTANCE], [],
+ fake.NETWORK_INFO)
+
+ dm_session_mock.update_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ self.library._get_vserver.assert_has_calls([
+ mock.call(share_server=self.fake_src_share_server,
+ backend_name=self.fake_src_backend_name),
+ mock.call(share_server=self.fake_dest_share_server,
+ backend_name=self.fake_dest_backend_name)])
+ quiesce_break_mock = dm_session_mock.quiesce_and_break_snapmirror_svm
+ quiesce_break_mock.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ self.mock_src_client.start_vserver.assert_called_once_with(
+ self.fake_src_vserver
+ )
+ dm_session_mock.cancel_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ self.library._delete_share.assert_called_once_with(
+ fake.SHARE_INSTANCE, self.mock_dest_client, remove_export=False)
+
+ def test_share_server_migration_complete_failure_get_new_volume(self):
+ dm_session_mock = mock.Mock()
+ self.mock_object(data_motion, "DataMotionSession",
+ mock.Mock(return_value=dm_session_mock))
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(side_effect=[
+ (self.fake_src_vserver, self.mock_src_client),
+ (self.fake_dest_vserver, self.mock_dest_client)]))
+ fake_ipspace = 'fake_ipspace'
+ self.mock_object(self.mock_dest_client, 'get_vserver_ipspace',
+ mock.Mock(return_value=fake_ipspace))
+ fake_share_name = self.library._get_backend_share_name(
+ fake.SHARE_INSTANCE['id'])
+ self.mock_object(self.library, '_setup_network_for_vserver')
+ self.mock_object(self.mock_dest_client, 'get_volume',
+ mock.Mock(side_effect=exception.NetAppException))
+
+ self.assertRaises(exception.NetAppException,
+ self.library.share_server_migration_complete,
+ None,
+ self.fake_src_share_server,
+ self.fake_dest_share_server,
+ [fake.SHARE_INSTANCE], [],
+ fake.NETWORK_INFO)
+
+ dm_session_mock.update_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ self.library._get_vserver.assert_has_calls([
+ mock.call(share_server=self.fake_src_share_server,
+ backend_name=self.fake_src_backend_name),
+ mock.call(share_server=self.fake_dest_share_server,
+ backend_name=self.fake_dest_backend_name)])
+ quiesce_break_mock = dm_session_mock.quiesce_and_break_snapmirror_svm
+ quiesce_break_mock.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ dm_session_mock.wait_for_vserver_state.assert_called_once_with(
+ self.fake_dest_vserver, self.mock_dest_client, subtype='default',
+ state='running', operational_state='stopped',
+ timeout=(self.library.configuration.
+ netapp_server_migration_state_change_timeout)
+ )
+ self.mock_src_client.stop_vserver.assert_called_once_with(
+ self.fake_src_vserver
+ )
+ self.mock_dest_client.get_vserver_ipspace.assert_called_once_with(
+ self.fake_dest_vserver
+ )
+ self.library._setup_network_for_vserver.assert_called_once_with(
+ self.fake_dest_vserver, self.mock_dest_client, fake.NETWORK_INFO,
+ fake_ipspace, enable_nfs=False, security_services=None
+ )
+ self.mock_dest_client.start_vserver.assert_called_once_with(
+ self.fake_dest_vserver
+ )
+ dm_session_mock.delete_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ self.mock_dest_client.get_volume.assert_called_once_with(
+ fake_share_name)
+
+ @ddt.data([], ['fake_snapmirror'])
+ def test_share_server_migration_cancel(self, snapmirrors):
+ dm_session_mock = mock.Mock()
+ self.mock_object(data_motion, "DataMotionSession",
+ mock.Mock(return_value=dm_session_mock))
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(return_value=(self.fake_dest_vserver,
+ self.mock_dest_client)))
+ self.mock_object(self.library, '_get_snapmirror_svm',
+ mock.Mock(return_value=snapmirrors))
+ self.mock_object(self.library, '_delete_share')
+
+ self.library.share_server_migration_cancel(
+ None,
+ self.fake_src_share_server,
+ self.fake_dest_share_server,
+ [fake.SHARE_INSTANCE], []
+ )
+
+ self.library._get_vserver.assert_called_once_with(
+ share_server=self.fake_dest_share_server,
+ backend_name=self.fake_dest_backend_name)
+ self.library._get_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ if snapmirrors:
+ dm_session_mock.cancel_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ self.library._delete_share.assert_called_once_with(
+ fake.SHARE_INSTANCE, self.mock_dest_client, remove_export=False)
+
+ def test_share_server_migration_cancel_snapmirror_failure(self):
+ dm_session_mock = mock.Mock()
+ self.mock_object(data_motion, "DataMotionSession",
+ mock.Mock(return_value=dm_session_mock))
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(return_value=(self.fake_dest_vserver,
+ self.mock_dest_client)))
+ self.mock_object(self.library, '_get_snapmirror_svm',
+ mock.Mock(return_value=['fake_snapmirror']))
+ self.mock_object(dm_session_mock, 'cancel_snapmirror_svm',
+ mock.Mock(side_effect=exception.NetAppException))
+
+ self.assertRaises(exception.NetAppException,
+ self.library.share_server_migration_cancel,
+ None,
+ self.fake_src_share_server,
+ self.fake_dest_share_server,
+ [fake.SHARE_INSTANCE], [])
+
+ self.library._get_vserver.assert_called_once_with(
+ share_server=self.fake_dest_share_server,
+ backend_name=self.fake_dest_backend_name)
+ self.library._get_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+ dm_session_mock.cancel_snapmirror_svm.assert_called_once_with(
+ self.fake_src_share_server, self.fake_dest_share_server
+ )
+
+ def test_share_server_migration_get_progress(self):
+ expected_result = {'total_progress': 0}
+
+ result = self.library.share_server_migration_get_progress(
+ None, None, None, None, None
+ )
+
+ self.assertEqual(expected_result, result)
+
+ @ddt.data({'subtype': 'default',
+ 'share_group': None,
+ 'compatible': True},
+ {'subtype': 'default',
+ 'share_group': {'share_server_id': fake.SHARE_SERVER['id']},
+ 'compatible': True},
+ {'subtype': 'dp_destination',
+ 'share_group': None,
+ 'compatible': False},
+ {'subtype': 'default',
+ 'share_group': {'share_server_id': 'another_fake_id'},
+ 'compatible': False})
+ @ddt.unpack
+ def test_choose_share_server_compatible_with_share_vserver_info(
+ self, subtype, share_group, compatible):
+ self.library.is_nfs_config_supported = False
+ mock_client = mock.Mock()
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ mock_client)))
+ fake_vserver_info = {
+ 'operational_state': 'running',
+ 'state': 'running',
+ 'subtype': subtype
+ }
+ self.mock_object(mock_client, 'get_vserver_info',
+ mock.Mock(return_value=fake_vserver_info))
+
+ result = self.library.choose_share_server_compatible_with_share(
+ None, [fake.SHARE_SERVER], fake.SHARE_INSTANCE,
+ None, share_group
+ )
+ expected_result = fake.SHARE_SERVER if compatible else None
+ self.assertEqual(expected_result, result)
+ if (share_group and
+ share_group['share_server_id'] != fake.SHARE_SERVER['id']):
+ mock_client.get_vserver_info.assert_not_called()
+ self.library._get_vserver.assert_not_called()
+ else:
+ mock_client.get_vserver_info.assert_called_once_with(
+ fake.VSERVER1,
+ )
+ self.library._get_vserver.assert_called_once_with(
+ fake.SHARE_SERVER, backend_name=fake.BACKEND_NAME
+ )
+
+ @ddt.data({'subtype': 'default', 'compatible': True},
+ {'subtype': 'dp_destination', 'compatible': False})
+ @ddt.unpack
+ def test_choose_share_server_compatible_with_share_group_vserver_info(
+ self, subtype, compatible):
+ self.library.is_nfs_config_supported = False
+ mock_client = mock.Mock()
+ self.mock_object(self.library, '_get_vserver',
+ mock.Mock(return_value=(fake.VSERVER1,
+ mock_client)))
+ fake_vserver_info = {
+ 'operational_state': 'running',
+ 'state': 'running',
+ 'subtype': subtype
+ }
+ self.mock_object(mock_client, 'get_vserver_info',
+ mock.Mock(return_value=fake_vserver_info))
+
+ result = self.library.choose_share_server_compatible_with_share_group(
+ None, [fake.SHARE_SERVER], None
+ )
+ expected_result = fake.SHARE_SERVER if compatible else None
+ self.assertEqual(expected_result, result)
+ self.library._get_vserver.assert_called_once_with(
+ fake.SHARE_SERVER, backend_name=fake.BACKEND_NAME
+ )
+ mock_client.get_vserver_info.assert_called_once_with(
+ fake.VSERVER1,
+ )
+
+ def test__create_port_and_broadcast_domain(self):
+ self.mock_object(self.library._client,
+ 'list_cluster_nodes',
+ mock.Mock(return_value=fake.CLUSTER_NODES))
+ self.mock_object(self.library,
+ '_get_node_data_port',
+ mock.Mock(return_value='fake_port'))
+
+ self.library._create_port_and_broadcast_domain(fake.IPSPACE,
+ fake.NETWORK_INFO)
+ node_network_info = zip(fake.CLUSTER_NODES,
+ fake.NETWORK_INFO['network_allocations'])
+ get_node_port_calls = []
+ create_port_calls = []
+ for node, alloc in node_network_info:
+ get_node_port_calls.append(mock.call(node))
+ create_port_calls.append(mock.call(
+ node, 'fake_port', alloc['segmentation_id'], alloc['mtu'],
+ fake.IPSPACE
+ ))
+
+ self.library._get_node_data_port.assert_has_calls(get_node_port_calls)
+ self.library._client.create_port_and_broadcast_domain.assert_has_calls(
+ create_port_calls)
diff --git a/manila/tests/share/drivers/netapp/dataontap/fakes.py b/manila/tests/share/drivers/netapp/dataontap/fakes.py
index b17ccaaa99..b3a49efc0c 100644
--- a/manila/tests/share/drivers/netapp/dataontap/fakes.py
+++ b/manila/tests/share/drivers/netapp/dataontap/fakes.py
@@ -58,6 +58,7 @@ CONSISTENCY_GROUP_ID2 = '35f5c1ea-45fb-40c4-98ae-2a2a17554159'
CG_SNAPSHOT_ID = '6ddd8a6b-5df7-417b-a2ae-3f6e449f4eea'
CG_SNAPSHOT_MEMBER_ID1 = '629f79ef-b27e-4596-9737-30f084e5ba29'
CG_SNAPSHOT_MEMBER_ID2 = 'e876aa9c-a322-4391-bd88-9266178262be'
+SERVER_ID = 'd5e90724-6f28-4944-858a-553138bdbd29'
FREE_CAPACITY = 10000000000
TOTAL_CAPACITY = 20000000000
AGGREGATE = 'manila_aggr_1'
@@ -71,6 +72,7 @@ NODE_DATA_PORT = 'e0c'
NODE_DATA_PORTS = ('e0c', 'e0d')
LIF_NAME_TEMPLATE = 'os_%(net_allocation_id)s'
SHARE_TYPE_ID = '26e89a5b-960b-46bb-a8cf-0778e653098f'
+SHARE_TYPE_ID_2 = '2a06887e-25b5-486e-804a-d84c2d806feb'
SHARE_TYPE_NAME = 'fake_share_type'
IPSPACE = 'fake_ipspace'
IPSPACE_ID = '27d38c27-3e8b-4d7d-9d91-fcf295e3ac8f'
@@ -82,6 +84,10 @@ MANILA_HOST_NAME_2 = '%(host)s@%(backend)s#%(pool)s' % {
'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME_2}
MANILA_HOST_NAME_3 = '%(host)s@%(backend)s#%(pool)s' % {
'host': HOST_NAME, 'backend': BACKEND_NAME_2, 'pool': POOL_NAME_2}
+SERVER_HOST = '%(host)s@%(backend)s' % {
+ 'host': HOST_NAME, 'backend': BACKEND_NAME}
+SERVER_HOST_2 = '%(host)s@%(backend)s' % {
+ 'host': HOST_NAME, 'backend': BACKEND_NAME_2}
QOS_EXTRA_SPEC = 'netapp:maxiops'
QOS_SIZE_DEPENDENT_EXTRA_SPEC = 'netapp:maxbpspergib'
QOS_NORMALIZED_SPEC = 'maxiops'
@@ -163,6 +169,15 @@ FLEXVOL = {
'owning-vserver-name': VSERVER1,
}
+SHARE_TYPE_EXTRA_SPEC = {
+ 'snapshot_support': True,
+ 'create_share_from_snapshot_support': True,
+ 'revert_to_snapshot_support': True,
+ 'mount_snapshot_support': False,
+ 'driver_handles_share_servers': True,
+ 'availability_zones': [],
+}
+
EXTRA_SPEC = {
'netapp:thin_provisioned': 'true',
'netapp:snapshot_policy': 'default',
@@ -438,6 +453,7 @@ SHARE_SERVER = {
},
'network_allocations': (USER_NETWORK_ALLOCATIONS +
ADMIN_NETWORK_ALLOCATIONS),
+ 'host': SERVER_HOST,
}
SHARE_SERVER_2 = {
@@ -448,6 +464,14 @@ SHARE_SERVER_2 = {
},
'network_allocations': (USER_NETWORK_ALLOCATIONS +
ADMIN_NETWORK_ALLOCATIONS),
+ 'host': SERVER_HOST_2,
+}
+
+VSERVER_INFO = {
+ 'name': 'fake_vserver_name',
+ 'subtype': 'default',
+ 'operational_state': 'running',
+ 'state': 'running',
}
SHARE_SERVER_NFS_TCP = {
@@ -456,6 +480,7 @@ SHARE_SERVER_NFS_TCP = {
'vserver_name': VSERVER2,
'nfs_config': jsonutils.dumps(NFS_CONFIG_TCP_MAX),
},
+ 'host': 'fake_host@fake_backend',
}
SHARE_SERVER_NFS_UDP = {
@@ -464,6 +489,7 @@ SHARE_SERVER_NFS_UDP = {
'vserver_name': VSERVER2,
'nfs_config': jsonutils.dumps(NFS_CONFIG_UDP_MAX),
},
+ 'host': 'fake_host@fake_backend',
}
SHARE_SERVER_NFS_TCP_UDP = {
@@ -472,6 +498,7 @@ SHARE_SERVER_NFS_TCP_UDP = {
'vserver_name': VSERVER2,
'nfs_config': jsonutils.dumps(NFS_CONFIG_TCP_UDP_MAX),
},
+ 'host': 'fake_host@fake_backend',
}
SHARE_SERVER_NO_NFS_NONE = {
@@ -479,10 +506,12 @@ SHARE_SERVER_NO_NFS_NONE = {
'backend_details': {
'vserver_name': VSERVER2,
},
+ 'host': 'fake_host@fake_backend',
}
SHARE_SERVER_NO_DETAILS = {
'id': 'id_no_datails',
+ 'host': 'fake_host@fake_backend',
}
SHARE_SERVER_NFS_DEFAULT = {
@@ -491,6 +520,7 @@ SHARE_SERVER_NFS_DEFAULT = {
'vserver_name': VSERVER2,
'nfs_config': jsonutils.dumps(NFS_CONFIG_DEFAULT),
},
+ 'host': 'fake_host@fake_backend',
}
SHARE_SERVERS = [
@@ -1441,6 +1471,100 @@ EXPANDED_PROCESSOR_COUNTERS = [
},
]
+SERVER_MIGRATION_CHECK_NOT_COMPATIBLE = {
+ 'compatible': False,
+ 'writable': None,
+ 'nondisruptive': None,
+ 'preserve_snapshots': None,
+ 'migration_cancel': None,
+ 'migration_get_progress': None,
+ 'share_network_id': None
+}
+
+CIFS_SECURITY_SERVICE = {
+ 'id': 'fake_id',
+ 'type': 'active_directory',
+ 'password': 'fake_password',
+ 'user': 'fake_user',
+ 'ou': 'fake_ou',
+ 'domain': 'fake_domain',
+ 'dns_ip': 'fake_dns_ip',
+ 'server': '',
+}
+
+SHARE_NETWORK_SUBNET = {
+ 'id': 'fake_share_net_subnet_d',
+ 'neutron_subnet_id': '34950f50-a142-4328-8026-418ad4410b09',
+ 'neutron_net_id': 'fa202676-531a-4446-bc0c-bcec15a72e82',
+ 'network_type': 'fake_network_type',
+ 'segmentation_id': 1234,
+ 'ip_version': 4,
+ 'cidr': 'fake_cidr',
+ 'gateway': 'fake_gateway',
+ 'mtu': 1509,
+}
+
+SHARE_NETWORK = {
+ 'id': 'fake_share_net_id',
+ 'project_id': 'fake_project_id',
+ 'status': 'fake_status',
+ 'name': 'fake_name',
+ 'description': 'fake_description',
+ 'security_services': [CIFS_SECURITY_SERVICE],
+ 'subnets': [SHARE_NETWORK_SUBNET],
+}
+
+SHARE_TYPE_2 = copy.deepcopy(SHARE_TYPE)
+SHARE_TYPE_2['id'] = SHARE_TYPE_ID_2
+SHARE_TYPE_2['extra_specs'].update(SHARE_TYPE_EXTRA_SPEC)
+
+SHARE_REQ_SPEC = {
+ 'share_properties': {
+ 'size': SHARE['size'],
+ 'project_id': SHARE['project_id'],
+ 'snapshot_support': SHARE_TYPE_EXTRA_SPEC['snapshot_support'],
+ 'create_share_from_snapshot_support':
+ SHARE_TYPE_EXTRA_SPEC['create_share_from_snapshot_support'],
+ 'revert_to_snapshot_support':
+ SHARE_TYPE_EXTRA_SPEC['revert_to_snapshot_support'],
+ 'mount_snapshot_support':
+ SHARE_TYPE_EXTRA_SPEC['mount_snapshot_support'],
+ 'share_proto': SHARE['share_proto'],
+ 'share_type_id': SHARE_TYPE_2['id'],
+ 'is_public': True,
+ 'share_group_id': None,
+ 'source_share_group_snapshot_member_id': None,
+ 'snapshot_id': None,
+ },
+ 'share_instance_properties': {
+ 'availability_zone_id': 'fake_az_1',
+ 'share_network_id': SHARE_NETWORK['id'],
+ 'share_server_id': SHARE_SERVER['id'],
+ 'share_id': SHARE_ID,
+ 'host': SHARE_INSTANCE['host'],
+ 'status': SHARE_INSTANCE['status'],
+ },
+ 'share_type': SHARE_TYPE_2,
+ 'share_id': SHARE_ID,
+}
+
+SERVER_MIGRATION_REQUEST_SPEC = {
+ 'shares_size': 10,
+ 'snapshots_size': 10,
+ 'shares_req_spec': [SHARE_REQ_SPEC],
+}
+
+CLIENT_GET_VOLUME_RESPONSE = {
+ 'aggregate': AGGREGATE,
+ 'junction-path': '/%s' % SHARE_NAME,
+ 'name': SHARE_NAME,
+ 'type': 'rw',
+ 'style': 'flex',
+ 'size': SHARE_SIZE,
+ 'owning-vserver-name': VSERVER1,
+ 'qos-policy-group-name': QOS_POLICY_GROUP_NAME,
+}
+
def get_config_cmode():
config = na_fakes.create_configuration_cmode()
diff --git a/manila/tests/share/drivers/netapp/dataontap/protocols/test_cifs_cmode.py b/manila/tests/share/drivers/netapp/dataontap/protocols/test_cifs_cmode.py
index 87af7a716b..04f1e8d043 100644
--- a/manila/tests/share/drivers/netapp/dataontap/protocols/test_cifs_cmode.py
+++ b/manila/tests/share/drivers/netapp/dataontap/protocols/test_cifs_cmode.py
@@ -40,9 +40,14 @@ class NetAppClusteredCIFSHelperTestCase(test.TestCase):
self.helper = cifs_cmode.NetAppCmodeCIFSHelper()
self.helper.set_client(self.mock_client)
- def test_create_share(self):
-
- result = self.helper.create_share(fake.CIFS_SHARE, fake.SHARE_NAME)
+ @ddt.data({'clear_export_policy': True, 'ensure_share_exists': False},
+ {'clear_export_policy': False, 'ensure_share_exists': True})
+ @ddt.unpack
+ def test_create_share(self, clear_export_policy, ensure_share_exists):
+ result = self.helper.create_share(
+ fake.CIFS_SHARE, fake.SHARE_NAME,
+ clear_current_export_policy=clear_export_policy,
+ ensure_share_already_exists=ensure_share_exists)
export_addresses = [fake.SHARE_ADDRESS_1, fake.SHARE_ADDRESS_2]
export_paths = [result(address) for address in export_addresses]
@@ -51,10 +56,17 @@ class NetAppClusteredCIFSHelperTestCase(test.TestCase):
r'\\%s\%s' % (fake.SHARE_ADDRESS_2, fake.SHARE_NAME),
]
self.assertEqual(expected_paths, export_paths)
- self.mock_client.create_cifs_share.assert_called_once_with(
- fake.SHARE_NAME)
- self.mock_client.remove_cifs_share_access.assert_called_once_with(
- fake.SHARE_NAME, 'Everyone')
+ if ensure_share_exists:
+ self.mock_client.cifs_share_exists.assert_called_once_with(
+ fake.SHARE_NAME)
+ self.mock_client.create_cifs_share.assert_not_called()
+ else:
+ self.mock_client.create_cifs_share.assert_called_once_with(
+ fake.SHARE_NAME)
+ self.mock_client.cifs_share_exists.assert_not_called()
+ if clear_export_policy:
+ self.mock_client.remove_cifs_share_access.assert_called_once_with(
+ fake.SHARE_NAME, 'Everyone')
self.mock_client.set_volume_security_style.assert_called_once_with(
fake.SHARE_NAME, security_style='ntfs')
diff --git a/releasenotes/notes/netapp-add-share-server-migration-663f7ced1ef93558.yaml b/releasenotes/notes/netapp-add-share-server-migration-663f7ced1ef93558.yaml
new file mode 100644
index 0000000000..c77911c7de
--- /dev/null
+++ b/releasenotes/notes/netapp-add-share-server-migration-663f7ced1ef93558.yaml
@@ -0,0 +1,19 @@
+---
+features:
+ - |
+ The NetApp ONTAP driver now supports migration of share servers across
+ clusters. While migrating a share server, the source remains writable
+ during the first phase of the migration, until the cutover is issued. It is
+ possible to specify a new share network for the destination share server,
+ only if the associated security services remain unchanged.
+ Share server migration relies on ONTAP features available only in versions
+ equal to or greater than ``9.4``. In order to have share server migration
+ working across ONTAP clusters, they must be peered in advance.
+ In order to adapt to different workloads and provide more flexibility in
+ managing the cluster's free space, a new configuration option was added:
+
+ - ``netapp_server_migration_check_capacity``:
+ Specifies if a capacity validation at the destination backend must be
+ made before proceeding with the share server migration. When enabled,
+ the NetApp driver will validate if the destination pools can hold all
+ shares and snapshots belonging to the source share server.