
[NetApp] Add support for share server migration

This patch adds support for share server migration between NetApp
ONTAP drivers. The operation migrates a share server and all of its
resources between two different clusters.
Share server migration relies on ONTAP features that are available only
in versions ``9.4`` and newer. In addition, for share server migration
to work across ONTAP clusters, the clusters must be peered in advance.
At this moment, share server migration cannot migrate a share server
without disrupting access to its shares, since the export locations are
only updated at the migration-complete phase.
The driver doesn't support changing security services while changing the
destination share network. This functionality can be added in the future.

Co-Authored-By: Andre Beltrami <debeltrami@gmail.com>

Implements: bp netapp-share-server-migration
Depends-On: Ic0751027d2c3f1ef7ab0f7836baff3070a230cfd
Change-Id: Idfac890c034cf8cbb65abf685ab6cab5ef13a4b1
Signed-off-by: Douglas Viroel <viroel@gmail.com>
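
For reviewers unfamiliar with the feature, the sketch below outlines the
order in which the new driver entry points added by this patch are expected
to be exercised during a migration. It is a rough, simplified illustration:
the driver/context/share-server objects and the polling loop are
placeholders, not manila's actual share manager code.

# Simplified sketch of the migration flow through the new driver entry
# points (method names come from this patch; everything else is a stand-in).
import time

def migrate_share_server(driver, context, src_server, dest_host,
                         old_net, new_net, request_spec,
                         dest_server, shares, snapshots, new_net_alloc):
    result = driver.share_server_migration_check_compatibility(
        context, src_server, dest_host, old_net, new_net, request_spec)
    if not result['compatible']:
        raise RuntimeError('destination host is not compatible')

    # Kick off the SVM DR based data copy.
    driver.share_server_migration_start(
        context, src_server, dest_server, shares, snapshots)

    # Poll until the vserver SnapMirror relationship is 'snapmirrored'.
    while not driver.share_server_migration_continue(
            context, src_server, dest_server, shares, snapshots):
        time.sleep(60)

    # Cut over: final transfer, break SnapMirror, update export locations.
    return driver.share_server_migration_complete(
        context, src_server, dest_server, shares, snapshots, new_net_alloc)
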
Douglas Viroel · 3 months ago · commit 4bcf21eaf1
19 changed files with 3937 additions and 414 deletions
  1. manila/exception.py (+4, -0)
  2. manila/share/drivers/netapp/dataontap/client/api.py (+3, -0)
  3. manila/share/drivers/netapp/dataontap/client/client_cmode.py (+645, -140)
  4. manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py (+374, -76)
  5. manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py (+38, -3)
  6. manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py (+26, -0)
  7. manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py (+48, -11)
  8. manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py (+531, -46)
  9. manila/share/drivers/netapp/dataontap/protocols/cifs_cmode.py (+8, -2)
  10. manila/share/drivers/netapp/dataontap/protocols/nfs_cmode.py (+5, -1)
  11. manila/share/drivers/netapp/options.py (+31, -2)
  12. manila/tests/share/drivers/netapp/dataontap/client/fakes.py (+81, -0)
  13. manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py (+529, -41)
  14. manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_data_motion.py (+495, -48)
  15. manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py (+53, -9)
  16. manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py (+904, -28)
  17. manila/tests/share/drivers/netapp/dataontap/fakes.py (+124, -0)
  18. manila/tests/share/drivers/netapp/dataontap/protocols/test_cifs_cmode.py (+19, -7)
  19. releasenotes/notes/netapp-add-share-server-migration-663f7ced1ef93558.yaml (+19, -0)

manila/exception.py (+4, -0)

@@ -779,6 +779,10 @@ class VserverNotSpecified(NetAppException):
message = _("Vserver not specified.")


class VserverNotReady(NetAppException):
message = _("Vserver %(vserver)s is not ready yet.")


class EMCPowerMaxXMLAPIError(Invalid):
message = _("%(err)s")



manila/share/drivers/netapp/dataontap/client/api.py (+3, -0)

@@ -45,8 +45,10 @@ EVOLNOTCLONE = '13170'
EVOLMOVE_CANNOT_MOVE_TO_CFO = '13633'
EAGGRDOESNOTEXIST = '14420'
EVOL_NOT_MOUNTED = '14716'
EVSERVERALREADYSTARTED = '14923'
ESIS_CLONE_NOT_LICENSED = '14956'
EOBJECTNOTFOUND = '15661'
EVSERVERNOTFOUND = '15698'
E_VIFMGR_PORT_ALREADY_ASSIGNED_TO_BROADCAST_DOMAIN = '18605'
ERELATION_EXISTS = '17122'
ENOTRANSFER_IN_PROGRESS = '17130'
@@ -55,6 +57,7 @@ EANOTHER_OP_ACTIVE = '17131'
ERELATION_NOT_QUIESCED = '17127'
ESOURCE_IS_DIFFERENT = '17105'
EVOL_CLONE_BEING_SPLIT = '17151'
ESVMDR_CANNOT_PERFORM_OP_FOR_STATUS = '18815'


class NaServer(object):


manila/share/drivers/netapp/dataontap/client/client_cmode.py (+645, -140)
(File diff suppressed because it is too large.)


manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py (+374, -76)

@@ -94,32 +94,48 @@ class DataMotionSession(object):

def _get_backend_qos_policy_group_name(self, share):
"""Get QoS policy name according to QoS policy group name template."""
__, config = self._get_backend_config_obj(share)
__, config = self.get_backend_name_and_config_obj(share['host'])
return config.netapp_qos_policy_group_name_template % {
'share_id': share['id'].replace('-', '_')}

def _get_backend_snapmirror_policy_name_svm(self, share_server_id,
backend_name):
config = get_backend_configuration(backend_name)
return (config.netapp_snapmirror_policy_name_svm_template
% {'share_server_id': share_server_id.replace('-', '_')})

def get_vserver_from_share_server(self, share_server):
backend_details = share_server.get('backend_details')
if backend_details:
return backend_details.get('vserver_name')

def get_vserver_from_share(self, share_obj):
share_server = share_obj.get('share_server')
if share_server:
backend_details = share_server.get('backend_details')
if backend_details:
return backend_details.get('vserver_name')
return self.get_vserver_from_share_server(share_server)

def _get_backend_config_obj(self, share_obj):
backend_name = share_utils.extract_host(
share_obj['host'], level='backend_name')
def get_backend_name_and_config_obj(self, host):
backend_name = share_utils.extract_host(host, level='backend_name')
config = get_backend_configuration(backend_name)
return backend_name, config

def get_backend_info_for_share(self, share_obj):
backend_name, config = self._get_backend_config_obj(share_obj)
backend_name, config = self.get_backend_name_and_config_obj(
share_obj['host'])
vserver = (self.get_vserver_from_share(share_obj) or
config.netapp_vserver)
volume_name = self._get_backend_volume_name(
config, share_obj)
volume_name = self._get_backend_volume_name(config, share_obj)

return volume_name, vserver, backend_name

def get_client_and_vserver_name(self, share_server):
destination_host = share_server.get('host')
vserver = self.get_vserver_from_share_server(share_server)
backend, __ = self.get_backend_name_and_config_obj(destination_host)
client = get_client_for_backend(backend, vserver_name=vserver)

return client, vserver

def get_snapmirrors(self, source_share_obj, dest_share_obj):
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
@@ -130,8 +146,8 @@ class DataMotionSession(object):
source_share_obj)

snapmirrors = dest_client.get_snapmirrors(
src_vserver, src_volume_name,
dest_vserver, dest_volume_name,
source_vserver=src_vserver, dest_vserver=dest_vserver,
source_volume=src_volume_name, dest_volume=dest_volume_name,
desired_attributes=['relationship-status',
'mirror-state',
'source-vserver',
@@ -155,17 +171,17 @@ class DataMotionSession(object):

# 1. Create SnapMirror relationship
# TODO(ameade): Change the schedule from hourly to a config value
dest_client.create_snapmirror(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name,
schedule='hourly')

# 2. Initialize async transfer of the initial data
dest_client.initialize_snapmirror(src_vserver,
dest_client.create_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
dest_volume_name,
schedule='hourly')

# 2. Initialize async transfer of the initial data
dest_client.initialize_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)

def delete_snapmirror(self, source_share_obj, dest_share_obj,
release=True):
@@ -185,21 +201,21 @@ class DataMotionSession(object):

# 1. Abort any ongoing transfers
try:
dest_client.abort_snapmirror(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name,
clear_checkpoint=False)
dest_client.abort_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name,
clear_checkpoint=False)
except netapp_api.NaApiError:
# Snapmirror is already deleted
pass

# 2. Delete SnapMirror Relationship and cleanup destination snapshots
try:
dest_client.delete_snapmirror(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
dest_client.delete_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
except netapp_api.NaApiError as e:
with excutils.save_and_reraise_exception() as exc_context:
if (e.code == netapp_api.EOBJECTNOTFOUND or
@@ -218,10 +234,10 @@ class DataMotionSession(object):
# 3. Cleanup SnapMirror relationship on source
try:
if src_client:
src_client.release_snapmirror(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
src_client.release_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
except netapp_api.NaApiError as e:
with excutils.save_and_reraise_exception() as exc_context:
if (e.code == netapp_api.EOBJECTNOTFOUND or
@@ -242,50 +258,81 @@ class DataMotionSession(object):
source_share_obj)

# Update SnapMirror
dest_client.update_snapmirror(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
dest_client.update_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)

def quiesce_then_abort_svm(self, source_share_server, dest_share_server):
source_client, source_vserver = self.get_client_and_vserver_name(
source_share_server)
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)

# 1. Attempt to quiesce, then abort
dest_client.quiesce_snapmirror_svm(source_vserver, dest_vserver)

dest_backend = share_utils.extract_host(dest_share_server['host'],
level='backend_name')
config = get_backend_configuration(dest_backend)
retries = config.netapp_snapmirror_quiesce_timeout / 5

@utils.retry(exception.ReplicationException, interval=5,
retries=retries, backoff_rate=1)
def wait_for_quiesced():
snapmirror = dest_client.get_snapmirrors_svm(
source_vserver=source_vserver, dest_vserver=dest_vserver,
desired_attributes=['relationship-status', 'mirror-state']
)[0]
if snapmirror.get('relationship-status') != 'quiesced':
raise exception.ReplicationException(
reason="Snapmirror relationship is not quiesced.")

try:
wait_for_quiesced()
except exception.ReplicationException:
dest_client.abort_snapmirror_svm(source_vserver,
dest_vserver,
clear_checkpoint=False)

def quiesce_then_abort(self, source_share_obj, dest_share_obj):
dest_volume_name, dest_vserver, dest_backend = (
dest_volume, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)

src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
src_volume, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)

# 1. Attempt to quiesce, then abort
dest_client.quiesce_snapmirror(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
dest_client.quiesce_snapmirror_vol(src_vserver,
src_volume,
dest_vserver,
dest_volume)

config = get_backend_configuration(share_utils.extract_host(
source_share_obj['host'], level='backend_name'))
config = get_backend_configuration(dest_backend)
retries = config.netapp_snapmirror_quiesce_timeout / 5

@utils.retry(exception.ReplicationException, interval=5,
retries=retries, backoff_rate=1)
def wait_for_quiesced():
snapmirror = dest_client.get_snapmirrors(
src_vserver, src_volume_name, dest_vserver,
dest_volume_name, desired_attributes=['relationship-status',
'mirror-state']
source_vserver=src_vserver, dest_vserver=dest_vserver,
source_volume=src_volume, dest_volume=dest_volume,
desired_attributes=['relationship-status', 'mirror-state']
)[0]
if snapmirror.get('relationship-status') != 'quiesced':
raise exception.ReplicationException(
reason=("Snapmirror relationship is not quiesced."))
reason="Snapmirror relationship is not quiesced.")

try:
wait_for_quiesced()
except exception.ReplicationException:
dest_client.abort_snapmirror(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name,
clear_checkpoint=False)
dest_client.abort_snapmirror_vol(src_vserver,
src_volume,
dest_vserver,
dest_volume,
clear_checkpoint=False)

def break_snapmirror(self, source_share_obj, dest_share_obj, mount=True):
"""Breaks SnapMirror relationship.
@@ -307,10 +354,10 @@ class DataMotionSession(object):
self.quiesce_then_abort(source_share_obj, dest_share_obj)

# 2. Break SnapMirror
dest_client.break_snapmirror(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
dest_client.break_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)

# 3. Mount the destination volume and create a junction path
if mount:
@@ -326,10 +373,10 @@ class DataMotionSession(object):
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)

dest_client.resync_snapmirror(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
dest_client.resync_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)

def resume_snapmirror(self, source_share_obj, dest_share_obj):
"""Resume SnapMirror relationship from a quiesced state."""
@@ -341,10 +388,10 @@ class DataMotionSession(object):
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)

dest_client.resume_snapmirror(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
dest_client.resume_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)

def change_snapmirror_source(self, replica,
orig_source_replica,
@@ -400,16 +447,16 @@ class DataMotionSession(object):

# 3. create
# TODO(ameade): Update the schedule if needed.
replica_client.create_snapmirror(new_src_vserver,
new_src_volume_name,
replica_vserver,
replica_volume_name,
schedule='hourly')
replica_client.create_snapmirror_vol(new_src_vserver,
new_src_volume_name,
replica_vserver,
replica_volume_name,
schedule='hourly')
# 4. resync
replica_client.resync_snapmirror(new_src_vserver,
new_src_volume_name,
replica_vserver,
replica_volume_name)
replica_client.resync_snapmirror_vol(new_src_vserver,
new_src_volume_name,
replica_vserver,
replica_volume_name)

@na_utils.trace
def remove_qos_on_old_active_replica(self, orig_active_replica):
@@ -430,3 +477,254 @@ class DataMotionSession(object):
"for replica %s to unset QoS policy and mark "
"the QoS policy group for deletion.",
orig_active_replica['id'])

def create_snapmirror_svm(self, source_share_server,
dest_share_server):
"""Sets up a SnapMirror relationship between two vServers.

1. Create a SnapMirror policy for SVM DR
2. Create SnapMirror relationship
3. Initialize data transfer asynchronously
"""
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)

# 1: Create SnapMirror policy for SVM DR
dest_backend_name = share_utils.extract_host(dest_share_server['host'],
level='backend_name')
policy_name = self._get_backend_snapmirror_policy_name_svm(
dest_share_server['id'],
dest_backend_name,
)
dest_client.create_snapmirror_policy(policy_name)

# 2. Create SnapMirror relationship
dest_client.create_snapmirror_svm(src_vserver,
dest_vserver,
policy=policy_name,
schedule='hourly')

# 3. Initialize async transfer of the initial data
dest_client.initialize_snapmirror_svm(src_vserver,
dest_vserver)

def get_snapmirrors_svm(self, source_share_server, dest_share_server):
"""Get SnapMirrors between two vServers."""

dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)

snapmirrors = dest_client.get_snapmirrors_svm(
source_vserver=src_vserver, dest_vserver=dest_vserver,
desired_attributes=['relationship-status',
'mirror-state',
'last-transfer-end-timestamp'])
return snapmirrors

def get_snapmirror_destinations_svm(self, source_share_server,
dest_share_server):
"""Get SnapMirrors between two vServers."""

dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)

snapmirrors = dest_client.get_snapmirror_destinations_svm(
source_vserver=src_vserver, dest_vserver=dest_vserver)
return snapmirrors

def update_snapmirror_svm(self, source_share_server, dest_share_server):
"""Schedule a SnapMirror update to happen on the backend."""

dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)

# Update SnapMirror
dest_client.update_snapmirror_svm(src_vserver, dest_vserver)

def quiesce_and_break_snapmirror_svm(self, source_share_server,
dest_share_server):
"""Abort and break a SnapMirror relationship between vServers.

1. Quiesce SnapMirror
2. Break SnapMirror
"""
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)

# 1. Attempt to quiesce, then abort
self.quiesce_then_abort_svm(source_share_server, dest_share_server)

# 2. Break SnapMirror
dest_client.break_snapmirror_svm(src_vserver, dest_vserver)

def cancel_snapmirror_svm(self, source_share_server, dest_share_server):
"""Cancels SnapMirror relationship between vServers."""

dest_backend = share_utils.extract_host(dest_share_server['host'],
level='backend_name')
dest_config = get_backend_configuration(dest_backend)
server_timeout = (
dest_config.netapp_server_migration_state_change_timeout)
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)

snapmirrors = self.get_snapmirrors_svm(source_share_server,
dest_share_server)
if snapmirrors:
# 1. Attempt to quiesce and break snapmirror
self.quiesce_and_break_snapmirror_svm(source_share_server,
dest_share_server)

# NOTE(dviroel): Let's wait until the destination vserver is
# promoted to 'default' and in 'running' state before starting to
# shut down the source.
self.wait_for_vserver_state(dest_vserver, dest_client,
subtype='default', state='running',
operational_state='stopped',
timeout=server_timeout)
# 2. Delete SnapMirror
self.delete_snapmirror_svm(source_share_server, dest_share_server)
else:
dest_info = dest_client.get_vserver_info(dest_vserver)
if dest_info is None:
# NOTE(dviroel): Nothing to cancel since the destination does
# not exist.
return
if dest_info.get('subtype') == 'dp_destination':
# NOTE(dviroel): Can be a corner case where no snapmirror
# relationship was found but the destination vserver is stuck
# in DP mode. We need to convert it to 'default' to release
# its resources later.
self.convert_svm_to_default_subtype(dest_vserver, dest_client,
timeout=server_timeout)

def convert_svm_to_default_subtype(self, vserver_name, client,
is_dest_path=True, timeout=300):
interval = 10
retries = (timeout / interval or 1)

@utils.retry(exception.VserverNotReady, interval=interval,
retries=retries, backoff_rate=1)
def wait_for_state():
vserver_info = client.get_vserver_info(vserver_name)
if vserver_info.get('subtype') != 'default':
if is_dest_path:
client.break_snapmirror_svm(dest_vserver=vserver_name)
else:
client.break_snapmirror_svm(source_vserver=vserver_name)
raise exception.VserverNotReady(vserver=vserver_name)
try:
wait_for_state()
except exception.VserverNotReady:
msg = _("Vserver %s did not reach the expected state. Retries "
"exhausted. Aborting.") % vserver_name
raise exception.NetAppException(message=msg)

def delete_snapmirror_svm(self, src_share_server, dest_share_server,
release=True):
"""Ensures all information about a SnapMirror relationship is removed.

1. Abort SnapMirror
2. Delete the SnapMirror
3. Release SnapMirror to cleanup SnapMirror metadata and snapshots
"""
src_client, src_vserver = self.get_client_and_vserver_name(
src_share_server)
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
# 1. Abort any ongoing transfers
try:
dest_client.abort_snapmirror_svm(src_vserver, dest_vserver)
except netapp_api.NaApiError:
# SnapMirror is already deleted
pass

# 2. Delete SnapMirror Relationship and cleanup destination snapshots
try:
dest_client.delete_snapmirror_svm(src_vserver, dest_vserver)
except netapp_api.NaApiError as e:
with excutils.save_and_reraise_exception() as exc_context:
if (e.code == netapp_api.EOBJECTNOTFOUND or
e.code == netapp_api.ESOURCE_IS_DIFFERENT or
"(entry doesn't exist)" in e.message):
LOG.info('No snapmirror relationship to delete')
exc_context.reraise = False

# 3. Release SnapMirror
if release:
src_backend = share_utils.extract_host(src_share_server['host'],
level='backend_name')
src_config = get_backend_configuration(src_backend)
release_timeout = (
src_config.netapp_snapmirror_release_timeout)
self.wait_for_snapmirror_release_svm(src_vserver,
dest_vserver,
src_client,
timeout=release_timeout)

def wait_for_vserver_state(self, vserver_name, client, state=None,
operational_state=None, subtype=None,
timeout=300):
interval = 10
retries = (timeout / interval or 1)

expected = {}
if state:
expected['state'] = state
if operational_state:
expected['operational_state'] = operational_state
if subtype:
expected['subtype'] = subtype

@utils.retry(exception.VserverNotReady, interval=interval,
retries=retries, backoff_rate=1)
def wait_for_state():
vserver_info = client.get_vserver_info(vserver_name)
if not all(item in vserver_info.items() for
item in expected.items()):
raise exception.VserverNotReady(vserver=vserver_name)
try:
wait_for_state()
except exception.VserverNotReady:
msg = _("Vserver %s did not reach the expected state. Retries "
"exhausted. Aborting.") % vserver_name
raise exception.NetAppException(message=msg)

def wait_for_snapmirror_release_svm(self, source_vserver, dest_vserver,
src_client, timeout=300):
interval = 10
retries = (timeout / interval or 1)

@utils.retry(exception.NetAppException, interval=interval,
retries=retries, backoff_rate=1)
def release_snapmirror():
snapmirrors = src_client.get_snapmirror_destinations_svm(
source_vserver=source_vserver, dest_vserver=dest_vserver)
if not snapmirrors:
LOG.debug("No snapmirrors to be released in source location.")
else:
try:
src_client.release_snapmirror_svm(source_vserver,
dest_vserver)
except netapp_api.NaApiError as e:
if (e.code == netapp_api.EOBJECTNOTFOUND or
e.code == netapp_api.ESOURCE_IS_DIFFERENT or
"(entry doesn't exist)" in e.message):
LOG.debug('Snapmirror relationship does not exist '
'anymore.')

msg = _('Snapmirror release sent to source vserver. We will '
'wait for it to be released.')
raise exception.NetAppException(message=msg)

try:
release_snapmirror()
except exception.NetAppException:
msg = _("Unable to release the snapmirror from source vserver %s. "
"Retries exhausted. Aborting") % source_vserver
raise exception.NetAppException(message=msg)
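
The DataMotionSession methods added above manage the vserver-level
SnapMirror lifecycle. A condensed sketch of the intended calling order is
shown below; it only strings together methods introduced in this diff, and
the share server dictionaries are assumed placeholders.

from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion

def svm_dr_lifecycle(src_share_server, dest_share_server, cancel=False):
    """Illustrative ordering of the new SVM DR calls, not production code."""
    dm = data_motion.DataMotionSession()

    # SnapMirror policy + relationship + initial baseline transfer.
    dm.create_snapmirror_svm(src_share_server, dest_share_server)

    if cancel:
        # Rollback path: quiesce/break if needed and convert the destination
        # vserver back to the 'default' subtype.
        dm.cancel_snapmirror_svm(src_share_server, dest_share_server)
        return

    # Incremental updates while the migration is in progress.
    dm.update_snapmirror_svm(src_share_server, dest_share_server)

    # Cutover: quiesce and break, then remove all SnapMirror metadata.
    dm.quiesce_and_break_snapmirror_svm(src_share_server, dest_share_server)
    dm.delete_snapmirror_svm(src_share_server, dest_share_server)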

manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py (+38, -3)

@@ -287,9 +287,38 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
def get_share_status(self, share_instance, share_server=None):
return self.library.get_share_status(share_instance, share_server)

def choose_share_server_compatible_with_share(self, context,
share_servers, share,
snapshot=None,
def share_server_migration_check_compatibility(
self, context, share_server, dest_host, old_share_network,
new_share_network, shares_request_spec):

return self.library.share_server_migration_check_compatibility(
context, share_server, dest_host, old_share_network,
new_share_network, shares_request_spec)

def share_server_migration_start(self, context, src_share_server,
dest_share_server, shares, snapshots):
self.library.share_server_migration_start(
context, src_share_server, dest_share_server, shares, snapshots)

def share_server_migration_continue(self, context, src_share_server,
dest_share_server, shares, snapshots):
return self.library.share_server_migration_continue(
context, src_share_server, dest_share_server, shares, snapshots)

def share_server_migration_complete(self, context, src_share_server,
dest_share_server, shares, snapshots,
new_network_info):
return self.library.share_server_migration_complete(
context, src_share_server, dest_share_server, shares, snapshots,
new_network_info)

def share_server_migration_cancel(self, context, src_share_server,
dest_share_server, shares, snapshots):
self.library.share_server_migration_cancel(
context, src_share_server, dest_share_server, shares, snapshots)

def choose_share_server_compatible_with_share(self, context, share_servers,
share, snapshot=None,
share_group=None):
return self.library.choose_share_server_compatible_with_share(
context, share_servers, share, snapshot=snapshot,
@@ -301,3 +330,9 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
return self.library.choose_share_server_compatible_with_share_group(
context, share_servers, share_group_ref,
share_group_snapshot=share_group_snapshot)

def share_server_migration_get_progress(self, context, src_share_server,
dest_share_server, shares,
snapshots):
return self.library.share_server_migration_get_progress(
context, src_share_server, dest_share_server, shares, snapshots)

manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py (+26, -0)

@@ -284,6 +284,32 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
def get_share_status(self, share_instance, share_server=None):
return self.library.get_share_status(share_instance, share_server)

def share_server_migration_start(self, context, src_share_server,
dest_share_server, shares, snapshots):
raise NotImplementedError

def share_server_migration_continue(self, context, src_share_server,
dest_share_server, shares, snapshots):
raise NotImplementedError

def share_server_migration_complete(self, context, src_share_server,
dest_share_server, shares, snapshots,
new_network_info):
raise NotImplementedError

def share_server_migration_cancel(self, context, src_share_server,
dest_share_server, shares, snapshots):
raise NotImplementedError

def share_server_migration_check_compatibility(
self, context, share_server, dest_host, old_share_network,
new_share_network, shares_request_spec):
raise NotImplementedError

def share_server_migration_get_progress(self, context, src_share_server,
dest_share_server):
raise NotImplementedError

def choose_share_server_compatible_with_share(self, context, share_servers,
share, snapshot=None,
share_group=None):


manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py (+48, -11)

@@ -273,6 +273,10 @@ class NetAppCmodeFileStorageLibrary(object):
return self.configuration.netapp_qos_policy_group_name_template % {
'share_id': share_id.replace('-', '_')}

def _get_backend_snapmirror_policy_name_svm(self, share_server_id):
return (self.configuration.netapp_snapmirror_policy_name_svm_template
% {'share_server_id': share_server_id.replace('-', '_')})

@na_utils.trace
def _get_aggregate_space(self):
aggregates = self._find_matching_aggregates()
@@ -1155,7 +1159,8 @@ class NetAppCmodeFileStorageLibrary(object):

@na_utils.trace
def _create_export(self, share, share_server, vserver, vserver_client,
clear_current_export_policy=True):
clear_current_export_policy=True,
ensure_share_already_exists=False):
"""Creates NAS storage."""
helper = self._get_helper(share)
helper.set_client(vserver_client)
@@ -1177,7 +1182,8 @@ class NetAppCmodeFileStorageLibrary(object):
# Create the share and get a callback for generating export locations
callback = helper.create_share(
share, share_name,
clear_current_export_policy=clear_current_export_policy)
clear_current_export_policy=clear_current_export_policy,
ensure_share_already_exists=ensure_share_already_exists)

# Generate export locations using addresses, metadata and callback
export_locations = [
@@ -1919,14 +1925,16 @@ class NetAppCmodeFileStorageLibrary(object):

if snapmirror.get('mirror-state') != 'snapmirrored':
try:
vserver_client.resume_snapmirror(snapmirror['source-vserver'],
snapmirror['source-volume'],
vserver,
share_name)
vserver_client.resync_snapmirror(snapmirror['source-vserver'],
snapmirror['source-volume'],
vserver,
share_name)
vserver_client.resume_snapmirror_vol(
snapmirror['source-vserver'],
snapmirror['source-volume'],
vserver,
share_name)
vserver_client.resync_snapmirror_vol(
snapmirror['source-vserver'],
snapmirror['source-volume'],
vserver,
share_name)
return constants.REPLICA_STATE_OUT_OF_SYNC
except netapp_api.NaApiError:
LOG.exception("Could not resync snapmirror.")
@@ -2592,7 +2600,7 @@ class NetAppCmodeFileStorageLibrary(object):
msg_args = {
'share_move_state': move_status['state']
}
msg = _("Migration cancelation was not successful. The share "
msg = _("Migration cancellation was not successful. The share "
"migration state failed while transitioning from "
"%(share_move_state)s state to 'failed'. Retries "
"exhausted.") % msg_args
@@ -2842,3 +2850,32 @@ class NetAppCmodeFileStorageLibrary(object):
self.volume_rehost(share, src_vserver, dest_vserver)
# Mount the volume on the destination vserver
dest_vserver_client.mount_volume(volume_name)

def _check_capacity_compatibility(self, pools, thin_provision, size):
"""Check if the size requested is suitable for the available pools"""

backend_free_capacity = 0.0

for pool in pools:
if "unknown" in (pool['free_capacity_gb'],
pool['total_capacity_gb']):
return False
reserved = float(pool['reserved_percentage']) / 100

total_pool_free = math.floor(
pool['free_capacity_gb'] -
pool['total_capacity_gb'] * reserved)

if thin_provision:
# If thin provisioning is enabled it's necessary to recalculate the
# total_pool_free considering the max over subscription ratio
# for each pool. After summing the free space for each pool we
# have the total backend free capacity to compare with the
# requested size.
if pool['max_over_subscription_ratio'] >= 1:
total_pool_free = math.floor(
total_pool_free * pool['max_over_subscription_ratio'])

backend_free_capacity += total_pool_free

return size <= backend_free_capacity
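
To make the capacity check above easier to review, here is a standalone
sketch of the same math with made-up pool numbers, showing how the
over-subscription ratio only comes into play when thin provisioning is
requested.

import math

# Hypothetical pool stats, mimicking what the driver reports per pool.
pools = [
    {'free_capacity_gb': 500.0, 'total_capacity_gb': 1000.0,
     'reserved_percentage': 10, 'max_over_subscription_ratio': 2.0},
    {'free_capacity_gb': 200.0, 'total_capacity_gb': 400.0,
     'reserved_percentage': 0, 'max_over_subscription_ratio': 2.0},
]

def check_capacity(pools, thin_provision, size):
    backend_free_capacity = 0.0
    for pool in pools:
        reserved = float(pool['reserved_percentage']) / 100
        # Free space minus the reserved slice of the pool's total capacity.
        total_pool_free = math.floor(
            pool['free_capacity_gb'] - pool['total_capacity_gb'] * reserved)
        if thin_provision and pool['max_over_subscription_ratio'] >= 1:
            # Thin provisioning lets each pool promise more than it has.
            total_pool_free = math.floor(
                total_pool_free * pool['max_over_subscription_ratio'])
        backend_free_capacity += total_pool_free
    return size <= backend_free_capacity

# Thick: 400 + 200 = 600 GiB free, so a 700 GiB share server does not fit.
assert check_capacity(pools, thin_provision=False, size=700) is False
# Thin: 800 + 400 = 1200 GiB effective, so the same share server fits.
assert check_capacity(pools, thin_provision=True, size=700) is True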

manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py (+531, -46)

@@ -29,6 +29,7 @@ from oslo_utils import excutils

from manila import exception
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
@@ -72,8 +73,8 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
check_for_setup_error())

@na_utils.trace
def _get_vserver(self, share_server=None, vserver_name=None):
def _get_vserver(self, share_server=None, vserver_name=None,
backend_name=None):
if share_server:
backend_details = share_server.get('backend_details')
vserver = backend_details.get(
@@ -86,13 +87,19 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
elif vserver_name:
vserver = vserver_name
else:
msg = _('Share server not provided')
msg = _('Share server or vserver name not provided')
raise exception.InvalidInput(reason=msg)

if not self._client.vserver_exists(vserver):
if backend_name:
vserver_client = data_motion.get_client_for_backend(
backend_name, vserver
)
else:
vserver_client = self._get_api_client(vserver)

if not vserver_client.vserver_exists(vserver):
raise exception.VserverNotFound(vserver=vserver)

vserver_client = self._get_api_client(vserver)
return vserver, vserver_client

def _get_ems_pool_info(self):
@@ -152,7 +159,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
server_details['nfs_config'] = jsonutils.dumps(nfs_config)

try:
self._create_vserver(vserver_name, network_info,
self._create_vserver(vserver_name, network_info, metadata,
nfs_config=nfs_config)
except Exception as e:
e.detail_data = {'server_details': server_details}
@@ -208,12 +215,20 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return self.configuration.netapp_vserver_name_template % server_id

@na_utils.trace
def _create_vserver(self, vserver_name, network_info, nfs_config=None):
def _create_vserver(self, vserver_name, network_info, metadata=None,
nfs_config=None):
"""Creates Vserver with given parameters if it doesn't exist."""

if self._client.vserver_exists(vserver_name):
msg = _('Vserver %s already exists.')
raise exception.NetAppException(msg % vserver_name)
# NOTE(dviroel): check if this vserver will be a data protection server
is_dp_destination = False
if metadata and metadata.get('migration_destination') is True:
is_dp_destination = True
msg = _("Starting creation of a vserver with 'dp_destination' "
"subtype.")
LOG.debug(msg)

# NOTE(lseki): If there's already an ipspace created for the same VLAN
# port, reuse it. It will be named after the previously created share
@@ -224,47 +239,66 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
ipspace_name = self._client.get_ipspace_name_for_vlan_port(
node_name, port, vlan) or self._create_ipspace(network_info)

LOG.debug('Vserver %s does not exist, creating.', vserver_name)
self._client.create_vserver(
vserver_name,
self.configuration.netapp_root_volume_aggregate,
self.configuration.netapp_root_volume,
self._find_matching_aggregates(),
ipspace_name)

vserver_client = self._get_api_client(vserver=vserver_name)
security_services = None
try:
self._create_vserver_lifs(vserver_name,
vserver_client,
network_info,
ipspace_name)

self._create_vserver_admin_lif(vserver_name,
vserver_client,
network_info,
ipspace_name)
if is_dp_destination:
# Get Data ONTAP aggregate name as pool name.
LOG.debug('Creating a new Vserver (%s) for data protection.',
vserver_name)
self._client.create_vserver_dp_destination(
vserver_name,
self._find_matching_aggregates(),
ipspace_name)
# Set up port and broadcast domain for the current ipspace
self._create_port_and_broadcast_domain(ipspace_name, network_info)
else:
LOG.debug('Vserver %s does not exist, creating.', vserver_name)
self._client.create_vserver(
vserver_name,
self.configuration.netapp_root_volume_aggregate,
self.configuration.netapp_root_volume,
self._find_matching_aggregates(),
ipspace_name)

self._create_vserver_routes(vserver_client,
network_info)
vserver_client = self._get_api_client(vserver=vserver_name)

security_services = network_info.get('security_services')
try:
self._setup_network_for_vserver(
vserver_name, vserver_client, network_info, ipspace_name,
security_services=security_services, nfs_config=nfs_config)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Failed to configure Vserver.")
# NOTE(dviroel): At this point, the lock was already
# acquired by the caller of _create_vserver.
self._delete_vserver(vserver_name,
security_services=security_services,
needs_lock=False)

def _setup_network_for_vserver(self, vserver_name, vserver_client,
network_info, ipspace_name,
enable_nfs=True, security_services=None,
nfs_config=None):
self._create_vserver_lifs(vserver_name,
vserver_client,
network_info,
ipspace_name)

self._create_vserver_admin_lif(vserver_name,
vserver_client,
network_info,
ipspace_name)

self._create_vserver_routes(vserver_client,
network_info)
if enable_nfs:
vserver_client.enable_nfs(
self.configuration.netapp_enabled_share_protocols,
nfs_config=nfs_config)

security_services = network_info.get('security_services')
if security_services:
self._client.setup_security_services(security_services,
vserver_client,
vserver_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Failed to configure Vserver.")
# NOTE(dviroel): At this point, the lock was already acquired
# by the caller of _create_vserver.
self._delete_vserver(vserver_name,
security_services=security_services,
needs_lock=False)
if security_services:
self._client.setup_security_services(security_services,
vserver_client,
vserver_name)

def _get_valid_ipspace_name(self, network_id):
"""Get IPspace name according to network id."""
@@ -376,6 +410,21 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
ip_address, netmask, vlan, node_name, port, vserver_name,
lif_name, ipspace_name, mtu)

@na_utils.trace
def _create_port_and_broadcast_domain(self, ipspace_name, network_info):
nodes = self._client.list_cluster_nodes()
node_network_info = zip(nodes, network_info['network_allocations'])

for node_name, network_allocation in node_network_info:

port = self._get_node_data_port(node_name)
vlan = network_allocation['segmentation_id']
network_mtu = network_allocation.get('mtu')
mtu = network_mtu or DEFAULT_MTU

self._client.create_port_and_broadcast_domain(
node_name, port, vlan, mtu, ipspace_name)

@na_utils.trace
def get_network_allocations_number(self):
"""Get number of network interfaces to be created."""
@@ -415,6 +464,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary(

vserver_client = self._get_api_client(vserver=vserver)
network_interfaces = vserver_client.get_network_interfaces()
snapmirror_policies = self._client.get_snapmirror_policies(vserver)

interfaces_on_vlans = []
vlans = []
@@ -430,6 +480,11 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
vlan_id = None

def _delete_vserver_without_lock():
# NOTE(dviroel): always delete all policies before deleting the
# vserver
for policy in snapmirror_policies:
vserver_client.delete_snapmirror_policy(policy)

# NOTE(dviroel): Attempt to delete all vserver peering
# created by replication
self._delete_vserver_peers(vserver)
@@ -437,13 +492,17 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
self._client.delete_vserver(vserver,
vserver_client,
security_services=security_services)
ipspace_deleted = False
if (ipspace_name and ipspace_name not in CLUSTER_IPSPACES
and not self._client.ipspace_has_data_vservers(
ipspace_name)):
self._client.delete_ipspace(ipspace_name)
ipspace_deleted = True

self._delete_vserver_vlans(interfaces_on_vlans)
if not ipspace_name or ipspace_deleted:
# NOTE(dviroel): only delete vlans if they are not being used
# by any ipspace or data vserver.
self._delete_vserver_vlans(interfaces_on_vlans)

@utils.synchronized('netapp-VLAN-%s' % vlan_id, external=True)
def _delete_vserver_with_lock():
@@ -592,8 +651,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary(

def _get_snapmirrors(self, vserver, peer_vserver):
return self._client.get_snapmirrors(
source_vserver=vserver, source_volume=None,
destination_vserver=peer_vserver, destination_volume=None)
source_vserver=vserver, dest_vserver=peer_vserver)

def _get_vservers_from_replicas(self, context, replica_list, new_replica):
active_replica = self.find_active_replica(replica_list)
@@ -706,10 +764,13 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
extra_specs = share_types.get_extra_specs_from_share(share)
nfs_config = self._get_nfs_config_provisioning_options(extra_specs)

# Avoid the reuse of 'dp_destination' vservers:
for share_server in share_servers:
if self._check_reuse_share_server(share_server, nfs_config,
share_group=share_group):
return share_server

# There is no compatible share server to be reused
return None

@na_utils.trace
@@ -720,6 +781,16 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
share_server['id']):
return False

backend_name = share_utils.extract_host(share_server['host'],
level='backend_name')
vserver_name, client = self._get_vserver(share_server,
backend_name=backend_name)
vserver_info = client.get_vserver_info(vserver_name)
if (vserver_info.get('operational_state') != 'running'
or vserver_info.get('state') != 'running'
or vserver_info.get('subtype') != 'default'):
return False

if self.is_nfs_config_supported:
# NOTE(felipe_rodrigues): Do not check that the share nfs_config
# matches with the group nfs_config, because the API guarantees
@@ -799,3 +870,417 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return (super(NetAppCmodeMultiSVMFileStorageLibrary, self).
manage_existing(share, driver_options,
share_server=share_server))

@na_utils.trace
def share_server_migration_check_compatibility(
self, context, source_share_server, dest_host, old_share_network,
new_share_network, shares_request_spec):

not_compatible = {
'compatible': False,
'writable': None,
'nondisruptive': None,
'preserve_snapshots': None,
'migration_cancel': None,
'migration_get_progress': None,
'share_network_id': None
}

# We need cluster creds, of course
if not self._have_cluster_creds:
msg = _("Cluster credentials have not been configured with this "
"share driver. Cannot perform server migration operation.")
LOG.error(msg)
return not_compatible

# Vserver will spread across aggregates in this implementation
if share_utils.extract_host(dest_host, level='pool') is not None:
msg = _("Cannot perform server migration to a specific pool. "
"Please choose a destination host 'host@backend' as "
"destination.")
LOG.error(msg)
return not_compatible

src_backend_name = share_utils.extract_host(
source_share_server['host'], level='backend_name')
src_vserver, src_client = self._get_vserver(
source_share_server, backend_name=src_backend_name)
dest_backend_name = share_utils.extract_host(dest_host,
level='backend_name')
# Block migration within the same backend.
if src_backend_name == dest_backend_name:
msg = _("Cannot perform server migration within the same backend. "
"Please choose a destination host different from the "
"source.")
LOG.error(msg)
return not_compatible

src_cluster_name = src_client.get_cluster_name()
# NOTE(dviroel): This call is supposed to be made on the destination host
dest_cluster_name = self._client.get_cluster_name()
# Must be in different clusters too, SVM-DR restriction
if src_cluster_name == dest_cluster_name:
msg = _("Cannot perform server migration within the same cluster. "
"Please choose a destination host that's in a different "
"cluster.")
LOG.error(msg)
return not_compatible

# Check for SVM DR support
# NOTE(dviroel): These clients can only be used for non-tunneling
# requests.
dst_client = data_motion.get_client_for_backend(dest_backend_name,
vserver_name=None)
if (not src_client.is_svm_dr_supported()
or not dst_client.is_svm_dr_supported()):
msg = _("Cannot perform server migration because at leat one of "
"the backends doesn't support SVM DR.")
LOG.error(msg)
return not_compatible

# Blocking different security services for now
if old_share_network['id'] != new_share_network['id']:
new_sec_services = new_share_network.get('security_services', [])
old_sec_services = old_share_network.get('security_services', [])
if new_sec_services or old_sec_services:
new_sec_serv_ids = [ss['id'] for ss in new_sec_services]
old_sec_serv_ids = [ss['id'] for ss in old_sec_services]
if not set(new_sec_serv_ids) == set(old_sec_serv_ids):
msg = _("Cannot perform server migration for different "
"security services. Please choose a suitable "
"share network that matches the source security "
"service.")
LOG.error(msg)
return not_compatible

pools = self._get_pools()
# Check 'netapp_flexvol_encryption' and 'revert_to_snapshot_support'
specs_to_validate = ('netapp_flexvol_encryption',
'revert_to_snapshot_support')
for req_spec in shares_request_spec.get('shares_req_spec', []):
extra_specs = req_spec.get('share_type', {}).get('extra_specs', {})
for spec in specs_to_validate:
if extra_specs.get(spec) and not pools[0][spec]:
msg = _("Cannot perform server migration since the "
"destination host doesn't support the required "
"extra-spec %s.") % spec
LOG.error(msg)
return not_compatible
# TODO(dviroel): disk_type extra-spec

# Check capacity
server_total_size = (shares_request_spec.get('shares_size', 0) +
shares_request_spec.get('snapshots_size', 0))
# NOTE(dviroel): If the backend has a 'max_over_subscription_ratio'
# configured and greater than 1, we'll consider thin provisioning
# enabled for all shares.
thin_provisioning = self.configuration.max_over_subscription_ratio > 1
if self.configuration.netapp_server_migration_check_capacity is True:
if not self._check_capacity_compatibility(pools, thin_provisioning,
server_total_size):
msg = _("Cannot perform server migration because destination "
"host doesn't have enough free space.")
LOG.error(msg)
return not_compatible

compatibility = {
'compatible': True,
'writable': True,
'nondisruptive': False,
'preserve_snapshots': True,
'share_network_id': new_share_network['id'],
'migration_cancel': True,
'migration_get_progress': False,
}

return compatibility

def share_server_migration_start(self, context, source_share_server,
dest_share_server, share_instances,
snapshot_instances):
"""Start share server migration using SVM DR.

1. Create vserver peering between source and destination
2. Create SnapMirror
"""
src_backend_name = share_utils.extract_host(
source_share_server['host'], level='backend_name')
src_vserver, src_client = self._get_vserver(
share_server=source_share_server, backend_name=src_backend_name)
src_cluster = src_client.get_cluster_name()

dest_backend_name = share_utils.extract_host(
dest_share_server['host'], level='backend_name')
dest_vserver, dest_client = self._get_vserver(
share_server=dest_share_server, backend_name=dest_backend_name)
dest_cluster = dest_client.get_cluster_name()

# 1. Check and create vserver peer if needed
if not self._get_vserver_peers(dest_vserver, src_vserver):
# Request vserver peer creation from destination to source
# NOTE(dviroel): vserver peering rollback is handled by
# '_delete_vserver' function.
dest_client.create_vserver_peer(
dest_vserver, src_vserver,
peer_cluster_name=src_cluster)

# Accepts the vserver peering using active replica host's
# client (inter-cluster only)
if dest_cluster != src_cluster:
src_client.accept_vserver_peer(src_vserver, dest_vserver)

# 2. Create SnapMirror
dm_session = data_motion.DataMotionSession()
try:
dm_session.create_snapmirror_svm(source_share_server,
dest_share_server)
except Exception:
# NOTE(dviroel): vserver peer delete will be handled on vserver
# teardown
dm_session.cancel_snapmirror_svm(source_share_server,
dest_share_server)
msg_args = {
'src': source_share_server['id'],
'dest': dest_share_server['id'],
}
msg = _('Could not initialize SnapMirror between %(src)s and '
'%(dest)s vservers.') % msg_args
raise exception.NetAppException(message=msg)

msg_args = {
'src': source_share_server['id'],
'dest': dest_share_server['id'],
}
msg = _('Starting share server migration from %(src)s to %(dest)s.')
LOG.info(msg, msg_args)

def _get_snapmirror_svm(self, source_share_server, dest_share_server):
dm_session = data_motion.DataMotionSession()
try:
snapmirrors = dm_session.get_snapmirrors_svm(
source_share_server, dest_share_server)
except netapp_api.NaApiError:
msg_args = {
'src': source_share_server['id'],
'dest': dest_share_server['id']
}
msg = _("Could not retrieve snapmirrors between source "
"%(src)s and destination %(dest)s vServers.") % msg_args
LOG.exception(msg)
raise exception.NetAppException(message=msg)

return snapmirrors

@na_utils.trace
def share_server_migration_continue(self, context, source_share_server,
dest_share_server, share_instances,
snapshot_instances):
"""Continues a share server migration using SVM DR."""
snapmirrors = self._get_snapmirror_svm(source_share_server,
dest_share_server)
if not snapmirrors:
msg_args = {
'src': source_share_server['id'],
'dest': dest_share_server['id']
}
msg = _("No snapmirror relationship was found between source "
"%(src)s and destination %(dest)s vServers.") % msg_args
LOG.exception(msg)
raise exception.NetAppException(message=msg)

snapmirror = snapmirrors[0]
in_progress_status = ['preparing', 'transferring', 'finalizing']
mirror_state = snapmirror.get('mirror-state')
status = snapmirror.get('relationship-status')
if mirror_state != 'snapmirrored' and status in in_progress_status:
LOG.debug("Data transfer still in progress.")
return False
elif mirror_state == 'snapmirrored' and status == 'idle':
LOG.info("Source and destination vServers are now snapmirrored.")
return True

msg = _("Snapmirror is not ready yet. The current mirror state is "
"'%(mirror_state)s' and relationship status is '%(status)s'.")
msg_args = {
'mirror_state': mirror_state,
'status': status,
}
LOG.debug(msg, msg_args)
return False

@na_utils.trace
def share_server_migration_complete(self, context, source_share_server,
dest_share_server, share_instances,
snapshot_instances, new_network_alloc):
"""Completes share server migration using SVM DR.

1. Do a last SnapMirror update.
2. Quiesce, abort and then break the relationship.
3. Stop the source vserver
4. Configure network interfaces in the destination vserver
5. Start the destination vserver
6. Delete and release the snapmirror
7. Build the list of export_locations for each share
8. Release all resources from the source share server
"""
dm_session = data_motion.DataMotionSession()
try:
# 1. Start an update to try to get a last minute transfer before we
# quiesce and break
dm_session.update_snapmirror_svm(source_share_server,
dest_share_server)
except exception.StorageCommunicationException:
# Ignore any errors since the current source may be unreachable
pass

src_backend_name = share_utils.extract_host(
source_share_server['host'], level='backend_name')
src_vserver, src_client = self._get_vserver(
share_server=source_share_server, backend_name=src_backend_name)

dest_backend_name = share_utils.extract_host(
dest_share_server['host'], level='backend_name')
dest_vserver, dest_client = self._get_vserver(
share_server=dest_share_server, backend_name=dest_backend_name)
try:
# 2. Attempt to quiesce, abort and then break SnapMirror
dm_session.quiesce_and_break_snapmirror_svm(source_share_server,
dest_share_server)
# NOTE(dviroel): Let's wait until the destination vserver is
# promoted to 'default' and in 'running' state before starting to
# shut down the source.
dm_session.wait_for_vserver_state(
dest_vserver, dest_client, subtype='default',
state='running', operational_state='stopped',
timeout=(self.configuration.
netapp_server_migration_state_change_timeout))

# 3. Stop source vserver
src_client.stop_vserver(src_vserver)

# 4. Setup network configuration
ipspace_name = dest_client.get_vserver_ipspace(dest_vserver)

# NOTE(dviroel): Security service and NFS configuration should be
# handled by SVM DR, so no changes will be made here.
vlan = new_network_alloc['segmentation_id']

@utils.synchronized('netapp-VLAN-%s' % vlan, external=True)
def setup_network_for_destination_vserver():
self._setup_network_for_vserver(
dest_vserver, dest_client, new_network_alloc, ipspace_name,
enable_nfs=False,
security_services=None)

setup_network_for_destination_vserver()

# 5. Start the destination.
dest_client.start_vserver(dest_vserver)

except Exception:
# Try to recover source vserver
try:
src_client.start_vserver(src_vserver)
except Exception:
LOG.warning("Unable to recover source share server after a "
"migration failure.")
# Destroy any snapmirror and make the destination vserver have its
# subtype set to 'default'.
dm_session.cancel_snapmirror_svm(source_share_server,
dest_share_server)
# Rollback resources transferred to the destination
for instance in share_instances:
self._delete_share(instance, dest_client, remove_export=False)

msg_args = {
'src': source_share_server['id'],
'dest': dest_share_server['id'],
}
msg = _('Could not complete the migration between %(src)s and '
'%(dest)s vservers.') % msg_args
raise exception.NetAppException(message=msg)

# 6. Delete/release snapmirror
dm_session.delete_snapmirror_svm(source_share_server,
dest_share_server)

# 7. Build a dict with shares/snapshot location updates
# NOTE(dviroel): For SVM DR, the share names aren't modified, only the
# export_locations are updated due to network changes.
share_updates = {}
for instance in share_instances:
# Get the volume to find out the associated aggregate
try:
share_name = self._get_backend_share_name(instance['id'])
volume = dest_client.get_volume(share_name)
except Exception:
msg_args = {
'src': source_share_server['id'],
'dest': dest_share_server['id'],
}
msg = _('Could not complete the migration between %(src)s and '
'%(dest)s vservers. One of the shares was not found '
'in the destination vserver.') % msg_args
raise exception.NetAppException(message=msg)

export_locations = self._create_export(
instance, dest_share_server, dest_vserver, dest_client,
clear_current_export_policy=False,
ensure_share_already_exists=True)

share_updates.update({
instance['id']: {
'export_locations': export_locations,
'pool_name': volume.get('aggregate')
}})

# NOTE(dviroel): Nothing to update in snapshot instances since the
# provider location didn't change.

# 8. Release source share resources
for instance in share_instances:
self._delete_share(instance, src_client, remove_export=True)

# NOTE(dviroel): source share server deletion must be triggered by
# the manager after finishing the migration
LOG.info('Share server migration completed.')
return {
'share_updates': share_updates,
}

def share_server_migration_cancel(self, context, source_share_server,
dest_share_server, shares, snapshots):
"""Cancel a share server migration that is using SVM DR."""

dm_session = data_motion.DataMotionSession()
dest_backend_name = share_utils.extract_host(dest_share_server['host'],
level='backend_name')
dest_vserver, dest_client = self._get_vserver(
share_server=dest_share_server, backend_name=dest_backend_name)

try:
snapmirrors = self._get_snapmirror_svm(source_share_server,
dest_share_server)
if snapmirrors:
dm_session.cancel_snapmirror_svm(source_share_server,
dest_share_server)
# Do a simple volume cleanup in the destination vserver
for instance in shares:
self._delete_share(instance, dest_client, remove_export=False)

except Exception:
msg_args = {
'src': source_share_server['id'],
'dest': dest_share_server['id'],
}
msg = _('Unable to cancel SnapMirror relationship between %(src)s '
'and %(dest)s vservers.') % msg_args
raise exception.NetAppException(message=msg)

LOG.info('Share server migration was cancelled.')

def share_server_migration_get_progress(self, context, src_share_server,
dest_share_server, shares,
snapshots):
# TODO(dviroel): get snapmirror info to infer the progress
return {'total_progress': 0}
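
Since share_server_migration_check_compatibility reports incompatibility
through a plain dict rather than by raising, a minimal sketch of how a
caller might interpret the result is shown below; the keys match the
not_compatible/compatibility dicts in this diff, while the helper itself
is hypothetical.

def can_migrate(driver, context, share_server, dest_host,
                old_share_network, new_share_network, shares_request_spec):
    """Hypothetical caller-side handling of the compatibility result."""
    result = driver.share_server_migration_check_compatibility(
        context, share_server, dest_host, old_share_network,
        new_share_network, shares_request_spec)

    if not result['compatible']:
        # Not compatible: every capability flag is returned as None.
        return False

    # With this patch, a compatible destination means shares stay writable
    # during the copy, the cutover is disruptive, snapshots are preserved,
    # cancellation is supported, and progress reporting is not available.
    return bool(result['writable'] and result['preserve_snapshots']
                and result['migration_cancel'])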

manila/share/drivers/netapp/dataontap/protocols/cifs_cmode.py (+8, -2)

@@ -29,9 +29,15 @@ class NetAppCmodeCIFSHelper(base.NetAppBaseHelper):

@na_utils.trace
def create_share(self, share, share_name,
clear_current_export_policy=True):
clear_current_export_policy=True,
ensure_share_already_exists=False):
"""Creates CIFS share on Data ONTAP Vserver."""
self._client.create_cifs_share(share_name)
if not ensure_share_already_exists:
self._client.create_cifs_share(share_name)
elif not self._client.cifs_share_exists(share_name):
msg = _("The expected CIFS share %(share_name)s was not found.")
msg_args = {'share_name': share_name}
raise exception.NetAppException(msg % msg_args)
if clear_current_export_policy:
self._client.remove_cifs_share_access(share_name, 'Everyone')



manila/share/drivers/netapp/dataontap/protocols/nfs_cmode.py (+5, -1)

@@ -41,8 +41,12 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):

@na_utils.trace
def create_share(self, share, share_name,
clear_current_export_policy=True):
clear_current_export_policy=True,
ensure_share_already_exists=False):
"""Creates NFS share."""
# TODO(dviroel): Ensure that the NFS share already exists if
# ensure_share_already_exists is True. However, no conflicts are
# expected here since no share creation operation is performed.
if clear_current_export_policy:
self._client.clear_nfs_export_policy_for_volume(share_name)
self._ensure_export_policy(share, share_name)


manila/share/drivers/netapp/options.py (+31, -2)

@@ -111,7 +111,11 @@ netapp_provisioning_opts = [
"nothing will be changed during startup. This will not "
"affect new shares, which will have their snapshot "
"directory always visible, unless toggled by the share "
"type extra spec 'netapp:hide_snapdir'."), ]
"type extra spec 'netapp:hide_snapdir'."),
cfg.StrOpt('netapp_snapmirror_policy_name_svm_template',
help='NetApp SnapMirror policy name template for Storage '
'Virtual Machines (Vservers).',
default='snapmirror_policy_%(share_server_id)s'), ]

netapp_cluster_opts = [
cfg.StrOpt('netapp_vserver',
@@ -145,6 +149,11 @@ netapp_data_motion_opts = [
help='The maximum time in seconds to wait for existing '
'snapmirror transfers to complete before aborting when '
'promoting a replica.'),
cfg.IntOpt('netapp_snapmirror_release_timeout',
min=0,
default=3600, # One Hour
help='The maximum time in seconds to wait for a snapmirror '
'release when breaking snapmirror relationships.'),
cfg.IntOpt('netapp_volume_move_cutover_timeout',
min=0,
default=3600, # One Hour,
@@ -162,7 +171,27 @@ netapp_data_motion_opts = [
default=3600, # One Hour,
help='The maximum time in seconds that migration cancel '
'waits for all migration operations be completely '
'aborted.'), ]
'aborted.'),
cfg.IntOpt('netapp_server_migration_state_change_timeout',
min=0,
default=3600, # One hour,
help='The maximum time in seconds that a share server '
'migration waits for a vserver to change its internal '
'states.'),
cfg.BoolOpt('netapp_server_migration_check_capacity',
default=True,
help='Specify if the capacity check must be made by the '
'driver while performing a share server migration. '
'If enabled, the driver will validate if the destination '
'backend can hold all shares and snapshots capacities '
'from the source share server.'),
]

CONF = cfg.CONF
CONF.register_opts(netapp_proxy_opts)
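
For reference, the new migration options can be exercised in isolation; a minimal sketch using oslo.config directly (outside manila, for illustration only; in the driver they are read through self.configuration):

from oslo_config import cfg

CONF = cfg.CONF
CONF.register_opts([
    cfg.IntOpt('netapp_server_migration_state_change_timeout', min=0,
               default=3600),
    cfg.BoolOpt('netapp_server_migration_check_capacity', default=True),
])

# Defaults: one hour for Vserver state transitions, capacity check enabled.
print(CONF.netapp_server_migration_state_change_timeout)  # 3600
print(CONF.netapp_server_migration_check_capacity)        # True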


+ 81
- 0
manila/tests/share/drivers/netapp/dataontap/client/fakes.py View File

@@ -74,6 +74,17 @@ DELETED_EXPORT_POLICIES = {
}
QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name'
QOS_MAX_THROUGHPUT = '5000B/s'
VSERVER_TYPE_DEFAULT = 'default'
VSERVER_TYPE_DP_DEST = 'dp_destination'
VSERVER_OP_STATE_RUNNING = 'running'
VSERVER_STATE = 'running'
VSERVER_INFO = {
'name': VSERVER_NAME,
'subtype': VSERVER_TYPE_DEFAULT,
'operational_state': VSERVER_OP_STATE_RUNNING,
'state': VSERVER_STATE,
}
SNAPMIRROR_POLICY_NAME = 'fake_snapmirror_policy'

USER_NAME = 'fake_user'

@@ -198,6 +209,20 @@ VSERVER_GET_ITER_RESPONSE = etree.XML("""
</results>
""" % {'fake_vserver': VSERVER_NAME})

VSERVER_GET_ITER_RESPONSE_INFO = etree.XML("""
<results status="passed">
<attributes-list>
<vserver-info>
<operational-state>%(operational_state)s</operational-state>
<state>%(state)s</state>
<vserver-name>%(name)s</vserver-name>
<vserver-subtype>%(subtype)s</vserver-subtype>
</vserver-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % VSERVER_INFO)

VSERVER_GET_ROOT_VOLUME_NAME_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
@@ -1702,6 +1727,18 @@ CIFS_SHARE_ACCESS_CONTROL_GET_ITER = etree.XML("""
</results>
""" % {'volume': SHARE_NAME})

CIFS_SHARE_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<cifs-share>
<share-name>%(share_name)s</share-name>
<vserver>fake_vserver</vserver>
</cifs-share>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'share_name': SHARE_NAME})

NFS_EXPORT_RULES = ('10.10.10.10', '10.10.10.20')

NFS_EXPORTFS_LIST_RULES_2_NO_RULES_RESPONSE = etree.XML("""
@@ -2373,6 +2410,7 @@ SNAPMIRROR_GET_ITER_FILTERED_RESPONSE = etree.XML("""
<destination-volume>fake_destination_volume</destination-volume>
<is-healthy>true</is-healthy>
<mirror-state>snapmirrored</mirror-state>
<relationship-status>idle</relationship-status>
<schedule>daily</schedule>
<source-vserver>fake_source_vserver</source-vserver>
<source-volume>fake_source_volume</source-volume>
@@ -2382,6 +2420,35 @@ SNAPMIRROR_GET_ITER_FILTERED_RESPONSE = etree.XML("""
</results>
""")

SNAPMIRROR_GET_ITER_FILTERED_RESPONSE_2 = etree.XML("""
<results status="passed">
<attributes-list>
<snapmirror-info>
<source-vserver>fake_source_vserver</source-vserver>
<destination-vserver>fake_destination_vserver</destination-vserver>
<mirror-state>snapmirrored</mirror-state>
<relationship-status>idle</relationship-status>
</snapmirror-info>
</attributes-list>
<num-records>1</num-records>
</results>
""")

SNAPMIRROR_GET_DESTINATIONS_ITER_FILTERED_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapmirror-destination-info>
<destination-location>fake_destination_vserver:</destination-location>
<destination-vserver>fake_destination_vserver</destination-vserver>
<relationship-id>fake_relationship_id</relationship-id>
<source-location>fake_source_vserver:</source-location>
<source-vserver>fake_source_vserver</source-vserver>
</snapmirror-destination-info>
</attributes-list>
<num-records>1</num-records>
</results>
""")

SNAPMIRROR_INITIALIZE_RESULT = etree.XML("""
<results status="passed">
<result-status>succeeded</result-status>
@@ -2605,6 +2672,20 @@ QOS_POLICY_GROUP_GET_ITER_RESPONSE = etree.XML("""
'max_throughput': QOS_MAX_THROUGHPUT,
})

SNAPMIRROR_POLICY_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapmirror-policy-info>
<policy-name>%(policy_name)s</policy-name>
<vserver-name>%(vserver_name)s</vserver-name>
</snapmirror-policy-info>
</attributes-list>
<num-records>1</num-records>
</results>""" % {
'policy_name': SNAPMIRROR_POLICY_NAME,
'vserver_name': VSERVER_NAME,
})

FAKE_VOL_XML = """<volume-info>
<name>open123</name>
<state>online</state>


+ 529
- 41
manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py View File

@@ -472,6 +472,31 @@ class NetAppClientCmodeTestCase(test.TestCase):
mock.call('vserver-create', vserver_create_args),
mock.call('vserver-modify', vserver_modify_args)])

def test_create_vserver_dp_destination(self):

self.client.features.add_feature('IPSPACES')
self.mock_object(self.client, 'send_request')

vserver_create_args = {
'vserver-name': fake.VSERVER_NAME,
'ipspace': fake.IPSPACE_NAME,
'vserver-subtype': fake.VSERVER_TYPE_DP_DEST,
}
vserver_modify_args = {
'aggr-list': [{'aggr-name': aggr_name} for aggr_name
in fake.SHARE_AGGREGATE_NAMES],
'vserver-name': fake.VSERVER_NAME
}

self.client.create_vserver_dp_destination(
fake.VSERVER_NAME,
fake.SHARE_AGGREGATE_NAMES,
fake.IPSPACE_NAME)

self.client.send_request.assert_has_calls([
mock.call('vserver-create', vserver_create_args),
mock.call('vserver-modify', vserver_modify_args)])

def test_create_vserver_ipspaces_not_supported(self):

self.assertRaises(exception.NetAppException,
@@ -680,8 +705,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
def test_delete_vserver_no_volumes(self):

self.mock_object(self.client,
'vserver_exists',
mock.Mock(return_value=True))
'get_vserver_info',
mock.Mock(return_value=fake.VSERVER_INFO))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
@@ -707,8 +732,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
def test_delete_vserver_one_volume(self):

self.mock_object(self.client,
'vserver_exists',
mock.Mock(return_value=True))
'get_vserver_info',
mock.Mock(return_value=fake.VSERVER_INFO))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
@@ -734,8 +759,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
def test_delete_vserver_one_volume_already_offline(self):

self.mock_object(self.client,
'vserver_exists',
mock.Mock(return_value=True))
'get_vserver_info',
mock.Mock(return_value=fake.VSERVER_INFO))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
@@ -765,8 +790,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
def test_delete_vserver_one_volume_api_error(self):

self.mock_object(self.client,
'vserver_exists',
mock.Mock(return_value=True))
'get_vserver_info',
mock.Mock(return_value=fake.VSERVER_INFO))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
@@ -787,8 +812,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
def test_delete_vserver_multiple_volumes(self):

self.mock_object(self.client,
'vserver_exists',
mock.Mock(return_value=True))
'get_vserver_info',
mock.Mock(return_value=fake.VSERVER_INFO))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
@@ -804,8 +829,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
def test_delete_vserver_not_found(self):

self.mock_object(self.client,
'vserver_exists',
mock.Mock(return_value=False))
'get_vserver_info',
mock.Mock(return_value=None))

self.client.delete_vserver(fake.VSERVER_NAME,
self.vserver_client)
@@ -5771,7 +5796,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
def test_create_snapmirror(self, schedule, policy):
self.mock_object(self.client, 'send_request')

self.client.create_snapmirror(
self.client.create_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
schedule=schedule, policy=policy)
@@ -5795,7 +5820,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
code=netapp_api.ERELATION_EXISTS))
self.mock_object(self.client, 'send_request', mock_send_req)

self.client.create_snapmirror(
self.client.create_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)

@@ -5814,11 +5839,29 @@ class NetAppClientCmodeTestCase(test.TestCase):
code=0))
self.mock_object(self.client, 'send_request', mock_send_req)

self.assertRaises(netapp_api.NaApiError, self.client.create_snapmirror,
self.assertRaises(netapp_api.NaApiError,
self.client.create_snapmirror_vol,
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)
self.assertTrue(self.client.send_request.called)

def test_create_snapmirror_svm(self):
self.mock_object(self.client, 'send_request')

self.client.create_snapmirror_svm(fake.SM_SOURCE_VSERVER,
fake.SM_DEST_VSERVER,
max_transfer_rate='fake_xfer_rate')

snapmirror_create_args = {
'source-vserver': fake.SM_SOURCE_VSERVER,
'destination-vserver': fake.SM_DEST_VSERVER,
'relationship-type': 'data_protection',
'identity-preserve': 'true',
'max-transfer-rate': 'fake_xfer_rate'
}
self.client.send_request.assert_has_calls([
mock.call('snapmirror-create', snapmirror_create_args)])
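
The expected-call assertion above pins down the new client API. A hedged reconstruction of create_snapmirror_svm(), written as a free function over a client object and consistent with this test; details such as schedule/policy handling and tolerance of ERELATION_EXISTS errors are omitted here:

def create_snapmirror_svm(client, source_vserver, dest_vserver,
                          max_transfer_rate=None):
    """Sketch: create an SVM-DR (vserver-level) SnapMirror relationship."""
    api_args = {
        'source-vserver': source_vserver,
        'destination-vserver': dest_vserver,
        'relationship-type': 'data_protection',
        'identity-preserve': 'true',
    }
    if max_transfer_rate is not None:
        api_args['max-transfer-rate'] = max_transfer_rate
    client.send_request('snapmirror-create', api_args)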

@ddt.data(
{
'source_snapshot': 'fake_snapshot',
@@ -5837,7 +5880,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
'send_request',
mock.Mock(return_value=api_response))

result = self.client.initialize_snapmirror(
result = self.client.initialize_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
source_snapshot=source_snapshot,
@@ -5865,12 +5908,38 @@ class NetAppClientCmodeTestCase(test.TestCase):
}
self.assertEqual(expected, result)

def test_initialize_snapmirror_svm(self):

api_response = netapp_api.NaElement(fake.SNAPMIRROR_INITIALIZE_RESULT)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))

result = self.client.initialize_snapmirror_svm(fake.SM_SOURCE_VSERVER,
fake.SM_DEST_VSERVER)

snapmirror_initialize_args = {
'source-location': fake.SM_SOURCE_VSERVER + ':',
'destination-location': fake.SM_DEST_VSERVER + ':',
}
self.client.send_request.assert_has_calls([
mock.call('snapmirror-initialize', snapmirror_initialize_args)])

expected = {
'operation-id': None,
'status': 'succeeded',
'jobid': None,
'error-code': None,
'error-message': None
}
self.assertEqual(expected, result)
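
As the assertion shows, SVM-DR operations address their endpoints as "<vserver>:" locations rather than vserver/volume pairs. A hedged sketch consistent with the test, again written as a free function for illustration; the real method also parses the API result into the status dictionary asserted above:

def initialize_snapmirror_svm(client, source_vserver, dest_vserver):
    """Sketch: start the baseline transfer for an SVM-DR relationship."""
    api_args = {
        'source-location': source_vserver + ':',
        'destination-location': dest_vserver + ':',
    }
    return client.send_request('snapmirror-initialize', api_args)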

@ddt.data(True, False)
def test_release_snapmirror(self, relationship_info_only):

self.mock_object(self.client, 'send_request')

self.client.release_snapmirror(
self.client.release_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME,
relationship_info_only=relationship_info_only)
@@ -5887,14 +5956,35 @@ class NetAppClientCmodeTestCase(test.TestCase):
}
}
}

self.client.send_request.assert_has_calls([
mock.call('snapmirror-release-iter', snapmirror_release_args)])
mock.call('snapmirror-release-iter', snapmirror_release_args,
enable_tunneling=True)])

def test_release_snapmirror_svm(self):
self.mock_object(self.client, 'send_request')

self.client.release_snapmirror_svm(
fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER)

snapmirror_release_args = {
'query': {
'snapmirror-destination-info': {
'source-location': fake.SM_SOURCE_VSERVER + ':',
'destination-location': fake.SM_DEST_VSERVER + ':',
'relationship-info-only': 'false'
}
}
}
self.client.send_request.assert_has_calls([
mock.call('snapmirror-release-iter', snapmirror_release_args,
enable_tunneling=False)])

def test_quiesce_snapmirror(self):

self.mock_object(self.client, 'send_request')

self.client.quiesce_snapmirror(
self.client.quiesce_snapmirror_vol(
fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME,
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME)

@@ -5907,12 +5997,26 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_has_calls([
mock.call('snapmirror-quiesce', snapmirror_quiesce_args)])

def test_quiesce_snapmirror_svm(self):

self.mock_object(self.client, 'send_request')

self.client.quiesce_snapmirror_svm(
fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER)